Merge "arm/dt: msm8974-cdp: Add BUS voting and wakeup from XO support"
diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
index 23d78b5..0ba6ea2 100644
--- a/Documentation/ABI/testing/sysfs-class-devfreq
+++ b/Documentation/ABI/testing/sysfs-class-devfreq
@@ -11,7 +11,7 @@
Date: September 2011
Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
Description:
- The /sys/class/devfreq/.../governor shows the name of the
+ The /sys/class/devfreq/.../governor shows or sets the name of the
governor used by the corresponding devfreq object.
What: /sys/class/devfreq/.../cur_freq
@@ -19,15 +19,16 @@
Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
Description:
The /sys/class/devfreq/.../cur_freq shows the current
- frequency of the corresponding devfreq object.
+ frequency of the corresponding devfreq object. Same as
+ target_freq when get_cur_freq() is not implemented by
+ the devfreq driver.
-What: /sys/class/devfreq/.../central_polling
-Date: September 2011
-Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
+What: /sys/class/devfreq/.../target_freq
+Date: September 2012
+Contact: Rajagopal Venkat <rajagopal.venkat@linaro.org>
Description:
- The /sys/class/devfreq/.../central_polling shows whether
- the devfreq ojbect is using devfreq-provided central
- polling mechanism or not.
+ The /sys/class/devfreq/.../target_freq shows the next governor
+ predicted target frequency of the corresponding devfreq object.
What: /sys/class/devfreq/.../polling_interval
Date: September 2011
@@ -43,6 +44,17 @@
(/sys/class/devfreq/.../central_polling is 0), this value
may be useless.
+What: /sys/class/devfreq/.../trans_stat
+Date: October 2012
+Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
+Description:
+ This ABI shows the statistics of devfreq behavior on a
+ specific device. It shows the time spent in each state and
+ the number of transitions between states.
+ In order to activate this ABI, the devfreq target device
+ driver should provide the list of available frequencies
+ with its profile.
+
What: /sys/class/devfreq/.../userspace/set_freq
Date: September 2011
Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -50,3 +62,19 @@
The /sys/class/devfreq/.../userspace/set_freq shows and
sets the requested frequency for the devfreq object if
userspace governor is in effect.
+
+What: /sys/class/devfreq/.../available_frequencies
+Date: October 2012
+Contact: Nishanth Menon <nm@ti.com>
+Description:
+ The /sys/class/devfreq/.../available_frequencies shows
+ the available frequencies of the corresponding devfreq object.
+ This is a snapshot of available frequencies and not limited
+ by the min/max frequency restrictions.
+
+What: /sys/class/devfreq/.../available_governors
+Date: October 2012
+Contact: Nishanth Menon <nm@ti.com>
+Description:
+ The /sys/class/devfreq/.../available_governors shows
+ currently available governors in the system.
diff --git a/Documentation/arm/msm/n_smux.txt b/Documentation/arm/msm/n_smux.txt
new file mode 100644
index 0000000..c9123ad
--- /dev/null
+++ b/Documentation/arm/msm/n_smux.txt
@@ -0,0 +1,630 @@
+Introduction
+============
+
+The Serial Mux (SMUX) is a TTY Line Discipline that multiplexes
+multiple logical channels onto a single TTY serial channel. The
+logical channels are exposed through a kernel API.
+
+Companion adaptation drivers use the kernel API to expose logical
+channels as character devices (SMUX CTL - smux_ctl.c) to the user-space
+and as net devices (SMUX RMNET - msm_rmnet_smux.c) to the TCP/IP stack.
+
+Power control calls are supported to the physical serial driver to
+optimize power usage.
+
+
+Software description
+====================
+
+The Serial Mux driver will be similar in design to the SDIO DMUX and
+BAM DMUX drivers and will use the same multiplexing protocol with
+additional commands to support inactivity timeouts along with
+power-down and wake-up handshaking. Companion adaptation drivers will
+support data-plane traffic through TCP/IP (SMUX_RMNET) and control
+plane traffic through SMUX_CTL.
+
+
+ ttyHS0 RMNET[0..N] smuxctl[0..M]
+ | | |
+ | | |
+ | ------------------ -------------------
+ | | IP Framework | | VFS Framework |
+ | ------------------ -------------------
+ | | | | |
+ | | | | |
+ | ------- ------- ------- -------
+ | | Rmnet | | Rmnet | | CDEV | | CDEV |
+ | | Dev 0 |...| Dev N | | Dev 0 |...| Dev M |
+ | --------------------- --------------------- -------------
+ | | | | | | Other |
+ | | msm_rmnet_smux | | smux_ctl | | Kernel-only |
+ | | | | | | Clients |
+ | --------------------- --------------------- -------------
+ | | | | | |
+ | | | | | |
+ | | | | | |
+ | | | | | |
+ | ---------------------------------------------------
+ | |
+ | |
+ | |
+ | |
+ --------------- --------------------
+ | | | |
+ | TTY Framework | <- Line Discipline -> | SMUX |
+ | | | |
+ --------------- --------------------
+ | |
+ | |
+ --------------- |
+ | | |
+ | HS UART |<---------- Power API -----------
+ | |
+ ---------------
+ |
+ V
+ To Remote System
+
+
+Each logical channel will contain packet management structures
+including a watermark to ensure fair usage of the physical layer by
+competing logical channels. All data for logical channels will be
+processed in FIFO order.
+
+Once data has been queued with SMUX, processing, copying of data, and
+notification to clients will be done using a combination of the TTY
+framework notification context and a workqueue. The lifetime of all
+buffers is controlled by the clients with SMUX taking temporary
+ownership of the buffers for read and write operations.
+
+The physical transport is assumed to be perfect and all errors will be
+handled by notifying the client of the failure. Watermark support and
+round-robin scheduling ensure that individual logical channels do not
+starve other logical channels.
+
+Data stalls caused by failure of the remote system are handled by
+Subsystem Restart. The restart logic will notify clients of the
+failure and all read and write buffers will be returned to the client
+with an error notification.
+
+Design
+======
+
+The goals for SMUX are to:
+ 1) multiplex multiple logical channels into a single physical
+ channel
+ 2) support a kernel API
+ 3) provide power control of the physical layer
+
+In addition, the companion adapter modules have the goals:
+ 1) support userspace character-device clients (smux_ctl)
+ 2) support net devices through the TCP/IP stack (msm_rmnet_smux)
+
+Alternate designs that were considered include the 3GPP 27.010 MUX
+protocol implementations and the existing SDIO CMUX/DMUX implementations.
+
+The 3GPP 27.010 MUX protocol as implemented in both n_gsm.c and OpenEZX
+looked promising at first glance. However, upon further inspection,
+the implementations did not fully implement the power-control portions
+of the 27.010 MUX protocol. They also did not support kernel clients
+as they were designed to work only with userspace through TTY devices.
+The code was reviewed to determine the effort to add power-control
+signaling to the physical transport driver and to add a kernel API, but
+it was deemed that adding the API to support both of these behaviors
+would be difficult to do in a generic way such that it would be
+accepted by the upstream community.
+
+The SDIO CMUX/DMUX drivers do not have power control in them and the
+CMUX and DMUX drivers both require a separate physical channel. To use
+them, we would need to create an additional mux layer that would sit
+between CMUX/DMUX and the HS UART driver which would add another MUX
+header.
+
+Design - MUX Protocol
+=====================
+The MUX packet consists of a header (detailed below) and a payload.
+All values are in little-endian format and all reserved fields are set
+to zero unless otherwise mentioned in the individual command
+descriptions.
+
+Invalid commands and malformed commands will be logged to the kernel log
+as an error and ignored.
+
+ -----------------------------------------
+ |31 24| 16| 8| 0|
+ |----------|---------|----------|---------|
+ | Magic Number | Flags | CMD |
+ |----------|---------|----------|---------|
+ | Pad Len | LCID | Packet Length (N) |
+ |-----------------------------------------|
+ | Data Payload (0..N bytes) |
+ |-----------------------------------------|
+ | Pad Data (0..Pad Len bytes) |
+ -----------------------------------------
+
+Field definitions:
+ * Magic Number - always 0x33FC
+ * Flags - flags for individual commands
+ * CMD - SMUX command
+ * Pad Len - Padding in bytes at the end of the payload
+ * LCID - Logical channel ID
+ * Packet Length - Length of the data payload in bytes
+
+Commands
+ 0x0 - Data
+ 0x1 - Open Logical Channel
+ 0x2 - Close Logical Channel
+ 0x3 - Status
+ 0x4 - Power Control
+
+Data Command
+------------
+The Data command sends data on an already fully-opened logical channel.
+
+Flags:
+ * Bits 0:7 - Reserved
+
+Open Logical Channel Command
+----------------------------
+The Open command is a request to open a logical channel. Each channel
+will have a local and remote open flag. The remote open flag will be
+set to open when receiving an open command and responding with an open
+ACK. The local open flag is set to open when sending an open command
+and receiving an ACK.
+
+ Remote Side | Local Side
+ |
+ SMUX Client SMUX SMUX SMUX Client
+ | | | |
+ | Open | | |
+ |--------->| | |
+ | | Open Logical Channel | |
+ | |---------------------->| |
+ | | |--- |
+ | | | | Set Remote Open |
+ | | |<-- |
+ | | Open ACK | |
+ | |<----------------------| |
+ | |--- | |
+ | | | Set Local Open | |
+ | |<-- | |
+ | ... ... ...
+ | | | msm_smux_open() |
+ | | |<-----------------------|
+ | | Open Logical Channel | |
+ | |<----------------------| |
+ | | | |
+ | |--- | |
+ | | | Set Remote Open | |
+ | |<-- | |
+ | | Open ACK | |
+ | |---------------------->| |
+ | | |--- |
+ | | | | Set Local Open |
+ | | |<-- |
+ | | | notify(SMUX_CONNECTED) |
+ | | |----------------------->|
+
+
+ Logical channel is now fully open and can receive
+ and transmit data.
+
+No data shall be transmitted over the physical link for the logical
+channel unless the channel is open.
+
+Flags:
+ * Bit 0 - 1 = ACK
+ * Bit 1 - Power Collapse Enable
+ * Bit 2 - Remote Loopback Enable
+ * Bits 3:7 - Reserved
+
+Power Collapse Enable (bit 1) enables power-collapse handshaking when
+processing an open command. The first logical channel open command
+received from the remote side will set the global power control state
+and all subsequent open commands should use the same value of the Power
+Collapse bit. The value of this bit can be changed during runtime by
+closing all logical channels and then re-opening them with the new
+global state.
+
+If the protocol stack does not support power collapse and it receives
+an open command with the Power Collapse Enable bit set, then it
+shall respond with an open command with the Power Collapse Enable bit
+cleared.
+
+If Power Collapse is disabled, then Power Control Commands should not
+be sent.
+
+Remote Loopback Enable (bit 2) enables loopback support when data is
+received from the remote side. In this case, SMUX should echo the
+received data packet back to the sender.
+
+Close Logical Channel Command
+-----------------------------
+The Close command closes the logical channel and updates the internal
+open state flags. The remote open flag will be set to closed when
+receiving a close command and responding with an close ACK. The local
+open flag is set to closed when sending a close command and receiving an
+ACK.
+
+No data shall be transmitted over the physical link for the logical
+channel after receiving a close command and responding with the close
+ACK.
+
+Flags:
+ * Bit 0 - ACK (when set to 1)
+ * Bits 1:7 - Reserved
+
+
+Status Command
+--------------
+The Status Command updates the channel status signals which include four
+ITU v.24 status bits in the lower nibble of the flags field along with a
+logical channel flow-control signal. The v.24 signals are pass-through
+and do not affect the state of SMUX.
+
+The Logical Channel Flow Control bit will disable TX on the logical
+channel when set and send a flow-control notification to the logical
+channel client. Any further attempts to transmit will result in an
+error return code.
+
+Flags:
+ * Bit 0 - RTC (DTR/DSR)
+ * Bit 1 - RTR (RTS/CTS)
+ * Bit 2 - RI
+ * Bit 3 - DCD
+ * Bit 4 - Logical Channel Flow Control
+ * Bits 5:7 - Reserved
+
+
+Power Control Command
+---------------------
+The physical layer requires a variable amount of time to wake up from
+power collapse, reconfigure the hardware, and start processing data.
+Data may be lost until the wakeup has been completed. Because of this,
+a character-based wakeup method will be used to ensure that the remote
+side is active and ready before sending SMUX commands.
+
+If the remote side has previously requested power-down (boot-up state),
+then a wakeup request character is sent at periodic intervals (1 ms or
+8 character-widths, whichever is larger) until a wakeup-acknowledge character
+has been received. Normal transmit operations can then be performed. Once an
+activity timeout occurs, then a sleep vote should be sent to the remote side to
+let it know that the channel is no longer needed. The remote side should
+respond with an ACK.
+
+The following state diagram shows the full sequence of power state transitions.
+This state machine is identical on both the local and remote sides. The states
+marked "(internal)" are transitional states used in this driver that are not
+part of the power states tracked by the remote side. The edges are labeled
+using the format CONDITION:ACTION where condition is the guard condition that
+must be true for the edge to be taken and ACTION is the action that will be
+taken.
+
+ +--------------+ RX Sleep ACK || RX Sleep Request
+ :Flush and power-down | Powering |<---------+
+ UART +---------+ Down Flush | |
+ | | (internal) | |
+ | +--------------+ |
+ | ^ |
+ | | +-------+------+
+ v | | | |
+ +--------------+ | | Powering |
+ | | | | Down |<-----+
+Init --->| OFF | | | | |
+ | |------+ | +--------------+ |
+ | | | | |
+ +------+-------+ | | TX Sleep Request
+ | | | Complete
+ | | | |
+ Data ready to send | | |
+ :TX Wakeup Request | |RX Sleep Request |
+ | | |:TX Sleep ACK |
+ | | +------------+ +-------+------+
+ | | | | Turning Off^|
+ | | | | Flush |
+ | | | | (internal) |
+ | |RX Wakeup Request | +--------------+
+ | |:TX Wakeup ACK | ^
+ | | | |
+ | | | Inactivity Timeout
+ | +--------------+ | :TX Sleep Request
+ | | | |
+ v v | |
+ +--------------+ RX Wakeup ACK +-------+------+ |
+ | +------------------>| UP |+-----+
+ | Powering | | |
+ | Up +------------------>| Packet TX/RX |
+ +----->| | RX Wakeup Request | is now active|<------+
+ | +--+-----------+ :TX Wakeup ACK +----------+---+ |
+ | | | |
+ +---------+ +-----------+
+ Wakeup Request Timeout RX Wakeup Request
+ :TX Wakeup Request :TX Wakeup ACK
+
+
+In-band wakeup bytes:
+ * 0xfd - Wakeup Request
+ * 0xfe - Wakeup Acknowledge
+
+Flags:
+ * Bit 0 - ACK (when set to 1)
+ * Bit 1 - 1 = Sleep Request
+ * Bits 2:7 - Reserved
+
+Initial SMUX State
+------------------
+The boot-up state of SMUX is in power-disabled mode and all logical
+channels closed. Before sending any commands to open logical channels,
+the remote SMUX must be woken up.
+
+Power Management
+================
+
+Power management will consist of wakeup and shutdown control of the
+physical layer based upon an activity timeout. Wakelocks will be
+utilized to prevent the system from going to sleep while the transport
+is active. The activity timeout is anticipated to be 500 ms, but this
+is subject to tuning to meet power and performance specifications.
+
+SMP/multi-core
+==============
+
+Locking and synchronization will be done using mutexes or spinlocks
+where appropriate. The software will be structured such that locking
+can be kept to a minimum by only locking when items are added and
+removed from lists.
+
+Security
+========
+
+No new security issues are anticipated as communication with userspace
+is done through the existing TCP/IP and CDEV frameworks.
+
+Performance
+===========
+
+The throughput requirements for this design are on the order of 250Kbps
+so throughput concerns are not expected to be an issue. However, in
+the hope that this driver can be leveraged in the future instead of
+writing yet another multiplexing layer, performance will be considered
+when making design decisions.
+
+Interface
+=========
+
+The kernel API consists of commands to read and write data, vote for
+power, and verify the state of the logical channels.
+
+
+Open
+----
+int msm_smux_open(uint32_t lcid, void *priv,
+ void (*notify)(void *priv, int event_type, void *metadata),
+ int (*get_rx_buffer)(void *priv, void **pkt_priv,
+ void **buffer, int size))
+
+Open a logical channel. The channel will be first opened locally and
+an open command will be sent to the remote processor. Once the remote
+processor sends an open command, then the port will be fully open and
+ready for data operations -- the client will be notified with an
+SMUX_CONNECTED notification.
+
+For receive notifications, the driver will read the SMUX header into
+temporary storage and then when the logical channel and size are both
+known, the driver will call the get_rx_buffer() function to request a
+buffer for the data. The client should return 0 upon success or < 0
+using standard Linux error codes if an error occurred. If the error
+code is EAGAIN, then the call to get_rx_buffer() will be retried once,
+otherwise the data will be discarded by the driver and a kernel error
+message logged.
+
+Once the receive data has been processed, the notify() function will be
+called with metadata pointing to an instance of struct smux_meta_read
+and the event type will either be SMUX_READ_DONE for successful cases or
+SMUX_READ_FAIL for failure cases.
+
+/*
+ * Notification events that are passed to the notify() function.
+ *
+ * If the @metadata argument in the notifier is non-null, then it will
+ * point to the associated struct smux_meta_* structure.
+ */
+enum {
+ SMUX_CONNECTED, /* @metadata is null */
+ SMUX_DISCONNECTED,
+ SMUX_READ_DONE,
+ SMUX_READ_FAIL,
+ SMUX_WRITE_DONE,
+ SMUX_WRITE_FAIL,
+ SMUX_TIOCM_UPDATE,
+ SMUX_LOW_WM_HIT, /* @metadata is NULL */
+ SMUX_HIGH_WM_HIT, /* @metadata is NULL */
+};
+
+/*
+ * Metadata for SMUX_READ_DONE/SMUX_READ_FAIL notification
+ *
+ * @pkt_priv: Packet-specific private data
+ * @buffer: Buffer pointer returned by get_rx_buffer()
+ * @len: Buffer length returned by get_rx_buffer()
+ */
+struct smux_meta_read {
+ void *pkt_priv;
+ void *buffer;
+ int len;
+};
+
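+The following sketch shows one way a kernel client might use the Open
+API described above. It is illustrative only: the channel ID, the use
+of kmalloc() for receive buffers, and the <linux/smux.h> header
+location are assumptions, not part of this interface definition.
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/smux.h>		/* assumed header for the kernel API */
+
+#define MY_LCID 10		/* illustrative logical channel ID */
+
+static int my_get_rx_buffer(void *priv, void **pkt_priv,
+			    void **buffer, int size)
+{
+	void *buf = kmalloc(size, GFP_ATOMIC);
+
+	if (!buf)
+		return -EAGAIN;	/* SMUX retries this call once */
+
+	*pkt_priv = NULL;
+	*buffer = buf;
+	return 0;
+}
+
+static void my_notify(void *priv, int event_type, void *metadata)
+{
+	struct smux_meta_read *meta;
+
+	switch (event_type) {
+	case SMUX_CONNECTED:
+		/* channel is fully open; transmit may begin */
+		break;
+	case SMUX_READ_DONE:
+	case SMUX_READ_FAIL:
+		/* buffer ownership returns to the client */
+		meta = metadata;
+		kfree(meta->buffer);
+		break;
+	default:
+		break;
+	}
+}
+
+static int my_client_init(void)
+{
+	return msm_smux_open(MY_LCID, NULL, my_notify, my_get_rx_buffer);
+}
+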
+Close
+-----
+int msm_smux_close(uint32_t lcid)
+
+Closes a logical channel locally and sends a close command to the
+remote host.
+
+If there is pending transmit or receive data, then SMUX_WRITE_FAIL and
+SMUX_READ_FAIL notifications will be made to return ownership of the
+buffers to the client.
+
+Once the remote side of the port has been closed, the notify function
+will be called with the event SMUX_DISCONNECTED and metadata pointing
+to a struct smux_meta_disconnected structure. After this point, no
+further notifications will be performed.
+
+/*
+ * Metadata for SMUX_DISCONNECTED notification
+ *
+ * @is_ssr: Disconnect caused by subsystem restart
+ */
+struct smux_meta_disconnected {
+ int is_ssr;
+};
+
+
+Write
+-----
+int msm_smux_write(uint32_t lcid, void *pkt_priv, void *data, int len)
+
+Queues data for transmit. Once the data has been transmitted, the
+SMUX_WRITE_DONE or SMUX_WRITE_FAIL notifications will be sent with
+metadata pointing to an instance of struct smux_meta_write.
+
+If the high watermark has been exceeded, then further writes will
+return -EAGAIN.
+
+Data may be written as soon as the local side of the port has been
+opened, but the data will not be transmitted until the channel has been
+fully opened and the SMUX_CONNECTED event has been sent.
+
+/*
+ * Metadata for SMUX_WRITE_DONE/SMUX_WRITE_FAIL notification
+ *
+ * @pkt_priv: Packet-specific private data
+ * @buffer: Buffer pointer passed into msm_smux_write()
+ * @len: Buffer length passed into msm_smux_write()
+ */
+struct smux_meta_write {
+ void *pkt_priv;
+ void *buffer;
+ int len;
+};
+
+Watermark
+---------
+ int msm_smux_is_ch_full(uint32_t lcid)
+ int msm_smux_is_ch_low(uint32_t lcid)
+
+A channel watermark is used to keep individual clients from using
+excessive internal resources. The client may call
+msm_smux_is_ch_full() after every msm_smux_write() operation and if the
+watermark is high, it should not queue any more packets for
+transmission. As an alternative, the client may base this decision
+upon receiving an SMUX_HIGH_WM_HIT notification.
+
+Likewise, the client may call msm_smux_is_ch_low() after every
+SMUX_WRITE_DONE or SMUX_WRITE_FAIL notification and if the watermark is
+low, then new transmit operations can be started. As an alternative,
+the client may base this decision upon receiving an SMUX_LOW_WM_HIT
+notification.
+
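+Continuing the client sketch from the Open section, a transmit path
+might combine these checks as shown below. The pause flag and the
+back-pressure policy are client-side choices shown for illustration
+only.
+
+static bool my_tx_paused;
+
+static int my_send(uint32_t lcid, void *data, int len)
+{
+	int ret;
+
+	if (my_tx_paused)
+		return -EBUSY;		/* client-side back-pressure */
+
+	ret = msm_smux_write(lcid, NULL, data, len);
+	if (ret == -EAGAIN) {
+		/* high watermark exceeded; stop queueing new packets */
+		my_tx_paused = true;
+		return ret;
+	}
+
+	/* alternative to waiting for an SMUX_HIGH_WM_HIT notification */
+	if (msm_smux_is_ch_full(lcid))
+		my_tx_paused = true;
+
+	return ret;
+}
+
+/* called from my_notify() on SMUX_WRITE_DONE, SMUX_WRITE_FAIL or
+ * SMUX_LOW_WM_HIT */
+static void my_tx_resume(uint32_t lcid)
+{
+	if (msm_smux_is_ch_low(lcid))
+		my_tx_paused = false;
+}
+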
+Control Signals
+---------------
+ long msm_smux_tiocm_get(uint32_t lcid)
+ long msm_smux_tiocm_set(uint32_t lcid, uint32_t set, uint32_t clear)
+
+The TIOCM bits do not affect the SMUX internal state as they are
+pass-through for the clients. The client can receive notifications of
+state changes through the SMUX_TIOCM_UPDATE command.
+
+See the "Status Command" section for details on the TIOCM bits.
+
+/*
+ * Metadata for SMUX_TIOCM_UPDATE notification
+ *
+ * @previous: Previous TIOCM state
+ * @current: Current TIOCM state
+ *
+ */
+struct smux_meta_tiocm {
+ uint32_t previous;
+ uint32_t current;
+};
+
+Subsystem Restart
+-----------------
+Subsystem restart is handled by sending a disconnect notification
+followed by sending read and write fail notifications to each client.
+This returns ownership of the read and write buffers to the clients for
+client-appropriate handling.
+
+The sequence of notifications shall be:
+ 1) SMUX_DISCONNECTED notification with @metadata->is_ssr == 1
+ 2) SMUX_WRITE_FAIL for each packet in TX queue
+ 3) SMUX_READ_FAIL for any RX packet in progress
+
+After the completion of the sequence, the client should call msm_smux_close()
+followed by a call to msm_smux_open() to re-open the port.
+
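+Continuing the client sketch, one way to handle this sequence is to
+react to SMUX_DISCONNECTED in the notify() handler and defer the
+close/re-open to a workqueue so the notifier context is not blocked.
+The deferral is an assumption of this sketch; only the notification
+sequence and the close/open calls come from the interface above.
+
+#include <linux/workqueue.h>
+
+static struct work_struct my_ssr_work;	/* INIT_WORK() at client init */
+
+static void my_ssr_worker(struct work_struct *work)
+{
+	msm_smux_close(MY_LCID);
+	msm_smux_open(MY_LCID, NULL, my_notify, my_get_rx_buffer);
+}
+
+/* called from my_notify() for the SMUX_DISCONNECTED event */
+static void my_handle_disconnect(struct smux_meta_disconnected *meta)
+{
+	if (meta->is_ssr)
+		schedule_work(&my_ssr_work);	/* re-open after SSR */
+}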
+
+Debug / Testing
+---------------
+
+Several debugfs nodes will be exported under the n_gsm directory for
+testing and debugging.
+ * tbl - prints table of logical channels
+ * stats - prints transfer statistics
+ * enable_local_loopback - echo LCID to enable local loopback
+ * disable_local_loopback - echo LCID to disable local loopback
+ * enable_remote_loopback - echo LCID to enable remote loopback
+ * disable_remote_loopback - echo LCID to disable remote loopback
+
+Driver parameters
+=================
+
+A module parameter called debug_mask will be exported to allow a user
+to set the log level for debugging.
+
+Config options
+==============
+
+No configuration options are planned.
+
+Dependencies
+============
+
+The msm_rmnet_smux and smux_ctl drivers are part of this project and
+are used to interface with the Linux TCP/IP framework and user space.
+
+The physical transport is the HS UART driver which will be extended to
+add a kernel API (the current interface is a TTY interface).
+
+Initialization of dependency drivers is handled using the platform
+device framework. When SMUX is loaded as a line discipline of the TTY
+Framework, it will register separate device drivers with the following
+device names:
+ * SMUX_CTL
+ * SMUX_RMNET
+ * SMUX_DUN_DATA_HSUART
+ * SMUX_RMNET_DATA_HSUART
+ * SMUX_RMNET_CTL_HSUART
+ * SMUX_DIAG
+
+The drivers are removed when SMUX is unloaded from the line discipline.
+
+
+User space utilities
+====================
+
+No userspace utilities are planned aside from testing and example
+applications.
+
+Known issues
+============
+
+None.
+
+To do
+=====
+Once completed, benchmark to determine if FIFO packet scheduling is
+sufficient or if a different scheduling algorithm such as deficit
+round-robin or deficit FIFO scheduling is needed to fairly handle
+variable-sized packets.
diff --git a/Documentation/devicetree/bindings/arm/msm/smp2p.txt b/Documentation/devicetree/bindings/arm/msm/smp2p.txt
index 7a5f506..a7af9e7 100644
--- a/Documentation/devicetree/bindings/arm/msm/smp2p.txt
+++ b/Documentation/devicetree/bindings/arm/msm/smp2p.txt
@@ -2,9 +2,7 @@
Required properties:
-compatible : should be "qcom,smp2p"
--reg : the location and offset of the irq register base memory
--reg-names : "irq-reg-base", "irq-reg-offset" - string to identify the irq
- register region and offset values
+-reg : the location of the irq register base memory
-qcom,remote-pid : the SMP2P remote processor ID (see smp2p_private_api.h)
-qcom,irq-bitmask : the sending irq bitmask
-interrupts : the receiving interrupt line
@@ -13,8 +11,7 @@
qcom,smp2p-modem {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <1>;
qcom,irq-bitmask = <0x4000>;
interrupts = <0 27 1>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt b/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt
new file mode 100644
index 0000000..95f0fa4
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt
@@ -0,0 +1,37 @@
+Qualcomm mdss-qpic-panel
+
+mdss-qpic-panel is a panel device which can be driven by qpic.
+
+Required properties:
+- compatible: Must be "qcom,mdss-qpic-panel"
+- qcom,mdss-pan-res: An array of two values specifying the panel
+ resolution as <width height>.
+- qcom,mdss-pan-bpp: Specifies the panel bits per pixel.
+- qcom,refresh_rate: Panel refresh rate
+- vdd-supply: Phandle for vdd regulator device node.
+- avdd-supply: Phandle for avdd regulator device node.
+- qcom,cs-gpio: Phandle for cs gpio device node.
+- qcom,te-gpio: Phandle for te gpio device node.
+- qcom,rst-gpio: Phandle for rst gpio device node.
+- qcom,ad8-gpio: Phandle for ad8 gpio device node.
+
+Optional properties:
+- label: A string used as a descriptive name of the panel
+
+
+Example:
+/ {
+ qcom,mdss_lcdc_ili9341_qvga {
+ compatible = "qcom,mdss-qpic-panel";
+ label = "ili qvga lcdc panel";
+ vdd-supply = <&pm8019_l11>;
+ avdd-supply = <&pm8019_l14>;
+ qcom,cs-gpio = <&msmgpio 21 0>;
+ qcom,te-gpio = <&msmgpio 22 0>;
+ qcom,rst-gpio = <&msmgpio 23 0>;
+ qcom,ad8-gpio = <&msmgpio 20 0>;
+ qcom,mdss-pan-res = <240 320>;
+ qcom,mdss-pan-bpp = <18>;
+ qcom,refresh_rate = <60>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/fb/mdss-qpic.txt b/Documentation/devicetree/bindings/fb/mdss-qpic.txt
new file mode 100644
index 0000000..0fa3a32
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-qpic.txt
@@ -0,0 +1,18 @@
+Qualcomm mdss-qpic
+
+mdss-qpic is a qpic controller device which supports DMA transmission to MIPI
+and LCDC panels.
+
+Required properties:
+- compatible: must be "qcom,mdss_qpic"
+- reg: offset and length of the register set for the device.
+- reg-names : names to refer to register sets related to this device
+- interrupts: IRQ line
+
+Example:
+ qcom,msm_qpic@f9ac0000 {
+ compatible = "qcom,mdss_qpic";
+ reg = <0xf9ac0000 0x24000>;
+ reg-names = "qpic_base";
+ interrupts = <0 251 0>;
+ };
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index f97e063..bebdb5a 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -39,6 +39,7 @@
- qcom,buffer-type-tz-usage-table : a key-value pair, mapping a buffer type
(enum hal_buffer) to its corresponding TZ usage. The TZ usages are defined
as "enum cp_mem_usage" in include/linux/msm_ion.h
+- qcom,has-ocmem: if this property exists, it indicates that the target has ocmem
Example:
@@ -55,6 +56,7 @@
<243000 133330000>,
<108000 100000000>,
<36000 50000000>;
+ qcom,has-ocmem;
qcom,hfi = "venus";
qcom,reg-presets = <0x80004 0x1>,
<0x80178 0x00001FFF>;
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
index 2764657..70f8b55 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
@@ -10,7 +10,7 @@
- reg: Pairs of physical base addresses and region sizes of
memory mapped registers.
- reg-names: Names of the bases for the above registers. "qdsp6_base"
- and "halt_base" are expected.
+ "halt_base", and "restart_reg" are expected.
- interrupts: The lpass watchdog interrupt
- vdd_cx-supply: Reference to the regulator that supplies the vdd_cx domain.
- qcom,firmware-name: Base name of the firmware image. Ex. "lpass"
@@ -23,8 +23,9 @@
qcom,lpass@fe200000 {
compatible = "qcom,pil-q6v5-lpass";
reg = <0xfe200000 0x00100>,
- <0xfd485100 0x00010>;
- reg-names = "qdsp6_base", "halt_base";
+ <0xfd485100 0x00010>,
+ <0xfc4016c0 0x00004>;
+ reg-names = "qdsp6_base", "halt_base", "restart_reg";
interrupts = <0 194 1>;
vdd_cx-supply = <&pm8841_s2>;
qcom,firmware-name = "lpass";
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index df3f71c..86b3ccb 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -17,6 +17,8 @@
- vdd_cx-supply: Reference to the regulator that supplies the vdd_cx domain.
- vdd_mx-supply: Reference to the regulator that supplies the memory rail.
- qcom,firmware-name: Base name of the firmware image. Ex. "mdsp"
+- qcom,gpio-err-fatal: GPIO used by the modem to indicate a fatal error to the apps.
+- qcom,gpio-force-stop: GPIO used by the apps to force the modem to shut down.
Optional properties:
- vdd_pll-supply: Reference to the regulator that supplies the PLL's rail.
@@ -44,4 +46,10 @@
qcom,is-loadable;
qcom,firmware-name = "mba";
qcom,pil-self-auth;
+
+ /* GPIO inputs from mss */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+
+ /* GPIO output to mss */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
};
diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/power/opp.txt
new file mode 100644
index 0000000..74499e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/opp.txt
@@ -0,0 +1,25 @@
+* Generic OPP Interface
+
+SoCs have a standard set of tuples consisting of frequency and
+voltage pairs that the device will support per voltage domain. These
+are called Operating Performance Points or OPPs.
+
+Properties:
+- operating-points: An array of 2-tuples; each tuple consists of a
+  frequency and a voltage in the form <freq-kHz vol-uV>.
+  freq: clock frequency in kHz
+  vol: voltage in microvolts
+
+Examples:
+
+cpu@0 {
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ next-level-cache = <&L2>;
+ operating-points = <
+ /* kHz uV */
+ 792000 1100000
+ 396000 950000
+ 198000 850000
+ >;
+};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9a1c759..a5fac80 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -522,7 +522,7 @@
coherent_pool=nn[KMG] [ARM,KNL]
Sets the size of memory pool for coherent, atomic dma
- allocations if Contiguous Memory Allocator (CMA) is used.
+ allocations, by default set to 256K.
code_bytes [X86] How many bytes of object code to print
in an oops report.
diff --git a/arch/arm/boot/dts/msm-pm8110.dtsi b/arch/arm/boot/dts/msm-pm8110.dtsi
index c488ab1..ec42cfc 100644
--- a/arch/arm/boot/dts/msm-pm8110.dtsi
+++ b/arch/arm/boot/dts/msm-pm8110.dtsi
@@ -95,5 +95,225 @@
reg = <0x1>;
#address-cells = <1>;
#size-cells = <1>;
+
+ regulator@1400 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_s1";
+ spmi-dev-container;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x1400 0x300>;
+ status = "disabled";
+
+ qcom,ctl@1400 {
+ reg = <0x1400 0x100>;
+ };
+ qcom,ps@1500 {
+ reg = <0x1500 0x100>;
+ };
+ qcom,freq@1600 {
+ reg = <0x1600 0x100>;
+ };
+ };
+
+ regulator@1700 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_s2";
+ spmi-dev-container;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x1700 0x300>;
+ status = "disabled";
+
+ qcom,ctl@1700 {
+ reg = <0x1700 0x100>;
+ };
+ qcom,ps@1800 {
+ reg = <0x1800 0x100>;
+ };
+ qcom,freq@1900 {
+ reg = <0x1900 0x100>;
+ };
+ };
+
+ regulator@1a00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_s3";
+ spmi-dev-container;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x1a00 0x300>;
+ status = "disabled";
+
+ qcom,ctl@1a00 {
+ reg = <0x1a00 0x100>;
+ };
+ qcom,ps@1b00 {
+ reg = <0x1b00 0x100>;
+ };
+ qcom,freq@1c00 {
+ reg = <0x1c00 0x100>;
+ };
+ };
+
+ regulator@1d00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_s4";
+ spmi-dev-container;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x1d00 0x300>;
+ status = "disabled";
+
+ qcom,ctl@1d00 {
+ reg = <0x1d00 0x100>;
+ };
+ qcom,ps@1e00 {
+ reg = <0x1e00 0x100>;
+ };
+ qcom,freq@1f00 {
+ reg = <0x1f00 0x100>;
+ };
+ };
+
+ regulator@4000 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l1";
+ reg = <0x4000 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4100 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l2";
+ reg = <0x4100 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4200 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l3";
+ reg = <0x4200 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4300 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l4";
+ reg = <0x4300 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4400 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l5";
+ reg = <0x4400 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4500 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l6";
+ reg = <0x4500 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4600 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l7";
+ reg = <0x4600 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4700 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l8";
+ reg = <0x4700 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4800 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l9";
+ reg = <0x4800 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4900 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l10";
+ reg = <0x4900 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4b00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l12";
+ reg = <0x4b00 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4d00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l14";
+ reg = <0x4d00 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4e00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l15";
+ reg = <0x4e00 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4f00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l16";
+ reg = <0x4f00 0x100>;
+ status = "disabled";
+ };
+
+ regulator@5000 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l17";
+ reg = <0x5000 0x100>;
+ status = "disabled";
+ };
+
+ regulator@5100 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l18";
+ reg = <0x5100 0x100>;
+ status = "disabled";
+ };
+
+ regulator@5200 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l19";
+ reg = <0x5200 0x100>;
+ status = "disabled";
+ };
+
+ regulator@5300 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l20";
+ reg = <0x5300 0x100>;
+ status = "disabled";
+ };
+
+ regulator@5400 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l21";
+ reg = <0x5400 0x100>;
+ status = "disabled";
+ };
+
+ regulator@5500 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8110_l22";
+ reg = <0x5500 0x100>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/msm8226-cdp.dts b/arch/arm/boot/dts/msm8226-cdp.dts
index fa77c35..800cd8f 100644
--- a/arch/arm/boot/dts/msm8226-cdp.dts
+++ b/arch/arm/boot/dts/msm8226-cdp.dts
@@ -90,6 +90,7 @@
sound {
qcom,cdc-mclk-gpios = <&pm8226_gpios 1 0>;
+ qcom,cdc-vdd-spkr-gpios = <&pm8226_gpios 2 0>;
};
};
@@ -193,6 +194,13 @@
};
gpio@c100 { /* GPIO 2 */
+ qcom,mode = <1>;
+ qcom,output-type = <0>;
+ qcom,pull = <5>;
+ qcom,vin-sel = <2>;
+ qcom,out-strength = <3>;
+ qcom,src-sel = <2>;
+ qcom,master-en = <1>;
};
gpio@c200 { /* GPIO 3 */
diff --git a/arch/arm/boot/dts/msm8226-coresight.dtsi b/arch/arm/boot/dts/msm8226-coresight.dtsi
index 30d79df..b891d3d 100644
--- a/arch/arm/boot/dts/msm8226-coresight.dtsi
+++ b/arch/arm/boot/dts/msm8226-coresight.dtsi
@@ -23,6 +23,7 @@
coresight-id = <0>;
coresight-name = "coresight-tmc-etr";
coresight-nr-inports = <1>;
+ coresight-ctis = <&cti0 &cti8>;
};
tpiu: tpiu@fc318000 {
@@ -60,6 +61,7 @@
coresight-child-list = <&replicator>;
coresight-child-ports = <0>;
coresight-default-sink;
+ coresight-ctis = <&cti0 &cti8>;
};
funnel_merg: funnel@fc31b000 {
@@ -213,4 +215,144 @@
qcom,blk-size = <3>;
};
+
+ cti0: cti@fc308000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc308000 0x1000>;
+ reg-names = "cti0-base";
+
+ coresight-id = <15>;
+ coresight-name = "coresight-cti0";
+ coresight-nr-inports = <0>;
+ };
+
+ cti1: cti@fc309000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc309000 0x1000>;
+ reg-names = "cti1-base";
+
+ coresight-id = <16>;
+ coresight-name = "coresight-cti1";
+ coresight-nr-inports = <0>;
+ };
+
+ cti2: cti@fc30a000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30a000 0x1000>;
+ reg-names = "cti2-base";
+
+ coresight-id = <17>;
+ coresight-name = "coresight-cti2";
+ coresight-nr-inports = <0>;
+ };
+
+ cti3: cti@fc30b000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30b000 0x1000>;
+ reg-names = "cti3-base";
+
+ coresight-id = <18>;
+ coresight-name = "coresight-cti3";
+ coresight-nr-inports = <0>;
+ };
+
+ cti4: cti@fc30c000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30c000 0x1000>;
+ reg-names = "cti4-base";
+
+ coresight-id = <19>;
+ coresight-name = "coresight-cti4";
+ coresight-nr-inports = <0>;
+ };
+
+ cti5: cti@fc30d000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30d000 0x1000>;
+ reg-names = "cti5-base";
+
+ coresight-id = <20>;
+ coresight-name = "coresight-cti5";
+ coresight-nr-inports = <0>;
+ };
+
+ cti6: cti@fc30e000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30e000 0x1000>;
+ reg-names = "cti6-base";
+
+ coresight-id = <21>;
+ coresight-name = "coresight-cti6";
+ coresight-nr-inports = <0>;
+ };
+
+ cti7: cti@fc30f000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30f000 0x1000>;
+ reg-names = "cti7-base";
+
+ coresight-id = <22>;
+ coresight-name = "coresight-cti7";
+ coresight-nr-inports = <0>;
+ };
+
+ cti8: cti@fc310000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc310000 0x1000>;
+ reg-names = "cti8-base";
+
+ coresight-id = <23>;
+ coresight-name = "coresight-cti8";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_l2: cti@fc340000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc340000 0x1000>;
+ reg-names = "cti-l2-base";
+
+ coresight-id = <24>;
+ coresight-name = "coresight-cti-l2";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu0: cti@fc341000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc341000 0x1000>;
+ reg-names = "cti-cpu0-base";
+
+ coresight-id = <25>;
+ coresight-name = "coresight-cti-cpu0";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu1: cti@fc342000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc342000 0x1000>;
+ reg-names = "cti-cpu1-base";
+
+ coresight-id = <26>;
+ coresight-name = "coresight-cti-cpu1";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu2: cti@fc343000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc343000 0x1000>;
+ reg-names = "cti-cpu2-base";
+
+ coresight-id = <27>;
+ coresight-name = "coresight-cti-cpu2";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu3: cti@fc344000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc344000 0x1000>;
+ reg-names = "cti-cpu3-base";
+
+ coresight-id = <28>;
+ coresight-name = "coresight-cti-cpu3";
+ coresight-nr-inports = <0>;
+ };
};
diff --git a/arch/arm/boot/dts/msm8226-iommu-domains.dtsi b/arch/arm/boot/dts/msm8226-iommu-domains.dtsi
new file mode 100644
index 0000000..6ea5b9e
--- /dev/null
+++ b/arch/arm/boot/dts/msm8226-iommu-domains.dtsi
@@ -0,0 +1,31 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ qcom,iommu-domains {
+ compatible = "qcom,iommu-domains";
+
+ venus_domain_ns: qcom,iommu-domain1 {
+ label = "venus_ns";
+ qcom,iommu-contexts = <&venus_ns>;
+ qcom,virtual-addr-pool = <0x40000000 0x3f000000
+ 0x7f000000 0x1000000>;
+ };
+
+ venus_domain_cp: qcom,iommu-domain2 {
+ label = "venus_cp";
+ qcom,iommu-contexts = <&venus_cp>;
+ qcom,virtual-addr-pool = <0x1000000 0x3f000000>;
+ qcom,secure-domain;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm8226-ion.dtsi b/arch/arm/boot/dts/msm8226-ion.dtsi
index b06ad42..9a35507 100644
--- a/arch/arm/boot/dts/msm8226-ion.dtsi
+++ b/arch/arm/boot/dts/msm8226-ion.dtsi
@@ -24,8 +24,7 @@
compatible = "qcom,msm-ion-reserve";
reg = <8>;
qcom,heap-align = <0x1000>;
- qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
- qcom,memory-reservation-size = <0x3800000>;
+ linux,contiguous-region = <&secure_mem>;
};
qcom,ion-heap@25 { /* IOMMU HEAP */
diff --git a/arch/arm/boot/dts/msm8226-mtp.dts b/arch/arm/boot/dts/msm8226-mtp.dts
index e747cb5..3d1fe19 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dts
+++ b/arch/arm/boot/dts/msm8226-mtp.dts
@@ -90,6 +90,7 @@
sound {
qcom,cdc-mclk-gpios = <&pm8226_gpios 1 0>;
+ qcom,cdc-vdd-spkr-gpios = <&pm8226_gpios 2 0>;
};
};
@@ -190,6 +191,13 @@
};
gpio@c100 { /* GPIO 2 */
+ qcom,mode = <1>;
+ qcom,output-type = <0>;
+ qcom,pull = <5>;
+ qcom,vin-sel = <2>;
+ qcom,out-strength = <3>;
+ qcom,src-sel = <2>;
+ qcom,master-en = <1>;
};
gpio@c200 { /* GPIO 3 */
diff --git a/arch/arm/boot/dts/msm8226-qrd.dts b/arch/arm/boot/dts/msm8226-qrd.dts
index acc4597..4fa37d6 100644
--- a/arch/arm/boot/dts/msm8226-qrd.dts
+++ b/arch/arm/boot/dts/msm8226-qrd.dts
@@ -90,6 +90,7 @@
sound {
qcom,cdc-mclk-gpios = <&pm8226_gpios 1 0>;
+ qcom,cdc-vdd-spkr-gpios = <&pm8226_gpios 2 0>;
};
};
@@ -193,6 +194,13 @@
};
gpio@c100 { /* GPIO 2 */
+ qcom,mode = <1>;
+ qcom,output-type = <0>;
+ qcom,pull = <5>;
+ qcom,vin-sel = <2>;
+ qcom,out-strength = <3>;
+ qcom,src-sel = <2>;
+ qcom,master-en = <1>;
};
gpio@c200 { /* GPIO 3 */
diff --git a/arch/arm/boot/dts/msm8226-smp2p.dtsi b/arch/arm/boot/dts/msm8226-smp2p.dtsi
index 60f63a8..1b08246 100644
--- a/arch/arm/boot/dts/msm8226-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm8226-smp2p.dtsi
@@ -12,8 +12,7 @@
/ {
qcom,smp2p-modem {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <1>;
qcom,irq-bitmask = <0x4000>;
interrupts = <0 27 1>;
@@ -21,8 +20,7 @@
qcom,smp2p-adsp {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <2>;
qcom,irq-bitmask = <0x400>;
interrupts = <0 158 1>;
@@ -30,8 +28,7 @@
qcom,smp2p-wcnss {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <4>;
qcom,irq-bitmask = <0x40000>;
interrupts = <0 143 1>;
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 320c577..ae2a135 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -20,7 +20,7 @@
/include/ "msm8226-bus.dtsi"
/include/ "msm8226-mdss.dtsi"
/include/ "msm8226-coresight.dtsi"
-
+/include/ "msm8226-iommu-domains.dtsi"
/ {
model = "Qualcomm MSM 8226";
compatible = "qcom,msm8226";
@@ -50,12 +50,57 @@
spi0 = &spi_0;
};
+ memory {
+ secure_mem: secure_region {
+ linux,contiguous-region;
+ reg = <0 0x3800000>;
+ label = "secure_mem";
+ };
+ };
+
timer {
compatible = "arm,armv7-timer";
interrupts = <1 2 0 1 3 0>;
clock-frequency = <19200000>;
};
+ qcom,vidc@fdc00000 {
+ compatible = "qcom,msm-vidc";
+ reg = <0xfdc00000 0xff000>;
+ interrupts = <0 44 0>;
+ qcom,load-freq-tbl = <352800 160000000>,
+ <244800 133330000>,
+ <108000 66700000>;
+ qcom,hfi = "venus";
+ qcom,bus-ports = <1>;
+ qcom,reg-presets = <0xE0024 0x0>,
+ <0x80124 0x3>,
+ <0xE0020 0x5555556>,
+ <0x800B0 0x10101001>,
+ <0x800B4 0x00101010>,
+ <0x800C0 0x1010100f>,
+ <0x800C4 0x00101010>,
+ <0x800D0 0x00000010>,
+ <0x800D4 0x00000010>,
+ <0x800D8 0x00000707>;
+ qcom,enc-ddr-ab-ib = <0 0>,
+ <129000 142000>,
+ <384000 422000>,
+ <866000 953000>;
+ qcom,dec-ddr-ab-ib = <0 0>,
+ <103000 134000>,
+ <268000 348000>,
+ <505000 657000>;
+ qcom,iommu-groups = <&venus_domain_ns &venus_domain_cp>;
+ qcom,iommu-group-buffer-types = <0xfff 0x1ff>;
+ qcom,buffer-type-tz-usage-table = <0x1 0x1>,
+ <0x1fe 0x2>;
+ };
+
+ qcom,wfd {
+ compatible = "qcom,msm-wfd";
+ };
+
serial@f991f000 {
compatible = "qcom,msm-lsuart-v14";
reg = <0xf991f000 0x1000>;
@@ -186,6 +231,8 @@
qcom,audio-routing =
"RX_BIAS", "MCLK",
"LDO_H", "MCLK",
+ "SPK_OUT", "MCLK",
+ "SPK_OUT", "EXT_VDD_SPKR",
"AMIC1", "MIC BIAS1 Internal1",
"MIC BIAS1 Internal1", "Handset Mic",
"AMIC2", "MIC BIAS2 External",
@@ -335,6 +382,21 @@
compatible = "qcom,msm-dai-q6-dev";
qcom,msm-dai-q6-dev-id = <240>;
};
+
+ qcom,msm-dai-q6-incall-record-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <32771>;
+ };
+
+ qcom,msm-dai-q6-incall-record-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <32772>;
+ };
+
+ qcom,msm-dai-q6-incall-music-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <32773>;
+ };
};
qcom,msm-pcm-hostless {
@@ -546,11 +608,16 @@
qcom,firmware-name = "wcnss";
};
+ qcom,iris-fm {
+ compatible = "qcom,iris_fm";
+ };
+
qcom,lpass@fe200000 {
compatible = "qcom,pil-q6v5-lpass";
reg = <0xfe200000 0x00100>,
- <0xfd485100 0x00010>;
- reg-names = "qdsp6_base", "halt_base";
+ <0xfd485100 0x00010>,
+ <0xfc4016c0 0x00004>;
+ reg-names = "qdsp6_base", "halt_base", "restart_reg";
vdd_cx-supply = <&pm8226_s1_corner>;
interrupts = <0 162 1>;
@@ -650,6 +717,40 @@
compatible = "qcom,tz-log";
reg = <0x0fc5b82c 0x1000>;
};
+
+ jtag_mm0: jtagmm@fc33c000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc33c000 0x1000>,
+ <0xfc330000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ jtag_mm1: jtagmm@fc33d000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc33d000 0x1000>,
+ <0xfc332000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ jtag_mm2: jtagmm@fc33e000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc33e000 0x1000>,
+ <0xfc334000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ jtag_mm3: jtagmm@fc33f000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc33f000 0x1000>,
+ <0xfc336000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ qcom,ipc-spinlock@fd484000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0xfd484000 0x400>;
+ qcom,num-locks = <8>;
+ };
};
&gdsc_venus {
diff --git a/arch/arm/boot/dts/msm8610-pm.dtsi b/arch/arm/boot/dts/msm8610-pm.dtsi
index d6e143c..ff16b8d 100644
--- a/arch/arm/boot/dts/msm8610-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-pm.dtsi
@@ -306,10 +306,12 @@
qcom,gic-map = <47 172>, /* usb2_hsic_async_wakeup_irq */
<53 104>, /* mdss_irq */
<62 222>, /* ee0_krait_hlos_spmi_periph_irq */
+ <0xff 56>, /* q6_wdog_expired_irq */
<0xff 57>, /* mss_to_apps_irq(0) */
<0xff 58>, /* mss_to_apps_irq(1) */
<0xff 59>, /* mss_to_apps_irq(2) */
<0xff 60>, /* mss_to_apps_irq(3) */
+ <0xff 61>, /* mss_a2_bam_irq */
<0xff 173>, /* o_wcss_apss_smd_hi */
<0xff 174>, /* o_wcss_apss_smd_med */
<0xff 175>, /* o_wcss_apss_smd_low */
@@ -317,17 +319,17 @@
<0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */
<0xff 178>, /* o_wcss_apss_wlan_rx_data_avail */
<0xff 179>, /* o_wcss_apss_asic_intr
-
+ <0xff 181>, /* o_wcss_apss_wdog_bite_and_reset_rdy */
+ <0xff 161>, /* lpass_irq_out_spare[4] */
+ <0xff 162>, /* lpass_irq_out_spare[5]*/
+ <0xff 234>, /* lpass_irq_out_spare[6]*/
+ <0xff 235>, /* lpass_irq_out_spare[7]*/
<0xff 188>, /* lpass_irq_out_apcs(0) */
<0xff 189>, /* lpass_irq_out_apcs(1) */
<0xff 190>, /* lpass_irq_out_apcs(2) */
<0xff 191>, /* lpass_irq_out_apcs(3) */
<0xff 192>, /* lpass_irq_out_apcs(4) */
- <0xff 193>, /* lpass_irq_out_apcs(5) */
<0xff 194>, /* lpass_irq_out_apcs(6) */
- <0xff 195>, /* lpass_irq_out_apcs(7) */
- <0xff 196>, /* lpass_irq_out_apcs(8) */
- <0xff 197>, /* lpass_irq_out_apcs(9) */
<0xff 200>, /* rpm_ipc(4) */
<0xff 201>, /* rpm_ipc(5) */
<0xff 202>, /* rpm_ipc(6) */
@@ -336,47 +338,54 @@
<0xff 205>, /* rpm_ipc(25) */
<0xff 206>, /* rpm_ipc(26) */
<0xff 207>, /* rpm_ipc(27) */
+ <0xff 258>, /* rpm_ipc(28) */
+ <0xff 259>, /* rpm_ipc(29) */
+ <0xff 275>, /* rpm_ipc(30) */
+ <0xff 276>, /* rpm_ipc(31) */
+ <0xff 269>, /* rpm_wdog_expired_irq */
<0xff 240>; /* summary_irq_kpss */
qcom,gpio-parent = <&msmgpio>;
- qcom,gpio-map = <3 102>,
- <4 1 >,
+ qcom,gpio-map = <3 1>,
+ <4 4 >,
<5 5 >,
<6 9 >,
- <7 18>,
- <8 20>,
- <9 24>,
+ <7 13>,
+ <8 17>,
+ <9 21>,
<10 27>,
- <11 28>,
- <12 34>,
- <13 35>,
- <14 37>,
- <15 42>,
- <16 44>,
- <17 46>,
- <18 50>,
- <19 54>,
- <20 59>,
- <21 61>,
- <22 62>,
- <23 64>,
- <24 65>,
- <25 66>,
- <26 67>,
- <27 68>,
- <28 71>,
- <29 72>,
- <30 73>,
- <31 74>,
- <32 75>,
- <33 77>,
- <34 79>,
- <35 80>,
- <36 82>,
- <37 86>,
- <38 92>,
- <39 93>,
- <40 95>;
+ <11 29>,
+ <12 31>,
+ <13 33>,
+ <14 35>,
+ <15 37>,
+ <16 38>,
+ <17 39>,
+ <18 41>,
+ <19 46>,
+ <20 48>,
+ <21 49>,
+ <22 50>,
+ <23 51>,
+ <24 52>,
+ <25 54>,
+ <26 62>,
+ <27 63>,
+ <28 64>,
+ <29 65>,
+ <30 66>,
+ <31 67>,
+ <32 68>,
+ <33 69>,
+ <34 71>,
+ <35 72>,
+ <36 106>,
+ <37 107>,
+ <38 108>,
+ <39 109>,
+ <40 110>,
+ <54 111>,
+ <55 113>;
};
qcom,pm-8x60@fe805664 {
diff --git a/arch/arm/boot/dts/msm8610-regulator.dtsi b/arch/arm/boot/dts/msm8610-regulator.dtsi
index 362d126..f11f04b 100644
--- a/arch/arm/boot/dts/msm8610-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8610-regulator.dtsi
@@ -12,216 +12,215 @@
/* Stub Regulators */
- / {
- pm8110_s1: regulator-s1 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_s1";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- };
-
+/ {
pm8110_s1_corner: regulator-s1-corner {
compatible = "qcom,stub-regulator";
regulator-name = "8110_s1_corner";
+ qcom,hpm-min-load = <100000>;
regulator-min-microvolt = <1>;
regulator-max-microvolt = <7>;
qcom,consumer-supplies = "vdd_dig", "";
};
+};
- pm8110_s2: regulator-s2 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_s2";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
- };
+/* QPNP controlled regulators: */
- pm8110_s3: regulator-s3 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_s3";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <1350000>;
- regulator-max-microvolt = <1350000>;
- };
+&spmi_bus {
- pm8110_s4: regulator-s4 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_s4";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <2150000>;
- regulator-max-microvolt = <2150000>;
- };
+ qcom,pm8110@1 {
- pm8110_l1: regulator-l1 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l1";
- parent-supply = <&pm8110_s3>;
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1225000>;
- regulator-max-microvolt = <1225000>;
- };
+ pm8110_s1: regulator@1400 {
+ status = "okay";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ qcom,enable-time = <500>;
+ qcom,system-load = <100000>;
+ regulator-always-on;
+ };
- pm8110_l2: regulator-l2 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l2";
- parent-supply = <&pm8110_s3>;
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
+ pm8110_s2: regulator@1700 {
+ status = "okay";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1150000>;
+ qcom,enable-time = <500>;
+ qcom,system-load = <100000>;
+ regulator-always-on;
+ };
- pm8110_l3: regulator-l3 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l3";
- parent-supply = <&pm8110_s3>;
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- };
+ pm8110_s3: regulator@1a00 {
+ status = "okay";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ qcom,enable-time = <500>;
+ qcom,system-load = <100000>;
+ regulator-always-on;
+ };
- pm8110_l4: regulator-l4 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l4";
- parent-supply = <&pm8110_s3>;
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
+ pm8110_s4: regulator@1d00 {
+ status = "okay";
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ qcom,enable-time = <500>;
+ qcom,system-load = <100000>;
+ regulator-always-on;
+ };
- pm8110_l5: regulator-l5 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l5";
- parent-supply = <&pm8110_s3>;
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1300000>;
- regulator-max-microvolt = <1300000>;
- };
+ pm8110_l1: regulator@4000 {
+ status = "okay";
+ parent-supply = <&pm8110_s3>;
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l6: regulator-l6 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l6";
- parent-supply = <&pm8110_s4>;
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
+ pm8110_l2: regulator@4100 {
+ status = "okay";
+ parent-supply = <&pm8110_s3>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,enable-time = <200>;
+ qcom,system-load = <10000>;
+ regulator-always-on;
+ };
- pm8110_l7: regulator-l7 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l7";
- parent-supply = <&pm8110_s4>;
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <2050000>;
- regulator-max-microvolt = <2050000>;
- };
+ pm8110_l3: regulator@4200 {
+ status = "okay";
+ parent-supply = <&pm8110_s3>;
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ qcom,enable-time = <200>;
+ qcom,system-load = <10000>;
+ regulator-always-on;
+ };
- pm8110_l8: regulator-l8 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l8";
- parent-supply = <&pm8110_s4>;
- qcom,hpm-min-load = <5000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
+ pm8110_l4: regulator@4300 {
+ status = "okay";
+ parent-supply = <&pm8110_s3>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l9: regulator-l9 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l9";
- parent-supply = <&pm8110_s4>;
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <2050000>;
- regulator-max-microvolt = <2050000>;
- };
+ pm8110_l5: regulator@4400 {
+ status = "okay";
+ parent-supply = <&pm8110_s3>;
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l10: regulator-l10 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l10";
- parent-supply = <&pm8110_s4>;
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- qcom,consumer-supplies = "vdd_sr2_pll", "";
- };
+ pm8110_l6: regulator@4500 {
+ status = "okay";
+ parent-supply = <&pm8110_s4>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,enable-time = <200>;
+ qcom,system-load = <10000>;
+ regulator-always-on;
+ };
- pm8110_l12: regulator-l12 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l12";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- };
+ pm8110_l7: regulator@4600 {
+ status = "okay";
+ parent-supply = <&pm8110_s4>;
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l14: regulator-l14 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l14";
- parent-supply = <&pm8110_s4>;
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
+ pm8110_l8: regulator@4700 {
+ status = "okay";
+ parent-supply = <&pm8110_s4>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l15: regulator-l15 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l15";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- };
+ pm8110_l9: regulator@4800 {
+ status = "okay";
+ parent-supply = <&pm8110_s4>;
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l16: regulator-l16 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l16";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- };
+ pm8110_l10: regulator@4900 {
+ status = "okay";
+ parent-supply = <&pm8110_s4>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,enable-time = <200>;
+ qcom,consumer-supplies = "vdd_sr2_pll", "";
+ };
- pm8110_l17: regulator-l17 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l17";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <2900000>;
- regulator-max-microvolt = <2900000>;
- };
+ pm8110_l12: regulator@4b00 {
+ status = "okay";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l18: regulator-l18 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l18";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2950000>;
- };
+ pm8110_l14: regulator@4d00 {
+ status = "okay";
+ parent-supply = <&pm8110_s4>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l19: regulator-l19 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l19";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <2850000>;
- regulator-max-microvolt = <2850000>;
- };
+ pm8110_l15: regulator@4e00 {
+ status = "okay";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l20: regulator-l20 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l20";
- qcom,hpm-min-load = <5000>;
- regulator-min-microvolt = <3075000>;
- regulator-max-microvolt = <3075000>;
- };
+ pm8110_l16: regulator@4f00 {
+ status = "okay";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l21: regulator-l21 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l21";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2950000>;
- };
+ pm8110_l17: regulator@5000 {
+ status = "okay";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ qcom,enable-time = <200>;
+ };
- pm8110_l22: regulator-l22 {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_l22";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
+ pm8110_l18: regulator@5100 {
+ status = "okay";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ qcom,enable-time = <200>;
+ };
+
+ pm8110_l19: regulator@5200 {
+ status = "okay";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ qcom,enable-time = <200>;
+ };
+
+ pm8110_l20: regulator@5300 {
+ status = "okay";
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ qcom,enable-time = <200>;
+ };
+
+ pm8110_l21: regulator@5400 {
+ status = "okay";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ qcom,enable-time = <200>;
+ };
+
+ pm8110_l22: regulator@5500 {
+ status = "okay";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ qcom,enable-time = <200>;
+ };
};
};
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index b78c3af..53d0d5d 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -236,6 +236,7 @@
compatible = "qcom,rpm-smd";
rpm-channel-name = "rpm_requests";
rpm-channel-type = <15>; /* SMD_APPS_RPM */
+ rpm-standalone;
};
qcom,msm-mem-hole {
@@ -431,8 +432,9 @@
qcom,lpass@fe200000 {
compatible = "qcom,pil-q6v5-lpass";
reg = <0xfe200000 0x00100>,
- <0xfd485100 0x00010>;
- reg-names = "qdsp6_base", "halt_base";
+ <0xfd485100 0x00010>,
+ <0xfc4016c0 0x00004>;
+ reg-names = "qdsp6_base", "halt_base", "restart_reg";
interrupts = <0 162 1>;
vdd_cx-supply = <&pm8110_s1_corner>;
qcom,firmware-name = "adsp";
@@ -487,8 +489,8 @@
/include/ "msm8610-iommu-domains.dtsi"
-/include/ "msm8610-regulator.dtsi"
/include/ "msm-pm8110.dtsi"
+/include/ "msm8610-regulator.dtsi"
&pm8110_vadc {
chan@0 {
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-cdp-mtp.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-cdp-mtp.dtsi
index 24438f0..15a549c 100644
--- a/arch/arm/boot/dts/msm8974-camera-sensor-cdp-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-cdp-mtp.dtsi
@@ -61,6 +61,44 @@
status = "ok";
};
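+	/* IMX135 image sensor: CCI master 0, CSIPHY 0 / CSID 0, I2C slave address 0x20 */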
+ qcom,camera@20 {
+ compatible = "qcom,imx135";
+ reg = <0x20>;
+ qcom,slave-id = <0x20 0x0 0x1210>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <90>;
+ qcom,sensor-name = "imx135";
+ cam_vdig-supply = <&pm8941_l3>;
+ cam_vana-supply = <&pm8941_l17>;
+ cam_vio-supply = <&pm8941_lvs3>;
+ cam_vaf-supply = <&pm8941_l23>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_vaf";
+ qcom,cam-vreg-type = <0 1 0 0>;
+ qcom,cam-vreg-min-voltage = <1225000 0 2850000 3000000>;
+ qcom,cam-vreg-max-voltage = <1225000 0 2850000 3000000>;
+ qcom,cam-vreg-op-mode = <105000 0 80000 100000>;
+ qcom,gpio-no-mux = <0>;
+ gpios = <&msmgpio 15 0>,
+ <&msmgpio 90 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK", "CAM_RESET1";
+ qcom,gpio-set-tbl-num = <1 1>;
+ qcom,gpio-set-tbl-flags = <0 2>;
+ qcom,gpio-set-tbl-delay = <1000 30000>;
+ qcom,csi-lane-assign = <0x4320>;
+ qcom,csi-lane-mask = <0x1F>;
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,sensor-type = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ };
+
qcom,camera@6c {
compatible = "qcom,ov2720";
reg = <0x6c>;
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index a2fba6a..e8ca27c 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -22,6 +22,7 @@
qcom,mdss_dsi_toshiba_720p_video {
status = "ok";
+ qcom,cont-splash-enabled;
};
qcom,mdss_dsi_orise_720p_video {
@@ -193,6 +194,7 @@
sound {
qcom,model = "msm8974-taiko-cdp-snd-card";
qcom,hdmi-audio-rx;
+ qcom,us-euro-gpios = <&pm8941_gpios 20 0>;
};
usb2_otg_sw: regulator-tpd4s214 {
@@ -373,9 +375,6 @@
&pm8941_chg {
status = "ok";
- qcom,chg-charging-disabled;
- qcom,chg-use-default-batt-values;
-
qcom,chg-chgr@1000 {
status = "ok";
};
@@ -384,10 +383,6 @@
status = "ok";
};
- qcom,chg-bat-if@1200 {
- status = "ok";
- };
-
qcom,chg-usb-chgpth@1300 {
status = "ok";
};
@@ -505,6 +500,14 @@
};
gpio@d300 { /* GPIO 20 */
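+		/* Digital output controlling the US/EU headset switch (qcom,us-euro-gpios) */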
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,output-type = <0>; /* QPNP_PIN_OUT_BUF_CMOS */
+ qcom,invert = <0>; /* Output low initially */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,out-strength = <2>; /* QPNP_PIN_OUT_STRENGTH_MED */
+ qcom,src-sel = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,master-en = <1>;
};
gpio@d400 { /* GPIO 21 */
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index d22c746..b18bf88 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -21,6 +21,7 @@
qcom,mdss_dsi_toshiba_720p_video {
status = "ok";
+ qcom,cont-splash-enabled;
};
qcom,hdmi_tx@fd922100 {
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index 380ec20..4f0469c 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -21,6 +21,7 @@
qcom,mdss_dsi_toshiba_720p_video {
status = "ok";
+ qcom,cont-splash-enabled;
};
qcom,hdmi_tx@fd922100 {
diff --git a/arch/arm/boot/dts/msm8974-smp2p.dtsi b/arch/arm/boot/dts/msm8974-smp2p.dtsi
index 511f91f..1b08246 100644
--- a/arch/arm/boot/dts/msm8974-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm8974-smp2p.dtsi
@@ -12,8 +12,7 @@
/ {
qcom,smp2p-modem {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
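+		/* Outbound interrupt register used to signal the remote processor */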
qcom,remote-pid = <1>;
qcom,irq-bitmask = <0x4000>;
interrupts = <0 27 1>;
@@ -21,8 +20,7 @@
qcom,smp2p-adsp {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <2>;
qcom,irq-bitmask = <0x400>;
interrupts = <0 158 1>;
@@ -30,8 +28,7 @@
qcom,smp2p-wcnss {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <4>;
qcom,irq-bitmask = <0x40000>;
interrupts = <0 143 1>;
@@ -103,6 +100,29 @@
gpios = <&smp2pgpio_smp2p_1_out 0 0>;
};
+ /* SMP2P SSR Driver for inbound entry from modem. */
+ smp2pgpio_ssr_smp2p_1_in: qcom,smp2pgpio-ssr-smp2p-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* SMP2P SSR Driver for outbound entry to modem */
+ smp2pgpio_ssr_smp2p_1_out: qcom,smp2pgpio-ssr-smp2p-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
/* SMP2P Test Driver for adsp inbound */
smp2pgpio_smp2p_2_in: qcom,smp2pgpio-smp2p-2-in {
compatible = "qcom,smp2pgpio";
diff --git a/arch/arm/boot/dts/msm8974-v1-cdp.dts b/arch/arm/boot/dts/msm8974-v1-cdp.dts
index 8db99b2..33bd1fb 100644
--- a/arch/arm/boot/dts/msm8974-v1-cdp.dts
+++ b/arch/arm/boot/dts/msm8974-v1-cdp.dts
@@ -19,10 +19,6 @@
model = "Qualcomm MSM 8974 CDP";
compatible = "qcom,msm8974-cdp", "qcom,msm8974";
qcom,msm-id = <126 1 0>;
-
- qcom,mdss_dsi_toshiba_720p_video {
- qcom,cont-splash-enabled;
- };
};
&ehci {
diff --git a/arch/arm/boot/dts/msm8974-v1-fluid.dts b/arch/arm/boot/dts/msm8974-v1-fluid.dts
index 683fe18..9fb287c 100644
--- a/arch/arm/boot/dts/msm8974-v1-fluid.dts
+++ b/arch/arm/boot/dts/msm8974-v1-fluid.dts
@@ -20,9 +20,6 @@
compatible = "qcom,msm8974-fluid", "qcom,msm8974";
qcom,msm-id = <126 3 0>;
- qcom,mdss_dsi_toshiba_720p_video {
- qcom,cont-splash-enabled;
- };
};
&pm8941_chg {
diff --git a/arch/arm/boot/dts/msm8974-v1-mtp.dts b/arch/arm/boot/dts/msm8974-v1-mtp.dts
index 0e5d2ae..205ee24 100644
--- a/arch/arm/boot/dts/msm8974-v1-mtp.dts
+++ b/arch/arm/boot/dts/msm8974-v1-mtp.dts
@@ -19,10 +19,6 @@
model = "Qualcomm MSM 8974 MTP";
compatible = "qcom,msm8974-mtp", "qcom,msm8974";
qcom,msm-id = <126 8 0>;
-
- qcom,mdss_dsi_toshiba_720p_video {
- qcom,cont-splash-enabled;
- };
};
&pm8941_chg {
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index ba03f79..9b9b1d1 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -95,6 +95,7 @@
reg = <0xfdc00000 0xff000>;
interrupts = <0 44 0>;
qcom,hfi = "venus";
+ qcom,has-ocmem;
};
qcom,wfd {
@@ -740,8 +741,9 @@
qcom,lpass@fe200000 {
compatible = "qcom,pil-q6v5-lpass";
reg = <0xfe200000 0x00100>,
- <0xfd485100 0x00010>;
- reg-names = "qdsp6_base", "halt_base";
+ <0xfd485100 0x00010>,
+ <0xfc4016c0 0x00004>;
+ reg-names = "qdsp6_base", "halt_base", "restart_reg";
vdd_cx-supply = <&pm8841_s2_corner>;
interrupts = <0 162 1>;
@@ -992,6 +994,12 @@
qcom,is-loadable;
qcom,firmware-name = "mba";
qcom,pil-self-auth;
+
+ /* GPIO input from mss */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+
+ /* GPIO output to mss */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
};
qcom,pronto@fb21b000 {
diff --git a/arch/arm/boot/dts/msm9625-display.dtsi b/arch/arm/boot/dts/msm9625-display.dtsi
new file mode 100644
index 0000000..a160bae
--- /dev/null
+++ b/arch/arm/boot/dts/msm9625-display.dtsi
@@ -0,0 +1,20 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
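+	/* MDSS QPIC display controller (parallel LCD interface) */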
+ qcom,msm_qpic@f9ac0000 {
+ compatible = "qcom,mdss_qpic";
+ reg = <0xf9ac0000 0x24000>;
+ reg-names = "qpic_base";
+ interrupts = <0 251 0>;
+ };
+};
diff --git a/arch/arm/boot/dts/msm9625-smp2p.dtsi b/arch/arm/boot/dts/msm9625-smp2p.dtsi
index 425bf00..02c95e4 100644
--- a/arch/arm/boot/dts/msm9625-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm9625-smp2p.dtsi
@@ -12,8 +12,7 @@
/ {
qcom,smp2p-modem {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <1>;
qcom,irq-bitmask = <0x4000>;
interrupts = <0 27 1>;
@@ -21,8 +20,7 @@
qcom,smp2p-adsp {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <2>;
qcom,irq-bitmask = <0x400>;
interrupts = <0 158 1>;
diff --git a/arch/arm/boot/dts/msm9625-v2-cdp.dts b/arch/arm/boot/dts/msm9625-v2-cdp.dts
index 09a89ab..919c6d5 100644
--- a/arch/arm/boot/dts/msm9625-v2-cdp.dts
+++ b/arch/arm/boot/dts/msm9625-v2-cdp.dts
@@ -13,6 +13,8 @@
/dts-v1/;
/include/ "msm9625-v2.dtsi"
+/include/ "msm9625-display.dtsi"
+/include/ "qpic-panel-ili-qvga.dtsi"
/ {
model = "Qualcomm MSM 9625V2 CDP";
diff --git a/arch/arm/boot/dts/msmzinc-ion.dtsi b/arch/arm/boot/dts/msmzinc-ion.dtsi
new file mode 100644
index 0000000..4bf078a
--- /dev/null
+++ b/arch/arm/boot/dts/msmzinc-ion.dtsi
@@ -0,0 +1,27 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ion-heap@30 { /* SYSTEM HEAP */
+ reg = <30>;
+ };
+
+ qcom,ion-heap@25 { /* IOMMU HEAP */
+ reg = <25>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msmzinc.dtsi b/arch/arm/boot/dts/msmzinc.dtsi
index 8905962..642597d 100644
--- a/arch/arm/boot/dts/msmzinc.dtsi
+++ b/arch/arm/boot/dts/msmzinc.dtsi
@@ -11,6 +11,7 @@
*/
/include/ "skeleton.dtsi"
+/include/ "msmzinc-ion.dtsi"
/ {
model = "Qualcomm MSM ZINC";
@@ -71,13 +72,15 @@
rpm-standalone;
};
- qcom,ion {
- compatible = "qcom,msm-ion";
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
- };
+ qcom,msm-imem@fe805000 {
+ compatible = "qcom,msm-imem";
+ reg = <0xfe805000 0x1000>; /* Address and size of IMEM */
};
+
+ qcom,msm-rtb {
+ compatible = "qcom,msm-rtb";
+ qcom,memory-reservation-type = "EBI1";
+ qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
+ };
+
};
diff --git a/arch/arm/boot/dts/qpic-panel-ili-qvga.dtsi b/arch/arm/boot/dts/qpic-panel-ili-qvga.dtsi
new file mode 100644
index 0000000..a0c906e
--- /dev/null
+++ b/arch/arm/boot/dts/qpic-panel-ili-qvga.dtsi
@@ -0,0 +1,27 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
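+	/* ILI9341 QVGA (240x320, 18bpp) panel driven over the QPIC LCDC interface */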
+ qcom,mdss_lcdc_ili9341_qvga {
+ compatible = "qcom,mdss-qpic-panel";
+ label = "ili qvga lcdc panel";
+ vdd-supply = <&pm8019_l11>;
+ avdd-supply = <&pm8019_l14>;
+ qcom,cs-gpio = <&msmgpio 21 0>;
+ qcom,te-gpio = <&msmgpio 22 0>;
+ qcom,rst-gpio = <&msmgpio 23 0>;
+ qcom,ad8-gpio = <&msmgpio 20 0>;
+ qcom,mdss-pan-res = <240 320>;
+ qcom,mdss-pan-bpp = <18>;
+ qcom,refresh_rate = <60>;
+ };
+};
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index dc73f4d..097e830 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -183,6 +183,17 @@
CONFIG_SYNC=y
CONFIG_SW_SYNC=y
CONFIG_CMA=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCISMD=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_MD=y
@@ -233,6 +244,8 @@
CONFIG_REGULATOR=y
CONFIG_REGULATOR_STUB=y
CONFIG_REGULATOR_QPNP=y
+CONFIG_RADIO_IRIS=y
+CONFIG_RADIO_IRIS_TRANSPORT=m
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_MSM_KGSL=y
@@ -334,6 +347,7 @@
CONFIG_CORESIGHT_FUNNEL=y
CONFIG_CORESIGHT_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_ETM=y
CONFIG_CORESIGHT_EVENT=m
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_PM_WAKELOCKS=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 224df83..663e937 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -329,6 +329,7 @@
CONFIG_MSM_CSIPHY=y
CONFIG_MSM_CSID=y
CONFIG_MSM_ISPIF=y
+CONFIG_IMX135=y
CONFIG_S5K3L1YX=y
CONFIG_MSMB_CAMERA=y
CONFIG_MSMB_JPEG=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index e42aa77..2de81d4d1 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -336,6 +336,7 @@
CONFIG_MSM_CSID=y
CONFIG_MSM_ISPIF=y
CONFIG_S5K3L1YX=y
+CONFIG_IMX135=y
CONFIG_MSMB_CAMERA=y
CONFIG_MSMB_JPEG=y
CONFIG_MSM_VIDC_V4L2=y
diff --git a/arch/arm/configs/msm9625-perf_defconfig b/arch/arm/configs/msm9625-perf_defconfig
index 6e8fe3e..c2fb1487 100644
--- a/arch/arm/configs/msm9625-perf_defconfig
+++ b/arch/arm/configs/msm9625-perf_defconfig
@@ -161,6 +161,8 @@
CONFIG_MTD_BLOCK=y
# CONFIG_MTD_MSM_NAND is not set
CONFIG_MTD_MSM_QPIC_NAND=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_ANDROID_PMEM is not set
@@ -218,6 +220,10 @@
CONFIG_REGULATOR_QPNP=y
CONFIG_ION=y
CONFIG_ION_MSM=y
+CONFIG_FB=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_QPIC=y
+CONFIG_FB_MSM_QPIC_PANEL_DETECT=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 9a32239..828eaa9 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -161,6 +161,8 @@
CONFIG_MTD_BLOCK=y
# CONFIG_MTD_MSM_NAND is not set
CONFIG_MTD_MSM_QPIC_NAND=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_ANDROID_PMEM is not set
@@ -219,6 +221,10 @@
CONFIG_REGULATOR_QPNP=y
CONFIG_ION=y
CONFIG_ION_MSM=y
+CONFIG_FB=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_QPIC=y
+CONFIG_FB_MSM_QPIC_PANEL_DETECT=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
diff --git a/arch/arm/configs/msmzinc_defconfig b/arch/arm/configs/msmzinc_defconfig
index b74b204..678b086 100644
--- a/arch/arm/configs/msmzinc_defconfig
+++ b/arch/arm/configs/msmzinc_defconfig
@@ -211,6 +211,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_SYNC=y
CONFIG_SW_SYNC=y
+CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_HAPTIC_ISA1200=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index abb222f..3cc2b52 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -318,7 +318,7 @@
* DMA region above it's default value of 2MB. It must be called before the
* memory allocator is initialised, i.e. before any core_initcall.
*/
-extern void __init init_consistent_dma_size(unsigned long size);
+static inline void init_consistent_dma_size(unsigned long size) { }
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index b65016a..f7d993f 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -463,6 +463,7 @@
select ARM_HAS_SG_CHAIN
select REGULATOR
select MSM_RPM_REGULATOR_SMD
+ select MSM_JTAG_MM if CORESIGHT_ETM
endmenu
choice
diff --git a/arch/arm/mach-msm/acpuclock-8226.c b/arch/arm/mach-msm/acpuclock-8226.c
index 8ba1b39..fcbd74d 100644
--- a/arch/arm/mach-msm/acpuclock-8226.c
+++ b/arch/arm/mach-msm/acpuclock-8226.c
@@ -98,13 +98,13 @@
drv_data.apcs_rcg_config = drv_data.apcs_rcg_cmd + 4;
- drv_data.vdd_cpu = regulator_get(&pdev->dev, "a7_cpu");
+ drv_data.vdd_cpu = devm_regulator_get(&pdev->dev, "a7_cpu");
if (IS_ERR(drv_data.vdd_cpu)) {
dev_err(&pdev->dev, "regulator for %s get failed\n", "a7_cpu");
return PTR_ERR(drv_data.vdd_cpu);
}
- drv_data.vdd_mem = regulator_get(&pdev->dev, "a7_mem");
+ drv_data.vdd_mem = devm_regulator_get(&pdev->dev, "a7_mem");
if (IS_ERR(drv_data.vdd_mem)) {
dev_err(&pdev->dev, "regulator for %s get failed\n", "a7_mem");
return PTR_ERR(drv_data.vdd_mem);
diff --git a/arch/arm/mach-msm/acpuclock-9625.c b/arch/arm/mach-msm/acpuclock-9625.c
index b439088..34952fb 100644
--- a/arch/arm/mach-msm/acpuclock-9625.c
+++ b/arch/arm/mach-msm/acpuclock-9625.c
@@ -105,13 +105,13 @@
if (!drv_data.apcs_cpu_pwr_ctl)
return -ENOMEM;
- drv_data.vdd_cpu = regulator_get(&pdev->dev, "a5_cpu");
+ drv_data.vdd_cpu = devm_regulator_get(&pdev->dev, "a5_cpu");
if (IS_ERR(drv_data.vdd_cpu)) {
dev_err(&pdev->dev, "regulator for %s get failed\n", "a5_cpu");
return PTR_ERR(drv_data.vdd_cpu);
}
- drv_data.vdd_mem = regulator_get(&pdev->dev, "a5_mem");
+ drv_data.vdd_mem = devm_regulator_get(&pdev->dev, "a5_mem");
if (IS_ERR(drv_data.vdd_mem)) {
dev_err(&pdev->dev, "regulator for %s get failed\n", "a5_mem");
return PTR_ERR(drv_data.vdd_mem);
diff --git a/arch/arm/mach-msm/acpuclock-cortex.c b/arch/arm/mach-msm/acpuclock-cortex.c
index 9104f98..88bf919 100644
--- a/arch/arm/mach-msm/acpuclock-cortex.c
+++ b/arch/arm/mach-msm/acpuclock-cortex.c
@@ -40,7 +40,7 @@
#define POLL_INTERVAL_US 1
#define APCS_RCG_UPDATE_TIMEOUT_US 20
-static struct acpuclk_drv_data *acpuclk_init_data;
+static struct acpuclk_drv_data *priv;
static uint32_t bus_perf_client;
/* Update the bus bandwidth request. */
@@ -48,7 +48,7 @@
{
int ret;
- if (bw >= acpuclk_init_data->bus_scale->num_usecases) {
+ if (bw >= priv->bus_scale->num_usecases) {
pr_err("invalid bandwidth request (%d)\n", bw);
return;
}
@@ -67,15 +67,13 @@
int rc = 0;
/* Increase vdd_mem before vdd_cpu. vdd_mem should be >= vdd_cpu. */
- rc = regulator_set_voltage(acpuclk_init_data->vdd_mem, vdd_mem,
- acpuclk_init_data->vdd_max_mem);
+ rc = regulator_set_voltage(priv->vdd_mem, vdd_mem, priv->vdd_max_mem);
if (rc) {
pr_err("vdd_mem increase failed (%d)\n", rc);
return rc;
}
- rc = regulator_set_voltage(acpuclk_init_data->vdd_cpu, vdd_cpu,
- acpuclk_init_data->vdd_max_cpu);
+ rc = regulator_set_voltage(priv->vdd_cpu, vdd_cpu, priv->vdd_max_cpu);
if (rc)
pr_err("vdd_cpu increase failed (%d)\n", rc);
@@ -88,16 +86,14 @@
int ret;
/* Update CPU voltage. */
- ret = regulator_set_voltage(acpuclk_init_data->vdd_cpu, vdd_cpu,
- acpuclk_init_data->vdd_max_cpu);
+ ret = regulator_set_voltage(priv->vdd_cpu, vdd_cpu, priv->vdd_max_cpu);
if (ret) {
pr_err("vdd_cpu decrease failed (%d)\n", ret);
return;
}
/* Decrease vdd_mem after vdd_cpu. vdd_mem should be >= vdd_cpu. */
- ret = regulator_set_voltage(acpuclk_init_data->vdd_mem, vdd_mem,
- acpuclk_init_data->vdd_max_mem);
+ ret = regulator_set_voltage(priv->vdd_mem, vdd_mem, priv->vdd_max_mem);
if (ret)
pr_err("vdd_mem decrease failed (%d)\n", ret);
}
@@ -142,14 +138,14 @@
{
int rc = 0;
unsigned int tgt_freq_hz = tgt_s->khz * 1000;
- struct clkctl_acpu_speed *strt_s = acpuclk_init_data->current_speed;
- struct clkctl_acpu_speed *cxo_s = &acpuclk_init_data->freq_tbl[0];
- struct clk *strt = acpuclk_init_data->src_clocks[strt_s->src].clk;
- struct clk *tgt = acpuclk_init_data->src_clocks[tgt_s->src].clk;
+ struct clkctl_acpu_speed *strt_s = priv->current_speed;
+ struct clkctl_acpu_speed *cxo_s = &priv->freq_tbl[0];
+ struct clk *strt = priv->src_clocks[strt_s->src].clk;
+ struct clk *tgt = priv->src_clocks[tgt_s->src].clk;
if (strt_s->src == ACPUPLL && tgt_s->src == ACPUPLL) {
/* Switch to another always on src */
- select_clk_source_div(acpuclk_init_data, cxo_s);
+ select_clk_source_div(priv, cxo_s);
/* Re-program acpu pll */
if (atomic)
@@ -167,7 +163,7 @@
BUG_ON(clk_prepare_enable(tgt));
/* Switch back to acpu pll */
- select_clk_source_div(acpuclk_init_data, tgt_s);
+ select_clk_source_div(priv, tgt_s);
} else if (strt_s->src != ACPUPLL && tgt_s->src == ACPUPLL) {
rc = clk_set_rate(tgt, tgt_freq_hz);
@@ -177,16 +173,16 @@
}
if (atomic)
- clk_enable(tgt);
+ rc = clk_enable(tgt);
else
- clk_prepare_enable(tgt);
+ rc = clk_prepare_enable(tgt);
if (rc) {
pr_err("ACPU PLL enable failed\n");
return rc;
}
- select_clk_source_div(acpuclk_init_data, tgt_s);
+ select_clk_source_div(priv, tgt_s);
if (atomic)
clk_disable(strt);
@@ -195,17 +191,17 @@
} else {
if (atomic)
- clk_enable(tgt);
+ rc = clk_enable(tgt);
else
- clk_prepare_enable(tgt);
+ rc = clk_prepare_enable(tgt);
if (rc) {
pr_err("%s enable failed\n",
- acpuclk_init_data->src_clocks[tgt_s->src].name);
+ priv->src_clocks[tgt_s->src].name);
return rc;
}
- select_clk_source_div(acpuclk_init_data, tgt_s);
+ select_clk_source_div(priv, tgt_s);
if (atomic)
clk_disable(strt);
@@ -224,16 +220,16 @@
int rc = 0;
if (reason == SETRATE_CPUFREQ)
- mutex_lock(&acpuclk_init_data->lock);
+ mutex_lock(&priv->lock);
- strt_s = acpuclk_init_data->current_speed;
+ strt_s = priv->current_speed;
/* Return early if rate didn't change */
if (rate == strt_s->khz)
goto out;
/* Find target frequency */
- for (tgt_s = acpuclk_init_data->freq_tbl; tgt_s->khz != 0; tgt_s++)
+ for (tgt_s = priv->freq_tbl; tgt_s->khz != 0; tgt_s++)
if (tgt_s->khz == rate)
break;
if (tgt_s->khz == 0) {
@@ -261,7 +257,7 @@
if (rc)
goto out;
- acpuclk_init_data->current_speed = tgt_s;
+ priv->current_speed = tgt_s;
pr_debug("CPU speed change complete\n");
/* Nothing else to do for SWFI or power-collapse. */
@@ -277,13 +273,13 @@
out:
if (reason == SETRATE_CPUFREQ)
- mutex_unlock(&acpuclk_init_data->lock);
+ mutex_unlock(&priv->lock);
return rc;
}
static unsigned long acpuclk_cortex_get_rate(int cpu)
{
- return acpuclk_init_data->current_speed->khz;
+ return priv->current_speed->khz;
}
#ifdef CONFIG_CPU_FREQ_MSM
@@ -293,18 +289,17 @@
{
int i, freq_cnt = 0;
- /* Construct the freq_table tables from acpuclk_init_data->freq_tbl. */
- for (i = 0; acpuclk_init_data->freq_tbl[i].khz != 0
+ /* Construct the freq_table tables from priv->freq_tbl. */
+ for (i = 0; priv->freq_tbl[i].khz != 0
&& freq_cnt < ARRAY_SIZE(freq_table); i++) {
- if (!acpuclk_init_data->freq_tbl[i].use_for_scaling)
+ if (!priv->freq_tbl[i].use_for_scaling)
continue;
freq_table[freq_cnt].index = freq_cnt;
- freq_table[freq_cnt].frequency =
- acpuclk_init_data->freq_tbl[i].khz;
+ freq_table[freq_cnt].frequency = priv->freq_tbl[i].khz;
freq_cnt++;
}
/* freq_table not big enough to store all usable freqs. */
- BUG_ON(acpuclk_init_data->freq_tbl[i].khz != 0);
+ BUG_ON(priv->freq_tbl[i].khz != 0);
freq_table[freq_cnt].index = freq_cnt;
freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;
@@ -332,43 +327,40 @@
unsigned long max_cpu_khz = 0;
int i, rc;
- acpuclk_init_data = data;
- mutex_init(&acpuclk_init_data->lock);
+ priv = data;
+ mutex_init(&priv->lock);
- bus_perf_client = msm_bus_scale_register_client(
- acpuclk_init_data->bus_scale);
+ bus_perf_client = msm_bus_scale_register_client(priv->bus_scale);
if (!bus_perf_client) {
pr_err("Unable to register bus client\n");
BUG();
}
for (i = 0; i < NUM_SRC; i++) {
- if (!acpuclk_init_data->src_clocks[i].name)
+ if (!priv->src_clocks[i].name)
continue;
- acpuclk_init_data->src_clocks[i].clk =
- clk_get(&pdev->dev,
- acpuclk_init_data->src_clocks[i].name);
- BUG_ON(IS_ERR(acpuclk_init_data->src_clocks[i].clk));
+ priv->src_clocks[i].clk =
+ devm_clk_get(&pdev->dev, priv->src_clocks[i].name);
+ BUG_ON(IS_ERR(priv->src_clocks[i].clk));
}
/* Improve boot time by ramping up CPU immediately */
- for (i = 0; acpuclk_init_data->freq_tbl[i].khz != 0; i++)
- if (acpuclk_init_data->freq_tbl[i].use_for_scaling)
- max_cpu_khz = acpuclk_init_data->freq_tbl[i].khz;
+ for (i = 0; priv->freq_tbl[i].khz != 0; i++)
+ if (priv->freq_tbl[i].use_for_scaling)
+ max_cpu_khz = priv->freq_tbl[i].khz;
/* Initialize regulators */
- rc = increase_vdd(acpuclk_init_data->vdd_max_cpu,
- acpuclk_init_data->vdd_max_mem);
+ rc = increase_vdd(priv->vdd_max_cpu, priv->vdd_max_mem);
if (rc)
goto err_vdd;
- rc = regulator_enable(acpuclk_init_data->vdd_mem);
+ rc = regulator_enable(priv->vdd_mem);
if (rc) {
dev_err(&pdev->dev, "regulator_enable for mem failed\n");
goto err_vdd;
}
- rc = regulator_enable(acpuclk_init_data->vdd_cpu);
+ rc = regulator_enable(priv->vdd_cpu);
if (rc) {
dev_err(&pdev->dev, "regulator_enable for cpu failed\n");
goto err_vdd_cpu;
@@ -388,15 +380,7 @@
return 0;
err_vdd_cpu:
- regulator_disable(acpuclk_init_data->vdd_mem);
+ regulator_disable(priv->vdd_mem);
err_vdd:
- regulator_put(acpuclk_init_data->vdd_mem);
- regulator_put(acpuclk_init_data->vdd_cpu);
-
- for (i = 0; i < NUM_SRC; i++) {
- if (!acpuclk_init_data->src_clocks[i].name)
- continue;
- clk_put(acpuclk_init_data->src_clocks[i].clk);
- }
return rc;
}
diff --git a/arch/arm/mach-msm/acpuclock-krait-debug.c b/arch/arm/mach-msm/acpuclock-krait-debug.c
index a29735e..f11b9fc 100644
--- a/arch/arm/mach-msm/acpuclock-krait-debug.c
+++ b/arch/arm/mach-msm/acpuclock-krait-debug.c
@@ -249,6 +249,56 @@
}
DEFINE_SIMPLE_ATTRIBUTE(boost_fops, boost_get, NULL, "%lld\n");
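+/*
+ * seq_file show handler that dumps the ACPU frequency table to debugfs,
+ * skipping entries that are not used for scaling.
+ */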
+static int acpu_table_show(struct seq_file *m, void *unused)
+{
+ const struct acpu_level *level;
+
+ seq_printf(m, "CPU_KHz PLL_L_Val L2_KHz VDD_Dig VDD_Mem ");
+ seq_printf(m, "BW_Mbps VDD_Core UA_Core AVS\n");
+
+ for (level = drv->acpu_freq_tbl; level->speed.khz != 0; level++) {
+
+ const struct l2_level *l2 =
+ &drv->l2_freq_tbl[level->l2_level];
+ u32 bw = drv->bus_scale->usecase[l2->bw_level].vectors[0].ib;
+
+ if (!level->use_for_scaling)
+ continue;
+
+ /* CPU speed information */
+ seq_printf(m, "%7lu %9u ",
+ level->speed.khz,
+ level->speed.pll_l_val);
+
+ /* L2 level information */
+ seq_printf(m, "%7lu %7d %7d %7u ",
+ l2->speed.khz,
+ l2->vdd_dig,
+ l2->vdd_mem,
+ bw / 1000000);
+
+ /* Core voltage information */
+ seq_printf(m, "%8d %7d %3d\n",
+ level->vdd_core,
+ level->ua_core,
+ level->avsdscr_setting);
+ }
+
+ return 0;
+}
+
+static int acpu_table_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpu_table_show, inode->i_private);
+}
+
+static const struct file_operations acpu_table_fops = {
+ .open = acpu_table_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static void __cpuinit add_scalable_dir(int sc_id)
{
char sc_name[8];
@@ -326,6 +376,8 @@
&speed_bin_fops);
debugfs_create_file("pvs_bin", S_IRUGO, base_dir, NULL, &pvs_bin_fops);
debugfs_create_file("boost_uv", S_IRUGO, base_dir, NULL, &boost_fops);
+ debugfs_create_file("acpu_table", S_IRUGO, base_dir, NULL,
+ &acpu_table_fops);
for_each_online_cpu(cpu)
add_scalable_dir(cpu);
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index 9566cea..a6f4423 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -44,8 +44,6 @@
#define PRI_SRC_SEL_HFPLL 1
#define PRI_SRC_SEL_HFPLL_DIV2 2
-#define SECCLKAGD BIT(4)
-
static DEFINE_MUTEX(driver_lock);
static DEFINE_SPINLOCK(l2_lock);
@@ -75,20 +73,10 @@
{
u32 regval;
- /* 8064 Errata: disable sec_src clock gating during switch. */
regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
- regval |= SECCLKAGD;
- set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
-
- /* Program the MUX */
regval &= ~(0x3 << 2);
regval |= ((sec_src_sel & 0x3) << 2);
set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
-
- /* 8064 Errata: re-enabled sec_src clock gating. */
- regval &= ~SECCLKAGD;
- set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
-
/* Wait for switch to complete. */
mb();
udelay(1);
@@ -620,9 +608,8 @@
writel_relaxed(drv.hfpll_data->droop_val,
sc->hfpll_base + drv.hfpll_data->droop_offset);
- /* Set an initial rate and enable the PLL. */
+ /* Set an initial PLL rate. */
hfpll_set_rate(sc, tgt_s);
- hfpll_enable(sc, false);
}
static int __cpuinit rpm_regulator_init(struct scalable *sc, enum vregs vreg,
@@ -793,7 +780,9 @@
regval &= ~(0x3 << 6);
set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
- /* Switch to the target clock source. */
+ /* Enable and switch to the target clock source. */
+ if (tgt_s->src == HFPLL)
+ hfpll_enable(sc, false);
set_pri_clk_src(sc, tgt_s->pri_src_sel);
sc->cur_speed = tgt_s;
@@ -922,11 +911,12 @@
static void __init cpufreq_table_init(void)
{
int cpu;
+ int freq_cnt = 0;
for_each_possible_cpu(cpu) {
- int i, freq_cnt = 0;
+ int i;
/* Construct the freq_table tables from acpu_freq_tbl. */
- for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0
+ for (i = 0, freq_cnt = 0; drv.acpu_freq_tbl[i].speed.khz != 0
&& freq_cnt < ARRAY_SIZE(*freq_table); i++) {
if (drv.acpu_freq_tbl[i].use_for_scaling) {
freq_table[cpu][freq_cnt].index = freq_cnt;
@@ -941,12 +931,11 @@
freq_table[cpu][freq_cnt].index = freq_cnt;
freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;
- dev_info(drv.dev, "CPU%d: %d frequencies supported\n",
- cpu, freq_cnt);
-
/* Register table with CPUFreq. */
cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
}
+
+ dev_info(drv.dev, "CPU Frequencies Supported: %d\n", freq_cnt);
}
#else
static void __init cpufreq_table_init(void) {}
diff --git a/arch/arm/mach-msm/board-8226-gpiomux.c b/arch/arm/mach-msm/board-8226-gpiomux.c
index e8e75df..2b70e7c 100644
--- a/arch/arm/mach-msm/board-8226-gpiomux.c
+++ b/arch/arm/mach-msm/board-8226-gpiomux.c
@@ -90,6 +90,18 @@
.pull = GPIOMUX_PULL_DOWN,
};
+static struct gpiomux_setting wcnss_5wire_suspend_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting wcnss_5wire_active_cfg = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
static struct gpiomux_setting gpio_i2c_config = {
.func = GPIOMUX_FUNC_3,
.drv = GPIOMUX_DRV_2MA,
@@ -239,6 +251,43 @@
},
};
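+/*
+ * WCNSS 5-wire interface (GPIOs 40-44): function 1 with pull-down while
+ * active, plain GPIO with pull-up when suspended.
+ */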
+static struct msm_gpiomux_config wcnss_5wire_interface[] = {
+ {
+ .gpio = 40,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 41,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 42,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 43,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 44,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+ },
+ },
+};
void __init msm8226_init_gpiomux(void)
{
int rc;
@@ -256,6 +305,8 @@
ARRAY_SIZE(msm_keypad_configs));
msm_gpiomux_install(msm_blsp_configs, ARRAY_SIZE(msm_blsp_configs));
+ msm_gpiomux_install(wcnss_5wire_interface,
+ ARRAY_SIZE(wcnss_5wire_interface));
msm_gpiomux_install(&sd_card_det, 1);
msm_gpiomux_install(msm_synaptics_configs,
diff --git a/arch/arm/mach-msm/board-8610.c b/arch/arm/mach-msm/board-8610.c
index 2723e20..5f5366f 100644
--- a/arch/arm/mach-msm/board-8610.c
+++ b/arch/arm/mach-msm/board-8610.c
@@ -49,6 +49,7 @@
#include "platsmp.h"
#include "spm.h"
#include "lpm_resources.h"
+#include "modem_notifier.h"
static struct memtype_reserve msm8610_reserve_table[] __initdata = {
[MEMTYPE_SMI] = {
@@ -94,6 +95,8 @@
void __init msm8610_add_drivers(void)
{
+ msm_init_modem_notifier_list();
+ msm_smd_init();
msm_rpm_driver_init();
msm_lpmrs_module_init();
msm_spm_device_init();
diff --git a/arch/arm/mach-msm/board-9625-gpiomux.c b/arch/arm/mach-msm/board-9625-gpiomux.c
index 1b76441..75aaaec 100644
--- a/arch/arm/mach-msm/board-9625-gpiomux.c
+++ b/arch/arm/mach-msm/board-9625-gpiomux.c
@@ -276,6 +276,57 @@
},
};
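+/*
+ * QPIC LCDC pin settings for the address/data, chip-select, register-select
+ * and tearing-enable lines (GPIOs 20-23), applied in the suspended state.
+ */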
+static struct gpiomux_setting qpic_lcdc_a_d = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_10MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting qpic_lcdc_cs = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_10MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting qpic_lcdc_rs = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_10MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting qpic_lcdc_te = {
+ .func = GPIOMUX_FUNC_7,
+ .drv = GPIOMUX_DRV_10MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct msm_gpiomux_config msm9625_qpic_lcdc_configs[] __initdata = {
+ {
+ .gpio = 20, /* a_d */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &qpic_lcdc_a_d,
+ },
+ },
+ {
+ .gpio = 21, /* cs */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &qpic_lcdc_cs,
+ },
+ },
+ {
+ .gpio = 22, /* te */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &qpic_lcdc_te,
+ },
+ },
+ {
+ .gpio = 23, /* rs */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &qpic_lcdc_rs,
+ },
+ },
+};
+
void __init msm9625_init_gpiomux(void)
{
int rc;
@@ -296,4 +347,7 @@
ARRAY_SIZE(mdm9625_cdc_reset_config));
msm_gpiomux_install(sdc2_card_det_config,
ARRAY_SIZE(sdc2_card_det_config));
+ msm_gpiomux_install(msm9625_qpic_lcdc_configs,
+ ARRAY_SIZE(msm9625_qpic_lcdc_configs));
}
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 2899d93..cc478da 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -1668,7 +1668,7 @@
F_MMSS( 100000000, gpll0, 6, 0, 0),
F_MMSS( 150000000, gpll0, 4, 0, 0),
F_MMSS( 200000000, mmpll0_pll, 4, 0, 0),
- F_MMSS( 266000000, mmpll0_pll, 3, 0, 0),
+ F_MMSS( 266666666, mmpll0_pll, 3, 0, 0),
F_END
};
@@ -1820,14 +1820,17 @@
CLK_INIT(dsipll0_pixel_clk_src),
};
-static struct clk_freq_tbl pixel_freq = {
- .src_clk = &dsipll0_pixel_clk_src,
- .div_src_val = BVAL(10, 8, dsipll0_pixel_mm_source_val),
+static struct clk_freq_tbl pixel_freq_tbl[] = {
+ {
+ .src_clk = &dsipll0_pixel_clk_src,
+ .div_src_val = BVAL(10, 8, dsipll0_pixel_mm_source_val),
+ },
+ F_END
};
static struct rcg_clk pclk0_clk_src = {
.cmd_rcgr_reg = PCLK0_CMD_RCGR,
- .current_freq = &pixel_freq,
+ .current_freq = pixel_freq_tbl,
.base = &virt_bases[MMSS_BASE],
.c = {
.parent = &dsipll0_pixel_clk_src,
@@ -1919,6 +1922,8 @@
};
static struct clk_freq_tbl ftbl_camss_mclk0_1_clk[] = {
+ F_MMSS( 19200000, xo, 1, 0, 0),
+ F_MMSS( 24000000, gpll0, 5, 1, 5),
F_MMSS( 66670000, gpll0, 9, 0, 0),
F_END
};
@@ -2007,14 +2012,17 @@
},
};
-static struct clk_freq_tbl byte_freq = {
- .src_clk = &dsipll0_byte_clk_src,
- .div_src_val = BVAL(10, 8, dsipll0_byte_mm_source_val),
+static struct clk_freq_tbl byte_freq_tbl[] = {
+ {
+ .src_clk = &dsipll0_byte_clk_src,
+ .div_src_val = BVAL(10, 8, dsipll0_byte_mm_source_val),
+ },
+ F_END
};
static struct rcg_clk byte0_clk_src = {
.cmd_rcgr_reg = BYTE0_CMD_RCGR,
- .current_freq = &byte_freq,
+ .current_freq = byte_freq_tbl,
.base = &virt_bases[MMSS_BASE],
.c = {
.parent = &dsipll0_byte_clk_src,
@@ -3062,7 +3070,8 @@
CLK_LOOKUP("a7sspll", a7sspll.c, "f9011050.qcom,acpuclk"),
/* WCNSS CLOCKS */
- CLK_LOOKUP("xo", xo.c, "fb000000.qcom,wcnss-wlan"),
+ CLK_LOOKUP("xo", xo.c, "fb000000.qcom,wcnss-wlan"),
+ CLK_LOOKUP("rf_clk", cxo_a2.c, "fb000000.qcom,wcnss-wlan"),
/* BUS DRIVER */
CLK_LOOKUP("bus_clk", cnoc_msmbus_clk.c, "msm_config_noc"),
@@ -3098,6 +3107,20 @@
CLK_LOOKUP("core_clk", qdss_clk.c, "fc33d000.jtagmm"),
CLK_LOOKUP("core_clk", qdss_clk.c, "fc33e000.jtagmm"),
CLK_LOOKUP("core_clk", qdss_clk.c, "fc33f000.jtagmm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc308000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc309000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30a000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30b000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30c000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30d000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30e000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30f000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc310000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc340000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc341000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc342000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc343000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc344000.cti"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc322000.tmc"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc318000.tpiu"),
@@ -3117,6 +3140,20 @@
CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc33d000.jtagmm"),
CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc33e000.jtagmm"),
CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc33f000.jtagmm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc308000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc309000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30a000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30b000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30c000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30d000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30e000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30f000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc310000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc340000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc341000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc342000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc343000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc344000.cti"),
/* HSUSB-OTG Clocks */
CLK_LOOKUP("xo", xo.c, "f9a55000.usb"),
@@ -3151,6 +3188,11 @@
CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "qseecom"),
CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "qseecom"),
+ CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "scm"),
+ CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "scm"),
+ CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "scm"),
+ CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "scm"),
+
/* SDCC */
CLK_LOOKUP("iface_clk", gcc_sdcc1_ahb_clk.c, "f9824000.qcom,sdcc"),
CLK_LOOKUP("core_clk", gcc_sdcc1_apps_clk.c, "f9824000.qcom,sdcc"),
@@ -3404,6 +3446,12 @@
static void __init msm8226_clock_post_init(void)
{
+ /*
+ * Hold an active set vote for CXO; this is because CXO is expected
+ * to remain on whenever CPUs aren't power collapsed.
+ */
+ clk_prepare_enable(&xo_a_clk.c);
+
/* Set rates for single-rate clocks. */
clk_set_rate(&usb_hs_system_clk_src.c,
usb_hs_system_clk_src.freq_tbl[0].freq_hz);
@@ -3489,11 +3537,8 @@
*/
clk_set_rate(&mmssnoc_ahb_a_clk.c, 40000000);
- /*
- * Hold an active set vote for CXO; this is because CXO is expected
- * to remain on whenever CPUs aren't power collapsed.
- */
- clk_prepare_enable(&xo_a_clk.c);
+ /* Set an initial rate (fmax at nominal) on the MMSSNOC AXI clock */
+ clk_set_rate(&axi_clk_src.c, 200000000);
enable_rpm_scaling();
diff --git a/arch/arm/mach-msm/clock-8610.c b/arch/arm/mach-msm/clock-8610.c
index 0aee878..c517c10 100644
--- a/arch/arm/mach-msm/clock-8610.c
+++ b/arch/arm/mach-msm/clock-8610.c
@@ -3016,7 +3016,12 @@
static struct clk_lookup msm_clocks_8610[] = {
CLK_LOOKUP("xo", gcc_xo_clk_src.c, "msm_otg"),
CLK_LOOKUP("xo", gcc_xo_clk_src.c, "fe200000.qcom,lpass"),
- CLK_LOOKUP("xo", gcc_xo_clk_src.c, "pil-q6v5-mss"),
+
+ CLK_LOOKUP("xo", gcc_xo_clk_src.c, "fc880000.qcom,mss"),
+ CLK_LOOKUP("bus_clk", gcc_mss_q6_bimc_axi_clk.c, "fc880000.qcom,mss"),
+ CLK_LOOKUP("iface_clk", gcc_mss_cfg_ahb_clk.c, "fc880000.qcom,mss"),
+ CLK_LOOKUP("mem_clk", gcc_boot_rom_ahb_clk.c, "fc880000.qcom,mss"),
+
CLK_LOOKUP("xo", gcc_xo_clk_src.c, "pil-mba"),
CLK_LOOKUP("xo", gcc_xo_clk_src.c, "fb000000.qcom,wcnss-wlan"),
CLK_LOOKUP("xo", gcc_xo_clk_src.c, "fb21b000.qcom,pronto"),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 0657c21..90f11c5 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -3026,14 +3026,17 @@
CLK_INIT(dsipll0_pixel_clk_src),
};
-static struct clk_freq_tbl byte_freq = {
- .src_clk = &dsipll0_byte_clk_src,
- .div_src_val = BVAL(10, 8, dsipll0_byte_mm_source_val),
+static struct clk_freq_tbl byte_freq_tbl[] = {
+ {
+ .src_clk = &dsipll0_byte_clk_src,
+ .div_src_val = BVAL(10, 8, dsipll0_byte_mm_source_val),
+ },
+ F_END
};
static struct rcg_clk byte0_clk_src = {
.cmd_rcgr_reg = BYTE0_CMD_RCGR,
- .current_freq = &byte_freq,
+ .current_freq = byte_freq_tbl,
.base = &virt_bases[MMSS_BASE],
.c = {
.parent = &dsipll0_byte_clk_src,
@@ -3047,7 +3050,7 @@
static struct rcg_clk byte1_clk_src = {
.cmd_rcgr_reg = BYTE1_CMD_RCGR,
- .current_freq = &byte_freq,
+ .current_freq = byte_freq_tbl,
.base = &virt_bases[MMSS_BASE],
.c = {
.parent = &dsipll0_byte_clk_src,
@@ -3228,14 +3231,17 @@
},
};
-static struct clk_freq_tbl pixel_freq = {
- .src_clk = &dsipll0_pixel_clk_src,
- .div_src_val = BVAL(10, 8, dsipll0_pixel_mm_source_val),
+static struct clk_freq_tbl pixel_freq_tbl[] = {
+ {
+ .src_clk = &dsipll0_pixel_clk_src,
+ .div_src_val = BVAL(10, 8, dsipll0_pixel_mm_source_val),
+ },
+ F_END
};
static struct rcg_clk pclk0_clk_src = {
.cmd_rcgr_reg = PCLK0_CMD_RCGR,
- .current_freq = &pixel_freq,
+ .current_freq = pixel_freq_tbl,
.base = &virt_bases[MMSS_BASE],
.c = {
.parent = &dsipll0_pixel_clk_src,
@@ -3248,7 +3254,7 @@
static struct rcg_clk pclk1_clk_src = {
.cmd_rcgr_reg = PCLK1_CMD_RCGR,
- .current_freq = &pixel_freq,
+ .current_freq = pixel_freq_tbl,
.base = &virt_bases[MMSS_BASE],
.c = {
.parent = &dsipll0_pixel_clk_src,
@@ -4739,6 +4745,7 @@
CLK_LOOKUP("xo", cxo_pil_lpass_clk.c, "fe200000.qcom,lpass"),
CLK_LOOKUP("xo", cxo_pil_mss_clk.c, "fc880000.qcom,mss"),
CLK_LOOKUP("xo", cxo_wlan_clk.c, "fb000000.qcom,wcnss-wlan"),
+ CLK_LOOKUP("rf_clk", cxo_a2.c, "fb000000.qcom,wcnss-wlan"),
CLK_LOOKUP("xo", cxo_pil_pronto_clk.c, "fb21b000.qcom,pronto"),
CLK_LOOKUP("xo", cxo_dwc3_clk.c, "msm_dwc3"),
CLK_LOOKUP("xo", cxo_ehci_host_clk.c, "msm_ehci_host"),
@@ -4815,6 +4822,11 @@
CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "qseecom"),
CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "qseecom"),
+ CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "scm"),
+ CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "scm"),
+ CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "scm"),
+ CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "scm"),
+
CLK_LOOKUP("core_clk", gcc_gp1_clk.c, ""),
CLK_LOOKUP("core_clk", gcc_gp2_clk.c, ""),
CLK_LOOKUP("core_clk", gcc_gp3_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index a173ba9..5da1663 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -560,7 +560,11 @@
return to_rcg_clk(c)->enabled;
}
-/* Return a supported rate that's at least the specified rate. */
+/*
+ * Return a supported rate that is at least the specified rate, or the
+ * maximum supported rate if the specified rate is higher than any
+ * supported rate.
+ */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
struct rcg_clk *rcg = to_rcg_clk(c);
@@ -570,7 +574,8 @@
if (f->freq_hz >= rate)
return f->freq_hz;
- return -EPERM;
+ f--;
+ return f->freq_hz;
}
/* Return the nth supported frequency for a given clock. */
diff --git a/arch/arm/mach-msm/clock-local2.c b/arch/arm/mach-msm/clock-local2.c
index c3b4ca7..2879b49 100644
--- a/arch/arm/mach-msm/clock-local2.c
+++ b/arch/arm/mach-msm/clock-local2.c
@@ -211,7 +211,11 @@
return 0;
}
-/* Return a supported rate that's at least the specified rate. */
+/*
+ * Return a supported rate that is at least the specified rate, or the
+ * maximum supported rate if the specified rate is higher than any
+ * supported rate.
+ */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
struct rcg_clk *rcg = to_rcg_clk(c);
@@ -221,7 +225,8 @@
if (f->freq_hz >= rate)
return f->freq_hz;
- return -EPERM;
+ f--;
+ return f->freq_hz;
}
/* Return the nth supported frequency for a given clock. */
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index c82058b..d2be1f9 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -529,6 +529,18 @@
BUG();
}
+ if (!pll_clk_is_enabled(c))
+ return HANDOFF_DISABLED_CLK;
+
+ /*
+ * Do not call pll_clk_enable() since that function can assume
+ * the PLL is not in use when it's called.
+ */
+ remote_spin_lock(&pll_lock);
+ pll_control->pll[PLL_BASE + pll->id].votes |= BIT(1);
+ pll_control->pll[PLL_BASE + pll->id].on = 1;
+ remote_spin_unlock(&pll_lock);
+
return HANDOFF_ENABLED_CLK;
}
@@ -572,10 +584,22 @@
spin_unlock_irqrestore(&soft_vote_lock, flags);
}
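+/*
+ * Handoff for ACPU-voted PLLs: if the hardware reports the PLL enabled at
+ * boot, take a software vote for it so it is not disabled prematurely.
+ */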
+static enum handoff pll_acpu_vote_clk_handoff(struct clk *c)
+{
+ if (pll_vote_clk_handoff(c) == HANDOFF_DISABLED_CLK)
+ return HANDOFF_DISABLED_CLK;
+
+ if (pll_acpu_vote_clk_enable(c))
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
struct clk_ops clk_ops_pll_acpu_vote = {
.enable = pll_acpu_vote_clk_enable,
.disable = pll_acpu_vote_clk_disable,
.is_enabled = pll_vote_clk_is_enabled,
+ .handoff = pll_acpu_vote_clk_handoff,
};
static void __init __set_fsm_mode(void __iomem *mode_reg,
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index 2010abb..cc03c48 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -652,6 +652,8 @@
int teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+int teth_bridge_set_aggr_params(struct teth_aggr_params *aggr_params);
+
#else /* CONFIG_IPA */
static inline int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
@@ -1073,6 +1075,12 @@
return -EPERM;
}
+static inline int teth_bridge_set_aggr_params(struct teth_aggr_params
+ *aggr_params)
+{
+ return -EPERM;
+}
+
#endif /* CONFIG_IPA*/
#endif /* _IPA_H_ */
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index fff7fa4..56c4afd 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -93,9 +93,9 @@
#endif
#define MAX_HOLE_ADDRESS (PHYS_OFFSET + 0x10000000)
-extern unsigned long memory_hole_offset;
-extern unsigned long memory_hole_start;
-extern unsigned long memory_hole_end;
+extern phys_addr_t memory_hole_offset;
+extern phys_addr_t memory_hole_start;
+extern phys_addr_t memory_hole_end;
extern unsigned long memory_hole_align;
extern unsigned long virtual_hole_start;
extern unsigned long virtual_hole_end;
@@ -107,11 +107,13 @@
memory_hole_align)
#define __phys_to_virt(phys) \
+ (unsigned long)\
((MEM_HOLE_END_PHYS_OFFSET && ((phys) >= MEM_HOLE_END_PHYS_OFFSET)) ? \
(phys) - MEM_HOLE_END_PHYS_OFFSET + MEM_HOLE_PAGE_OFFSET : \
(phys) - PHYS_OFFSET + PAGE_OFFSET)
#define __virt_to_phys(virt) \
+ (unsigned long)\
((MEM_HOLE_END_PHYS_OFFSET && ((virt) >= MEM_HOLE_PAGE_OFFSET)) ? \
(virt) - MEM_HOLE_PAGE_OFFSET + MEM_HOLE_END_PHYS_OFFSET : \
(virt) - PAGE_OFFSET + PHYS_OFFSET)
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-zinc.h b/arch/arm/mach-msm/include/mach/msm_iomap-zinc.h
index 8456445..8283622 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-zinc.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-zinc.h
@@ -34,9 +34,6 @@
#define MSMZINC_TLMM_PHYS 0xFD510000
#define MSMZINC_TLMM_SIZE SZ_16K
-#define MSMZINC_IMEM_PHYS 0xFC42B000
-#define MSMZINC_IMEM_SIZE SZ_4K
-
#ifdef CONFIG_DEBUG_MSMZINC_UART
#define MSM_DEBUG_UART_BASE IOMEM(0xFA71E000)
#define MSM_DEBUG_UART_PHYS 0xF991E000
diff --git a/arch/arm/mach-msm/include/mach/scm.h b/arch/arm/mach-msm/include/mach/scm.h
index 8a06fe3..0cc7bbf 100644
--- a/arch/arm/mach-msm/include/mach/scm.h
+++ b/arch/arm/mach-msm/include/mach/scm.h
@@ -21,7 +21,7 @@
#define SCM_SVC_SSD 0x7
#define SCM_SVC_FUSE 0x8
#define SCM_SVC_PWR 0x9
-#define SCM_SVC_CP 0xC
+#define SCM_SVC_MP 0xC
#define SCM_SVC_DCVS 0xD
#define SCM_SVC_TZSCHEDULER 0xFC
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index c7b47a5..c71a79a 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -328,7 +328,6 @@
MSM_CHIP_DEVICE(QGIC_DIST, MSMZINC),
MSM_CHIP_DEVICE(QGIC_CPU, MSMZINC),
MSM_CHIP_DEVICE(TLMM, MSMZINC),
- MSM_CHIP_DEVICE(IMEM, MSMZINC),
{
.virtual = (unsigned long) MSM_SHARED_RAM_BASE,
.length = MSM_SHARED_RAM_SIZE,
@@ -343,6 +342,7 @@
{
msm_shared_ram_phys = MSMZINC_SHARED_RAM_PHYS;
msm_map_io(msm_zinc_io_desc, ARRAY_SIZE(msm_zinc_io_desc));
+ of_scan_flat_dt(msm_scan_dt_map_imem, NULL);
}
#endif /* CONFIG_ARCH_MSMZINC */
diff --git a/arch/arm/mach-msm/jtag-mm.c b/arch/arm/mach-msm/jtag-mm.c
index af05995..8a67614 100644
--- a/arch/arm/mach-msm/jtag-mm.c
+++ b/arch/arm/mach-msm/jtag-mm.c
@@ -124,6 +124,8 @@
#define DBGBCRn(n) (0x140 + (n * 4))
#define DBGWVRn(n) (0x180 + (n * 4))
#define DBGWCRn(n) (0x1C0 + (n * 4))
+#define DBGOSLAR (0x300)
+#define DBGOSLSR (0x304)
#define DBGPRCR (0x310)
#define DBGITMISCOUT (0xEF8)
#define DBGITMISCIN (0xEFC)
@@ -131,6 +133,7 @@
#define DBGCLAIMCLR (0xFA4)
#define DBGDSCR_MASK (0x6C30FC3C)
+#define OSLOCK_MAGIC (0xC5ACCE55)
#define MAX_DBG_STATE_SIZE (90)
#define MAX_ETM_STATE_SIZE (78)
@@ -140,6 +143,7 @@
#define ARCH_V3_5 (0x25)
#define ARM_DEBUG_ARCH_V7B (0x3)
+#define ARM_DEBUG_ARCH_V7p1 (0x5)
#define etm_write(etm, val, off) \
__raw_writel(val, etm->base + off)
@@ -221,12 +225,40 @@
uint8_t nr_ctxid_cmp;
struct etm_cpu_ctx *cpu_ctx[NR_CPUS];
bool save_restore_enabled[NR_CPUS];
+ bool os_lock_present;
};
static struct etm_ctx etm;
static struct clk *clock[NR_CPUS];
+static void etm_os_lock(struct etm_cpu_ctx *etmdata)
+{
+ uint32_t count;
+
+ if (etm.os_lock_present) {
+ etm_write(etmdata, OSLOCK_MAGIC, ETMOSLAR);
+ /* Ensure OS lock is set before proceeding */
+ mb();
+ for (count = TIMEOUT_US; BVAL(etm_read(etmdata, ETMSR), 1) != 1
+ && count > 0; count--)
+ udelay(1);
+ if (count == 0)
+ pr_err_ratelimited(
+ "timeout while setting prog bit, ETMSR: %#x\n",
+ etm_read(etmdata, ETMSR));
+ }
+}
+
+static void etm_os_unlock(struct etm_cpu_ctx *etmdata)
+{
+ if (etm.os_lock_present) {
+ /* Ensure all writes are complete before clearing OS lock */
+ mb();
+ etm_write(etmdata, 0x0, ETMOSLAR);
+ }
+}
+
static void etm_set_pwrdwn(struct etm_cpu_ctx *etmdata)
{
uint32_t etmcr;
@@ -273,6 +305,8 @@
switch (etm.arch) {
case ETM_ARCH_V3_5:
+ etm_os_lock(etmdata);
+
etmdata->state[i++] = etm_read(etmdata, ETMTRIGGER);
etmdata->state[i++] = etm_read(etmdata, ETMASICCTLR);
etmdata->state[i++] = etm_read(etmdata, ETMSR);
@@ -346,6 +380,8 @@
switch (etm.arch) {
case ETM_ARCH_V3_5:
+ etm_os_lock(etmdata);
+
etm_clr_pwrdwn(etmdata);
etm_write(etmdata, etmdata->state[i++], ETMTRIGGER);
etm_write(etmdata, etmdata->state[i++], ETMASICCTLR);
@@ -406,6 +442,8 @@
* bit
*/
etm_write(etmdata, etmdata->state[i++], ETMCR);
+
+ etm_os_unlock(etmdata);
break;
default:
pr_err_ratelimited("unsupported etm arch %d in %s\n", etm.arch,
@@ -422,22 +460,46 @@
i = 0;
DBG_UNLOCK(dbgdata);
- dbgdata->state[i++] = dbg_read(dbgdata, DBGWFAR);
- dbgdata->state[i++] = dbg_read(dbgdata, DBGVCR);
- for (j = 0; j < dbg.nr_bp; j++) {
- dbgdata->state[i++] = dbg_read(dbgdata, DBGBVRn(j));
- dbgdata->state[i++] = dbg_read(dbgdata, DBGBCRn(j));
+ switch (dbg.arch) {
+ case ARM_DEBUG_ARCH_V7B:
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGWFAR);
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGVCR);
+ for (j = 0; j < dbg.nr_bp; j++) {
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGBVRn(j));
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGBCRn(j));
+ }
+ for (j = 0; j < dbg.nr_wp; j++) {
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGWVRn(j));
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGWCRn(j));
+ }
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGPRCR);
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGDTRTXext);
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGDTRRXext);
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGDSCRext);
+ break;
+ case ARM_DEBUG_ARCH_V7p1:
+ /* Set OS Lock */
+ dbg_write(dbgdata, OSLOCK_MAGIC, DBGOSLAR);
+ /* Ensure OS lock is set before proceeding */
+ mb();
+
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGWFAR);
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGVCR);
+ for (j = 0; j < dbg.nr_bp; j++) {
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGBVRn(j));
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGBCRn(j));
+ }
+ for (j = 0; j < dbg.nr_wp; j++) {
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGWVRn(j));
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGWCRn(j));
+ }
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGPRCR);
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGCLAIMCLR);
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGDTRTXext);
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGDTRRXext);
+ dbgdata->state[i++] = dbg_read(dbgdata, DBGDSCRext);
+ break;
}
- for (j = 0; j < dbg.nr_wp; j++) {
- dbgdata->state[i++] = dbg_read(dbgdata, DBGWVRn(j));
- dbgdata->state[i++] = dbg_read(dbgdata, DBGWCRn(j));
- }
- dbgdata->state[i++] = dbg_read(dbgdata, DBGPRCR);
- dbgdata->state[i++] = dbg_read(dbgdata, DBGCLAIMSET);
- dbgdata->state[i++] = dbg_read(dbgdata, DBGCLAIMCLR);
- dbgdata->state[i++] = dbg_read(dbgdata, DBGDTRTXext);
- dbgdata->state[i++] = dbg_read(dbgdata, DBGDTRRXext);
- dbgdata->state[i++] = dbg_read(dbgdata, DBGDSCRext);
DBG_LOCK(dbgdata);
}
@@ -449,23 +511,55 @@
i = 0;
DBG_UNLOCK(dbgdata);
- dbg_write(dbgdata, dbgdata->state[i++], DBGWFAR);
- dbg_write(dbgdata, dbgdata->state[i++], DBGVCR);
- for (j = 0; j < dbg.nr_bp; j++) {
- dbg_write(dbgdata, dbgdata->state[i++], DBGBVRn(j));
- dbg_write(dbgdata, dbgdata->state[i++], DBGBCRn(j));
- }
- for (j = 0; j < dbg.nr_wp; j++) {
- dbg_write(dbgdata, dbgdata->state[i++], DBGWVRn(j));
- dbg_write(dbgdata, dbgdata->state[i++], DBGWCRn(j));
- }
- dbg_write(dbgdata, dbgdata->state[i++], DBGPRCR);
- dbg_write(dbgdata, dbgdata->state[i++], DBGCLAIMSET);
- dbg_write(dbgdata, dbgdata->state[i++], DBGCLAIMCLR);
- dbg_write(dbgdata, dbgdata->state[i++], DBGDTRTXext);
- dbg_write(dbgdata, dbgdata->state[i++], DBGDTRRXext);
- dbg_write(dbgdata, dbgdata->state[i++] & DBGDSCR_MASK,
+ switch (dbg.arch) {
+ case ARM_DEBUG_ARCH_V7B:
+ dbg_write(dbgdata, dbgdata->state[i++], DBGWFAR);
+ dbg_write(dbgdata, dbgdata->state[i++], DBGVCR);
+ for (j = 0; j < dbg.nr_bp; j++) {
+ dbg_write(dbgdata, dbgdata->state[i++], DBGBVRn(j));
+ dbg_write(dbgdata, dbgdata->state[i++], DBGBCRn(j));
+ }
+ for (j = 0; j < dbg.nr_wp; j++) {
+ dbg_write(dbgdata, dbgdata->state[i++], DBGWVRn(j));
+ dbg_write(dbgdata, dbgdata->state[i++], DBGWCRn(j));
+ }
+ dbg_write(dbgdata, dbgdata->state[i++], DBGPRCR);
+ dbg_write(dbgdata, dbgdata->state[i++], DBGDTRTXext);
+ dbg_write(dbgdata, dbgdata->state[i++], DBGDTRRXext);
+ dbg_write(dbgdata, dbgdata->state[i++] & DBGDSCR_MASK,
DBGDSCRext);
+ break;
+ case ARM_DEBUG_ARCH_V7p1:
+ /* Set OS lock. Lock will already be set after power collapse
+ * but this write is included to ensure it is set.
+ */
+ dbg_write(dbgdata, OSLOCK_MAGIC, DBGOSLAR);
+ /* Ensure OS lock is set before proceeding */
+ mb();
+
+ dbg_write(dbgdata, dbgdata->state[i++], DBGWFAR);
+ dbg_write(dbgdata, dbgdata->state[i++], DBGVCR);
+ for (j = 0; j < dbg.nr_bp; j++) {
+ dbg_write(dbgdata, dbgdata->state[i++], DBGBVRn(j));
+ dbg_write(dbgdata, dbgdata->state[i++], DBGBCRn(j));
+ }
+ for (j = 0; j < dbg.nr_wp; j++) {
+ dbg_write(dbgdata, dbgdata->state[i++], DBGWVRn(j));
+ dbg_write(dbgdata, dbgdata->state[i++], DBGWCRn(j));
+ }
+ dbg_write(dbgdata, dbgdata->state[i++], DBGPRCR);
+ dbg_write(dbgdata, dbgdata->state[i++], DBGCLAIMSET);
+ dbg_write(dbgdata, dbgdata->state[i++], DBGDTRTXext);
+ dbg_write(dbgdata, dbgdata->state[i++], DBGDTRRXext);
+ dbg_write(dbgdata, dbgdata->state[i++] & DBGDSCR_MASK,
+ DBGDSCRext);
+ /* Ensure all writes are complete before clearing OS lock */
+ mb();
+ dbg_write(dbgdata, 0x0, DBGOSLAR);
+ /* Ensure OS lock is cleared before proceeding */
+ mb();
+ break;
+ }
DBG_LOCK(dbgdata);
}
@@ -529,6 +623,17 @@
return true;
}
+static void __devinit etm_os_lock_init(struct etm_cpu_ctx *etmdata)
+{
+ uint32_t etmoslsr;
+
+ etmoslsr = etm_read(etmdata, ETMOSLSR);
+ if (!BVAL(etmoslsr, 0) && !BVAL(etmoslsr, 3))
+ etm.os_lock_present = false;
+ else
+ etm.os_lock_present = true;
+}
+
static void __devinit etm_init_arch_data(void *info)
{
uint32_t etmidr;
@@ -541,6 +646,8 @@
*/
ETM_UNLOCK(etmdata);
+ etm_os_lock_init(etmdata);
+
etm_clr_pwrdwn(etmdata);
/* Set prog bit. It will be set from reset but this is included to
* ensure it is set
@@ -578,7 +685,7 @@
etm.cpu_ctx[cpu] = etmdata;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "etm-base");
etmdata->base = devm_ioremap(dev, res->start, resource_size(res));
if (!etmdata->base)
@@ -590,8 +697,11 @@
if (!etmdata->state)
return -ENOMEM;
- smp_call_function_single(0, etm_init_arch_data, etmdata, 1);
-
+ if (cpu == 0) {
+ if (smp_call_function_single(cpu, etm_init_arch_data,
+ etmdata, 1))
+ dev_err(dev, "Jtagmm: ETM arch init failed\n");
+ }
if (etm_arch_supported(etm.arch)) {
if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER)
etm.save_restore_enabled[cpu] = true;
@@ -606,6 +716,7 @@
{
switch (arch) {
case ARM_DEBUG_ARCH_V7B:
+ case ARM_DEBUG_ARCH_V7p1:
break;
default:
return false;
@@ -642,7 +753,7 @@
dbg.cpu_ctx[cpu] = dbgdata;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "debug-base");
dbgdata->base = devm_ioremap(dev, res->start, resource_size(res));
if (!dbgdata->base)
@@ -654,8 +765,11 @@
if (!dbgdata->state)
return -ENOMEM;
- smp_call_function_single(0, dbg_init_arch_data, dbgdata, 1);
-
+ if (cpu == 0) {
+ if (smp_call_function_single(cpu, dbg_init_arch_data,
+ dbgdata, 1))
+ dev_err(dev, "Jtagmm: Dbg arch init failed\n");
+ }
if (dbg_arch_supported(dbg.arch)) {
if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER)
dbg.save_restore_enabled[cpu] = true;
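The jtag-mm hunks above bracket the v7.1 debug and ETM save/restore with the OS Lock: the 0xC5ACCE55 key is written to the lock register before any state is touched, and 0 is written only after the restore has finished, so the lock stays held across power collapse. A minimal, hedged sketch of that bracket for the CP14 debug path is below; it reuses the dbg_write() accessor and the DBGOSLAR/OSLOCK_MAGIC definitions added in this diff, and the per-CPU context type name is assumed from usage.

	/* Sketch only: take and release the debug OS Lock around a
	 * save/restore sequence. The barriers order the lock writes
	 * against the register accesses they protect. */
	static void dbg_os_lock(struct dbg_cpu_ctx *dbgdata)
	{
		dbg_write(dbgdata, OSLOCK_MAGIC, DBGOSLAR);
		/* Ensure the lock is set before touching debug state */
		mb();
	}

	static void dbg_os_unlock(struct dbg_cpu_ctx *dbgdata)
	{
		/* Ensure restored state has landed before unlocking */
		mb();
		dbg_write(dbgdata, 0x0, DBGOSLAR);
		/* Ensure the unlock completes before normal operation */
		mb();
	}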
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
index 4336945..4adfe4d 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
@@ -562,7 +562,7 @@
int pnode, src, curr, ctx;
uint64_t req_clk, req_bw, curr_clk, curr_bw;
struct msm_bus_client *client = (struct msm_bus_client *)cl;
- if (IS_ERR(client)) {
+ if (IS_ERR_OR_NULL(client)) {
MSM_BUS_ERR("msm_bus_scale_client update req error %d\n",
(uint32_t)client);
return -ENXIO;
diff --git a/arch/arm/mach-msm/pil-q6v5-lpass.c b/arch/arm/mach-msm/pil-q6v5-lpass.c
index dfbda74..ef13c34 100644
--- a/arch/arm/mach-msm/pil-q6v5-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v5-lpass.c
@@ -114,6 +114,8 @@
pil_q6v5_shutdown(pil);
pil_lpass_disable_clks(drv);
+ writel_relaxed(1, drv->restart_reg);
+
drv->is_booted = false;
return 0;
@@ -125,6 +127,11 @@
unsigned long start_addr = pil_get_entry_addr(pil);
int ret;
+ /* Deassert reset to subsystem and wait for propagation */
+ writel_relaxed(0, drv->restart_reg);
+ mb();
+ udelay(2);
+
ret = pil_lpass_enable_clks(drv);
if (ret)
return ret;
@@ -385,6 +392,7 @@
struct lpass_data *drv;
struct q6v5_data *q6;
struct pil_desc *desc;
+ struct resource *res;
int ret;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
@@ -405,6 +413,11 @@
desc->owner = THIS_MODULE;
desc->proxy_timeout = PROXY_TIMEOUT_MS;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
+ q6->restart_reg = devm_request_and_ioremap(&pdev->dev, res);
+ if (!q6->restart_reg)
+ return -ENOMEM;
+
q6->core_clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(q6->core_clk))
return PTR_ERR(q6->core_clk);
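The LPASS hunks above add a named "restart_reg" MEM resource, map it once at probe, and then toggle it: 1 on shutdown to assert the subsystem reset, 0 on boot followed by a short delay so the deassert propagates before the clocks come up. A hedged probe-time sketch of the mapping, using the managed helper of this kernel generation:

	/* Sketch only: map the named restart register with devm_* lifetime,
	 * as the probe hunk above does; field placement follows that hunk. */
	static int lpass_map_restart_reg(struct platform_device *pdev,
					 struct q6v5_data *q6)
	{
		struct resource *res;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "restart_reg");
		q6->restart_reg = devm_request_and_ioremap(&pdev->dev, res);
		if (!q6->restart_reg)
			return -ENOMEM;
		return 0;
	}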
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index 1954ec3..2ceb06d 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -24,6 +24,7 @@
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
#include <mach/subsystem_restart.h>
#include <mach/clk.h>
@@ -92,6 +93,8 @@
bool crash_shutdown;
bool ignore_errors;
int is_loadable;
+ int err_fatal_irq;
+ int force_stop_gpio;
};
static int pbl_mba_boot_timeout_ms = 100;
@@ -470,18 +473,17 @@
subsystem_restart_dev(drv->subsys);
}
-static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
+static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
{
- struct mba_data *drv = data;
+ struct mba_data *drv = dev_id;
- /* Ignore if we're the one that set SMSM_RESET */
+ /* Ignore if we're the one that set the force stop GPIO */
if (drv->crash_shutdown)
- return;
+ return IRQ_HANDLED;
- if (new_state & SMSM_RESET) {
- pr_err("Probable fatal error on the modem.\n");
- restart_modem(drv);
- }
+ pr_err("Fatal error on the modem.\n");
+ restart_modem(drv);
+ return IRQ_HANDLED;
}
static int modem_shutdown(const struct subsys_desc *subsys)
@@ -521,7 +523,7 @@
{
struct mba_data *drv = subsys_to_drv(subsys);
drv->crash_shutdown = true;
- smsm_reset_modem(SMSM_RESET);
+ gpio_set_value(drv->force_stop_gpio, 1);
}
static struct ramdump_segment smem_segments[] = {
@@ -658,10 +660,11 @@
goto err_irq;
}
- ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
- smsm_state_cb, drv);
+ ret = devm_request_irq(&pdev->dev, drv->err_fatal_irq,
+ modem_err_fatal_intr_handler,
+ IRQF_TRIGGER_RISING, "pil-mss", drv);
if (ret < 0) {
- dev_err(&pdev->dev, "Unable to register SMSM callback!\n");
+ dev_err(&pdev->dev, "Unable to register SMP2P err fatal handler!\n");
goto err_irq;
}
@@ -671,14 +674,11 @@
ret = PTR_ERR(drv->adsp_state_notifier);
dev_err(&pdev->dev, "%s: Registration with the SSR notification driver failed (%d)",
__func__, ret);
- goto err_smsm;
+ goto err_irq;
}
return 0;
-err_smsm:
- smsm_state_cb_deregister(SMSM_MODEM_STATE, SMSM_RESET, smsm_state_cb,
- drv);
err_irq:
destroy_ramdump_device(drv->smem_ramdump_dev);
err_ramdump_smem:
@@ -794,7 +794,7 @@
static int __devinit pil_mss_driver_probe(struct platform_device *pdev)
{
struct mba_data *drv;
- int ret;
+ int ret, err_fatal_gpio;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -809,6 +809,22 @@
return ret;
}
+ /* Get the IRQ from the GPIO for registering inbound handler */
+ err_fatal_gpio = of_get_named_gpio(pdev->dev.of_node,
+ "qcom,gpio-err-fatal", 0);
+ if (err_fatal_gpio < 0)
+ return err_fatal_gpio;
+
+ drv->err_fatal_irq = gpio_to_irq(err_fatal_gpio);
+ if (drv->err_fatal_irq < 0)
+ return drv->err_fatal_irq;
+
+ /* Get the GPIO pin for writing the outbound bits: add more as needed */
+ drv->force_stop_gpio = of_get_named_gpio(pdev->dev.of_node,
+ "qcom,gpio-force-stop", 0);
+ if (drv->force_stop_gpio < 0)
+ return drv->force_stop_gpio;
+
return pil_subsys_init(drv, pdev);
}
@@ -818,8 +834,6 @@
subsys_notif_unregister_notifier(drv->adsp_state_notifier,
&adsp_state_notifier_block);
- smsm_state_cb_deregister(SMSM_MODEM_STATE, SMSM_RESET,
- smsm_state_cb, drv);
subsys_unregister(drv->subsys);
destroy_ramdump_device(drv->smem_ramdump_dev);
destroy_ramdump_device(drv->ramdump_dev);
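The MSS hunks replace the SMSM_RESET state callback with an SMP2P GPIO pair: the inbound "err fatal" GPIO is turned into an interrupt line, and the outbound force-stop GPIO is driven high from the crash-shutdown path. A hedged sketch that consolidates the probe and subsys-init hunks above (the property names and the handler are the ones in the diff; the helper itself is illustrative):

	static int wire_smp2p_signals(struct platform_device *pdev,
				      struct mba_data *drv)
	{
		int gpio;

		/* Inbound: the err-fatal GPIO becomes an IRQ */
		gpio = of_get_named_gpio(pdev->dev.of_node,
					 "qcom,gpio-err-fatal", 0);
		if (gpio < 0)
			return gpio;
		drv->err_fatal_irq = gpio_to_irq(gpio);
		if (drv->err_fatal_irq < 0)
			return drv->err_fatal_irq;

		/* Outbound: the force-stop GPIO is written directly */
		drv->force_stop_gpio = of_get_named_gpio(pdev->dev.of_node,
							 "qcom,gpio-force-stop", 0);
		if (drv->force_stop_gpio < 0)
			return drv->force_stop_gpio;

		return devm_request_irq(&pdev->dev, drv->err_fatal_irq,
					modem_err_fatal_intr_handler,
					IRQF_TRIGGER_RISING, "pil-mss", drv);
	}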
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index 717c057..3c50bc6 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -974,7 +974,7 @@
if (acc_sts & msm_pm_slp_sts[cpu].mask)
return 0;
- usleep(100);
+ udelay(100);
}
pr_info("%s(): Timed out waiting for CPU %u SPM to enter sleep state",
@@ -1021,6 +1021,9 @@
*/
void msm_pm_enable_retention(bool enable)
{
+ if (enable == msm_pm_ldo_retention_enabled)
+ return;
+
msm_pm_ldo_retention_enabled = enable;
/*
* If retention is being disabled, wakeup all online core to ensure
diff --git a/arch/arm/mach-msm/scm-pas.c b/arch/arm/mach-msm/scm-pas.c
index f73055e..b7271bb 100644
--- a/arch/arm/mach-msm/scm-pas.c
+++ b/arch/arm/mach-msm/scm-pas.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,23 @@
#define PAS_SHUTDOWN_CMD 6
#define PAS_IS_SUPPORTED_CMD 7
+enum scm_clock_ids {
+ BUS_CLK = 0,
+ CORE_CLK,
+ IFACE_CLK,
+ CORE_CLK_SRC,
+ NUM_CLKS
+};
+
+static const char * const scm_clock_names[NUM_CLKS] = {
+ [BUS_CLK] = "bus_clk",
+ [CORE_CLK] = "core_clk",
+ [IFACE_CLK] = "iface_clk",
+ [CORE_CLK_SRC] = "core_clk_src",
+};
+
+static struct clk *scm_clocks[NUM_CLKS];
+
int pas_init_image(enum pas_id id, const u8 *metadata, size_t size)
{
int ret;
@@ -108,14 +125,13 @@
};
static uint32_t scm_perf_client;
-static struct clk *scm_bus_clk;
static DEFINE_MUTEX(scm_pas_bw_mutex);
static int scm_pas_bw_count;
static int scm_pas_enable_bw(void)
{
- int ret = 0;
+ int ret = 0, i;
if (!scm_perf_client)
return -EINVAL;
@@ -123,30 +139,40 @@
mutex_lock(&scm_pas_bw_mutex);
if (!scm_pas_bw_count) {
ret = msm_bus_scale_client_update_request(scm_perf_client, 1);
- if (ret) {
- pr_err("bandwidth request failed (%d)\n", ret);
- } else if (scm_bus_clk) {
- ret = clk_prepare_enable(scm_bus_clk);
- if (ret)
- pr_err("clock enable failed\n");
- }
- }
- if (ret)
- msm_bus_scale_client_update_request(scm_perf_client, 0);
- else
+ if (ret)
+ goto err_bus;
scm_pas_bw_count++;
+ }
+ for (i = 0; i < NUM_CLKS; i++)
+ if (clk_prepare_enable(scm_clocks[i]))
+ goto err_clk;
+
+ mutex_unlock(&scm_pas_bw_mutex);
+ return ret;
+
+err_clk:
+ pr_err("clk prepare_enable failed (%s)\n", scm_clock_names[i]);
+ for (i--; i >= 0; i--)
+ clk_disable_unprepare(scm_clocks[i]);
+
+err_bus:
+ pr_err("bandwidth request failed (%d)\n", ret);
+ msm_bus_scale_client_update_request(scm_perf_client, 0);
+
mutex_unlock(&scm_pas_bw_mutex);
return ret;
}
static void scm_pas_disable_bw(void)
{
+ int i;
mutex_lock(&scm_pas_bw_mutex);
if (scm_pas_bw_count-- == 1) {
msm_bus_scale_client_update_request(scm_perf_client, 0);
- if (scm_bus_clk)
- clk_disable_unprepare(scm_bus_clk);
}
+ for (i = NUM_CLKS - 1; i >= 0; i--)
+ clk_disable_unprepare(scm_clocks[i]);
+
mutex_unlock(&scm_pas_bw_mutex);
}
@@ -214,17 +240,25 @@
static int __init scm_pas_init(void)
{
- if (cpu_is_msm8974()) {
+ int i, rate;
+ for (i = 0; i < NUM_CLKS; i++) {
+ scm_clocks[i] = clk_get_sys("scm", scm_clock_names[i]);
+ if (IS_ERR(scm_clocks[i]))
+ scm_clocks[i] = NULL;
+ }
+
+ /* Fail silently if this clock is not supported */
+ rate = clk_round_rate(scm_clocks[CORE_CLK_SRC], 1);
+ clk_set_rate(scm_clocks[CORE_CLK_SRC], rate);
+
+ if (cpu_is_msm8974() || cpu_is_msm8226()) {
scm_pas_bw_tbl[0].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
scm_pas_bw_tbl[1].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
} else {
- scm_bus_clk = clk_get_sys("scm", "bus_clk");
- if (!IS_ERR(scm_bus_clk)) {
- clk_set_rate(scm_bus_clk, 64000000);
- } else {
- scm_bus_clk = NULL;
+ if (!IS_ERR(scm_clocks[BUS_CLK]))
+ clk_set_rate(scm_clocks[BUS_CLK], 64000000);
+ else
pr_warn("unable to get bus clock\n");
- }
}
scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
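scm-pas now keeps its four clocks in an array indexed by enum scm_clock_ids, fetches them with clk_get_sys() at init (storing NULL for clocks a target does not provide, relying on the clk calls treating a NULL clock as a no-op), and enables them as a group, unwinding in reverse order on failure. A condensed sketch of that enable/unwind idiom using the names introduced above:

	static int scm_pas_enable_clocks(void)
	{
		int i, ret = 0;

		for (i = 0; i < NUM_CLKS; i++) {
			ret = clk_prepare_enable(scm_clocks[i]);
			if (ret)
				goto err;
		}
		return 0;
	err:
		pr_err("clk prepare_enable failed (%s)\n", scm_clock_names[i]);
		while (--i >= 0)
			clk_disable_unprepare(scm_clocks[i]);
		return ret;
	}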
diff --git a/arch/arm/mach-msm/smp2p.c b/arch/arm/mach-msm/smp2p.c
index 8066005..7bdcce9 100644
--- a/arch/arm/mach-msm/smp2p.c
+++ b/arch/arm/mach-msm/smp2p.c
@@ -1599,8 +1599,8 @@
*/
static int __devinit msm_smp2p_probe(struct platform_device *pdev)
{
- struct resource *irq_out_base;
- struct resource *irq_offset;
+ struct resource *r;
+ void *irq_out_ptr;
char *key;
uint32_t edge;
int ret;
@@ -1617,15 +1617,18 @@
goto fail;
}
- key = "irq-reg-base";
- irq_out_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
- if (!irq_out_base)
- goto missing_key;
-
- key = "irq-reg-offset";
- irq_offset = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
- if (!irq_offset)
- goto missing_key;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ SMP2P_ERR("%s: failed gathering irq-reg resource for edge %d\n"
+ , __func__, edge);
+ goto fail;
+ }
+ irq_out_ptr = ioremap_nocache(r->start, resource_size(r));
+ if (!irq_out_ptr) {
+ SMP2P_ERR("%s: failed remap from phys to virt for edge %d\n",
+ __func__, edge);
+ return -ENOMEM;
+ }
key = "qcom,irq-bitmask";
ret = of_property_read_u32(node, key, &irq_bitmask);
@@ -1656,9 +1659,7 @@
*/
smp2p_int_cfgs[edge].in_int_id = irq_line;
smp2p_int_cfgs[edge].out_int_mask = irq_bitmask;
- smp2p_int_cfgs[edge].out_int_ptr =
- (uint32_t *)((uint32_t)irq_out_base->start +
- (uint32_t)irq_offset->start);
+ smp2p_int_cfgs[edge].out_int_ptr = irq_out_ptr;
smp2p_int_cfgs[edge].is_configured = true;
return 0;
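smp2p now maps the outbound interrupt register itself rather than combining an "irq-reg-base" resource with a separate offset resource. The pattern is the plain platform MEM resource plus ioremap_nocache(); a minimal, illustrative sketch:

	static void __iomem *smp2p_map_irq_reg(struct platform_device *pdev)
	{
		struct resource *r;

		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!r)
			return NULL;

		/* Interrupt registers must be mapped uncached */
		return ioremap_nocache(r->start, resource_size(r));
	}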
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index efbd8c6..57c85e1 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -44,6 +44,7 @@
HW_PLATFORM_LIQUID = 9,
/* Dragonboard platform id is assigned as 10 in CDT */
HW_PLATFORM_DRAGON = 10,
+ HW_PLATFORM_QRD = 11,
HW_PLATFORM_HRD = 13,
HW_PLATFORM_DTV = 14,
HW_PLATFORM_INVALID
@@ -59,6 +60,7 @@
[HW_PLATFORM_MTP] = "MTP",
[HW_PLATFORM_LIQUID] = "Liquid",
[HW_PLATFORM_DRAGON] = "Dragon",
+ [HW_PLATFORM_QRD] = "QRD",
[HW_PLATFORM_HRD] = "HRD",
[HW_PLATFORM_DTV] = "DTV",
};
@@ -249,6 +251,7 @@
[105] = MSM_CPU_9615,
[106] = MSM_CPU_9615,
[107] = MSM_CPU_9615,
+ [171] = MSM_CPU_9615,
/* 8064 IDs */
[109] = MSM_CPU_8064,
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 56a9a4a..f385fc4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -22,6 +22,7 @@
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
+#include <linux/io.h>
#include <linux/vmalloc.h>
#include <asm/memory.h>
@@ -230,116 +231,73 @@
}
#ifdef CONFIG_MMU
-
-#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
-
-/*
- * These are the page tables (2MB each) covering uncached, DMA consistent allocations
- */
-static pte_t **consistent_pte;
-
-#define DEFAULT_CONSISTENT_DMA_SIZE (7*SZ_2M)
-
-static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
-
-void __init init_consistent_dma_size(unsigned long size)
-{
- unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);
-
- BUG_ON(consistent_pte); /* Check we're called before DMA region init */
- BUG_ON(base < VMALLOC_END);
-
- /* Grow region to accommodate specified size */
- if (base < consistent_base)
- consistent_base = base;
-}
-
-#include "vmregion.h"
-
-static struct arm_vmregion_head consistent_head = {
- .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
- .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
- .vm_end = CONSISTENT_END,
-};
-
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif
-/*
- * Initialise the consistent memory allocation.
- */
-static int __init consistent_init(void)
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+ pgprot_t prot, struct page **ret_page,
+ const void *caller);
+
+static void *
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
{
- int ret = 0;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i = 0;
- unsigned long base = consistent_base;
- unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+ struct vm_struct *area;
+ unsigned long addr;
- if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
- return 0;
+ /*
+ * DMA allocation can be mapped to user space, so let's
+ * set VM_USERMAP flags too.
+ */
+ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+ caller);
+ if (!area)
+ return NULL;
+ addr = (unsigned long)area->addr;
+ area->phys_addr = __pfn_to_phys(page_to_pfn(page));
- consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
- if (!consistent_pte) {
- pr_err("%s: no memory\n", __func__);
- return -ENOMEM;
+ if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
+ vunmap((void *)addr);
+ return NULL;
}
-
- pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
- consistent_head.vm_start = base;
-
- do {
- pgd = pgd_offset(&init_mm, base);
-
- pud = pud_alloc(&init_mm, pgd, base);
- if (!pud) {
- pr_err("%s: no pud tables\n", __func__);
- ret = -ENOMEM;
- break;
- }
-
- pmd = pmd_alloc(&init_mm, pud, base);
- if (!pmd) {
- pr_err("%s: no pmd tables\n", __func__);
- ret = -ENOMEM;
- break;
- }
- WARN_ON(!pmd_none(*pmd));
-
- pte = pte_alloc_kernel(pmd, base);
- if (!pte) {
- pr_err("%s: no pte tables\n", __func__);
- ret = -ENOMEM;
- break;
- }
-
- consistent_pte[i++] = pte;
- base += PMD_SIZE;
- } while (base < CONSISTENT_END);
-
- return ret;
+ return (void *)addr;
}
-core_initcall(consistent_init);
+
+static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn)
+{
+ unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
+ struct vm_struct *area = find_vm_area(cpu_addr);
+ if (!area || (area->flags & flags) != flags) {
+ if (!no_warn)
+ WARN(1, "trying to free invalid coherent area: %p\n",
+ cpu_addr);
+ return;
+ }
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
+}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
bool no_kernel_mapping, const void *caller);
-static struct arm_vmregion_head coherent_head = {
- .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
- .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+struct dma_pool {
+ size_t size;
+ spinlock_t lock;
+ unsigned long *bitmap;
+ unsigned long nr_pages;
+ void *vaddr;
+ struct page *page;
};
-static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static struct dma_pool atomic_pool = {
+ .size = SZ_256K,
+};
static int __init early_coherent_pool(char *p)
{
- coherent_pool_size = memparse(p, &p);
+ atomic_pool.size = memparse(p, &p);
return 0;
}
early_param("coherent_pool", early_coherent_pool);
@@ -347,33 +305,46 @@
/*
* Initialise the coherent pool for atomic allocations.
*/
-static int __init coherent_init(void)
+static int __init atomic_pool_init(void)
{
+ struct dma_pool *pool = &atomic_pool;
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
- size_t size = coherent_pool_size;
+ unsigned long nr_pages = pool->size >> PAGE_SHIFT;
+ unsigned long *bitmap;
struct page *page;
void *ptr;
+ int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
- if (!IS_ENABLED(CONFIG_CMA))
- return 0;
+ bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!bitmap)
+ goto no_bitmap;
- ptr = __alloc_from_contiguous(NULL, size, prot, &page, false,
- coherent_init);
+ if (IS_ENABLED(CONFIG_CMA))
+ ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+ false, atomic_pool_init);
+ else
+ ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
+ &page, NULL);
if (ptr) {
- coherent_head.vm_start = (unsigned long) ptr;
- coherent_head.vm_end = (unsigned long) ptr + size;
- printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
- (unsigned)size / 1024);
+ spin_lock_init(&pool->lock);
+ pool->vaddr = ptr;
+ pool->page = page;
+ pool->bitmap = bitmap;
+ pool->nr_pages = nr_pages;
+ pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+ (unsigned)pool->size / 1024);
return 0;
}
- printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
- (unsigned)size / 1024);
+ kfree(bitmap);
+no_bitmap:
+ pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+ (unsigned)pool->size / 1024);
return -ENOMEM;
}
/*
* CMA is activated by core_initcall, so we must be called after it.
*/
-postcore_initcall(coherent_init);
+postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve {
phys_addr_t base;
@@ -421,114 +392,6 @@
}
}
-static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
-{
- struct arm_vmregion *c;
- size_t align;
- int bit;
-
- if (!consistent_pte) {
- pr_err("%s: not initialised\n", __func__);
- dump_stack();
- return NULL;
- }
-
- /*
- * Align the virtual region allocation - maximum alignment is
- * a section size, minimum is a page size. This helps reduce
- * fragmentation of the DMA space, and also prevents allocations
- * smaller than a section from crossing a section boundary.
- */
- bit = fls(size - 1);
- if (bit > SECTION_SHIFT)
- bit = SECTION_SHIFT;
- align = 1 << bit;
-
- /*
- * Allocate a virtual address in the consistent mapping region.
- */
- c = arm_vmregion_alloc(&consistent_head, align, size,
- gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
- if (c) {
- pte_t *pte;
- int idx = CONSISTENT_PTE_INDEX(c->vm_start);
- u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-
- pte = consistent_pte[idx] + off;
- c->priv = page;
-
- do {
- BUG_ON(!pte_none(*pte));
-
- set_pte_ext(pte, mk_pte(page, prot), 0);
- page++;
- pte++;
- off++;
- if (off >= PTRS_PER_PTE) {
- off = 0;
- pte = consistent_pte[++idx];
- }
- } while (size -= PAGE_SIZE);
-
- dsb();
-
- return (void *)c->vm_start;
- }
- return NULL;
-}
-
-static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn)
-{
- struct arm_vmregion *c;
- unsigned long addr;
- pte_t *ptep;
- int idx;
- u32 off;
-
- c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
- if (!c) {
- if (!no_warn) {
- pr_err("%s: trying to free invalid coherent area: %p\n",
- __func__, cpu_addr);
- dump_stack();
- }
- return;
- }
-
- if ((c->vm_end - c->vm_start) != size) {
- pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
- __func__, c->vm_end - c->vm_start, size);
- dump_stack();
- size = c->vm_end - c->vm_start;
- }
-
- idx = CONSISTENT_PTE_INDEX(c->vm_start);
- off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
- ptep = consistent_pte[idx] + off;
- addr = c->vm_start;
- do {
- pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-
- ptep++;
- addr += PAGE_SIZE;
- off++;
- if (off >= PTRS_PER_PTE) {
- off = 0;
- ptep = consistent_pte[++idx];
- }
-
- if (pte_none(pte) || !pte_present(pte))
- pr_crit("%s: bad page in kernel page table\n",
- __func__);
- } while (size -= PAGE_SIZE);
-
- flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
- arm_vmregion_free(&consistent_head, c);
-}
-
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
void *data)
{
@@ -584,16 +447,17 @@
return ptr;
}
-static void *__alloc_from_pool(struct device *dev, size_t size,
- struct page **ret_page, const void *caller)
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
- struct arm_vmregion *c;
+ struct dma_pool *pool = &atomic_pool;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned int pageno;
+ unsigned long flags;
+ void *ptr = NULL;
size_t align;
- if (!coherent_head.vm_start) {
- printk(KERN_ERR "%s: coherent pool not initialised!\n",
- __func__);
- dump_stack();
+ if (!pool->vaddr) {
+ WARN(1, "coherent pool not initialised!\n");
return NULL;
}
@@ -603,35 +467,41 @@
* size. This helps reduce fragmentation of the DMA space.
*/
align = PAGE_SIZE << get_order(size);
- c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
- if (c) {
- void *ptr = (void *)c->vm_start;
- struct page *page = virt_to_page(ptr);
- *ret_page = page;
- return ptr;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
+ 0, count, (1 << align) - 1);
+ if (pageno < pool->nr_pages) {
+ bitmap_set(pool->bitmap, pageno, count);
+ ptr = pool->vaddr + PAGE_SIZE * pageno;
+ *ret_page = pool->page + pageno;
}
- return NULL;
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return ptr;
}
-static int __free_from_pool(void *cpu_addr, size_t size)
+static int __free_from_pool(void *start, size_t size)
{
- unsigned long start = (unsigned long)cpu_addr;
- unsigned long end = start + size;
- struct arm_vmregion *c;
+ struct dma_pool *pool = &atomic_pool;
+ unsigned long pageno, count;
+ unsigned long flags;
- if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+ if (start < pool->vaddr || start > pool->vaddr + pool->size)
return 0;
- c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
-
- if ((c->vm_end - c->vm_start) != size) {
- printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
- __func__, c->vm_end - c->vm_start, size);
- dump_stack();
- size = c->vm_end - c->vm_start;
+ if (start + size > pool->vaddr + pool->size) {
+ WARN(1, "freeing wrong coherent size from pool\n");
+ return 0;
}
- arm_vmregion_free(&coherent_head, c);
+ pageno = (start - pool->vaddr) >> PAGE_SHIFT;
+ count = size >> PAGE_SHIFT;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ bitmap_clear(pool->bitmap, pageno, count);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
return 1;
}
@@ -766,10 +636,10 @@
if (arch_is_coherent() || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (gfp & GFP_ATOMIC)
+ addr = __alloc_from_pool(size, &page);
else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
- else if (gfp & GFP_ATOMIC)
- addr = __alloc_from_pool(dev, size, &page, caller);
else
addr = __alloc_from_contiguous(dev, size, prot, &page,
no_kernel_mapping, caller);
@@ -1067,9 +937,6 @@
static int __init dma_debug_do_init(void)
{
-#ifdef CONFIG_MMU
- arm_vmregion_create_proc("dma-mappings", &consistent_head);
-#endif
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
@@ -1186,61 +1053,32 @@
* Create a CPU mapping for a specified pages
*/
static void *
-__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
{
- struct arm_vmregion *c;
- size_t align;
- size_t count = size >> PAGE_SHIFT;
- int bit;
+ unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct vm_struct *area;
+ unsigned long p;
- if (!consistent_pte[0]) {
- pr_err("%s: not initialised\n", __func__);
- dump_stack();
+ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+ caller);
+ if (!area)
return NULL;
+
+ area->pages = pages;
+ area->nr_pages = nr_pages;
+ p = (unsigned long)area->addr;
+
+ for (i = 0; i < nr_pages; i++) {
+ phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
+ if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
+ goto err;
+ p += PAGE_SIZE;
}
-
- /*
- * Align the virtual region allocation - maximum alignment is
- * a section size, minimum is a page size. This helps reduce
- * fragmentation of the DMA space, and also prevents allocations
- * smaller than a section from crossing a section boundary.
- */
- bit = fls(size - 1);
- if (bit > SECTION_SHIFT)
- bit = SECTION_SHIFT;
- align = 1 << bit;
-
- /*
- * Allocate a virtual address in the consistent mapping region.
- */
- c = arm_vmregion_alloc(&consistent_head, align, size,
- gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
- if (c) {
- pte_t *pte;
- int idx = CONSISTENT_PTE_INDEX(c->vm_start);
- int i = 0;
- u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-
- pte = consistent_pte[idx] + off;
- c->priv = pages;
-
- do {
- BUG_ON(!pte_none(*pte));
-
- set_pte_ext(pte, mk_pte(pages[i], prot), 0);
- pte++;
- off++;
- i++;
- if (off >= PTRS_PER_PTE) {
- off = 0;
- pte = consistent_pte[++idx];
- }
- } while (i < count);
-
- dsb();
-
- return (void *)c->vm_start;
- }
+ return area->addr;
+err:
+ unmap_kernel_range((unsigned long)area->addr, size);
+ vunmap(area->addr);
return NULL;
}
@@ -1299,6 +1137,16 @@
return 0;
}
+static struct page **__iommu_get_pages(void *cpu_addr)
+{
+ struct vm_struct *area;
+
+ area = find_vm_area(cpu_addr);
+ if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
+ return area->pages;
+ return NULL;
+}
+
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
@@ -1317,7 +1165,8 @@
if (*handle == DMA_ERROR_CODE)
goto err_buffer;
- addr = __iommu_alloc_remap(pages, size, gfp, prot);
+ addr = __iommu_alloc_remap(pages, size, gfp, prot,
+ __builtin_return_address(0));
if (!addr)
goto err_mapping;
@@ -1334,31 +1183,25 @@
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
- struct arm_vmregion *c;
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ struct page **pages = __iommu_get_pages(cpu_addr);
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
- if (c) {
- struct page **pages = c->priv;
+ if (!pages)
+ return -ENXIO;
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
- int i = 0;
+ do {
+ int ret = vm_insert_page(vma, uaddr, *pages++);
+ if (ret) {
+ pr_err("Remapping memory failed: %d\n", ret);
+ return ret;
+ }
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
- do {
- int ret;
-
- ret = vm_insert_page(vma, uaddr, pages[i++]);
- if (ret) {
- pr_err("Remapping memory, error: %d\n", ret);
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
- }
return 0;
}
@@ -1369,16 +1212,19 @@
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs)
{
- struct arm_vmregion *c;
+ struct page **pages = __iommu_get_pages(cpu_addr);
size = PAGE_ALIGN(size);
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
- if (c) {
- struct page **pages = c->priv;
- __dma_free_remap(cpu_addr, size, false);
- __iommu_remove_mapping(dev, handle, size);
- __iommu_free_buffer(dev, pages, size);
+ if (!pages) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
}
+
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
+
+ __iommu_remove_mapping(dev, handle, size);
+ __iommu_free_buffer(dev, pages, size);
}
/*
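The dma-mapping rework above drops the arm_vmregion consistent allocator in favour of standard vmalloc-area remapping (get_vm_area_caller() plus ioremap_page_range()) and carves the fixed atomic pool with a spinlock-protected bitmap. The allocation side of such a bitmap pool is sketched below; it mirrors the __alloc_from_pool() hunk but drops the alignment mask for brevity.

	/* Sketch: grab 'size' bytes of page-granular space from the pool,
	 * or return NULL if no contiguous run of free pages exists. */
	static void *pool_alloc(struct dma_pool *pool, size_t size)
	{
		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
		unsigned int pageno;
		unsigned long flags;
		void *ptr = NULL;

		spin_lock_irqsave(&pool->lock, flags);
		pageno = bitmap_find_next_zero_area(pool->bitmap,
						    pool->nr_pages, 0, count, 0);
		if (pageno < pool->nr_pages) {
			bitmap_set(pool->bitmap, pageno, count);
			ptr = pool->vaddr + PAGE_SIZE * pageno;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
		return ptr;
	}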
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index f16f700..66567bb 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -375,11 +375,11 @@
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
-unsigned long memory_hole_offset;
+phys_addr_t memory_hole_offset;
EXPORT_SYMBOL(memory_hole_offset);
-unsigned long memory_hole_start;
+phys_addr_t memory_hole_start;
EXPORT_SYMBOL(memory_hole_start);
-unsigned long memory_hole_end;
+phys_addr_t memory_hole_end;
EXPORT_SYMBOL(memory_hole_end);
unsigned long memory_hole_align;
EXPORT_SYMBOL(memory_hole_align);
@@ -390,8 +390,8 @@
void find_memory_hole(void)
{
int i;
- unsigned long hole_start;
- unsigned long hole_size;
+ phys_addr_t hole_start;
+ phys_addr_t hole_size;
unsigned long hole_end_virt;
/*
@@ -428,13 +428,13 @@
memory_hole_offset = memory_hole_start - PHYS_OFFSET;
if (!IS_ALIGNED(memory_hole_start, SECTION_SIZE)) {
- pr_err("memory_hole_start %lx is not aligned to %lx\n",
- memory_hole_start, SECTION_SIZE);
+ pr_err("memory_hole_start %pa is not aligned to %lx\n",
+ &memory_hole_start, SECTION_SIZE);
BUG();
}
if (!IS_ALIGNED(memory_hole_end, SECTION_SIZE)) {
- pr_err("memory_hole_end %lx is not aligned to %lx\n",
- memory_hole_end, SECTION_SIZE);
+ pr_err("memory_hole_end %pa is not aligned to %lx\n",
+ &memory_hole_end, SECTION_SIZE);
BUG();
}
@@ -444,8 +444,9 @@
IS_ALIGNED(memory_hole_end, PMD_SIZE)) ||
(IS_ALIGNED(hole_end_virt, PMD_SIZE) &&
!IS_ALIGNED(memory_hole_end, PMD_SIZE))) {
- memory_hole_align = max(hole_end_virt & ~PMD_MASK,
- memory_hole_end & ~PMD_MASK);
+ memory_hole_align = !IS_ALIGNED(hole_end_virt, PMD_SIZE) ?
+ hole_end_virt & ~PMD_MASK :
+ memory_hole_end & ~PMD_MASK;
virtual_hole_start = hole_end_virt;
virtual_hole_end = hole_end_virt + memory_hole_align;
pr_info("Physical memory hole is not aligned. There will be a virtual memory hole from %lx to %lx\n",
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 87fa3f2..a8ee92d 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -62,6 +62,9 @@
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
+/* consistent regions used by dma_alloc_attrs() */
+#define VM_ARM_DMA_CONSISTENT 0x20000000
+
#endif
#ifdef CONFIG_ZONE_DMA
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 0e31910..266be05 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -625,39 +625,60 @@
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
+{
+#ifndef CONFIG_ARM_LPAE
+ /*
+ * In classic MMU format, puds and pmds are folded in to
+ * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+ * group of L1 entries making up one logical pointer to
+ * an L2 table (2MB), whereas PMDs refer to the individual
+ * L1 entries (1MB). Hence increment to get the correct
+ * offset for odd 1MB sections.
+ * (See arch/arm/include/asm/pgtable-2level.h)
+ */
+ if (addr & SECTION_SIZE)
+ pmd++;
+#endif
+ do {
+ *pmd = __pmd(phys | type->prot_sect);
+ phys += SECTION_SIZE;
+ } while (pmd++, addr += SECTION_SIZE, addr != end);
+
+ flush_pmd_entry(pmd);
+}
+
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type)
{
pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long next;
- /*
- * Try a section mapping - end, addr and phys must all be aligned
- * to a section boundary. Note that PMDs refer to the individual
- * L1 entries, whereas PGDs refer to a group of L1 entries making
- * up one logical pointer to an L2 table.
- */
- if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
- pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
- if (addr & SECTION_SIZE)
- pmd++;
-#endif
-
- do {
- *pmd = __pmd(phys | type->prot_sect);
- phys += SECTION_SIZE;
- } while (pmd++, addr += SECTION_SIZE, addr != end);
-
- flush_pmd_entry(p);
- } else {
+ do {
/*
- * No need to loop; pte's aren't interested in the
- * individual L1 entries.
+ * With LPAE, we must loop over to map
+ * all the pmds for the given range.
*/
- alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
- }
+ next = pmd_addr_end(addr, end);
+
+ /*
+ * Try a section mapping - addr, next and phys must all be
+ * aligned to a section boundary.
+ */
+ if (type->prot_sect &&
+ ((addr | next | phys) & ~SECTION_MASK) == 0) {
+ map_init_section(pmd, addr, next, phys, type);
+ } else {
+ alloc_init_pte(pmd, addr, next,
+ __phys_to_pfn(phys), type);
+ }
+
+ phys += next - addr;
+
+ } while (pmd++, addr = next, addr != end);
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -668,7 +689,7 @@
do {
next = pud_addr_end(addr, end);
- alloc_init_section(pud, addr, next, phys, type);
+ alloc_init_pmd(pud, addr, next, phys, type);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
diff --git a/block/blk-core.c b/block/blk-core.c
index 04604cf..bd50c8e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -29,6 +29,7 @@
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
+#include <linux/ratelimit.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -2203,9 +2204,11 @@
error_type = "I/O";
break;
}
- printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
- error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
- (unsigned long long)blk_rq_pos(req));
+ printk_ratelimited(
+ KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
+ error_type,
+ req->rq_disk ? req->rq_disk->disk_name : "?",
+ (unsigned long long)blk_rq_pos(req));
}
blk_account_io_completion(req, nr_bytes);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ac993ea..50b2831 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -22,6 +22,8 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/opp.h>
+#include <linux/of.h>
+#include <linux/export.h>
/*
* Internal data structure organization with the OPP layer library is as
@@ -64,6 +66,7 @@
unsigned long u_volt;
struct device_opp *dev_opp;
+ struct rcu_head head;
};
/**
@@ -159,6 +162,7 @@
return v;
}
+EXPORT_SYMBOL(opp_get_voltage);
/**
* opp_get_freq() - Gets the frequency corresponding to an available opp
@@ -188,6 +192,7 @@
return f;
}
+EXPORT_SYMBOL(opp_get_freq);
/**
* opp_get_opp_count() - Get number of opps available in the opp list
@@ -220,6 +225,7 @@
return count;
}
+EXPORT_SYMBOL(opp_get_opp_count);
/**
* opp_find_freq_exact() - search for an exact frequency
@@ -229,7 +235,10 @@
*
* Searches for exact match in the opp list and returns pointer to the matching
* opp if found, else returns ERR_PTR in case of error and should be handled
- * using IS_ERR.
+ * using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
*
* Note: available is a modifier for the search. if available=true, then the
* match is for exact matching frequency and is available in the stored OPP
@@ -248,7 +257,7 @@
bool available)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
@@ -267,6 +276,7 @@
return opp;
}
+EXPORT_SYMBOL(opp_find_freq_exact);
/**
* opp_find_freq_ceil() - Search for an rounded ceil freq
@@ -277,7 +287,11 @@
* for a device.
*
* Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
@@ -288,7 +302,7 @@
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -297,7 +311,7 @@
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
- return opp;
+ return ERR_CAST(dev_opp);
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
@@ -309,6 +323,7 @@
return opp;
}
+EXPORT_SYMBOL(opp_find_freq_ceil);
/**
* opp_find_freq_floor() - Search for a rounded floor freq
@@ -319,7 +334,11 @@
* for a device.
*
* Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
@@ -330,7 +349,7 @@
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -339,7 +358,7 @@
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
- return opp;
+ return ERR_CAST(dev_opp);
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available) {
@@ -355,6 +374,7 @@
return opp;
}
+EXPORT_SYMBOL(opp_find_freq_floor);
/**
* opp_add() - Add an OPP table from a table definitions
@@ -511,7 +531,7 @@
list_replace_rcu(&opp->node, &new_opp->node);
mutex_unlock(&dev_opp_list_lock);
- synchronize_rcu();
+ kfree_rcu(opp, head);
/* Notify the change of the OPP availability */
if (availability_req)
@@ -521,13 +541,10 @@
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
new_opp);
- /* clean up old opp */
- new_opp = opp;
- goto out;
+ return 0;
unlock:
mutex_unlock(&dev_opp_list_lock);
-out:
kfree(new_opp);
return r;
}
@@ -551,6 +568,7 @@
{
return opp_set_availability(dev, freq, true);
}
+EXPORT_SYMBOL(opp_enable);
/**
* opp_disable() - Disable a specific OPP
@@ -572,6 +590,7 @@
{
return opp_set_availability(dev, freq, false);
}
+EXPORT_SYMBOL(opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
@@ -674,3 +693,49 @@
return &dev_opp->head;
}
+
+#ifdef CONFIG_OF
+/**
+ * of_init_opp_table() - Initialize opp table from device tree
+ * @dev: device pointer used to lookup device OPPs.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ */
+int of_init_opp_table(struct device *dev)
+{
+ const struct property *prop;
+ const __be32 *val;
+ int nr;
+
+ prop = of_find_property(dev->of_node, "operating-points", NULL);
+ if (!prop)
+ return -ENODEV;
+ if (!prop->value)
+ return -ENODATA;
+
+ /*
+ * Each OPP is a set of tuples consisting of frequency and
+ * voltage like <freq-kHz vol-uV>.
+ */
+ nr = prop->length / sizeof(u32);
+ if (nr % 2) {
+ dev_err(dev, "%s: Invalid OPP list\n", __func__);
+ return -EINVAL;
+ }
+
+ val = prop->value;
+ while (nr) {
+ unsigned long freq = be32_to_cpup(val++) * 1000;
+ unsigned long volt = be32_to_cpup(val++);
+
+ if (opp_add(dev, freq, volt)) {
+ dev_warn(dev, "%s: Failed to add OPP %ld\n",
+ __func__, freq);
+ continue;
+ }
+ nr -= 2;
+ }
+
+ return 0;
+}
+#endif
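With the new exports and of_init_opp_table(), a driver can populate its OPP table straight from an "operating-points" property and query it under RCU. A hedged consumer sketch (the device pointer and the 266 MHz target are illustrative):

	struct opp *opp;
	unsigned long freq = 266000000;
	unsigned long volt;
	int ret;

	ret = of_init_opp_table(dev);		/* parse "operating-points" */
	if (ret)
		return ret;

	rcu_read_lock();
	opp = opp_find_freq_ceil(dev, &freq);	/* rounds freq up to an OPP */
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);		/* -ERANGE when nothing matches */
	}
	volt = opp_get_voltage(opp);
	rcu_read_unlock();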
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 0b3ffef..e2864ec 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -633,7 +633,7 @@
config MSM_ROTATOR
tristate "MSM Offline Image Rotator Driver"
- depends on (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM8960) && ANDROID_PMEM
+ depends on (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM8960)
default y
help
This driver provides support for the image rotator HW block in the
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 0f4d613..d78327f 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -138,6 +138,15 @@
struct hlist_head htbl[RPC_HASH_SZ];
};
+struct fastrpc_mmap {
+ struct hlist_node hn;
+ struct ion_handle *handle;
+ void *virt;
+ uint32_t vaddrin;
+ uint32_t vaddrout;
+ int size;
+};
+
struct fastrpc_buf {
struct ion_handle *handle;
void *virt;
@@ -146,6 +155,11 @@
int used;
};
+struct file_data {
+ spinlock_t hlock;
+ struct hlist_head hlst;
+};
+
struct fastrpc_device {
uint32_t tgid;
struct hlist_node hn;
@@ -168,18 +182,31 @@
}
}
+static void free_map(struct fastrpc_mmap *map)
+{
+ struct fastrpc_apps *me = &gfa;
+ if (map->handle) {
+ if (map->virt) {
+ ion_unmap_kernel(me->iclient, map->handle);
+ map->virt = 0;
+ }
+ ion_free(me->iclient, map->handle);
+ }
+ map->handle = 0;
+}
+
static int alloc_mem(struct fastrpc_buf *buf)
{
struct ion_client *clnt = gfa.iclient;
struct sg_table *sg;
int err = 0;
-
+ buf->handle = 0;
+ buf->virt = 0;
buf->handle = ion_alloc(clnt, buf->size, SZ_4K,
ION_HEAP(ION_AUDIO_HEAP_ID), 0);
VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle));
if (err)
goto bail;
- buf->virt = 0;
VERIFY(err, 0 != (buf->virt = ion_map_kernel(clnt, buf->handle)));
if (err)
goto bail;
@@ -211,7 +238,6 @@
static void context_list_dtor(struct smq_context_list *me)
{
kfree(me->ls);
- me->ls = 0;
}
static void context_list_alloc_ctx(struct smq_context_list *me,
@@ -277,7 +303,6 @@
if (rlen < 0) {
rlen = ((uint32_t)pages - (uint32_t)obuf->virt) - obuf->size;
obuf->size += buf_page_size(rlen);
- obuf->handle = 0;
VERIFY(err, 0 == alloc_mem(obuf));
if (err)
goto bail;
@@ -314,7 +339,6 @@
if (obuf->handle != ibuf->handle)
free_mem(obuf);
obuf->size += buf_page_size(sizeof(*pages));
- obuf->handle = 0;
VERIFY(err, 0 == alloc_mem(obuf));
if (err)
goto bail;
@@ -430,10 +454,15 @@
outbufs = REMOTE_SCALARS_OUTBUFS(sc);
for (i = inbufs; i < inbufs + outbufs; ++i) {
if (rpra[i].buf.pv != pra[i].buf.pv) {
- VERIFY(err, 0 == copy_to_user(pra[i].buf.pv,
+ if (!kernel) {
+ VERIFY(err, 0 == copy_to_user(pra[i].buf.pv,
rpra[i].buf.pv, rpra[i].buf.len));
- if (err)
- goto bail;
+ if (err)
+ goto bail;
+ } else {
+ memmove(pra[i].buf.pv, rpra[i].buf.pv,
+ rpra[i].buf.len);
+ }
}
}
size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
@@ -471,15 +500,17 @@
dmac_inv_range(rpra, (char *)rpra + used);
}
-static int fastrpc_invoke_send(struct fastrpc_apps *me, uint32_t handle,
+static int fastrpc_invoke_send(struct fastrpc_apps *me,
+ uint32_t kernel, uint32_t handle,
uint32_t sc, struct smq_invoke_ctx *ctx,
struct fastrpc_buf *buf)
{
struct smq_msg msg;
int err = 0, len;
-
msg.pid = current->tgid;
msg.tid = current->pid;
+ if (kernel)
+ msg.pid = 0;
msg.invoke.header.ctx = ctx;
msg.invoke.header.handle = handle;
msg.invoke.header.sc = sc;
@@ -595,12 +626,15 @@
VERIFY(err, 0 != (fd = kzalloc(sizeof(*fd), GFP_KERNEL)));
if (err)
goto bail;
+
+ INIT_HLIST_NODE(&fd->hn);
+
fd->buf.size = PAGE_SIZE;
VERIFY(err, 0 == alloc_mem(&fd->buf));
if (err)
goto bail;
fd->tgid = current->tgid;
- INIT_HLIST_NODE(&fd->hn);
+
*dev = fd;
bail:
if (err)
@@ -681,8 +715,8 @@
}
context_list_alloc_ctx(&me->clst, &ctx);
- VERIFY(err, 0 == fastrpc_invoke_send(me, invoke->handle, sc, ctx,
- &obuf));
+ VERIFY(err, 0 == fastrpc_invoke_send(me, kernel, invoke->handle, sc,
+ ctx, &obuf));
if (err)
goto bail;
inv_args(sc, rpra, obuf.used);
@@ -730,7 +764,7 @@
ioctl.handle = 1;
ioctl.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
ioctl.pra = ra;
- VERIFY(err, 0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
+ VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
return err;
}
@@ -748,7 +782,155 @@
ioctl.handle = 1;
ioctl.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
ioctl.pra = ra;
- VERIFY(err, 0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
+ VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
+ return err;
+}
+
+static int fastrpc_mmap_on_dsp(struct fastrpc_apps *me,
+ struct fastrpc_ioctl_mmap *mmap,
+ struct smq_phy_page *pages,
+ int num)
+{
+ struct fastrpc_ioctl_invoke ioctl;
+ remote_arg_t ra[3];
+ int err = 0;
+ struct {
+ int pid;
+ uint32_t flags;
+ uint32_t vaddrin;
+ int num;
+ } inargs;
+
+ struct {
+ uint32_t vaddrout;
+ } routargs;
+ inargs.pid = current->tgid;
+ inargs.vaddrin = mmap->vaddrin;
+ inargs.flags = mmap->flags;
+ inargs.num = num;
+ ra[0].buf.pv = &inargs;
+ ra[0].buf.len = sizeof(inargs);
+
+ ra[1].buf.pv = pages;
+ ra[1].buf.len = num * sizeof(*pages);
+
+ ra[2].buf.pv = &routargs;
+ ra[2].buf.len = sizeof(routargs);
+
+ ioctl.handle = 1;
+ ioctl.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
+ ioctl.pra = ra;
+ VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
+ mmap->vaddrout = routargs.vaddrout;
+ if (err)
+ goto bail;
+bail:
+ return err;
+}
+
+static int fastrpc_munmap_on_dsp(struct fastrpc_apps *me,
+ struct fastrpc_ioctl_munmap *munmap)
+{
+ struct fastrpc_ioctl_invoke ioctl;
+ remote_arg_t ra[1];
+ int err = 0;
+ struct {
+ int pid;
+ uint32_t vaddrout;
+ int size;
+ } inargs;
+
+ inargs.pid = current->tgid;
+ inargs.size = munmap->size;
+ inargs.vaddrout = munmap->vaddrout;
+ ra[0].buf.pv = &inargs;
+ ra[0].buf.len = sizeof(inargs);
+
+ ioctl.handle = 1;
+ ioctl.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
+ ioctl.pra = ra;
+ VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
+ return err;
+}
+
+static int fastrpc_internal_munmap(struct fastrpc_apps *me,
+ struct file_data *fdata,
+ struct fastrpc_ioctl_munmap *munmap)
+{
+ int err = 0;
+ struct fastrpc_mmap *map = 0, *mapfree = 0;
+ struct hlist_node *pos, *n;
+ VERIFY(err, 0 == (err = fastrpc_munmap_on_dsp(me, munmap)));
+ if (err)
+ goto bail;
+ spin_lock(&fdata->hlock);
+ hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
+ if (map->vaddrout == munmap->vaddrout &&
+ map->size == munmap->size) {
+ hlist_del(&map->hn);
+ mapfree = map;
+ map = 0;
+ break;
+ }
+ }
+ spin_unlock(&fdata->hlock);
+bail:
+ if (mapfree) {
+ free_map(mapfree);
+ kfree(mapfree);
+ }
+ return err;
+}
+
+
+static int fastrpc_internal_mmap(struct fastrpc_apps *me,
+ struct file_data *fdata,
+ struct fastrpc_ioctl_mmap *mmap)
+{
+ struct ion_client *clnt = gfa.iclient;
+ struct fastrpc_mmap *map = 0;
+ struct smq_phy_page *pages = 0;
+ void *buf;
+ int len;
+ int num;
+ int err = 0;
+
+ VERIFY(err, 0 != (map = kzalloc(sizeof(*map), GFP_KERNEL)));
+ if (err)
+ goto bail;
+ map->handle = ion_import_dma_buf(clnt, mmap->fd);
+ VERIFY(err, 0 == IS_ERR_OR_NULL(map->handle));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 != (map->virt = ion_map_kernel(clnt, map->handle)));
+ if (err)
+ goto bail;
+ buf = (void *)mmap->vaddrin;
+ len = mmap->size;
+ num = buf_num_pages(buf, len);
+ VERIFY(err, 0 != (pages = kzalloc(num * sizeof(*pages), GFP_KERNEL)));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 < (num = buf_get_pages(buf, len, num, 1, pages, num)));
+ if (err)
+ goto bail;
+
+ VERIFY(err, 0 == fastrpc_mmap_on_dsp(me, mmap, pages, num));
+ if (err)
+ goto bail;
+ map->vaddrin = mmap->vaddrin;
+ map->vaddrout = mmap->vaddrout;
+ map->size = mmap->size;
+ INIT_HLIST_NODE(&map->hn);
+ spin_lock(&fdata->hlock);
+ hlist_add_head(&map->hn, &fdata->hlst);
+ spin_unlock(&fdata->hlock);
+ bail:
+ if (err && map) {
+ free_map(map);
+ kfree(map);
+ }
+ kfree(pages);
return err;
}
@@ -781,22 +963,48 @@
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
+ struct file_data *fdata = (struct file_data *)file->private_data;
(void)fastrpc_release_current_dsp_process();
cleanup_current_dev();
+ if (fdata) {
+ struct fastrpc_mmap *map;
+ struct hlist_node *n, *pos;
+ file->private_data = 0;
+ hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
+ hlist_del(&map->hn);
+ free_map(map);
+ kfree(map);
+ }
+ kfree(fdata);
+ }
return 0;
}
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
int err = 0;
-
+ filp->private_data = 0;
if (0 != try_module_get(THIS_MODULE)) {
+ struct file_data *fdata = 0;
/* This call will cause a dev to be created
* which will addref this module
*/
+ VERIFY(err, 0 != (fdata = kzalloc(sizeof(*fdata), GFP_KERNEL)));
+ if (err)
+ goto bail;
+
+ spin_lock_init(&fdata->hlock);
+ INIT_HLIST_HEAD(&fdata->hlst);
+
VERIFY(err, 0 == fastrpc_create_current_dsp_process());
if (err)
+ goto bail;
+ filp->private_data = fdata;
+bail:
+ if (err) {
cleanup_current_dev();
+ kfree(fdata);
+ }
module_put(THIS_MODULE);
}
return err;
@@ -808,8 +1016,11 @@
{
struct fastrpc_apps *me = &gfa;
struct fastrpc_ioctl_invoke invoke;
+ struct fastrpc_ioctl_mmap mmap;
+ struct fastrpc_ioctl_munmap munmap;
remote_arg_t *pra = 0;
void *param = (char *)ioctl_param;
+ struct file_data *fdata = (struct file_data *)file->private_data;
int bufs, err = 0;
switch (ioctl_num) {
@@ -834,6 +1045,29 @@
if (err)
goto bail;
break;
+ case FASTRPC_IOCTL_MMAP:
+ VERIFY(err, 0 == copy_from_user(&mmap, param,
+ sizeof(mmap)));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_mmap(me, fdata,
+ &mmap)));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == copy_to_user(param, &mmap, sizeof(mmap)));
+ if (err)
+ goto bail;
+ break;
+ case FASTRPC_IOCTL_MUNMAP:
+ VERIFY(err, 0 == copy_from_user(&munmap, param,
+ sizeof(munmap)));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_munmap(me, fdata,
+ &munmap)));
+ if (err)
+ goto bail;
+ break;
default:
err = -ENOTTY;
break;
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 8932d3c..f2804ad 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -16,7 +16,9 @@
#include <linux/types.h>
-#define FASTRPC_IOCTL_INVOKE _IOWR('R', 1, struct fastrpc_ioctl_invoke)
+#define FASTRPC_IOCTL_INVOKE _IOWR('R', 1, struct fastrpc_ioctl_invoke)
+#define FASTRPC_IOCTL_MMAP _IOWR('R', 2, struct fastrpc_ioctl_mmap)
+#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 3, struct fastrpc_ioctl_munmap)
#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
#define DEVICE_NAME "adsprpc-smd"
@@ -92,6 +94,20 @@
remote_arg_t *pra; /* remote arguments list */
};
+struct fastrpc_ioctl_munmap {
+ uint32_t vaddrout; /* address to unmap */
+ int size; /* size */
+};
+
+
+struct fastrpc_ioctl_mmap {
+ int fd; /* ion fd */
+ uint32_t flags; /* flags for dsp to map with */
+ uint32_t vaddrin; /* optional virtual address */
+ int size; /* size */
+ uint32_t vaddrout; /* dsps virtual address */
+};
+
struct smq_null_invoke {
struct smq_invoke_ctx *ctx; /* invoke caller context */
uint32_t handle; /* handle to invoke */
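The two new ioctls operate on the structures above. From user space the call sequence is roughly the sketch below; the /dev node name is assumed from DEVICE_NAME, the ion fd is assumed to come from a prior ION allocation, and the structures and ioctl numbers come from adsprpc_shared.h.

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	/* Illustrative only: map an ION buffer onto the DSP, then unmap it. */
	static int map_then_unmap(int ion_fd, int buf_size)
	{
		struct fastrpc_ioctl_mmap m = {
			.fd = ion_fd, .flags = 0, .vaddrin = 0, .size = buf_size,
		};
		struct fastrpc_ioctl_munmap um;
		int fd, ret;

		fd = open("/dev/adsprpc-smd", O_RDWR);
		if (fd < 0)
			return -1;

		ret = ioctl(fd, FASTRPC_IOCTL_MMAP, &m);
		if (ret == 0) {
			um.vaddrout = m.vaddrout;	/* DSP-side address */
			um.size = m.size;
			ioctl(fd, FASTRPC_IOCTL_MUNMAP, &um);
		}
		close(fd);
		return ret;
	}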
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 0c93101..071dd69 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -734,8 +734,8 @@
(driver->log_on_demand_support)) {
driver->apps_rsp_buf[0] = 0x78;
/* Copy log code received */
- *(uint16_t *)(driver->apps_rsp_buf+1) =
- *(uint16_t *)buf;
+ *(uint16_t *)(driver->apps_rsp_buf + 1) =
+ *(uint16_t *)(buf + 1);
driver->apps_rsp_buf[3] = 0x1;/* Unknown */
encode_rsp_and_send(3);
}
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index a4eea54..9d9df2a 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -162,6 +162,8 @@
smd_channel_t *ch;
smd_channel_t *ch_save;
+ struct mutex smd_ch_mutex;
+
int in_busy_1;
int in_busy_2;
@@ -249,6 +251,7 @@
unsigned char *buf_event_mask_update;
unsigned char *buf_feature_mask_update;
int read_len_legacy;
+ struct mutex diag_hdlc_mutex;
unsigned char *hdlc_buf;
unsigned hdlc_count;
unsigned hdlc_escape;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 6a14143..79a73f3 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -611,8 +611,12 @@
diag_check_mode_reset(buf)) {
return;
}
+ mutex_lock(&driver->smd_data[index].
+ smd_ch_mutex);
smd_write(driver->smd_data[index].ch,
buf, len);
+ mutex_unlock(&driver->smd_data[index].
+ smd_ch_mutex);
} else {
pr_err("diag: In %s, smd channel %d not open\n",
__func__, index);
@@ -1003,6 +1007,9 @@
{
struct diag_hdlc_decode_type hdlc;
int ret, type = 0;
+
+ mutex_lock(&driver->diag_hdlc_mutex);
+
pr_debug("diag: HDLC decode fn, len of data %d\n", len);
hdlc.dest_ptr = driver->hdlc_buf;
hdlc.dest_size = USB_MAX_OUT_BUF;
@@ -1023,14 +1030,17 @@
if (hdlc.dest_idx < 4) {
pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n",
__func__, len, hdlc.dest_idx);
+ mutex_unlock(&driver->diag_hdlc_mutex);
return;
}
if (ret) {
type = diag_process_apps_pkt(driver->hdlc_buf,
hdlc.dest_idx - 3);
- if (type < 0)
+ if (type < 0) {
+ mutex_unlock(&driver->diag_hdlc_mutex);
return;
+ }
} else if (driver->debug_flag) {
printk(KERN_ERR "Packet dropped due to bad HDLC coding/CRC"
" errors or partial packet received, packet"
@@ -1049,10 +1059,15 @@
if (chk_apps_only()) {
diag_send_error_rsp(hdlc.dest_idx);
} else { /* APQ 8060, Let Q6 respond */
- if (driver->smd_data[LPASS_DATA].ch)
+ if (driver->smd_data[LPASS_DATA].ch) {
+ mutex_lock(&driver->smd_data[LPASS_DATA].
+ smd_ch_mutex);
smd_write(driver->smd_data[LPASS_DATA].ch,
driver->hdlc_buf,
hdlc.dest_idx - 3);
+ mutex_unlock(&driver->smd_data[LPASS_DATA].
+ smd_ch_mutex);
+ }
}
type = 0;
}
@@ -1067,8 +1082,10 @@
if ((driver->smd_data[MODEM_DATA].ch) && (ret) && (type) &&
(hdlc.dest_idx > 3)) {
APPEND_DEBUG('g');
+ mutex_lock(&driver->smd_data[MODEM_DATA].smd_ch_mutex);
smd_write(driver->smd_data[MODEM_DATA].ch,
driver->hdlc_buf, hdlc.dest_idx - 3);
+ mutex_unlock(&driver->smd_data[MODEM_DATA].smd_ch_mutex);
APPEND_DEBUG('h');
#ifdef DIAG_DEBUG
printk(KERN_INFO "writing data to SMD, pkt length %d\n", len);
@@ -1076,6 +1093,7 @@
1, DUMP_PREFIX_ADDRESS, data, len, 1);
#endif /* DIAG DEBUG */
}
+ mutex_unlock(&driver->diag_hdlc_mutex);
}
#ifdef CONFIG_DIAG_OVER_USB
@@ -1411,6 +1429,7 @@
{
smd_info->peripheral = peripheral;
smd_info->type = type;
+ mutex_init(&smd_info->smd_ch_mutex);
switch (peripheral) {
case MODEM_DATA:
@@ -1525,6 +1544,7 @@
diag_debug_buf_idx = 0;
driver->read_len_legacy = 0;
driver->use_device_tree = has_device_tree();
+ mutex_init(&driver->diag_hdlc_mutex);
mutex_init(&driver->diag_cntl_mutex);
success = diag_smd_constructor(&driver->smd_data[MODEM_DATA],
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index 2f87803..b0fb9d8 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -19,7 +19,6 @@
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
-#include <linux/android_pmem.h>
#include <linux/msm_rotator.h>
#include <linux/io.h>
#include <mach/msm_rotator_imem.h>
@@ -856,10 +855,6 @@
struct file *file = NULL;
int put_needed, fb_num;
#endif
-#ifdef CONFIG_ANDROID_PMEM
- unsigned long vstart;
-#endif
-
*p_need = 0;
#ifdef CONFIG_FB
@@ -890,27 +885,14 @@
}
#endif
-#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
return msm_rotator_iommu_map_buf(fbd->memory_id, domain, start,
len, p_ihdl, secure);
-#endif
-#ifdef CONFIG_ANDROID_PMEM
- if (!get_pmem_file(fbd->memory_id, start, &vstart, len, p_file))
- return 0;
- else
- return -ENOMEM;
-#endif
}
static void put_img(struct file *p_file, struct ion_handle *p_ihdl,
int domain, unsigned int secure)
{
-#ifdef CONFIG_ANDROID_PMEM
- if (p_file != NULL)
- put_pmem_file(p_file);
-#endif
-
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
if (!IS_ERR_OR_NULL(p_ihdl)) {
pr_debug("%s(): p_ihdl %p\n", __func__, p_ihdl);
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 5c1cc5a..24cf30a 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -2366,6 +2366,7 @@
ce_support->aes_ccm = false;
ce_support->ota = pce_dev->ota;
ce_support->aligned_only = false;
+ ce_support->is_shared = false;
ce_support->bam = false;
return 0;
}
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index 8a31003..3ff84cf 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -113,6 +113,7 @@
bool ota;
bool aligned_only;
bool bam;
+ bool is_shared;
};
/* Sha operation parameters */
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
index 84d41da..7b0964d 100644
--- a/drivers/crypto/msm/qce40.c
+++ b/drivers/crypto/msm/qce40.c
@@ -2634,6 +2634,7 @@
ce_support->aes_ccm = true;
ce_support->ota = false;
ce_support->aligned_only = false;
+ ce_support->is_shared = false;
ce_support->bam = false;
return 0;
}
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 9d8e825..fe8cf57 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -61,6 +61,7 @@
unsigned char *coh_vmem; /* Allocated coherent virtual memory */
dma_addr_t coh_pmem; /* Allocated coherent physical memory */
int memsize; /* Memory allocated */
+ int is_shared; /* CE HW is shared */
void __iomem *iobase; /* Virtual io base of CE HW */
unsigned int phy_iobase; /* Physical io base of CE HW */
@@ -1047,7 +1048,7 @@
/* Producer pipe will handle this connection */
sps_connect_info->mode = SPS_MODE_SRC;
sps_connect_info->options =
- SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_DESC_DONE;
+ SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
} else {
/* For CE consumer transfer, source should be
* system memory where as destination should
@@ -2333,6 +2334,7 @@
/* Register callback event for EOT (End of transfer) event. */
pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback;
+ pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.producer.event);
if (rc) {
@@ -2341,6 +2343,7 @@
}
/* Register callback event for EOT (End of transfer) event. */
pce_dev->ce_sps.consumer.event.callback = _aead_sps_consumer_callback;
+ pce_dev->ce_sps.consumer.event.options = SPS_O_DESC_DONE;
rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
&pce_dev->ce_sps.consumer.event);
if (rc) {
@@ -2400,8 +2403,6 @@
if (totallen_in > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
- pce_dev->ce_sps.producer.event.options =
- SPS_O_DESC_DONE;
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
_qce_sps_add_data(
@@ -2489,6 +2490,7 @@
/* Register callback event for EOT (End of transfer) event. */
pce_dev->ce_sps.producer.event.callback =
_ablk_cipher_sps_producer_callback;
+ pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.producer.event);
if (rc) {
@@ -2498,6 +2500,7 @@
/* Register callback event for EOT (End of transfer) event. */
pce_dev->ce_sps.consumer.event.callback =
_ablk_cipher_sps_consumer_callback;
+ pce_dev->ce_sps.consumer.event.options = SPS_O_DESC_DONE;
rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
&pce_dev->ce_sps.consumer.event);
if (rc) {
@@ -2519,7 +2522,6 @@
if (areq->nbytes > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
- pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
@@ -2571,6 +2573,7 @@
/* Register callback event for EOT (End of transfer) event. */
pce_dev->ce_sps.producer.event.callback = _sha_sps_producer_callback;
+ pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.producer.event);
if (rc) {
@@ -2580,6 +2583,7 @@
/* Register callback event for EOT (End of transfer) event. */
pce_dev->ce_sps.consumer.event.callback = _sha_sps_consumer_callback;
+ pce_dev->ce_sps.consumer.event.options = SPS_O_DESC_DONE;
rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
&pce_dev->ce_sps.consumer.event);
if (rc) {
@@ -2619,6 +2623,9 @@
struct resource *resource;
int rc = 0;
+ pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,ce-hw-shared");
+
if (of_property_read_u32((&pdev->dev)->of_node,
"qcom,bam-pipe-pair",
&pce_dev->ce_sps.pipe_pair_index)) {
@@ -2902,6 +2909,7 @@
ce_support->aes_xts = true;
ce_support->ota = false;
ce_support->bam = true;
+ ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
ce_support->aes_ccm = true;
if (pce_dev->ce_sps.minor_version)
ce_support->aligned_only = false;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 464fa21..1c63b70 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -30,7 +30,7 @@
comment "DEVFREQ Governors"
config DEVFREQ_GOV_SIMPLE_ONDEMAND
- bool "Simple Ondemand"
+ tristate "Simple Ondemand"
help
Chooses frequency based on the recent load on the device. Works
similar as ONDEMAND governor of CPUFREQ does. A device with
@@ -39,7 +39,7 @@
values to the governor with data field at devfreq_add_device().
config DEVFREQ_GOV_PERFORMANCE
- bool "Performance"
+ tristate "Performance"
help
Sets the frequency at the maximum available frequency.
This governor always returns UINT_MAX as frequency so that
@@ -47,7 +47,7 @@
at any time.
config DEVFREQ_GOV_POWERSAVE
- bool "Powersave"
+ tristate "Powersave"
help
Sets the frequency at the minimum available frequency.
This governor always returns 0 as frequency so that
@@ -55,7 +55,7 @@
at any time.
config DEVFREQ_GOV_USERSPACE
- bool "Userspace"
+ tristate "Userspace"
help
Sets the frequency at the user specified one.
This governor returns the user configured frequency if there
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 70c31d4..89d779c 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -27,21 +27,17 @@
#include <linux/hrtimer.h>
#include "governor.h"
-struct class *devfreq_class;
+static struct class *devfreq_class;
/*
- * devfreq_work periodically monitors every registered device.
- * The minimum polling interval is one jiffy. The polling interval is
- * determined by the minimum polling period among all polling devfreq
- * devices. The resolution of polling interval is one jiffy.
+ * devfreq core provides delayed work based load monitoring helper
+ * functions. Governors can use these or can implement their own
+ * monitoring mechanism.
*/
-static bool polling;
static struct workqueue_struct *devfreq_wq;
-static struct delayed_work devfreq_work;
-/* wait removing if this is to be removed */
-static struct devfreq *wait_remove_device;
-
+/* The list of all device-devfreq governors */
+static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
@@ -73,6 +69,79 @@
}
/**
+ * devfreq_get_freq_level() - Lookup freq_table for the frequency
+ * @devfreq: the devfreq instance
+ * @freq: the target frequency
+ */
+static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
+{
+ int lev;
+
+ for (lev = 0; lev < devfreq->profile->max_state; lev++)
+ if (freq == devfreq->profile->freq_table[lev])
+ return lev;
+
+ return -EINVAL;
+}
+
+/**
+ * devfreq_update_status() - Update statistics of devfreq behavior
+ * @devfreq: the devfreq instance
+ * @freq: the update target frequency
+ */
+static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
+{
+ int lev, prev_lev;
+ unsigned long cur_time;
+
+ lev = devfreq_get_freq_level(devfreq, freq);
+ if (lev < 0)
+ return lev;
+
+ cur_time = jiffies;
+ devfreq->time_in_state[lev] +=
+ cur_time - devfreq->last_stat_updated;
+ if (freq != devfreq->previous_freq) {
+ prev_lev = devfreq_get_freq_level(devfreq,
+ devfreq->previous_freq);
+ devfreq->trans_table[(prev_lev *
+ devfreq->profile->max_state) + lev]++;
+ devfreq->total_trans++;
+ }
+ devfreq->last_stat_updated = cur_time;
+
+ return 0;
+}
+
+/**
+ * find_devfreq_governor() - find devfreq governor from name
+ * @name: name of the governor
+ *
+ * Search the list of devfreq governors and return the matched
+ * governor's pointer. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq_governor *find_devfreq_governor(const char *name)
+{
+ struct devfreq_governor *tmp_governor;
+
+ if (unlikely(IS_ERR_OR_NULL(name))) {
+ pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ WARN(!mutex_is_locked(&devfreq_list_lock),
+ "devfreq_list_lock must be locked.");
+
+ list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
+ if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
+ return tmp_governor;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+/* Load monitoring helper functions for governors use */
+
+/**
* update_devfreq() - Reevaluate the device and configure frequency.
* @devfreq: the devfreq instance.
*
@@ -90,6 +159,9 @@
return -EINVAL;
}
+ if (!devfreq->governor)
+ return -EINVAL;
+
/* Reevaluate the proper frequency */
err = devfreq->governor->get_target_freq(devfreq, &freq);
if (err)
@@ -116,16 +188,173 @@
if (err)
return err;
+ if (devfreq->profile->freq_table)
+ if (devfreq_update_status(devfreq, freq))
+ dev_err(&devfreq->dev,
+ "Couldn't update frequency transition information.\n");
+
devfreq->previous_freq = freq;
return err;
}
+EXPORT_SYMBOL(update_devfreq);
+
+/**
+ * devfreq_monitor() - Periodically poll devfreq objects.
+ * @work: the work struct used to run devfreq_monitor periodically.
+ *
+ */
+static void devfreq_monitor(struct work_struct *work)
+{
+ int err;
+ struct devfreq *devfreq = container_of(work,
+ struct devfreq, work.work);
+
+ mutex_lock(&devfreq->lock);
+ err = update_devfreq(devfreq);
+ if (err)
+ dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ mutex_unlock(&devfreq->lock);
+}
+
+/**
+ * devfreq_monitor_start() - Start load monitoring of devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function for starting devfreq device load monitoring. By
+ * default delayed work based monitoring is supported. Function
+ * to be called from governor in response to DEVFREQ_GOV_START
+ * event when device is added to devfreq framework.
+ */
+void devfreq_monitor_start(struct devfreq *devfreq)
+{
+ INIT_DELAYED_WORK_DEFERRABLE(&devfreq->work, devfreq_monitor);
+ if (devfreq->profile->polling_ms)
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+}
+EXPORT_SYMBOL(devfreq_monitor_start);
+
+/**
+ * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function to stop devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_STOP
+ * event when device is removed from devfreq framework.
+ */
+void devfreq_monitor_stop(struct devfreq *devfreq)
+{
+ cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_stop);
+
+/**
+ * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function to suspend devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_SUSPEND
+ * event or when polling interval is set to zero.
+ *
+ * Note: Though this function is the same as devfreq_monitor_stop(),
+ * it is intentionally kept separate to provide hooks for collecting
+ * transition statistics.
+ */
+void devfreq_monitor_suspend(struct devfreq *devfreq)
+{
+ mutex_lock(&devfreq->lock);
+ if (devfreq->stop_polling) {
+ mutex_unlock(&devfreq->lock);
+ return;
+ }
+
+ devfreq->stop_polling = true;
+ mutex_unlock(&devfreq->lock);
+ cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_suspend);
+
+/**
+ * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function to resume devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_RESUME
+ * event or when polling interval is set to non-zero.
+ */
+void devfreq_monitor_resume(struct devfreq *devfreq)
+{
+ mutex_lock(&devfreq->lock);
+ if (!devfreq->stop_polling)
+ goto out;
+
+ if (!delayed_work_pending(&devfreq->work) &&
+ devfreq->profile->polling_ms)
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ devfreq->stop_polling = false;
+
+out:
+ mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_monitor_resume);
+
+/**
+ * devfreq_interval_update() - Update device devfreq monitoring interval
+ * @devfreq: the devfreq instance.
+ * @delay: new polling interval to be set.
+ *
+ * Helper function to set new load monitoring polling interval. Function
+ * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ */
+void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
+{
+ unsigned int cur_delay = devfreq->profile->polling_ms;
+ unsigned int new_delay = *delay;
+
+ mutex_lock(&devfreq->lock);
+ devfreq->profile->polling_ms = new_delay;
+
+ if (devfreq->stop_polling)
+ goto out;
+
+ /* if new delay is zero, stop polling */
+ if (!new_delay) {
+ mutex_unlock(&devfreq->lock);
+ cancel_delayed_work_sync(&devfreq->work);
+ return;
+ }
+
+ /* if current delay is zero, start polling with new delay */
+ if (!cur_delay) {
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ goto out;
+ }
+
+ /* if current delay is greater than new delay, restart polling */
+ if (cur_delay > new_delay) {
+ mutex_unlock(&devfreq->lock);
+ cancel_delayed_work_sync(&devfreq->work);
+ mutex_lock(&devfreq->lock);
+ if (!devfreq->stop_polling)
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ }
+out:
+ mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_interval_update);
/**
* devfreq_notifier_call() - Notify that the device frequency requirements
* has been changed out of devfreq framework.
- * @nb the notifier_block (supposed to be devfreq->nb)
- * @type not used
- * @devp not used
+ * @nb: the notifier_block (supposed to be devfreq->nb)
+ * @type: not used
+ * @devp: not used
*
* Called by a notifier that uses devfreq->nb.
*/
@@ -143,59 +372,34 @@
}
/**
- * _remove_devfreq() - Remove devfreq from the device.
+ * _remove_devfreq() - Remove devfreq from the list and release its resources.
* @devfreq: the devfreq struct
* @skip: skip calling device_unregister().
- *
- * Note that the caller should lock devfreq->lock before calling
- * this. _remove_devfreq() will unlock it and free devfreq
- * internally. devfreq_list_lock should be locked by the caller
- * as well (not relased at return)
- *
- * Lock usage:
- * devfreq->lock: locked before call.
- * unlocked at return (and freed)
- * devfreq_list_lock: locked before call.
- * kept locked at return.
- * if devfreq is centrally polled.
- *
- * Freed memory:
- * devfreq
*/
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
- if (!mutex_is_locked(&devfreq->lock)) {
- WARN(true, "devfreq->lock must be locked by the caller.\n");
+ mutex_lock(&devfreq_list_lock);
+ if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
+ mutex_unlock(&devfreq_list_lock);
+ dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
return;
}
- if (!devfreq->governor->no_central_polling &&
- !mutex_is_locked(&devfreq_list_lock)) {
- WARN(true, "devfreq_list_lock must be locked by the caller.\n");
- return;
- }
+ list_del(&devfreq->node);
+ mutex_unlock(&devfreq_list_lock);
- if (devfreq->being_removed)
- return;
-
- devfreq->being_removed = true;
+ if (devfreq->governor)
+ devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
if (devfreq->profile->exit)
devfreq->profile->exit(devfreq->dev.parent);
- if (devfreq->governor->exit)
- devfreq->governor->exit(devfreq);
-
if (!skip && get_device(&devfreq->dev)) {
device_unregister(&devfreq->dev);
put_device(&devfreq->dev);
}
- if (!devfreq->governor->no_central_polling)
- list_del(&devfreq->node);
-
- mutex_unlock(&devfreq->lock);
mutex_destroy(&devfreq->lock);
-
kfree(devfreq);
}
@@ -210,163 +414,39 @@
static void devfreq_dev_release(struct device *dev)
{
struct devfreq *devfreq = to_devfreq(dev);
- bool central_polling = !devfreq->governor->no_central_polling;
- /*
- * If devfreq_dev_release() was called by device_unregister() of
- * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
- * being_removed is already set. This also partially checks the case
- * where devfreq_dev_release() is called from a thread other than
- * the one called _remove_devfreq(); however, this case is
- * dealt completely with another following being_removed check.
- *
- * Because being_removed is never being
- * unset, we do not need to worry about race conditions on
- * being_removed.
- */
- if (devfreq->being_removed)
- return;
-
- if (central_polling)
- mutex_lock(&devfreq_list_lock);
-
- mutex_lock(&devfreq->lock);
-
- /*
- * Check being_removed flag again for the case where
- * devfreq_dev_release() was called in a thread other than the one
- * possibly called _remove_devfreq().
- */
- if (devfreq->being_removed) {
- mutex_unlock(&devfreq->lock);
- goto out;
- }
-
- /* devfreq->lock is unlocked and removed in _removed_devfreq() */
_remove_devfreq(devfreq, true);
-
-out:
- if (central_polling)
- mutex_unlock(&devfreq_list_lock);
-}
-
-/**
- * devfreq_monitor() - Periodically poll devfreq objects.
- * @work: the work struct used to run devfreq_monitor periodically.
- *
- */
-static void devfreq_monitor(struct work_struct *work)
-{
- static unsigned long last_polled_at;
- struct devfreq *devfreq, *tmp;
- int error;
- unsigned long jiffies_passed;
- unsigned long next_jiffies = ULONG_MAX, now = jiffies;
- struct device *dev;
-
- /* Initially last_polled_at = 0, polling every device at bootup */
- jiffies_passed = now - last_polled_at;
- last_polled_at = now;
- if (jiffies_passed == 0)
- jiffies_passed = 1;
-
- mutex_lock(&devfreq_list_lock);
- list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
- mutex_lock(&devfreq->lock);
- dev = devfreq->dev.parent;
-
- /* Do not remove tmp for a while */
- wait_remove_device = tmp;
-
- if (devfreq->governor->no_central_polling ||
- devfreq->next_polling == 0) {
- mutex_unlock(&devfreq->lock);
- continue;
- }
- mutex_unlock(&devfreq_list_lock);
-
- /*
- * Reduce more next_polling if devfreq_wq took an extra
- * delay. (i.e., CPU has been idled.)
- */
- if (devfreq->next_polling <= jiffies_passed) {
- error = update_devfreq(devfreq);
-
- /* Remove a devfreq with an error. */
- if (error && error != -EAGAIN) {
-
- dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
- error, devfreq->governor->name);
-
- /*
- * Unlock devfreq before locking the list
- * in order to avoid deadlock with
- * find_device_devfreq or others
- */
- mutex_unlock(&devfreq->lock);
- mutex_lock(&devfreq_list_lock);
- /* Check if devfreq is already removed */
- if (IS_ERR(find_device_devfreq(dev)))
- continue;
- mutex_lock(&devfreq->lock);
- /* This unlocks devfreq->lock and free it */
- _remove_devfreq(devfreq, false);
- continue;
- }
- devfreq->next_polling = devfreq->polling_jiffies;
- } else {
- devfreq->next_polling -= jiffies_passed;
- }
-
- if (devfreq->next_polling)
- next_jiffies = (next_jiffies > devfreq->next_polling) ?
- devfreq->next_polling : next_jiffies;
-
- mutex_unlock(&devfreq->lock);
- mutex_lock(&devfreq_list_lock);
- }
- wait_remove_device = NULL;
- mutex_unlock(&devfreq_list_lock);
-
- if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
- polling = true;
- queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
- } else {
- polling = false;
- }
}
/**
* devfreq_add_device() - Add devfreq feature to the device
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
- * @governor: the policy to choose frequency.
+ * @governor_name: name of the policy to choose frequency.
* @data: private data for the governor. The devfreq framework does not
* touch this value.
*/
struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
- const struct devfreq_governor *governor,
+ const char *governor_name,
void *data)
{
struct devfreq *devfreq;
+ struct devfreq_governor *governor;
int err = 0;
- if (!dev || !profile || !governor) {
+ if (!dev || !profile || !governor_name) {
dev_err(dev, "%s: Invalid parameters.\n", __func__);
return ERR_PTR(-EINVAL);
}
-
- if (!governor->no_central_polling) {
- mutex_lock(&devfreq_list_lock);
- devfreq = find_device_devfreq(dev);
- mutex_unlock(&devfreq_list_lock);
- if (!IS_ERR(devfreq)) {
- dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
- err = -EINVAL;
- goto err_out;
- }
+ mutex_lock(&devfreq_list_lock);
+ devfreq = find_device_devfreq(dev);
+ mutex_unlock(&devfreq_list_lock);
+ if (!IS_ERR(devfreq)) {
+ dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
+ err = -EINVAL;
+ goto err_out;
}
devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
@@ -383,92 +463,316 @@
devfreq->dev.class = devfreq_class;
devfreq->dev.release = devfreq_dev_release;
devfreq->profile = profile;
- devfreq->governor = governor;
+ strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
devfreq->data = data;
- devfreq->next_polling = devfreq->polling_jiffies
- = msecs_to_jiffies(devfreq->profile->polling_ms);
devfreq->nb.notifier_call = devfreq_notifier_call;
+ devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
+ devfreq->profile->max_state *
+ devfreq->profile->max_state,
+ GFP_KERNEL);
+ devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
+ devfreq->profile->max_state,
+ GFP_KERNEL);
+ devfreq->last_stat_updated = jiffies;
+
dev_set_name(&devfreq->dev, dev_name(dev));
err = device_register(&devfreq->dev);
if (err) {
put_device(&devfreq->dev);
+ mutex_unlock(&devfreq->lock);
goto err_dev;
}
- if (governor->init)
- err = governor->init(devfreq);
- if (err)
- goto err_init;
-
mutex_unlock(&devfreq->lock);
- if (governor->no_central_polling)
- goto out;
-
mutex_lock(&devfreq_list_lock);
-
list_add(&devfreq->node, &devfreq_list);
- if (devfreq_wq && devfreq->next_polling && !polling) {
- polling = true;
- queue_delayed_work(devfreq_wq, &devfreq_work,
- devfreq->next_polling);
- }
+ governor = find_devfreq_governor(devfreq->governor_name);
+ if (!IS_ERR(governor))
+ devfreq->governor = governor;
+ if (devfreq->governor)
+ err = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_START, NULL);
mutex_unlock(&devfreq_list_lock);
-out:
+ if (err) {
+ dev_err(dev, "%s: Unable to start governor for the device\n",
+ __func__);
+ goto err_init;
+ }
+
return devfreq;
err_init:
+ list_del(&devfreq->node);
device_unregister(&devfreq->dev);
err_dev:
- mutex_unlock(&devfreq->lock);
kfree(devfreq);
err_out:
return ERR_PTR(err);
}
+EXPORT_SYMBOL(devfreq_add_device);
/**
* devfreq_remove_device() - Remove devfreq feature from a device.
- * @devfreq the devfreq instance to be removed
+ * @devfreq: the devfreq instance to be removed
*/
int devfreq_remove_device(struct devfreq *devfreq)
{
- bool central_polling;
-
if (!devfreq)
return -EINVAL;
- central_polling = !devfreq->governor->no_central_polling;
-
- if (central_polling) {
- mutex_lock(&devfreq_list_lock);
- while (wait_remove_device == devfreq) {
- mutex_unlock(&devfreq_list_lock);
- schedule();
- mutex_lock(&devfreq_list_lock);
- }
- }
-
- mutex_lock(&devfreq->lock);
- _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
-
- if (central_polling)
- mutex_unlock(&devfreq_list_lock);
+ _remove_devfreq(devfreq, false);
return 0;
}
+EXPORT_SYMBOL(devfreq_remove_device);
+
+/**
+ * devfreq_suspend_device() - Suspend devfreq of a device.
+ * @devfreq: the devfreq instance to be suspended
+ */
+int devfreq_suspend_device(struct devfreq *devfreq)
+{
+ if (!devfreq)
+ return -EINVAL;
+
+ if (!devfreq->governor)
+ return 0;
+
+ return devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_SUSPEND, NULL);
+}
+EXPORT_SYMBOL(devfreq_suspend_device);
+
+/**
+ * devfreq_resume_device() - Resume devfreq of a device.
+ * @devfreq: the devfreq instance to be resumed
+ */
+int devfreq_resume_device(struct devfreq *devfreq)
+{
+ if (!devfreq)
+ return -EINVAL;
+
+ if (!devfreq->governor)
+ return 0;
+
+ return devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_RESUME, NULL);
+}
+EXPORT_SYMBOL(devfreq_resume_device);
+
+/**
+ * devfreq_add_governor() - Add devfreq governor
+ * @governor: the devfreq governor to be added
+ */
+int devfreq_add_governor(struct devfreq_governor *governor)
+{
+ struct devfreq_governor *g;
+ struct devfreq *devfreq;
+ int err = 0;
+
+ if (!governor) {
+ pr_err("%s: Invalid parameters.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&devfreq_list_lock);
+ g = find_devfreq_governor(governor->name);
+ if (!IS_ERR(g)) {
+ pr_err("%s: governor %s already registered\n", __func__,
+ g->name);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ list_add(&governor->node, &devfreq_governor_list);
+
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ int ret = 0;
+ struct device *dev = devfreq->dev.parent;
+
+ if (!strncmp(devfreq->governor_name, governor->name,
+ DEVFREQ_NAME_LEN)) {
+ /* The following should never occur */
+ if (devfreq->governor) {
+ dev_warn(dev,
+ "%s: Governor %s already present\n",
+ __func__, devfreq->governor->name);
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev,
+ "%s: Governor %s stop = %d\n",
+ __func__,
+ devfreq->governor->name, ret);
+ }
+ /* Fall through */
+ }
+ devfreq->governor = governor;
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_START, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s start=%d\n",
+ __func__, devfreq->governor->name,
+ ret);
+ }
+ }
+ }
+
+err_out:
+ mutex_unlock(&devfreq_list_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(devfreq_add_governor);
+
+/**
+ * devfreq_remove_governor() - Remove devfreq governor
+ * @governor: the devfreq governor to be removed
+ */
+int devfreq_remove_governor(struct devfreq_governor *governor)
+{
+ struct devfreq_governor *g;
+ struct devfreq *devfreq;
+ int err = 0;
+
+ if (!governor) {
+ pr_err("%s: Invalid parameters.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&devfreq_list_lock);
+ g = find_devfreq_governor(governor->name);
+ if (IS_ERR(g)) {
+ pr_err("%s: governor %s not registered\n", __func__,
+ governor->name);
+ err = PTR_ERR(g);
+ goto err_out;
+ }
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ int ret;
+ struct device *dev = devfreq->dev.parent;
+
+ if (!strncmp(devfreq->governor_name, governor->name,
+ DEVFREQ_NAME_LEN)) {
+ /* we should have a devfreq governor! */
+ if (!devfreq->governor) {
+ dev_warn(dev, "%s: Governor %s NOT present\n",
+ __func__, governor->name);
+ continue;
+ /* Fall through */
+ }
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s stop=%d\n",
+ __func__, devfreq->governor->name,
+ ret);
+ }
+ devfreq->governor = NULL;
+ }
+ }
+
+ list_del(&governor->node);
+err_out:
+ mutex_unlock(&devfreq_list_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(devfreq_remove_governor);
static ssize_t show_governor(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ if (!to_devfreq(dev)->governor)
+ return -EINVAL;
+
return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}
+static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ int ret;
+ char str_governor[DEVFREQ_NAME_LEN + 1];
+ struct devfreq_governor *governor;
+
+ ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&devfreq_list_lock);
+ governor = find_devfreq_governor(str_governor);
+ if (IS_ERR(governor)) {
+ ret = PTR_ERR(governor);
+ goto out;
+ }
+ if (df->governor == governor)
+ goto out;
+
+ if (df->governor) {
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+ __func__, df->governor->name, ret);
+ goto out;
+ }
+ }
+ df->governor = governor;
+ strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+ if (ret)
+ dev_warn(dev, "%s: Governor %s not started(%d)\n",
+ __func__, df->governor->name, ret);
+out:
+ mutex_unlock(&devfreq_list_lock);
+
+ if (!ret)
+ ret = count;
+ return ret;
+}
+static ssize_t show_available_governors(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq_governor *tmp_governor;
+ ssize_t count = 0;
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
+ count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+ "%s ", tmp_governor->name);
+ mutex_unlock(&devfreq_list_lock);
+
+ /* Truncate the trailing space */
+ if (count)
+ count--;
+
+ count += sprintf(&buf[count], "\n");
+
+ return count;
+}
+
static ssize_t show_freq(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ unsigned long freq;
+ struct devfreq *devfreq = to_devfreq(dev);
+
+ if (devfreq->profile->get_cur_freq &&
+ !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+ return sprintf(buf, "%lu\n", freq);
+
+ return sprintf(buf, "%lu\n", devfreq->previous_freq);
+}
+
+static ssize_t show_target_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
@@ -486,39 +790,19 @@
unsigned int value;
int ret;
+ if (!df->governor)
+ return -EINVAL;
+
ret = sscanf(buf, "%u", &value);
if (ret != 1)
- goto out;
+ return -EINVAL;
- mutex_lock(&df->lock);
- df->profile->polling_ms = value;
- df->next_polling = df->polling_jiffies
- = msecs_to_jiffies(value);
- mutex_unlock(&df->lock);
-
+ df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
ret = count;
- if (df->governor->no_central_polling)
- goto out;
-
- mutex_lock(&devfreq_list_lock);
- if (df->next_polling > 0 && !polling) {
- polling = true;
- queue_delayed_work(devfreq_wq, &devfreq_work,
- df->next_polling);
- }
- mutex_unlock(&devfreq_list_lock);
-out:
return ret;
}
-static ssize_t show_central_polling(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n",
- !to_devfreq(dev)->governor->no_central_polling);
-}
-
static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -529,7 +813,7 @@
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
- goto out;
+ return -EINVAL;
mutex_lock(&df->lock);
max = df->max_freq;
@@ -543,7 +827,6 @@
ret = count;
unlock:
mutex_unlock(&df->lock);
-out:
return ret;
}
@@ -563,7 +846,7 @@
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
- goto out;
+ return -EINVAL;
mutex_lock(&df->lock);
min = df->min_freq;
@@ -577,7 +860,6 @@
ret = count;
unlock:
mutex_unlock(&df->lock);
-out:
return ret;
}
@@ -587,34 +869,92 @@
return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}
+static ssize_t show_available_freqs(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *df = to_devfreq(d);
+ struct device *dev = df->dev.parent;
+ struct opp *opp;
+ ssize_t count = 0;
+ unsigned long freq = 0;
+
+ rcu_read_lock();
+ do {
+ opp = opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+ "%lu ", freq);
+ freq++;
+ } while (1);
+ rcu_read_unlock();
+
+ /* Truncate the trailing space */
+ if (count)
+ count--;
+
+ count += sprintf(&buf[count], "\n");
+
+ return count;
+}
+
+static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ ssize_t len;
+ int i, j, err;
+ unsigned int max_state = devfreq->profile->max_state;
+
+ err = devfreq_update_status(devfreq, devfreq->previous_freq);
+ if (err)
+ return 0;
+
+ len = sprintf(buf, " From : To\n");
+ len += sprintf(buf + len, " :");
+ for (i = 0; i < max_state; i++)
+ len += sprintf(buf + len, "%8u",
+ devfreq->profile->freq_table[i]);
+
+ len += sprintf(buf + len, " time(ms)\n");
+
+ for (i = 0; i < max_state; i++) {
+ if (devfreq->profile->freq_table[i]
+ == devfreq->previous_freq) {
+ len += sprintf(buf + len, "*");
+ } else {
+ len += sprintf(buf + len, " ");
+ }
+ len += sprintf(buf + len, "%8u:",
+ devfreq->profile->freq_table[i]);
+ for (j = 0; j < max_state; j++)
+ len += sprintf(buf + len, "%8u",
+ devfreq->trans_table[(i * max_state) + j]);
+ len += sprintf(buf + len, "%10u\n",
+ jiffies_to_msecs(devfreq->time_in_state[i]));
+ }
+
+ len += sprintf(buf + len, "Total transition : %u\n",
+ devfreq->total_trans);
+ return len;
+}
+
static struct device_attribute devfreq_attrs[] = {
- __ATTR(governor, S_IRUGO, show_governor, NULL),
+ __ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
+ __ATTR(available_governors, S_IRUGO, show_available_governors, NULL),
__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
- __ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
+ __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
+ __ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
store_polling_interval),
__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
+ __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
{ },
};
-/**
- * devfreq_start_polling() - Initialize data structure for devfreq framework and
- * start polling registered devfreq devices.
- */
-static int __init devfreq_start_polling(void)
-{
- mutex_lock(&devfreq_list_lock);
- polling = false;
- devfreq_wq = create_freezable_workqueue("devfreq_wq");
- INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
- mutex_unlock(&devfreq_list_lock);
-
- devfreq_monitor(&devfreq_work.work);
- return 0;
-}
-late_initcall(devfreq_start_polling);
-
static int __init devfreq_init(void)
{
devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -622,7 +962,15 @@
pr_err("%s: couldn't create class\n", __FILE__);
return PTR_ERR(devfreq_class);
}
+
+ devfreq_wq = create_freezable_workqueue("devfreq_wq");
+ if (!devfreq_wq) {
+ class_destroy(devfreq_class);
+ pr_err("%s: couldn't create workqueue\n", __FILE__);
+ return -ENOMEM;
+ }
devfreq_class->dev_attrs = devfreq_attrs;
+
return 0;
}
subsys_initcall(devfreq_init);
@@ -630,6 +978,7 @@
static void __exit devfreq_exit(void)
{
class_destroy(devfreq_class);
+ destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);
@@ -641,10 +990,15 @@
/**
* devfreq_recommended_opp() - Helper function to get proper OPP for the
* freq value given to target callback.
- * @dev The devfreq user device. (parent of devfreq)
- * @freq The frequency given to target function
- * @flags Flags handed from devfreq framework.
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @freq: The frequency given to target function
+ * @flags: Flags handed from devfreq framework.
*
+ * Locking: This function must be called under rcu_read_lock(). opp is an
+ * RCU-protected pointer, so the returned opp remains valid for use with
+ * opp_get_{voltage, freq} only while the read lock is held. The pointer
+ * must therefore be used before calling rcu_read_unlock() to maintain its
+ * integrity.
*/
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
u32 flags)
@@ -656,14 +1010,14 @@
opp = opp_find_freq_floor(dev, freq);
/* If not available, use the closest opp */
- if (opp == ERR_PTR(-ENODEV))
+ if (opp == ERR_PTR(-ERANGE))
opp = opp_find_freq_ceil(dev, freq);
} else {
/* The freq is an lower bound. opp should be higher */
opp = opp_find_freq_ceil(dev, freq);
/* If not available, use the closest opp */
- if (opp == ERR_PTR(-ENODEV))
+ if (opp == ERR_PTR(-ERANGE))
opp = opp_find_freq_floor(dev, freq);
}
@@ -674,35 +1028,49 @@
* devfreq_register_opp_notifier() - Helper function to get devfreq notified
* for any changes in the OPP availability
* changes
- * @dev The devfreq user device. (parent of devfreq)
- * @devfreq The devfreq object.
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
*/
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
- struct srcu_notifier_head *nh = opp_get_notifier(dev);
+ struct srcu_notifier_head *nh;
+ int ret = 0;
+ rcu_read_lock();
+ nh = opp_get_notifier(dev);
if (IS_ERR(nh))
- return PTR_ERR(nh);
- return srcu_notifier_chain_register(nh, &devfreq->nb);
+ ret = PTR_ERR(nh);
+ rcu_read_unlock();
+ if (!ret)
+ ret = srcu_notifier_chain_register(nh, &devfreq->nb);
+
+ return ret;
}
/**
* devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
* notified for any changes in the OPP
* availability changes anymore.
- * @dev The devfreq user device. (parent of devfreq)
- * @devfreq The devfreq object.
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
*
* At exit() callback of devfreq_dev_profile, this must be included if
* devfreq_recommended_opp is used.
*/
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
- struct srcu_notifier_head *nh = opp_get_notifier(dev);
+ struct srcu_notifier_head *nh;
+ int ret = 0;
+ rcu_read_lock();
+ nh = opp_get_notifier(dev);
if (IS_ERR(nh))
- return PTR_ERR(nh);
- return srcu_notifier_chain_unregister(nh, &devfreq->nb);
+ ret = PTR_ERR(nh);
+ rcu_read_unlock();
+ if (!ret)
+ ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);
+
+ return ret;
}
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
index 88ddc77..f6c54bd 100644
--- a/drivers/devfreq/exynos4_bus.c
+++ b/drivers/devfreq/exynos4_bus.c
@@ -73,6 +73,16 @@
#define EX4210_LV_NUM (LV_2 + 1)
#define EX4x12_LV_NUM (LV_4 + 1)
+/**
+ * struct busfreq_opp_info - opp information for bus
+ * @rate: Frequency in hertz
+ * @volt: Voltage in microvolts corresponding to this OPP
+ */
+struct busfreq_opp_info {
+ unsigned long rate;
+ unsigned long volt;
+};
+
struct busfreq_data {
enum exynos4_busf_type type;
struct device *dev;
@@ -80,7 +90,7 @@
bool disabled;
struct regulator *vdd_int;
struct regulator *vdd_mif; /* Exynos4412/4212 only */
- struct opp *curr_opp;
+ struct busfreq_opp_info curr_oppinfo;
struct exynos4_ppmu dmc[2];
struct notifier_block pm_notifier;
@@ -296,13 +306,14 @@
};
-static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4210_set_busclk(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi)
{
unsigned int index;
unsigned int tmp;
for (index = LV_0; index < EX4210_LV_NUM; index++)
- if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+ if (oppi->rate == exynos4210_busclk_table[index].clk)
break;
if (index == EX4210_LV_NUM)
@@ -361,13 +372,14 @@
return 0;
}
-static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4x12_set_busclk(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi)
{
unsigned int index;
unsigned int tmp;
for (index = LV_0; index < EX4x12_LV_NUM; index++)
- if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+ if (oppi->rate == exynos4x12_mifclk_table[index].clk)
break;
if (index == EX4x12_LV_NUM)
@@ -576,11 +588,12 @@
return -EINVAL;
}
-static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
- struct opp *oldopp)
+static int exynos4_bus_setvolt(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi,
+ struct busfreq_opp_info *oldoppi)
{
int err = 0, tmp;
- unsigned long volt = opp_get_voltage(opp);
+ unsigned long volt = oppi->volt;
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
@@ -595,11 +608,11 @@
if (err)
break;
- tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+ tmp = exynos4x12_get_intspec(oppi->rate);
if (tmp < 0) {
err = tmp;
regulator_set_voltage(data->vdd_mif,
- opp_get_voltage(oldopp),
+ oldoppi->volt,
MAX_SAFEVOLT);
break;
}
@@ -609,7 +622,7 @@
/* Try to recover */
if (err)
regulator_set_voltage(data->vdd_mif,
- opp_get_voltage(oldopp),
+ oldoppi->volt,
MAX_SAFEVOLT);
break;
default:
@@ -626,17 +639,26 @@
struct platform_device *pdev = container_of(dev, struct platform_device,
dev);
struct busfreq_data *data = platform_get_drvdata(pdev);
- struct opp *opp = devfreq_recommended_opp(dev, _freq, flags);
- unsigned long freq = opp_get_freq(opp);
- unsigned long old_freq = opp_get_freq(data->curr_opp);
+ struct opp *opp;
+ unsigned long freq;
+ unsigned long old_freq = data->curr_oppinfo.rate;
+ struct busfreq_opp_info new_oppinfo;
- if (IS_ERR(opp))
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, _freq, flags);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
return PTR_ERR(opp);
+ }
+ new_oppinfo.rate = opp_get_freq(opp);
+ new_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+ freq = new_oppinfo.rate;
if (old_freq == freq)
return 0;
- dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+ dev_dbg(dev, "targetting %lukHz %luuV\n", freq, new_oppinfo.volt);
mutex_lock(&data->lock);
@@ -644,17 +666,18 @@
goto out;
if (old_freq < freq)
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto out;
if (old_freq != freq) {
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, opp);
+ err = exynos4210_set_busclk(data, &new_oppinfo);
break;
case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, opp);
+ err = exynos4x12_set_busclk(data, &new_oppinfo);
break;
default:
err = -EINVAL;
@@ -664,11 +687,12 @@
goto out;
if (old_freq > freq)
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto out;
- data->curr_opp = opp;
+ data->curr_oppinfo = new_oppinfo;
out:
mutex_unlock(&data->lock);
return err;
@@ -702,7 +726,7 @@
exynos4_read_ppmu(data);
busier_dmc = exynos4_get_busier_dmc(data);
- stat->current_frequency = opp_get_freq(data->curr_opp);
+ stat->current_frequency = data->curr_oppinfo.rate;
if (busier_dmc)
addr = S5P_VA_DMC1;
@@ -933,6 +957,7 @@
struct busfreq_data *data = container_of(this, struct busfreq_data,
pm_notifier);
struct opp *opp;
+ struct busfreq_opp_info new_oppinfo;
unsigned long maxfreq = ULONG_MAX;
int err = 0;
@@ -943,18 +968,29 @@
data->disabled = true;
+ rcu_read_lock();
opp = opp_find_freq_floor(data->dev, &maxfreq);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(data->dev, "%s: unable to find a min freq\n",
+ __func__);
+ return PTR_ERR(opp);
+ }
+ new_oppinfo.rate = opp_get_freq(opp);
+ new_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto unlock;
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, opp);
+ err = exynos4210_set_busclk(data, &new_oppinfo);
break;
case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, opp);
+ err = exynos4x12_set_busclk(data, &new_oppinfo);
break;
default:
err = -EINVAL;
@@ -962,7 +998,7 @@
if (err)
goto unlock;
- data->curr_opp = opp;
+ data->curr_oppinfo = new_oppinfo;
unlock:
mutex_unlock(&data->lock);
if (err)
@@ -987,7 +1023,7 @@
struct device *dev = &pdev->dev;
int err = 0;
- data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL);
if (data == NULL) {
dev_err(dev, "Cannot allocate memory.\n");
return -ENOMEM;
@@ -1012,63 +1048,52 @@
err = -EINVAL;
}
if (err)
- goto err_regulator;
+ return err;
- data->vdd_int = regulator_get(dev, "vdd_int");
+ data->vdd_int = devm_regulator_get(dev, "vdd_int");
if (IS_ERR(data->vdd_int)) {
dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
- err = PTR_ERR(data->vdd_int);
- goto err_regulator;
+ return PTR_ERR(data->vdd_int);
}
if (data->type == TYPE_BUSF_EXYNOS4x12) {
- data->vdd_mif = regulator_get(dev, "vdd_mif");
+ data->vdd_mif = devm_regulator_get(dev, "vdd_mif");
if (IS_ERR(data->vdd_mif)) {
dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
- err = PTR_ERR(data->vdd_mif);
- regulator_put(data->vdd_int);
- goto err_regulator;
-
+ return PTR_ERR(data->vdd_mif);
}
}
+ rcu_read_lock();
opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
if (IS_ERR(opp)) {
+ rcu_read_unlock();
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
- exynos4_devfreq_profile.initial_freq);
- err = PTR_ERR(opp);
- goto err_opp_add;
+ exynos4_devfreq_profile.initial_freq);
+ return PTR_ERR(opp);
}
- data->curr_opp = opp;
+ data->curr_oppinfo.rate = opp_get_freq(opp);
+ data->curr_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
platform_set_drvdata(pdev, data);
busfreq_mon_reset(data);
data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
- &devfreq_simple_ondemand, NULL);
- if (IS_ERR(data->devfreq)) {
- err = PTR_ERR(data->devfreq);
- goto err_opp_add;
- }
+ "simple_ondemand", NULL);
+ if (IS_ERR(data->devfreq))
+ return PTR_ERR(data->devfreq);
devfreq_register_opp_notifier(dev, data->devfreq);
err = register_pm_notifier(&data->pm_notifier);
if (err) {
dev_err(dev, "Failed to setup pm notifier\n");
- goto err_devfreq_add;
+ devfreq_remove_device(data->devfreq);
+ return err;
}
return 0;
-err_devfreq_add:
- devfreq_remove_device(data->devfreq);
-err_opp_add:
- if (data->vdd_mif)
- regulator_put(data->vdd_mif);
- regulator_put(data->vdd_int);
-err_regulator:
- kfree(data);
- return err;
}
static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
@@ -1077,10 +1102,6 @@
unregister_pm_notifier(&data->pm_notifier);
devfreq_remove_device(data->devfreq);
- regulator_put(data->vdd_int);
- if (data->vdd_mif)
- regulator_put(data->vdd_mif);
- kfree(data);
return 0;
}
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index ea7f13c..fad7d63 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -18,7 +18,24 @@
#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
+/* Devfreq events */
+#define DEVFREQ_GOV_START 0x1
+#define DEVFREQ_GOV_STOP 0x2
+#define DEVFREQ_GOV_INTERVAL 0x3
+#define DEVFREQ_GOV_SUSPEND 0x4
+#define DEVFREQ_GOV_RESUME 0x5
+
/* Caution: devfreq->lock must be locked before calling update_devfreq */
extern int update_devfreq(struct devfreq *devfreq);
+extern void devfreq_monitor_start(struct devfreq *devfreq);
+extern void devfreq_monitor_stop(struct devfreq *devfreq);
+extern void devfreq_monitor_suspend(struct devfreq *devfreq);
+extern void devfreq_monitor_resume(struct devfreq *devfreq);
+extern void devfreq_interval_update(struct devfreq *devfreq,
+ unsigned int *delay);
+
+extern int devfreq_add_governor(struct devfreq_governor *governor);
+extern int devfreq_remove_governor(struct devfreq_governor *governor);
+
#endif /* _GOVERNOR_H */
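
With the events and helpers declared above, every governor now takes the same
shape: a get_target_freq() callback plus an event_handler() that maps the
DEVFREQ_GOV_* events onto the monitor helpers, registered with
devfreq_add_governor(). The per-governor conversions that follow all reduce to
this pattern; a minimal skeleton with hypothetical names, not part of this
patch, might look like:

#include <linux/devfreq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "governor.h"

static int demo_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	/* Placeholder policy; a real governor would inspect device load. */
	*freq = UINT_MAX;
	return 0;
}

static int demo_event_handler(struct devfreq *devfreq,
			      unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_INTERVAL:
		devfreq_interval_update(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	default:
		break;
	}
	return 0;
}

static struct devfreq_governor demo_governor = {
	.name = "demo",
	.get_target_freq = demo_get_target_freq,
	.event_handler = demo_event_handler,
};

static int __init demo_governor_init(void)
{
	return devfreq_add_governor(&demo_governor);
}
subsys_initcall(demo_governor_init);

static void __exit demo_governor_exit(void)
{
	devfreq_remove_governor(&demo_governor);
}
module_exit(demo_governor_exit);
MODULE_LICENSE("GPL");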
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index af75ddd..c72f942 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -10,6 +10,7 @@
*/
#include <linux/devfreq.h>
+#include <linux/module.h>
#include "governor.h"
static int devfreq_performance_func(struct devfreq *df,
@@ -26,14 +27,41 @@
return 0;
}
-static int performance_init(struct devfreq *devfreq)
+static int devfreq_performance_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
{
- return update_devfreq(devfreq);
+ int ret = 0;
+
+ if (event == DEVFREQ_GOV_START) {
+ mutex_lock(&devfreq->lock);
+ ret = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+ }
+
+ return ret;
}
-const struct devfreq_governor devfreq_performance = {
+static struct devfreq_governor devfreq_performance = {
.name = "performance",
- .init = performance_init,
.get_target_freq = devfreq_performance_func,
- .no_central_polling = true,
+ .event_handler = devfreq_performance_handler,
};
+
+static int __init devfreq_performance_init(void)
+{
+ return devfreq_add_governor(&devfreq_performance);
+}
+subsys_initcall(devfreq_performance_init);
+
+static void __exit devfreq_performance_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_performance);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_performance_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index fec0cdb..0c6bed5 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -10,6 +10,7 @@
*/
#include <linux/devfreq.h>
+#include <linux/module.h>
#include "governor.h"
static int devfreq_powersave_func(struct devfreq *df,
@@ -23,14 +24,41 @@
return 0;
}
-static int powersave_init(struct devfreq *devfreq)
+static int devfreq_powersave_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
{
- return update_devfreq(devfreq);
+ int ret = 0;
+
+ if (event == DEVFREQ_GOV_START) {
+ mutex_lock(&devfreq->lock);
+ ret = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+ }
+
+ return ret;
}
-const struct devfreq_governor devfreq_powersave = {
+static struct devfreq_governor devfreq_powersave = {
.name = "powersave",
- .init = powersave_init,
.get_target_freq = devfreq_powersave_func,
- .no_central_polling = true,
+ .event_handler = devfreq_powersave_handler,
};
+
+static int __init devfreq_powersave_init(void)
+{
+ return devfreq_add_governor(&devfreq_powersave);
+}
+subsys_initcall(devfreq_powersave_init);
+
+static void __exit devfreq_powersave_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_powersave);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_powersave_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index a2e3eae..0720ba8 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -10,8 +10,10 @@
*/
#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/devfreq.h>
#include <linux/math64.h>
+#include "governor.h"
/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
#define DFSO_UPTHRESHOLD (90)
@@ -88,7 +90,58 @@
return 0;
}
-const struct devfreq_governor devfreq_simple_ondemand = {
+static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ devfreq_monitor_start(devfreq);
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ devfreq_monitor_stop(devfreq);
+ break;
+
+ case DEVFREQ_GOV_INTERVAL:
+ devfreq_interval_update(devfreq, (unsigned int *)data);
+ break;
+
+ case DEVFREQ_GOV_SUSPEND:
+ devfreq_monitor_suspend(devfreq);
+ break;
+
+ case DEVFREQ_GOV_RESUME:
+ devfreq_monitor_resume(devfreq);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor devfreq_simple_ondemand = {
.name = "simple_ondemand",
.get_target_freq = devfreq_simple_ondemand_func,
+ .event_handler = devfreq_simple_ondemand_handler,
};
+
+static int __init devfreq_simple_ondemand_init(void)
+{
+ return devfreq_add_governor(&devfreq_simple_ondemand);
+}
+subsys_initcall(devfreq_simple_ondemand_init);
+
+static void __exit devfreq_simple_ondemand_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_simple_ondemand);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_simple_ondemand_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 0681246..35de6e8 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -14,6 +14,7 @@
#include <linux/devfreq.h>
#include <linux/pm.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include "governor.h"
struct userspace_data {
@@ -116,10 +117,46 @@
devfreq->data = NULL;
}
-const struct devfreq_governor devfreq_userspace = {
+static int devfreq_userspace_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ int ret = 0;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ ret = userspace_init(devfreq);
+ break;
+ case DEVFREQ_GOV_STOP:
+ userspace_exit(devfreq);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct devfreq_governor devfreq_userspace = {
.name = "userspace",
.get_target_freq = devfreq_userspace_func,
- .init = userspace_init,
- .exit = userspace_exit,
- .no_central_polling = true,
+ .event_handler = devfreq_userspace_handler,
};
+
+static int __init devfreq_userspace_init(void)
+{
+ return devfreq_add_governor(&devfreq_userspace);
+}
+subsys_initcall(devfreq_userspace_init);
+
+static void __exit devfreq_userspace_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_userspace);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_userspace_exit);
+MODULE_LICENSE("GPL");
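For reference, the governor conversions above all have the same shape: the old
init/exit hooks collapse into a single event_handler callback, and each governor
registers itself with devfreq_add_governor() at subsys_initcall time. Below is a
minimal user-space sketch of that event-dispatch pattern; the names and events
are illustrative stand-ins, not the kernel API.

    /* Sketch: one callback multiplexes lifecycle events (illustrative only). */
    #include <stdio.h>

    enum gov_event { GOV_START, GOV_STOP, GOV_INTERVAL };

    struct governor {
        const char *name;
        int (*event_handler)(struct governor *gov, enum gov_event ev, void *data);
    };

    static int demo_handler(struct governor *gov, enum gov_event ev, void *data)
    {
        switch (ev) {
        case GOV_START:
            printf("%s: start monitoring\n", gov->name);
            break;
        case GOV_STOP:
            printf("%s: stop monitoring\n", gov->name);
            break;
        case GOV_INTERVAL:
            printf("%s: new interval %u ms\n", gov->name, *(unsigned int *)data);
            break;
        }
        return 0;
    }

    int main(void)
    {
        struct governor g = { .name = "demo", .event_handler = demo_handler };
        unsigned int interval = 50;

        g.event_handler(&g, GOV_START, NULL);
        g.event_handler(&g, GOV_INTERVAL, &interval);
        g.event_handler(&g, GOV_STOP, NULL);
        return 0;
    }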
diff --git a/drivers/gpio/qpnp-pin.c b/drivers/gpio/qpnp-pin.c
index f0c12f9..64341e9 100644
--- a/drivers/gpio/qpnp-pin.c
+++ b/drivers/gpio/qpnp-pin.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -821,7 +821,7 @@
param.invert = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
Q_REG_OUT_INVERT_SHIFT,
Q_REG_OUT_INVERT_MASK);
- param.pull = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+ param.pull = q_reg_get(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
Q_REG_PULL_SHIFT, Q_REG_PULL_MASK);
param.vin_sel = q_reg_get(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
Q_REG_VIN_SHIFT, Q_REG_VIN_MASK);
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index b51fa6a..798c027 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -241,25 +241,78 @@
void *vaddr, unsigned int offset, unsigned int length,
unsigned int cmd)
{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+ void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
+ unsigned int size_to_vmap, total_size;
+ int i, j;
+ void *ptr = NULL;
+ ion_phys_addr_t buff_phys = buffer->priv_phys;
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
+ if (!vaddr) {
+ /*
+ * Split the vmalloc space into smaller regions in
+ * order to clean and/or invalidate the cache.
+ */
+ size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
+ total_size = buffer->size;
+
+ for (i = 0; i < total_size; i += size_to_vmap) {
+ size_to_vmap = min(size_to_vmap, total_size - i);
+ for (j = 0; j < 10 && size_to_vmap; ++j) {
+ ptr = ioremap(buff_phys, size_to_vmap);
+ if (ptr) {
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+ buff_phys += size_to_vmap;
+ break;
+ } else {
+ size_to_vmap >>= 1;
+ }
+ }
+ if (!ptr) {
+ pr_err("Couldn't io-remap the memory\n");
+ return -EINVAL;
+ }
+ iounmap(ptr);
+ }
+ } else {
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(vaddr, vaddr + length);
+ outer_cache_op = outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(vaddr, vaddr + length);
+ outer_cache_op = outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(vaddr, vaddr + length);
+ outer_cache_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
}
if (carveout_heap->has_outer_cache) {
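When no kernel virtual address is supplied, the cache-maintenance path above walks
the physical buffer in pieces of at most one eighth of the vmalloc window, halving
the mapping request up to ten times whenever ioremap() fails before giving up. A
stand-alone sketch of that chunk-and-halve control flow follows; try_map() and
do_op() are stand-ins for ioremap() and the dmac_*_range() calls, not real APIs.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    static void *try_map(unsigned long phys, size_t len)
    {
        /* Pretend mappings larger than 64 KiB fail, to exercise the halving. */
        return (len > 64 * 1024) ? NULL : (void *)(uintptr_t)phys;
    }

    static void do_op(void *ptr, size_t len)
    {
        printf("cache op on %p, %zu bytes\n", ptr, len);
    }

    static int walk_buffer(unsigned long phys, size_t total, size_t max_chunk)
    {
        size_t chunk = max_chunk;

        for (size_t off = 0; off < total; off += chunk) {
            void *ptr = NULL;

            if (chunk > total - off)
                chunk = total - off;
            for (int tries = 0; tries < 10 && chunk; tries++) {
                ptr = try_map(phys, chunk);
                if (ptr)
                    break;
                chunk >>= 1;            /* halve and retry, as in the patch */
            }
            if (!ptr)
                return -1;              /* could not map even a small piece */
            do_op(ptr, chunk);          /* unmap would go here in the real code */
            phys += chunk;
        }
        return 0;
    }

    int main(void)
    {
        return walk_buffer(0x80000000UL, 1 << 20, 256 * 1024);
    }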
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
index 8063138..4f12e38 100644
--- a/drivers/gpu/ion/ion_cma_heap.c
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -127,8 +127,8 @@
struct device *dev = heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
- info->handle);
+ dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+ &info->handle);
*addr = info->handle;
*len = buffer->size;
@@ -281,15 +281,30 @@
switch (cmd) {
case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
+ if (!vaddr)
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_TO_DEVICE);
+ else
+ dmac_clean_range(vaddr, vaddr + length);
outer_cache_op = outer_clean_range;
break;
case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
+ if (!vaddr)
+ dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_FROM_DEVICE);
+ else
+ dmac_inv_range(vaddr, vaddr + length);
outer_cache_op = outer_inv_range;
break;
case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
+ if (!vaddr) {
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_TO_DEVICE);
+ dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_FROM_DEVICE);
+ } else {
+ dmac_flush_range(vaddr, vaddr + length);
+ }
outer_cache_op = outer_flush_range;
break;
default:
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
index 496e5b4..d7a5920 100644
--- a/drivers/gpu/ion/ion_cma_secure_heap.c
+++ b/drivers/gpu/ion/ion_cma_secure_heap.c
@@ -73,6 +73,8 @@
{
struct device *dev = heap->priv;
struct ion_secure_cma_buffer_info *info;
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
dev_dbg(dev, "Request buffer allocation len %ld\n", len);
@@ -82,12 +84,7 @@
return ION_CMA_ALLOCATE_FAILED;
}
- if (!ION_IS_CACHED(flags))
- info->cpu_addr = dma_alloc_writecombine(dev, len,
- &(info->handle), 0);
- else
- info->cpu_addr = dma_alloc_nonconsistent(dev, len,
- &(info->handle), 0);
+ info->cpu_addr = dma_alloc_attrs(dev, len, &(info->handle), 0, &attrs);
if (!info->cpu_addr) {
dev_err(dev, "Fail to allocate buffer\n");
@@ -100,8 +97,6 @@
goto err;
}
- info->is_cached = ION_IS_CACHED(flags);
-
ion_secure_cma_get_sgtable(dev,
info->table, info->cpu_addr, info->handle, len);
@@ -131,6 +126,13 @@
return -ENOMEM;
}
+ if (ION_IS_CACHED(flags)) {
+ pr_err("%s: cannot allocate cached memory from secure heap %s\n",
+ __func__, heap->name);
+ return -ENOMEM;
+ }
+
buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);
if (buf) {
@@ -164,8 +166,8 @@
struct device *dev = heap->priv;
struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
- info->handle);
+ dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+ &info->handle);
*addr = info->handle;
*len = buffer->size;
@@ -191,24 +193,22 @@
struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
+ pr_info("%s: mmaping from secure heap %s disallowed\n",
+ __func__, mapper->name);
return -EINVAL;
}
static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
-
- atomic_inc(&info->secure.map_cnt);
- return info->cpu_addr;
+ pr_info("%s: kernel mapping from secure heap %s disallowed\n",
+ __func__, heap->name);
+ return NULL;
}
static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
-
- atomic_dec(&info->secure.map_cnt);
return;
}
@@ -311,32 +311,9 @@
unsigned int offset, unsigned int length,
unsigned int cmd)
{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- if (cma_heap_has_outer_cache) {
- struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
-
- outer_cache_op(info->handle, info->handle + length);
- }
-
- return 0;
+ pr_info("%s: cache operations disallowed from secure heap %s\n",
+ __func__, heap->name);
+ return -EINVAL;
}
static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index a7473e2..3000252 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -731,26 +731,78 @@
void *vaddr, unsigned int offset, unsigned int length,
unsigned int cmd)
{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+ void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
+ container_of(heap, struct ion_cp_heap, heap);
+ unsigned int size_to_vmap, total_size;
struct ion_cp_buffer *buf = buffer->priv_virt;
+ int i, j;
+ void *ptr = NULL;
+ ion_phys_addr_t buff_phys = buffer->priv_phys;
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
+ if (!vaddr) {
+ /*
+ * Split the vmalloc space into smaller regions in
+ * order to clean and/or invalidate the cache.
+ */
+ size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
+ total_size = buffer->size;
+ for (i = 0; i < total_size; i += size_to_vmap) {
+ size_to_vmap = min(size_to_vmap, total_size - i);
+ for (j = 0; j < 10 && size_to_vmap; ++j) {
+ ptr = ioremap(buff_phys, size_to_vmap);
+ if (ptr) {
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+ buff_phys += size_to_vmap;
+ break;
+ } else {
+ size_to_vmap >>= 1;
+ }
+ }
+ if (!ptr) {
+ pr_err("Couldn't io-remap the memory\n");
+ return -EINVAL;
+ }
+ iounmap(ptr);
+ }
+ } else {
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(vaddr, vaddr + length);
+ outer_cache_op = outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(vaddr, vaddr + length);
+ outer_cache_op = outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(vaddr, vaddr + length);
+ outer_cache_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
}
if (cp_heap->has_outer_cache) {
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 761fdde..ba48b50 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -409,15 +409,30 @@
switch (cmd) {
case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
+ if (!vaddr)
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_TO_DEVICE);
+ else
+ dmac_clean_range(vaddr, vaddr + length);
outer_cache_op = outer_clean_range;
break;
case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
+ if (!vaddr)
+ dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_FROM_DEVICE);
+ else
+ dmac_inv_range(vaddr, vaddr + length);
outer_cache_op = outer_inv_range;
break;
case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
+ if (!vaddr) {
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_TO_DEVICE);
+ dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_FROM_DEVICE);
+ } else {
+ dmac_flush_range(vaddr, vaddr + length);
+ }
outer_cache_op = outer_flush_range;
break;
default:
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index f33fc18..7ca2cd2 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -28,6 +28,7 @@
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>
+#include <linux/dma-mapping.h>
static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;
@@ -184,15 +185,30 @@
switch (cmd) {
case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
+ if (!vaddr)
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_TO_DEVICE);
+ else
+ dmac_clean_range(vaddr, vaddr + length);
outer_cache_op = outer_clean_range;
break;
case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
+ if (!vaddr)
+ dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_FROM_DEVICE);
+ else
+ dmac_inv_range(vaddr, vaddr + length);
outer_cache_op = outer_inv_range;
break;
case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
+ if (!vaddr) {
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_TO_DEVICE);
+ dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_FROM_DEVICE);
+ } else {
+ dmac_flush_range(vaddr, vaddr + length);
+ }
outer_cache_op = outer_flush_range;
break;
default:
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
index 8c9b95d..48c2efb 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.c
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -55,7 +55,7 @@
cmd.permission_type = permission_type;
cmd.lock = SCM_CP_PROTECT;
- return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
+ return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
&cmd, sizeof(cmd), NULL, 0);
}
@@ -68,7 +68,7 @@
cmd.permission_type = permission_type;
cmd.lock = SCM_CP_UNPROTECT;
- return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
+ return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
&cmd, sizeof(cmd), NULL, 0);
}
@@ -154,7 +154,7 @@
request.chunks.chunk_list_size = nchunks;
request.chunks.chunk_size = chunk_size;
- return scm_call(SCM_SVC_CP, MEM_PROTECT_LOCK_ID,
+ return scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID,
&request, sizeof(request), &resp, sizeof(resp));
}
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index bd27385..1f73d47 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -715,7 +715,7 @@
start = (unsigned long) data.vaddr;
end = (unsigned long) data.vaddr + data.length;
- if (check_vaddr_bounds(start, end)) {
+ if (start && check_vaddr_bounds(start, end)) {
up_read(&mm->mmap_sem);
pr_err("%s: virtual address %p is out of bounds\n",
__func__, data.vaddr);
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index bf45a63..9f4d791 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/delay.h>
#include <mach/socinfo.h>
#include <mach/msm_bus_board.h>
@@ -121,14 +122,21 @@
* If the values of these registers are same after
* KGSL_TIMEOUT_PART time, GPU hang is reported in
* kernel log.
+ * *****ALERT******ALERT********ALERT*************
+ * Order of registers below is important; registers
+ * from LONG_IB_DETECT_REG_INDEX_START to
+ * LONG_IB_DETECT_REG_INDEX_END are used in long ib detection.
*/
-unsigned int hang_detect_regs[] = {
+#define LONG_IB_DETECT_REG_INDEX_START 1
+#define LONG_IB_DETECT_REG_INDEX_END 5
+
+unsigned int ft_detect_regs[] = {
A3XX_RBBM_STATUS,
- REG_CP_RB_RPTR,
+ REG_CP_RB_RPTR, /* LONG_IB_DETECT_REG_INDEX_START */
REG_CP_IB1_BASE,
REG_CP_IB1_BUFSZ,
REG_CP_IB2_BASE,
- REG_CP_IB2_BUFSZ,
+ REG_CP_IB2_BUFSZ, /* LONG_IB_DETECT_REG_INDEX_END */
0,
0,
0,
@@ -137,7 +145,7 @@
0
};
-const unsigned int hang_detect_regs_count = ARRAY_SIZE(hang_detect_regs);
+const unsigned int ft_detect_regs_count = ARRAY_SIZE(ft_detect_regs);
/*
* This is the master list of all GPU cores that are supported by this
@@ -1222,7 +1230,7 @@
int status = -EINVAL;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- if (KGSL_STATE_DUMP_AND_RECOVER != device->state)
+ if (KGSL_STATE_DUMP_AND_FT != device->state)
kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
/* Power up the device */
@@ -1277,16 +1285,16 @@
/* Assign correct RBBM status register to hang detect regs
*/
- hang_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
+ ft_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
/* Add A3XX specific registers for hang detection */
if (adreno_is_a3xx(adreno_dev)) {
- hang_detect_regs[6] = A3XX_RBBM_PERFCTR_SP_7_LO;
- hang_detect_regs[7] = A3XX_RBBM_PERFCTR_SP_7_HI;
- hang_detect_regs[8] = A3XX_RBBM_PERFCTR_SP_6_LO;
- hang_detect_regs[9] = A3XX_RBBM_PERFCTR_SP_6_HI;
- hang_detect_regs[10] = A3XX_RBBM_PERFCTR_SP_5_LO;
- hang_detect_regs[11] = A3XX_RBBM_PERFCTR_SP_5_HI;
+ ft_detect_regs[6] = A3XX_RBBM_PERFCTR_SP_7_LO;
+ ft_detect_regs[7] = A3XX_RBBM_PERFCTR_SP_7_HI;
+ ft_detect_regs[8] = A3XX_RBBM_PERFCTR_SP_6_LO;
+ ft_detect_regs[9] = A3XX_RBBM_PERFCTR_SP_6_HI;
+ ft_detect_regs[10] = A3XX_RBBM_PERFCTR_SP_5_LO;
+ ft_detect_regs[11] = A3XX_RBBM_PERFCTR_SP_5_HI;
}
status = kgsl_mmu_start(device);
@@ -1309,12 +1317,9 @@
if (status)
goto error_irq_off;
- /*
- * While recovery is on we do not want timer to
- * fire and attempt to change any device state
- */
-
- if (KGSL_STATE_DUMP_AND_RECOVER != device->state)
+ /* While fault tolerance is on, we do not want the timer to
+ * fire and attempt to change any device state */
+ if (KGSL_STATE_DUMP_AND_FT != device->state)
mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
device->reset_counter++;
@@ -1328,7 +1333,8 @@
kgsl_mmu_stop(&device->mmu);
error_clk_off:
- kgsl_pwrctrl_disable(device);
+ if (KGSL_STATE_DUMP_AND_FT != device->state)
+ kgsl_pwrctrl_disable(device);
return status;
}
@@ -1356,26 +1362,26 @@
}
static void adreno_mark_context_status(struct kgsl_device *device,
- int recovery_status)
+ int ft_status)
{
struct kgsl_context *context;
int next = 0;
/*
* Set the reset status of all contexts to
* INNOCENT_CONTEXT_RESET_EXT except for the bad context
- * since thats the guilty party, if recovery failed then
+ * since that's the guilty party; if fault tolerance failed then
* mark all as guilty
*/
while ((context = idr_get_next(&device->context_idr, &next))) {
struct adreno_context *adreno_context = context->devctxt;
- if (recovery_status) {
+ if (ft_status) {
context->reset_status =
KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
} else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
context->reset_status) {
if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
- CTXT_FLAGS_GPU_HANG_RECOVERED))
+ CTXT_FLAGS_GPU_HANG_FT))
context->reset_status =
KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
else
@@ -1410,103 +1416,305 @@
}
}
-static void adreno_destroy_recovery_data(struct adreno_recovery_data *rec_data)
+static void adreno_destroy_ft_data(struct adreno_ft_data *ft_data)
{
- vfree(rec_data->rb_buffer);
- vfree(rec_data->bad_rb_buffer);
+ vfree(ft_data->rb_buffer);
+ vfree(ft_data->bad_rb_buffer);
+ vfree(ft_data->good_rb_buffer);
}
-static int adreno_setup_recovery_data(struct kgsl_device *device,
- struct adreno_recovery_data *rec_data)
+static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
+ unsigned int *ptr,
+ bool inc)
+{
+ int status = -EINVAL;
+ unsigned int val1;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int start_ptr = *ptr;
+
+ while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
+ if (inc)
+ start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
+ size);
+ else
+ start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
+ size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
+ if (KGSL_CMD_IDENTIFIER == val1) {
+ if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
+ start_ptr = adreno_ringbuffer_dec_wrapped(
+ start_ptr, size);
+ *ptr = start_ptr;
+ status = 0;
+ break;
+ }
+ }
+ return status;
+}
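The search helpers above and below step through the ring buffer with wrapped
increment/decrement helpers. The following sketch shows the assumed arithmetic,
with offsets advancing in 4-byte dwords and wrapping at the buffer size; these
are illustrations, not the kernel's adreno_ringbuffer_*_wrapped() implementations.

    #include <assert.h>

    static unsigned int rb_inc_wrapped(unsigned int ptr, unsigned int size)
    {
        return (ptr + 4) % size;            /* one dword forward, wrap at end */
    }

    static unsigned int rb_dec_wrapped(unsigned int ptr, unsigned int size)
    {
        return (ptr + size - 4) % size;     /* one dword backward, wrap at start */
    }

    int main(void)
    {
        unsigned int size = 64;             /* bytes, multiple of 4 */

        assert(rb_inc_wrapped(60, size) == 0);
        assert(rb_dec_wrapped(0, size) == 60);
        assert(rb_dec_wrapped(rb_inc_wrapped(32, size), size) == 32);
        return 0;
    }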
+
+static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
+ unsigned int *rb_rptr,
+ unsigned int global_eop,
+ bool inc)
+{
+ int status = -EINVAL;
+ unsigned int temp_rb_rptr = *rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[3];
+ int i = 0;
+ bool check = false;
+
+ if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
+ return status;
+
+ do {
+ /*
+ * when decrementing we need to decrement first and
+ * then read, to make sure we cover all the data
+ */
+ if (!inc)
+ temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+ temp_rb_rptr, size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
+ temp_rb_rptr);
+
+ if (check && ((inc && val[i] == global_eop) ||
+ (!inc && (val[i] ==
+ cp_type3_packet(CP_MEM_WRITE, 2) ||
+ val[i] == CACHE_FLUSH_TS)))) {
+ /* decrement i, i.e. i = (i - 1 + 3) % 3 if
+ * we are going forward, else increment i */
+ i = (i + 2) % 3;
+ if (val[i] == rb->device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp)) {
+ int j = ((i + 2) % 3);
+ if ((inc && (val[j] == CACHE_FLUSH_TS ||
+ val[j] == cp_type3_packet(
+ CP_MEM_WRITE, 2))) ||
+ (!inc && val[j] == global_eop)) {
+ /* Found the global eop */
+ status = 0;
+ break;
+ }
+ }
+ /* if no match found then increment i again
+ * since we decremented before matching */
+ i = (i + 1) % 3;
+ }
+ if (inc)
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
+ temp_rb_rptr, size);
+
+ i = (i + 1) % 3;
+ if (2 == i)
+ check = true;
+ } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
+ /* temp_rb_rptr points to the command stream after global eop,
+ * move backward till the start of command sequence */
+ if (!status) {
+ status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
+ if (!status) {
+ *rb_rptr = temp_rb_rptr;
+ KGSL_FT_INFO(rb->device,
+ "Offset of cmd sequence after eop timestamp: 0x%x\n",
+ temp_rb_rptr / sizeof(unsigned int));
+ }
+ }
+ if (status)
+ KGSL_FT_ERR(rb->device,
+ "Failed to find the command sequence after eop timestamp\n");
+ return status;
+}
+
+static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
+ unsigned int *rb_rptr,
+ unsigned int ib1)
+{
+ int status = -EINVAL;
+ unsigned int temp_rb_rptr = *rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[2];
+ int i = 0;
+ bool check = false;
+ bool ctx_switch = false;
+
+ while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+
+ if (check && val[i] == ib1) {
+ /* decrement i, i.e. i = (i - 1 + 2) % 2 */
+ i = (i + 1) % 2;
+ if (adreno_cmd_is_ib(val[i])) {
+ /* go till start of command sequence */
+ status = _find_start_of_cmd_seq(rb,
+ &temp_rb_rptr, false);
+
+ KGSL_FT_INFO(rb->device,
+ "Found the hanging IB at offset 0x%x\n",
+ temp_rb_rptr / sizeof(unsigned int));
+ break;
+ }
+ /* if no match then increment i since we decremented
+ * before checking */
+ i = (i + 1) % 2;
+ }
+ /* Make sure you do not encounter a context switch twice; we can
+ * encounter it once for the bad context as the start of search
+ * can point to the context switch */
+ if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+ if (ctx_switch) {
+ KGSL_FT_ERR(rb->device,
+ "Context switch encountered before bad "
+ "IB found\n");
+ break;
+ }
+ ctx_switch = true;
+ }
+ i = (i + 1) % 2;
+ if (1 == i)
+ check = true;
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+ size);
+ }
+ if (!status)
+ *rb_rptr = temp_rb_rptr;
+ return status;
+}
+
+static void adreno_setup_ft_data(struct kgsl_device *device,
+ struct adreno_ft_data *ft_data)
{
int ret = 0;
- unsigned int ib1_sz, ib2_sz;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context;
+ unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
- memset(rec_data, 0, sizeof(*rec_data));
+ memset(ft_data, 0, sizeof(*ft_data));
+ ft_data->start_of_replay_cmds = 0xFFFFFFFF;
+ ft_data->replay_for_snapshot = 0xFFFFFFFF;
- adreno_regread(device, REG_CP_IB1_BUFSZ, &ib1_sz);
- adreno_regread(device, REG_CP_IB2_BUFSZ, &ib2_sz);
- if (ib1_sz || ib2_sz)
- adreno_regread(device, REG_CP_IB1_BASE, &rec_data->ib1);
+ adreno_regread(device, REG_CP_IB1_BASE, &ft_data->ib1);
- kgsl_sharedmem_readl(&device->memstore, &rec_data->context_id,
+ kgsl_sharedmem_readl(&device->memstore, &ft_data->context_id,
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
current_context));
kgsl_sharedmem_readl(&device->memstore,
- &rec_data->global_eop,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp));
+ &ft_data->global_eop,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp));
- rec_data->rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!rec_data->rb_buffer) {
+ ft_data->rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!ft_data->rb_buffer) {
KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
rb->buffer_desc.size);
- return -ENOMEM;
+ return;
}
- rec_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!rec_data->bad_rb_buffer) {
+ ft_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!ft_data->bad_rb_buffer) {
KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
rb->buffer_desc.size);
- ret = -ENOMEM;
- goto done;
+ return;
}
- rec_data->fault = device->mmu.fault;
-done:
+ ft_data->good_rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!ft_data->good_rb_buffer) {
+ KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+ rb->buffer_desc.size);
+ return;
+ }
+ ft_data->status = 0;
+
+ /* find the start of bad command sequence in rb */
+ context = idr_find(&device->context_idr, ft_data->context_id);
+ /* Look for the command stream that is right after the global eop */
+
+ if (!context) {
+ /*
+ * If there is no context then fault tolerance does not need to
+ * replay anything, just reset the GPU and that's it
+ */
+ return;
+ }
+
+ ft_data->ft_policy = adreno_dev->ft_policy;
+
+ if (!ft_data->ft_policy)
+ ft_data->ft_policy = KGSL_FT_DEFAULT_POLICY;
+
+ ret = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
+ ft_data->global_eop + 1, false);
if (ret) {
- vfree(rec_data->rb_buffer);
- vfree(rec_data->bad_rb_buffer);
+ ft_data->ft_policy |= KGSL_FT_TEMP_DISABLE;
+ return;
+ } else
+ ft_data->ft_policy &= ~KGSL_FT_TEMP_DISABLE;
+
+ ft_data->start_of_replay_cmds = rb_rptr;
+
+ adreno_context = context->devctxt;
+ if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
+ if (ft_data->ib1) {
+ ret = _find_hanging_ib_sequence(rb,
+ &rb_rptr, ft_data->ib1);
+ if (ret) {
+ KGSL_FT_ERR(device,
+ "Start not found for replay IB sequence\n");
+ ret = 0;
+ return;
+ }
+ ft_data->start_of_replay_cmds = rb_rptr;
+ ft_data->replay_for_snapshot = rb_rptr;
+ }
}
- return ret;
}
static int
-_adreno_recover_hang(struct kgsl_device *device,
- struct adreno_recovery_data *rec_data,
- bool try_bad_commands)
+_adreno_check_long_ib(struct kgsl_device *device)
{
- int ret;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- struct kgsl_context *context;
- struct adreno_context *adreno_context = NULL;
- struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
+ unsigned int curr_global_ts = 0;
- context = idr_find(&device->context_idr, rec_data->context_id);
- if (context == NULL) {
- KGSL_DRV_ERR(device, "Last context unknown id:%d\n",
- rec_data->context_id);
+ /* check if the global ts is still the same */
+ kgsl_sharedmem_readl(&device->memstore,
+ &curr_global_ts,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp));
+
+ /* Mark long ib as handled */
+ adreno_dev->long_ib = 0;
+
+ if (curr_global_ts == adreno_dev->long_ib_ts) {
+ KGSL_FT_ERR(device,
+ "IB ran too long, invalidate ctxt\n");
+ return 1;
} else {
- adreno_context = context->devctxt;
- adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
- /*
- * set the invalid ts flag to 0 for this context since we have
- * detected a hang for it
- */
- context->wait_on_invalid_ts = false;
+ /* Do nothing, GPU has gone ahead */
+ KGSL_FT_INFO(device, "false long ib detection return\n");
+ return 0;
}
+}
- /* Extract valid contents from rb which can still be executed after
- * hang */
- ret = adreno_ringbuffer_extract(rb, rec_data);
- if (ret)
- goto done;
+static int
+_adreno_ft_restart_device(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+
+ struct adreno_context *adreno_context = context->devctxt;
/* restart device */
- ret = adreno_stop(device);
- if (ret) {
- KGSL_DRV_ERR(device, "Device stop failed in recovery\n");
- goto done;
+ if (adreno_stop(device)) {
+ KGSL_FT_ERR(device, "Device stop failed\n");
+ return 1;
}
- ret = adreno_start(device, true);
- if (ret) {
- KGSL_DRV_ERR(device, "Device start failed in recovery\n");
- goto done;
+ if (adreno_start(device, true)) {
+ KGSL_FT_ERR(device, "Device start failed\n");
+ return 1;
}
if (context)
@@ -1516,83 +1724,284 @@
/* If iommu is used then we need to make sure that the iommu clocks
* are on since there could be commands in pipeline that touch iommu */
if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
- ret = kgsl_mmu_enable_clk(&device->mmu,
- KGSL_IOMMU_CONTEXT_USER);
- if (ret)
- goto done;
+ if (kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER))
+ return 1;
}
- /* Do not try the bad commands if recovery has failed bad commands
- * once already or if hang is due to a fault */
- if (!try_bad_commands || rec_data->fault)
- rec_data->bad_rb_size = 0;
+ return 0;
+}
- if (rec_data->bad_rb_size) {
- int idle_ret;
- /* submit the bad and good context commands and wait for
- * them to pass */
- adreno_ringbuffer_restore(rb, rec_data->bad_rb_buffer,
- rec_data->bad_rb_size);
- idle_ret = adreno_idle(device);
- if (idle_ret) {
- ret = adreno_stop(device);
- if (ret) {
- KGSL_DRV_ERR(device,
- "Device stop failed in recovery\n");
- goto done;
- }
- ret = adreno_start(device, true);
- if (ret) {
- KGSL_DRV_ERR(device,
- "Device start failed in recovery\n");
- goto done;
- }
- if (context)
- kgsl_mmu_setstate(&device->mmu,
- adreno_context->pagetable,
- KGSL_MEMSTORE_GLOBAL);
+static inline void
+_adreno_debug_ft_info(struct kgsl_device *device,
+ struct adreno_ft_data *ft_data)
+{
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
- ret = kgsl_mmu_enable_clk(&device->mmu,
- KGSL_IOMMU_CONTEXT_USER);
- if (ret)
- goto done;
- }
+ /*
+ * Dumping rb is a very useful tool to debug FT.
+ * It will tell us if we are extracting the rb correctly,
+ * NOP'ing the right IB, skipping the EOF correctly, etc.
+ */
+ if (device->ft_log >= 7) {
- ret = idle_ret;
- KGSL_DRV_ERR(device,
- "Bad context commands hung in recovery\n");
- } else {
- KGSL_DRV_ERR(device,
- "Bad context commands succeeded in recovery\n");
- if (adreno_context)
- adreno_context->flags = (adreno_context->flags &
- ~CTXT_FLAGS_GPU_HANG) |
- CTXT_FLAGS_GPU_HANG_RECOVERED;
- adreno_dev->drawctxt_active = last_active_ctx;
- }
+ /* Print fault tolerance data here */
+ KGSL_FT_INFO(device, "Temp RB buffer size 0x%X\n",
+ ft_data->rb_size);
+ adreno_dump_rb(device, ft_data->rb_buffer,
+ ft_data->rb_size<<2, 0, ft_data->rb_size);
+
+ KGSL_FT_INFO(device, "Bad RB buffer size 0x%X\n",
+ ft_data->bad_rb_size);
+ adreno_dump_rb(device, ft_data->bad_rb_buffer,
+ ft_data->bad_rb_size<<2, 0, ft_data->bad_rb_size);
+
+ KGSL_FT_INFO(device, "Good RB buffer size 0x%X\n",
+ ft_data->good_rb_size);
+ adreno_dump_rb(device, ft_data->good_rb_buffer,
+ ft_data->good_rb_size<<2, 0, ft_data->good_rb_size);
+
}
- /* If either the bad command sequence failed or we did not play it */
- if (ret || !rec_data->bad_rb_size) {
- adreno_ringbuffer_restore(rb, rec_data->rb_buffer,
- rec_data->rb_size);
+}
+
+static int
+_adreno_ft_resubmit_rb(struct kgsl_device *device,
+ struct adreno_ringbuffer *rb,
+ struct kgsl_context *context,
+ struct adreno_ft_data *ft_data,
+ unsigned int *buff, unsigned int size)
+{
+ unsigned int ret = 0;
+ unsigned int retry_num = 0;
+
+ _adreno_debug_ft_info(device, ft_data);
+
+ do {
+ ret = _adreno_ft_restart_device(device, context);
+ if (ret == 0)
+ break;
+ /*
+ * If device restart fails, sleep for 20ms before
+ * attempting restart. This allows the GPU HW to settle
+ * and improves the chances that the next restart will
+ * be successful.
+ */
+ msleep(20);
+ KGSL_FT_ERR(device, "Retry device restart %d\n", retry_num);
+ retry_num++;
+ } while (retry_num < 4);
+
+ if (ret) {
+ KGSL_FT_ERR(device, "Device restart failed\n");
+ BUG_ON(1);
+ goto done;
+ }
+
+ if (size) {
+
+ /* submit commands and wait for them to pass */
+ adreno_ringbuffer_restore(rb, buff, size);
+
ret = adreno_idle(device);
- if (ret) {
- /* If we fail here we can try to invalidate another
- * context and try recovering again */
- ret = -EAGAIN;
- goto done;
+ }
+
+done:
+ return ret;
+}
+
+
+static int
+_adreno_ft(struct kgsl_device *device,
+ struct adreno_ft_data *ft_data)
+{
+ int ret = 0, i;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context = NULL;
+ struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
+ unsigned int long_ib = 0;
+
+ context = idr_find(&device->context_idr, ft_data->context_id);
+ if (context == NULL) {
+ KGSL_FT_ERR(device, "Last context unknown id:%d\n",
+ ft_data->context_id);
+ goto play_good_cmds;
+ } else {
+ adreno_context = context->devctxt;
+ adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+ /*
+ * set the invalid ts flag to 0 for this context since we have
+ * detected a hang for it
+ */
+ context->wait_on_invalid_ts = false;
+
+ if (!(adreno_context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
+ KGSL_FT_ERR(device, "Fault tolerance not supported\n");
+ goto play_good_cmds;
}
- /* ringbuffer now has data from the last valid context id,
- * so restore the active_ctx to the last valid context */
- if (rec_data->last_valid_ctx_id) {
- struct kgsl_context *last_ctx =
- idr_find(&device->context_idr,
- rec_data->last_valid_ctx_id);
- if (last_ctx)
- adreno_dev->drawctxt_active = last_ctx->devctxt;
+
+ /*
+ * This flag will be set by userspace for contexts
+ * that do not want to be fault tolerant (e.g. OpenCL)
+ */
+ if (adreno_context->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE) {
+ ft_data->status = 1;
+ KGSL_FT_ERR(device,
+ "No FT set for this context play good cmds\n");
+ goto play_good_cmds;
+ }
+
+ }
+
+ /* Check if we detected a long-running IB; if not, return */
+ if (adreno_dev->long_ib) {
+ long_ib = _adreno_check_long_ib(device);
+ if (!long_ib) {
+ adreno_context->flags &= ~CTXT_FLAGS_GPU_HANG;
+ return 0;
}
}
+
+ /*
+ * Extract valid contents from rb which can still be executed after
+ * hang
+ */
+ adreno_ringbuffer_extract(rb, ft_data);
+
+ /* If long IB detected do not attempt replay of bad cmds */
+ if (long_ib) {
+ _adreno_debug_ft_info(device, ft_data);
+ goto play_good_cmds;
+ }
+
+ if ((ft_data->ft_policy & KGSL_FT_DISABLE) ||
+ (ft_data->ft_policy & KGSL_FT_TEMP_DISABLE)) {
+ KGSL_FT_ERR(device, "NO FT policy play only good cmds\n");
+ ft_data->status = 1;
+ goto play_good_cmds;
+ }
+
+ /* Do not try the replay if the hang is due to a pagefault */
+ if (adreno_context->pagefault) {
+ if ((ft_data->context_id == adreno_context->id) &&
+ (ft_data->global_eop == adreno_context->pagefault_ts)) {
+ ft_data->ft_policy &= ~KGSL_FT_REPLAY;
+ KGSL_FT_ERR(device, "MMU fault skipping replay\n");
+ }
+
+ adreno_context->pagefault = 0;
+ }
+
+ if (ft_data->ft_policy & KGSL_FT_REPLAY) {
+ ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+ ft_data->bad_rb_buffer, ft_data->bad_rb_size);
+
+ if (ret) {
+ KGSL_FT_ERR(device, "Replay status: 1\n");
+ ft_data->status = 1;
+ } else
+ goto play_good_cmds;
+ }
+
+ if (ft_data->ft_policy & KGSL_FT_SKIPIB) {
+ for (i = 0; i < ft_data->bad_rb_size; i++) {
+ if ((ft_data->bad_rb_buffer[i] ==
+ CP_HDR_INDIRECT_BUFFER_PFD) &&
+ (ft_data->bad_rb_buffer[i+1] == ft_data->ib1)) {
+
+ ft_data->bad_rb_buffer[i] = cp_nop_packet(2);
+ ft_data->bad_rb_buffer[i+1] =
+ KGSL_NOP_IB_IDENTIFIER;
+ ft_data->bad_rb_buffer[i+2] =
+ KGSL_NOP_IB_IDENTIFIER;
+ break;
+ }
+ }
+
+ if ((i == (ft_data->bad_rb_size)) || (!ft_data->ib1)) {
+ KGSL_FT_ERR(device, "Bad IB to NOP not found\n");
+ ft_data->status = 1;
+ goto play_good_cmds;
+ }
+
+ ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+ ft_data->bad_rb_buffer, ft_data->bad_rb_size);
+
+ if (ret) {
+ KGSL_FT_ERR(device, "NOP faulty IB status: 1\n");
+ ft_data->status = 1;
+ } else {
+ ft_data->status = 0;
+ goto play_good_cmds;
+ }
+ }
+
+ if (ft_data->ft_policy & KGSL_FT_SKIPFRAME) {
+ for (i = 0; i < ft_data->bad_rb_size; i++) {
+ if (ft_data->bad_rb_buffer[i] ==
+ KGSL_END_OF_FRAME_IDENTIFIER) {
+ ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
+ break;
+ }
+ }
+
+ /* EOF not found in RB, discard till EOF in
+ next IB submission */
+ if (i == ft_data->bad_rb_size) {
+ adreno_context->flags |= CTXT_FLAGS_SKIP_EOF;
+ KGSL_FT_INFO(device,
+ "EOF not found in RB, skip next issueib till EOF\n");
+ ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
+ }
+
+ ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+ ft_data->bad_rb_buffer, ft_data->bad_rb_size);
+
+ if (ret) {
+ KGSL_FT_ERR(device, "Skip EOF status: 1\n");
+ ft_data->status = 1;
+ } else {
+ ft_data->status = 0;
+ goto play_good_cmds;
+ }
+ }
+
+play_good_cmds:
+
+ if (ft_data->status)
+ KGSL_FT_ERR(device, "Bad context commands failed\n");
+ else {
+ KGSL_FT_INFO(device, "Bad context commands success\n");
+
+ if (adreno_context) {
+ adreno_context->flags = (adreno_context->flags &
+ ~CTXT_FLAGS_GPU_HANG) | CTXT_FLAGS_GPU_HANG_FT;
+ }
+ adreno_dev->drawctxt_active = last_active_ctx;
+ }
+
+ ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+ ft_data->good_rb_buffer, ft_data->good_rb_size);
+
+ if (ret) {
+ /* If we fail here we can try to invalidate another
+ * context and try fault tolerance again */
+ ret = -EAGAIN;
+ KGSL_FT_ERR(device, "Playing good commands unsuccessful\n");
+ goto done;
+ } else
+ KGSL_FT_INFO(device, "Playing good commands successful\n");
+
+ /* ringbuffer now has data from the last valid context id,
+ * so restore the active_ctx to the last valid context */
+ if (ft_data->last_valid_ctx_id) {
+ struct kgsl_context *last_ctx =
+ idr_find(&device->context_idr,
+ ft_data->last_valid_ctx_id);
+ if (last_ctx)
+ adreno_dev->drawctxt_active = last_ctx->devctxt;
+ }
+
done:
/* Turn off iommu clocks */
if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
@@ -1601,40 +2010,38 @@
}
static int
-adreno_recover_hang(struct kgsl_device *device,
- struct adreno_recovery_data *rec_data)
+adreno_ft(struct kgsl_device *device,
+ struct adreno_ft_data *ft_data)
{
int ret = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
unsigned int timestamp;
- KGSL_DRV_ERR(device,
- "Starting recovery from 3D GPU hang. Recovery parameters: IB1: 0x%X, "
+ KGSL_FT_INFO(device,
+ "Start Parameters: IB1: 0x%X, "
"Bad context_id: %u, global_eop: 0x%x\n",
- rec_data->ib1, rec_data->context_id, rec_data->global_eop);
+ ft_data->ib1, ft_data->context_id, ft_data->global_eop);
timestamp = rb->timestamp[KGSL_MEMSTORE_GLOBAL];
- KGSL_DRV_ERR(device, "Last issued global timestamp: %x\n", timestamp);
+ KGSL_FT_INFO(device, "Last issued global timestamp: %x\n", timestamp);
/* We may need to replay commands multiple times based on whether
* multiple contexts hang the GPU */
while (true) {
- if (!ret)
- ret = _adreno_recover_hang(device, rec_data, true);
- else
- ret = _adreno_recover_hang(device, rec_data, false);
+
+ ret = _adreno_ft(device, ft_data);
if (-EAGAIN == ret) {
- /* setup new recovery parameters and retry, this
+ /* setup new fault tolerance parameters and retry, this
* means more than 1 contexts are causing hang */
- adreno_destroy_recovery_data(rec_data);
- adreno_setup_recovery_data(device, rec_data);
- KGSL_DRV_ERR(device,
- "Retry recovery from 3D GPU hang. Recovery parameters: "
+ adreno_destroy_ft_data(ft_data);
+ adreno_setup_ft_data(device, ft_data);
+ KGSL_FT_INFO(device,
+ "Retry. Parameters: "
"IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
- rec_data->ib1, rec_data->context_id,
- rec_data->global_eop);
+ ft_data->ib1, ft_data->context_id,
+ ft_data->global_eop);
} else {
break;
}
@@ -1643,7 +2050,7 @@
if (ret)
goto done;
- /* Restore correct states after recovery */
+ /* Restore correct states after fault tolerance */
if (adreno_dev->drawctxt_active)
device->mmu.hwpagetable =
adreno_dev->drawctxt_active->pagetable;
@@ -1662,34 +2069,39 @@
done:
adreno_set_max_ts_for_bad_ctxs(device);
adreno_mark_context_status(device, ret);
- if (!ret)
- KGSL_DRV_ERR(device, "Recovery succeeded\n");
- else
- KGSL_DRV_ERR(device, "Recovery failed\n");
+ KGSL_FT_ERR(device, "policy 0x%X status 0x%x\n",
+ ft_data->ft_policy, ret);
return ret;
}
int
-adreno_dump_and_recover(struct kgsl_device *device)
+adreno_dump_and_exec_ft(struct kgsl_device *device)
{
int result = -ETIMEDOUT;
- struct adreno_recovery_data rec_data;
+ struct adreno_ft_data ft_data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ unsigned int curr_pwrlevel;
if (device->state == KGSL_STATE_HUNG)
goto done;
- if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
+ if (device->state == KGSL_STATE_DUMP_AND_FT) {
mutex_unlock(&device->mutex);
- wait_for_completion(&device->recovery_gate);
+ wait_for_completion(&device->ft_gate);
mutex_lock(&device->mutex);
if (device->state != KGSL_STATE_HUNG)
result = 0;
} else {
- kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_RECOVER);
- INIT_COMPLETION(device->recovery_gate);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_FT);
+ INIT_COMPLETION(device->ft_gate);
/* Detected a hang */
- /* Get the recovery data as soon as hang is detected */
- result = adreno_setup_recovery_data(device, &rec_data);
+ /* Run fault tolerance at max power level */
+ curr_pwrlevel = pwr->active_pwrlevel;
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
+
+ /* Get the fault tolerance data as soon as hang is detected */
+ adreno_setup_ft_data(device, &ft_data);
/*
* Trigger an automatic dump of the state to
* the console
@@ -1697,25 +2109,43 @@
kgsl_postmortem_dump(device, 0);
/*
- * Make a GPU snapshot. For now, do it after the PM dump so we
- * can at least be sure the PM dump will work as it always has
+ * If a long ib is detected, do not attempt postmortem or
+ * snapshot; if the GPU is still executing commands
+ * we will get errors
*/
- kgsl_device_snapshot(device, 1);
+ if (!adreno_dev->long_ib) {
+ /*
+ * Trigger an automatic dump of the state to
+ * the console
+ */
+ kgsl_postmortem_dump(device, 0);
- result = adreno_recover_hang(device, &rec_data);
- adreno_destroy_recovery_data(&rec_data);
+ /*
+ * Make a GPU snapshot. For now, do it after the
+ * PM dump so we can at least be sure the PM dump
+ * will work as it always has
+ */
+ kgsl_device_snapshot(device, 1);
+ }
+
+ result = adreno_ft(device, &ft_data);
+ adreno_destroy_ft_data(&ft_data);
+
+ /* restore power level */
+ kgsl_pwrctrl_pwrlevel_change(device, curr_pwrlevel);
+
if (result) {
kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
} else {
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
}
- complete_all(&device->recovery_gate);
+ complete_all(&device->ft_gate);
}
done:
return result;
}
-EXPORT_SYMBOL(adreno_dump_and_recover);
+EXPORT_SYMBOL(adreno_dump_and_exec_ft);
static int adreno_getproperty(struct kgsl_device *device,
enum kgsl_property_type type,
@@ -1882,7 +2312,7 @@
do {
if (time_after(jiffies, wait)) {
/* Check to see if the core is hung */
- if (adreno_hang_detect(device, regs))
+ if (adreno_ft_detect(device, regs))
return -ETIMEDOUT;
wait = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
@@ -1906,7 +2336,7 @@
unsigned int rbbm_status;
unsigned long wait_time;
unsigned long wait_time_part;
- unsigned int prev_reg_val[hang_detect_regs_count];
+ unsigned int prev_reg_val[ft_detect_regs_count];
memset(prev_reg_val, 0, sizeof(prev_reg_val));
@@ -1939,7 +2369,7 @@
if (time_after(jiffies, wait_time_part)) {
wait_time_part = jiffies +
msecs_to_jiffies(KGSL_TIMEOUT_PART);
- if ((adreno_hang_detect(device, prev_reg_val)))
+ if ((adreno_ft_detect(device, prev_reg_val)))
goto err;
}
@@ -1947,9 +2377,9 @@
err:
KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
- if (KGSL_STATE_DUMP_AND_RECOVER != device->state &&
- !adreno_dump_and_recover(device)) {
- wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
+ if (KGSL_STATE_DUMP_AND_FT != device->state &&
+ !adreno_dump_and_exec_ft(device)) {
+ wait_time = jiffies + ADRENO_IDLE_TIMEOUT;
goto retry;
}
return -ETIMEDOUT;
@@ -2297,16 +2727,28 @@
-unsigned int adreno_hang_detect(struct kgsl_device *device,
+unsigned int adreno_ft_detect(struct kgsl_device *device,
unsigned int *prev_reg_val)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int curr_reg_val[hang_detect_regs_count];
- unsigned int hang_detected = 1;
+ unsigned int curr_reg_val[ft_detect_regs_count];
+ unsigned int fast_hang_detected = 1;
+ unsigned int long_ib_detected = 1;
unsigned int i;
static unsigned long next_hang_detect_time;
+ static unsigned int prev_global_ts;
+ unsigned int curr_global_ts = 0;
+ unsigned int curr_context_id = 0;
+ static struct adreno_context *curr_context;
+ static struct kgsl_context *context;
if (!adreno_dev->fast_hang_detect)
+ fast_hang_detected = 0;
+
+ if (!adreno_dev->long_ib_detect)
+ long_ib_detected = 0;
+
+ if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED))
return 0;
if (is_adreno_rbbm_status_idle(device)) {
@@ -2340,20 +2782,125 @@
next_hang_detect_time = (jiffies +
msecs_to_jiffies(KGSL_TIMEOUT_PART-1));
- for (i = 0; i < hang_detect_regs_count; i++) {
-
- if (hang_detect_regs[i] == 0)
+ /* Read the current Hang detect reg values here */
+ for (i = 0; i < ft_detect_regs_count; i++) {
+ if (ft_detect_regs[i] == 0)
continue;
-
- adreno_regread(device, hang_detect_regs[i],
- &curr_reg_val[i]);
- if (curr_reg_val[i] != prev_reg_val[i]) {
- prev_reg_val[i] = curr_reg_val[i];
- hang_detected = 0;
- }
+ adreno_regread(device, ft_detect_regs[i],
+ &curr_reg_val[i]);
}
- return hang_detected;
+ /* Read the current global timestamp here */
+ kgsl_sharedmem_readl(&device->memstore,
+ &curr_global_ts,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp));
+ /* Make sure the memstore read has posted */
+ mb();
+
+ if (curr_global_ts == prev_global_ts) {
+
+ /* Get the current context here */
+ if (context == NULL) {
+ kgsl_sharedmem_readl(&device->memstore,
+ &curr_context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+ /* Make sure the memstore read has posted */
+ mb();
+ context = idr_find(&device->context_idr,
+ curr_context_id);
+ if (context != NULL) {
+ curr_context = context->devctxt;
+ curr_context->ib_gpu_time_used = 0;
+ } else {
+ KGSL_DRV_ERR(device,
+ "Fault tolerance no context found\n");
+ }
+ }
+
+ if (curr_context != NULL) {
+
+ curr_context->ib_gpu_time_used += KGSL_TIMEOUT_PART;
+ KGSL_FT_INFO(device,
+ "Proc %s used GPU Time %d ms on timestamp 0x%X\n",
+ curr_context->pid_name, curr_context->ib_gpu_time_used,
+ curr_global_ts+1);
+
+ for (i = 0; i < ft_detect_regs_count; i++) {
+ if (curr_reg_val[i] != prev_reg_val[i]) {
+ fast_hang_detected = 0;
+
+ /* Check for long IB here */
+ if ((i >=
+ LONG_IB_DETECT_REG_INDEX_START)
+ &&
+ (i <=
+ LONG_IB_DETECT_REG_INDEX_END))
+ long_ib_detected = 0;
+ }
+ }
+
+ if (fast_hang_detected) {
+ KGSL_FT_ERR(device,
+ "Proc %s, ctxt_id %d ts %d triggered fault tolerance"
+ " on global ts %d\n",
+ curr_context->pid_name, curr_context->id
+ , (kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED)+1),
+ curr_global_ts+1);
+ return 1;
+ }
+
+ if ((long_ib_detected) &&
+ (!(curr_context->flags &
+ CTXT_FLAGS_NO_FAULT_TOLERANCE))) {
+ curr_context->ib_gpu_time_used +=
+ KGSL_TIMEOUT_PART;
+ if (curr_context->ib_gpu_time_used >
+ KGSL_TIMEOUT_LONG_IB_DETECTION) {
+ if (adreno_dev->long_ib_ts !=
+ curr_global_ts) {
+ KGSL_FT_ERR(device,
+ "Proc %s, ctxt_id %d ts %d"
+ "used GPU for %d ms long ib "
+ "detected on global ts %d\n",
+ curr_context->pid_name,
+ curr_context->id,
+ (kgsl_readtimestamp(device,
+ context,
+ KGSL_TIMESTAMP_RETIRED)+1),
+ curr_context->ib_gpu_time_used,
+ curr_global_ts+1);
+ adreno_dev->long_ib = 1;
+ adreno_dev->long_ib_ts =
+ curr_global_ts;
+ curr_context->ib_gpu_time_used =
+ 0;
+ return 1;
+ }
+ }
+ }
+ } else {
+ KGSL_FT_ERR(device,
+ "Last context unknown id:%d\n",
+ curr_context_id);
+ }
+ } else {
+ /* GPU is moving forward */
+ prev_global_ts = curr_global_ts;
+ context = NULL;
+ curr_context = NULL;
+ adreno_dev->long_ib = 0;
+ adreno_dev->long_ib_ts = 0;
+ }
+
+
+ /* If hangs are not detected, copy the current reg values
+ * to previous values and return no hang */
+ for (i = 0; i < ft_detect_regs_count; i++)
+ prev_reg_val[i] = curr_reg_val[i];
+ return 0;
}
/**
@@ -2362,7 +2909,8 @@
* @context - pointer to the active KGSL context
* @timestamp - the timestamp that the process was waiting for
*
- * Process a possible GPU hang and try to recover from it cleanly
+ * Process a possible GPU hang and try fault tolerance from it
+ * cleanly
*/
static int adreno_handle_hang(struct kgsl_device *device,
struct kgsl_context *context, unsigned int timestamp)
@@ -2370,6 +2918,7 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
unsigned int context_id = _get_context_id(context);
unsigned int ts_issued;
+ unsigned int rptr;
/* Do one last check to see if we somehow made it through */
if (kgsl_check_timestamp(device, context, timestamp))
@@ -2377,15 +2926,22 @@
ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
- KGSL_DRV_ERR(device,
+ adreno_regread(device, REG_CP_RB_RPTR, &rptr);
+
+ /* Make sure timestamp check finished before triggering a hang */
+ mb();
+
+ KGSL_DRV_WARN(device,
"Device hang detected while waiting for timestamp: "
"<%d:0x%x>, last submitted timestamp: <%d:0x%x>, "
- "wptr: 0x%x\n",
- context_id, timestamp, context_id, ts_issued,
- adreno_dev->ringbuffer.wptr);
+ "retired timestamp: <%d:0x%x>, wptr: 0x%x, rptr: 0x%x\n",
+ context_id, timestamp, context_id, ts_issued, context_id,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ adreno_dev->ringbuffer.wptr, rptr);
- /* Return 0 after a successful recovery */
- if (!adreno_dump_and_recover(device))
+ /* Return 0 after a successful fault tolerance */
+ if (!adreno_dump_and_exec_ft(device))
return 0;
return -ETIMEDOUT;
@@ -2439,7 +2995,7 @@
struct adreno_context *adreno_ctx = context ? context->devctxt : NULL;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
unsigned int context_id = _get_context_id(context);
- unsigned int prev_reg_val[hang_detect_regs_count];
+ unsigned int prev_reg_val[ft_detect_regs_count];
unsigned int time_elapsed = 0;
unsigned int wait;
int ts_compare = 1;
@@ -2498,7 +3054,7 @@
}
/* Check to see if the GPU is hung */
- if (adreno_hang_detect(device, prev_reg_val)) {
+ if (adreno_ft_detect(device, prev_reg_val)) {
ret = adreno_handle_hang(device, context, timestamp);
break;
}
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index c1f2423..6c57078 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -34,6 +34,7 @@
#define KGSL_CMD_FLAGS_NONE 0x00000000
#define KGSL_CMD_FLAGS_PMODE 0x00000001
#define KGSL_CMD_FLAGS_INTERNAL_ISSUE 0x00000002
+#define KGSL_CMD_FLAGS_EOF 0x00000100
/* Command identifiers */
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
@@ -41,6 +42,8 @@
#define KGSL_CMD_INTERNAL_IDENTIFIER 0x2EEDD00D
#define KGSL_START_OF_IB_IDENTIFIER 0x2EADEABE
#define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD
+#define KGSL_END_OF_FRAME_IDENTIFIER 0x2E0F2E0F
+#define KGSL_NOP_IB_IDENTIFIER 0x20F20F20
#ifdef CONFIG_MSM_SCM
#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz)
@@ -101,9 +104,15 @@
unsigned int instruction_size;
unsigned int ib_check_level;
unsigned int fast_hang_detect;
+ unsigned int ft_policy;
+ unsigned int long_ib_detect;
+ unsigned int long_ib;
+ unsigned int long_ib_ts;
+ unsigned int ft_pf_policy;
unsigned int gpulist_index;
struct ocmem_buf *ocmem_hdl;
unsigned int ocmem_base;
+ unsigned int gpu_cycles;
};
struct adreno_gpudev {
@@ -133,8 +142,8 @@
};
/*
- * struct adreno_recovery_data - Structure that contains all information to
- * perform gpu recovery from hangs
+ * struct adreno_ft_data - Structure that contains all information to
+ * perform gpu fault tolerance
* @ib1 - IB1 that the GPU was executing when hang happened
* @context_id - Context which caused the hang
* @global_eop - eoptimestamp at time of hang
@@ -142,11 +151,19 @@
* @rb_size - Number of valid dwords in rb_buffer
* @bad_rb_buffer - Buffer that holds commands from the hanging context
* bad_rb_size - Number of valid dwords in bad_rb_buffer
+ * @good_rb_buffer - Buffer that holds commands from good contexts
+ * good_rb_size - Number of valid dwords in good_rb_buffer
* @last_valid_ctx_id - The last context from which commands were placed in
* ringbuffer before the GPU hung
+ * @step - Current fault tolerance step being executed
+ * @err_code - Fault tolerance error code
* @fault - Indicates whether the hang was caused due to a pagefault
+ * @start_of_replay_cmds - Offset in ringbuffer from where commands can be
+ * replayed during fault tolerance
+ * @replay_for_snapshot - Offset in ringbuffer where IB's can be saved for
+ * replaying with snapshot
*/
-struct adreno_recovery_data {
+struct adreno_ft_data {
unsigned int ib1;
unsigned int context_id;
unsigned int global_eop;
@@ -154,10 +171,32 @@
unsigned int rb_size;
unsigned int *bad_rb_buffer;
unsigned int bad_rb_size;
+ unsigned int *good_rb_buffer;
+ unsigned int good_rb_size;
unsigned int last_valid_ctx_id;
- int fault;
+ unsigned int status;
+ unsigned int ft_policy;
+ unsigned int err_code;
+ unsigned int start_of_replay_cmds;
+ unsigned int replay_for_snapshot;
};
+/* Fault Tolerance policy flags */
+#define KGSL_FT_DISABLE BIT(0)
+#define KGSL_FT_REPLAY BIT(1)
+#define KGSL_FT_SKIPIB BIT(2)
+#define KGSL_FT_SKIPFRAME BIT(3)
+#define KGSL_FT_TEMP_DISABLE BIT(4)
+#define KGSL_FT_DEFAULT_POLICY (KGSL_FT_REPLAY + KGSL_FT_SKIPIB)
+
+/* Pagefault policy flags */
+#define KGSL_FT_PAGEFAULT_INT_ENABLE 0x00000001
+#define KGSL_FT_PAGEFAULT_GPUHALT_ENABLE 0x00000002
+#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE 0x00000004
+#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT 0x00000008
+#define KGSL_FT_PAGEFAULT_DEFAULT_POLICY (KGSL_FT_PAGEFAULT_INT_ENABLE + \
+ KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+
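Note that the default policies above combine distinct single-bit flags, so '+'
and '|' produce the same mask, and each fault-tolerance step simply tests its
own bit. A quick stand-alone illustration with shortened flag names:

    #include <stdio.h>

    #define FT_DISABLE   (1u << 0)
    #define FT_REPLAY    (1u << 1)
    #define FT_SKIPIB    (1u << 2)
    #define FT_SKIPFRAME (1u << 3)

    int main(void)
    {
        unsigned int policy = FT_REPLAY + FT_SKIPIB;    /* == FT_REPLAY | FT_SKIPIB */

        if (policy & FT_REPLAY)
            printf("replay the bad commands first\n");
        if (policy & FT_SKIPIB)
            printf("then try NOP'ing the faulting IB\n");
        if (!(policy & FT_SKIPFRAME))
            printf("frame skipping is not enabled by default\n");
        return 0;
    }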
extern struct adreno_gpudev adreno_a2xx_gpudev;
extern struct adreno_gpudev adreno_a3xx_gpudev;
@@ -179,8 +218,8 @@
extern const unsigned int a330_registers[];
extern const unsigned int a330_registers_count;
-extern unsigned int hang_detect_regs[];
-extern const unsigned int hang_detect_regs_count;
+extern unsigned int ft_detect_regs[];
+extern const unsigned int ft_detect_regs_count;
int adreno_idle(struct kgsl_device *device);
@@ -211,9 +250,12 @@
void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
int hang);
-int adreno_dump_and_recover(struct kgsl_device *device);
+int adreno_dump_and_exec_ft(struct kgsl_device *device);
-unsigned int adreno_hang_detect(struct kgsl_device *device,
+void adreno_dump_rb(struct kgsl_device *device, const void *buf,
+ size_t len, int start, int size);
+
+unsigned int adreno_ft_detect(struct kgsl_device *device,
unsigned int *prev_reg_val);
static inline int adreno_is_a200(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 08c800e..f1024d6 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -52,8 +52,8 @@
0x2240, 0x227e,
0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
- 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
- 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
+ 0x22ff, 0x22ff, 0x2340, 0x2343,
+ 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
@@ -61,8 +61,8 @@
0x25f0, 0x25f0,
0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
- 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
- 0x2750, 0x2756, 0x2760, 0x2760, 0x300C, 0x300E, 0x301C, 0x301D,
+ 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
+ 0x300C, 0x300E, 0x301C, 0x301D,
0x302A, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031, 0x3034, 0x3036,
0x303C, 0x303C, 0x305E, 0x305F,
};
@@ -70,7 +70,7 @@
const unsigned int a3xx_registers_count = ARRAY_SIZE(a3xx_registers) / 2;
/* Removed the following HLSQ register ranges from being read during
- * recovery since reading the registers may cause the device to hang:
+ * fault tolerance since reading the registers may cause the device to hang:
*/
const unsigned int a3xx_hlsq_registers[] = {
0x0e00, 0x0e05, 0x0e0c, 0x0e0c, 0x0e22, 0x0e23,
@@ -2704,26 +2704,22 @@
static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = &adreno_dev->dev;
- unsigned int reg, val;
-
- /* Freeze the counter */
- adreno_regread(device, A3XX_RBBM_RBBM_CTL, &reg);
- reg &= ~RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
- adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+ unsigned int val;
+ unsigned int ret = 0;
/* Read the value */
adreno_regread(device, A3XX_RBBM_PERFCTR_PWR_1_LO, &val);
- /* Reset the counter */
- reg |= RBBM_RBBM_CTL_RESET_PWR_CTR1;
- adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+ /* Return 0 for the first read */
+ if (adreno_dev->gpu_cycles != 0) {
+ if (val < adreno_dev->gpu_cycles)
+ ret = (0xFFFFFFFF - adreno_dev->gpu_cycles) + val;
+ else
+ ret = val - adreno_dev->gpu_cycles;
+ }
- /* Re-enable the counter */
- reg &= ~RBBM_RBBM_CTL_RESET_PWR_CTR1;
- reg |= RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
- adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
-
- return val;
+ adreno_dev->gpu_cycles = val;
+ return ret;
}
struct a3xx_vbif_data {
@@ -2824,10 +2820,6 @@
{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
/* Set up VBIF_ROUND_ROBIN_QOS_ARB */
{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
- /* Disable VBIF clock gating. This is to enable AXI running
- * higher frequency than GPU.
- */
- { A3XX_VBIF_CLKON, 1 },
{0, 0},
};
@@ -2848,6 +2840,7 @@
struct kgsl_device *device = &adreno_dev->dev;
struct a3xx_vbif_data *vbif = NULL;
int i;
+ unsigned int reg;
for (i = 0; i < ARRAY_SIZE(a3xx_vbif_platforms); i++) {
if (a3xx_vbif_platforms[i].devfunc(adreno_dev)) {
@@ -2928,6 +2921,20 @@
adreno_regwrite(device, A3XX_SP_PERFCOUNTER7_SELECT,
SP_FS_CFLOW_INSTRUCTIONS);
}
+
+ adreno_regwrite(device, A3XX_SP_PERFCOUNTER7_SELECT,
+ SP_FS_FULL_ALU_INSTRUCTIONS);
+
+ /* Turn on the GPU busy counter and let it run free */
+
+ adreno_dev->gpu_cycles = 0;
+
+ adreno_regread(device, A3XX_RBBM_RBBM_CTL, &reg);
+ reg |= RBBM_RBBM_CTL_RESET_PWR_CTR1;
+ adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+ reg &= ~RBBM_RBBM_CTL_RESET_PWR_CTR1;
+ reg |= RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
+ adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
}
/* Defined in adreno_a3xx_snapshot.c */
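[Illustration, not part of the patch] With the change above, the power counter is started once at init and left free-running; a3xx_busy_cycles() then returns the delta since the previous sample and tolerates a 32-bit wrap. The following userspace mirror of that delta logic shows the intended behavior.

#include <stdio.h>

static unsigned int gpu_cycles;	/* mirrors adreno_dev->gpu_cycles */

/* Same delta computation as a3xx_busy_cycles(): the first sample returns 0,
 * later samples return the elapsed count, handling one counter wrap. */
static unsigned int busy_delta(unsigned int val)
{
	unsigned int ret = 0;

	if (gpu_cycles != 0) {
		if (val < gpu_cycles)
			ret = (0xFFFFFFFF - gpu_cycles) + val;
		else
			ret = val - gpu_cycles;
	}
	gpu_cycles = val;
	return ret;
}

int main(void)
{
	printf("%u\n", busy_delta(1000));	/* 0   (first read)  */
	printf("%u\n", busy_delta(1500));	/* 500               */
	printf("%u\n", busy_delta(200));	/* counter wrapped   */
	return 0;
}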
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 1989ff5..ef599e9 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -64,5 +64,33 @@
adreno_dev->fast_hang_detect = 1;
debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs,
&adreno_dev->fast_hang_detect);
+ /*
+ * FT policy can be set to any of the options below.
+ * KGSL_FT_DISABLE -> BIT(0) Set to disable FT
+ * KGSL_FT_REPLAY -> BIT(1) Set to enable replay
+ * KGSL_FT_SKIPIB -> BIT(2) Set to skip IB
+ * KGSL_FT_SKIPFRAME -> BIT(3) Set to skip frame
+ * by default set FT policy to KGSL_FT_DEFAULT_POLICY
+ */
+ adreno_dev->ft_policy = KGSL_FT_DEFAULT_POLICY;
+ debugfs_create_u32("ft_policy", 0644, device->d_debugfs,
+ &adreno_dev->ft_policy);
+ /* By default enable long IB detection */
+ adreno_dev->long_ib_detect = 1;
+ debugfs_create_u32("long_ib_detect", 0644, device->d_debugfs,
+ &adreno_dev->long_ib_detect);
+ /*
+ * FT pagefault policy can be set to any of the options below.
+ * KGSL_FT_PAGEFAULT_INT_ENABLE -> BIT(0) set to enable pagefault INT
+ * KGSL_FT_PAGEFAULT_GPUHALT_ENABLE -> BIT(1) Set to enable GPU HALT on
+ * pagefaults. This stalls the GPU on a pagefault on IOMMU v1 HW.
+ * KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE -> BIT(2) Set to log only one
+ * pagefault per page.
+ * KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT -> BIT(3) Set to log only one
+ * pagefault per INT.
+ */
+ adreno_dev->ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY;
+ debugfs_create_u32("ft_pagefault_policy", 0644, device->d_debugfs,
+ &adreno_dev->ft_pf_policy);
}
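[Illustration, not part of the patch] Both policies end up as writable u32 debugfs nodes, so they can be tuned at runtime. A minimal userspace sketch follows; the path is an assumption (debugfs mounted at /sys/kernel/debug and the 3D core registered as kgsl-3d0).

#include <stdio.h>

/* Assumed location of the node created by debugfs_create_u32("ft_policy", ...) */
#define FT_POLICY_NODE "/sys/kernel/debug/kgsl/kgsl-3d0/ft_policy"

int main(void)
{
	FILE *f = fopen(FT_POLICY_NODE, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* 8 == KGSL_FT_SKIPFRAME: skip the faulting context's frame instead of
	 * the default replay + skip-IB policy (6). */
	fprintf(f, "8\n");
	fclose(f);
	return 0;
}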
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index b109e14..023b057 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -155,6 +155,8 @@
if (drawctxt == NULL)
return -ENOMEM;
+ drawctxt->pid = task_pid_nr(current);
+ strlcpy(drawctxt->pid_name, current->comm, TASK_COMM_LEN);
drawctxt->pagetable = pagetable;
drawctxt->bin_base_offset = 0;
drawctxt->id = context->id;
@@ -177,6 +179,9 @@
drawctxt->flags |= CTXT_FLAGS_USER_GENERATED_TS;
}
+ if (flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ drawctxt->flags |= CTXT_FLAGS_NO_FAULT_TOLERANCE;
+
ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
if (ret)
goto err;
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 65dbd4c..aba29ae 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,8 @@
#ifndef __ADRENO_DRAWCTXT_H
#define __ADRENO_DRAWCTXT_H
+#include <linux/sched.h>
+
#include "adreno_pm4types.h"
#include "a2xx_reg.h"
@@ -44,12 +46,16 @@
#define CTXT_FLAGS_TRASHSTATE BIT(10)
/* per context timestamps enabled */
#define CTXT_FLAGS_PER_CONTEXT_TS BIT(11)
-/* Context has caused a GPU hang and recovered properly */
-#define CTXT_FLAGS_GPU_HANG_RECOVERED BIT(12)
+/* Context has caused a GPU hang and fault tolerance successful */
+#define CTXT_FLAGS_GPU_HANG_FT BIT(12)
/* Context is being destroyed so dont save it */
#define CTXT_FLAGS_BEING_DESTROYED BIT(13)
/* User mode generated timestamps enabled */
#define CTXT_FLAGS_USER_GENERATED_TS BIT(14)
+/* Context skip till EOF */
+#define CTXT_FLAGS_SKIP_EOF BIT(15)
+/* Context no fault tolerance */
+#define CTXT_FLAGS_NO_FAULT_TOLERANCE BIT(16)
struct kgsl_device;
struct adreno_device;
@@ -82,8 +88,13 @@
};
struct adreno_context {
+ pid_t pid;
+ char pid_name[TASK_COMM_LEN];
unsigned int id;
+ unsigned int ib_gpu_time_used;
uint32_t flags;
+ uint32_t pagefault;
+ unsigned long pagefault_ts;
struct kgsl_pagetable *pagetable;
struct kgsl_memdesc gpustate;
unsigned int reg_restore[3];
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 2b9c286..29d689b 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -305,7 +305,7 @@
#endif
}
-static void adreno_dump_rb(struct kgsl_device *device, const void *buf,
+void adreno_dump_rb(struct kgsl_device *device, const void *buf,
size_t len, int start, int size)
{
const uint32_t *ptr = buf;
@@ -729,6 +729,7 @@
unsigned int ts_processed = 0xdeaddead;
struct kgsl_context *context;
unsigned int context_id;
+ unsigned int rbbm_status;
static struct ib_list ib_list;
@@ -738,12 +739,16 @@
mb();
- msm_clk_dump_debug_info();
+ if (device->pm_dump_enable) {
+ msm_clk_dump_debug_info();
- if (adreno_is_a2xx(adreno_dev))
- adreno_dump_a2xx(device);
- else if (adreno_is_a3xx(adreno_dev))
- adreno_dump_a3xx(device);
+ if (adreno_is_a2xx(adreno_dev))
+ adreno_dump_a2xx(device);
+ else if (adreno_is_a3xx(adreno_dev))
+ adreno_dump_a3xx(device);
+ }
+
+ kgsl_regread(device, adreno_dev->gpudev->reg_rbbm_status, &rbbm_status);
pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
cur_pt_base = pt_base;
@@ -758,6 +763,18 @@
kgsl_regread(device, REG_CP_IB2_BASE, &cp_ib2_base);
kgsl_regread(device, REG_CP_IB2_BUFSZ, &cp_ib2_bufsz);
+ /* If postmortem dump is not enabled, dump minimal set and return */
+ if (!device->pm_dump_enable) {
+
+ KGSL_LOG_DUMP(device,
+ "STATUS %08X | IB1:%08X/%08X | IB2: %08X/%08X"
+ " | RPTR: %04X | WPTR: %04X\n",
+ rbbm_status, cp_ib1_base, cp_ib1_bufsz, cp_ib2_base,
+ cp_ib2_bufsz, cp_rb_rptr, cp_rb_wptr);
+
+ return 0;
+ }
+
kgsl_sharedmem_readl(&device->memstore,
(unsigned int *) &context_id,
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
@@ -766,7 +783,7 @@
if (context) {
ts_processed = kgsl_readtimestamp(device, context,
KGSL_TIMESTAMP_RETIRED);
- KGSL_LOG_DUMP(device, "CTXT: %d TIMESTM RTRD: %08X\n",
+ KGSL_LOG_DUMP(device, "FT CTXT: %d TIMESTM RTRD: %08X\n",
context->id, ts_processed);
} else
KGSL_LOG_DUMP(device, "BAD CTXT: %d\n", context_id);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 69b34fa..4333f2f 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -64,7 +64,7 @@
unsigned long wait_time;
unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
unsigned long wait_time_part;
- unsigned int prev_reg_val[hang_detect_regs_count];
+ unsigned int prev_reg_val[ft_detect_regs_count];
memset(prev_reg_val, 0, sizeof(prev_reg_val));
@@ -109,7 +109,7 @@
if (time_after(jiffies, wait_time_part)) {
wait_time_part = jiffies +
msecs_to_jiffies(KGSL_TIMEOUT_PART);
- if ((adreno_hang_detect(rb->device,
+ if ((adreno_ft_detect(rb->device,
prev_reg_val))){
KGSL_DRV_ERR(rb->device,
"Hang detected while waiting for freespace in"
@@ -129,7 +129,7 @@
continue;
err:
- if (!adreno_dump_and_recover(rb->device)) {
+ if (!adreno_dump_and_exec_ft(rb->device)) {
if (context && context->flags & CTXT_FLAGS_GPU_HANG) {
KGSL_CTXT_WARN(rb->device,
"Context %p caused a gpu hang. Will not accept commands for context %d\n",
@@ -138,7 +138,7 @@
}
wait_time = jiffies + wait_timeout;
} else {
- /* GPU is hung and we cannot recover */
+ /* GPU is hung and fault tolerance failed */
BUG();
}
}
@@ -581,7 +581,7 @@
if (adreno_is_a2xx(adreno_dev))
total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */
- total_sizedwords += 2; /* scratchpad ts for recovery */
+ total_sizedwords += 2; /* scratchpad ts for fault tolerance */
total_sizedwords += 3; /* sop timestamp */
total_sizedwords += 4; /* eop timestamp */
@@ -629,7 +629,7 @@
}
timestamp = rb->timestamp[context_id];
- /* scratchpad ts for recovery */
+ /* scratchpad ts for fault tolerance */
GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
@@ -756,6 +756,11 @@
GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
}
+ if (flags & KGSL_CMD_FLAGS_EOF) {
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_END_OF_FRAME_IDENTIFIER);
+ }
+
adreno_ringbuffer_submit(rb);
return timestamp;
@@ -999,20 +1004,12 @@
drawctxt = context->devctxt;
if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
- KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
+ KGSL_CTXT_ERR(device, "proc %s failed fault tolerance"
" will not accept commands for context %d\n",
- drawctxt, drawctxt->id);
+ drawctxt->pid_name, drawctxt->id);
return -EDEADLK;
}
- cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
- GFP_KERNEL);
- if (!link) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n",
- sizeof(unsigned int) * (numibs * 3 + 4));
- return -ENOMEM;
- }
-
/*When preamble is enabled, the preamble buffer with state restoration
commands are stored in the first node of the IB chain. We can skip that
if a context switch hasn't occured */
@@ -1021,6 +1018,27 @@
adreno_dev->drawctxt_active == drawctxt)
start_index = 1;
+ if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
+ KGSL_CTXT_ERR(device,
+ "proc %s triggered fault tolerance"
+ " skipping commands for context till EOF %d\n",
+ drawctxt->pid_name, drawctxt->id);
+ if (flags & KGSL_CMD_FLAGS_EOF)
+ drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
+ if (start_index)
+ numibs = 1;
+ else
+ numibs = 0;
+ }
+
+ cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
+ GFP_KERNEL);
+ if (!link) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ sizeof(unsigned int) * (numibs * 3 + 4));
+ return -ENOMEM;
+ }
+
if (!start_index) {
*cmds++ = cp_nop_packet(1);
*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
@@ -1060,7 +1078,7 @@
*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
drawctxt,
- 0,
+ (flags & KGSL_CMD_FLAGS_EOF),
&link[0], (cmds - link), *timestamp);
#ifdef CONFIG_MSM_KGSL_CFF_DUMP
@@ -1072,173 +1090,21 @@
adreno_idle(device);
#endif
- /* If context hung and recovered then return error so that the
- * application may handle it */
-
- ret = (drawctxt->flags & CTXT_FLAGS_GPU_HANG_RECOVERED) ?
- -EDEADLK : 0;
+ /*
+ * If context hung and recovered then return error so that the
+ * application may handle it
+ */
+ if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_FT) {
+ drawctxt->flags &= ~CTXT_FLAGS_GPU_HANG_FT;
+ ret = -EPROTO;
+ } else
+ ret = 0;
done:
kfree(link);
return ret;
}
-static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
- unsigned int *ptr,
- bool inc)
-{
- int status = -EINVAL;
- unsigned int val1;
- unsigned int size = rb->buffer_desc.size;
- unsigned int start_ptr = *ptr;
-
- while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
- if (inc)
- start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
- size);
- else
- start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
- size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
- if (KGSL_CMD_IDENTIFIER == val1) {
- if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
- start_ptr = adreno_ringbuffer_dec_wrapped(
- start_ptr, size);
- *ptr = start_ptr;
- status = 0;
- break;
- }
- }
- return status;
-}
-
-static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
- unsigned int *rb_rptr,
- unsigned int global_eop,
- bool inc)
-{
- int status = -EINVAL;
- unsigned int temp_rb_rptr = *rb_rptr;
- unsigned int size = rb->buffer_desc.size;
- unsigned int val[3];
- int i = 0;
- bool check = false;
-
- if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
- return status;
-
- do {
- /* when decrementing we need to decrement first and
- * then read make sure we cover all the data */
- if (!inc)
- temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
- temp_rb_rptr, size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
- temp_rb_rptr);
-
- if (check && ((inc && val[i] == global_eop) ||
- (!inc && (val[i] ==
- cp_type3_packet(CP_MEM_WRITE, 2) ||
- val[i] == CACHE_FLUSH_TS)))) {
- /* decrement i, i.e i = (i - 1 + 3) % 3 if
- * we are going forward, else increment i */
- i = (i + 2) % 3;
- if (val[i] == rb->device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp)) {
- int j = ((i + 2) % 3);
- if ((inc && (val[j] == CACHE_FLUSH_TS ||
- val[j] == cp_type3_packet(
- CP_MEM_WRITE, 2))) ||
- (!inc && val[j] == global_eop)) {
- /* Found the global eop */
- status = 0;
- break;
- }
- }
- /* if no match found then increment i again
- * since we decremented before matching */
- i = (i + 1) % 3;
- }
- if (inc)
- temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
- temp_rb_rptr, size);
-
- i = (i + 1) % 3;
- if (2 == i)
- check = true;
- } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
- /* temp_rb_rptr points to the command stream after global eop,
- * move backward till the start of command sequence */
- if (!status) {
- status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
- if (!status) {
- *rb_rptr = temp_rb_rptr;
- KGSL_DRV_ERR(rb->device,
- "Offset of cmd sequence after eop timestamp: 0x%x\n",
- temp_rb_rptr / sizeof(unsigned int));
- }
- }
- if (status)
- KGSL_DRV_ERR(rb->device,
- "Failed to find the command sequence after eop timestamp\n");
- return status;
-}
-
-static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
- unsigned int *rb_rptr,
- unsigned int ib1)
-{
- int status = -EINVAL;
- unsigned int temp_rb_rptr = *rb_rptr;
- unsigned int size = rb->buffer_desc.size;
- unsigned int val[2];
- int i = 0;
- bool check = false;
- bool ctx_switch = false;
-
- while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
-
- if (check && val[i] == ib1) {
- /* decrement i, i.e i = (i - 1 + 2) % 2 */
- i = (i + 1) % 2;
- if (adreno_cmd_is_ib(val[i])) {
- /* go till start of command sequence */
- status = _find_start_of_cmd_seq(rb,
- &temp_rb_rptr, false);
- KGSL_DRV_ERR(rb->device,
- "Found the hanging IB at offset 0x%x\n",
- temp_rb_rptr / sizeof(unsigned int));
- break;
- }
- /* if no match the increment i since we decremented
- * before checking */
- i = (i + 1) % 2;
- }
- /* Make sure you do not encounter a context switch twice, we can
- * encounter it once for the bad context as the start of search
- * can point to the context switch */
- if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
- if (ctx_switch) {
- KGSL_DRV_ERR(rb->device,
- "Context switch encountered before bad "
- "IB found\n");
- break;
- }
- ctx_switch = true;
- }
- i = (i + 1) % 2;
- if (1 == i)
- check = true;
- temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
- size);
- }
- if (!status)
- *rb_rptr = temp_rb_rptr;
- return status;
-}
-
static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
unsigned int rb_rptr)
{
@@ -1261,7 +1127,7 @@
kgsl_sharedmem_writel(&rb->buffer_desc,
temp_rb_rptr, cp_nop_packet(1));
}
- KGSL_DRV_ERR(rb->device,
+ KGSL_FT_INFO(rb->device,
"Turned preamble on at offset 0x%x\n",
temp_rb_rptr / 4);
break;
@@ -1283,21 +1149,41 @@
}
}
-static void _copy_valid_rb_content(struct adreno_ringbuffer *rb,
- unsigned int rb_rptr, unsigned int *temp_rb_buffer,
- int *rb_size, unsigned int *bad_rb_buffer,
- int *bad_rb_size,
- int *last_valid_ctx_id)
+void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+ struct adreno_ft_data *ft_data)
{
- unsigned int good_rb_idx = 0, cmd_start_idx = 0;
+ struct kgsl_device *device = rb->device;
+ unsigned int rb_rptr = ft_data->start_of_replay_cmds;
+ unsigned int good_rb_idx = 0, bad_rb_idx = 0, temp_rb_idx = 0;
+ unsigned int last_good_cmd_end_idx = 0, last_bad_cmd_end_idx = 0;
+ unsigned int cmd_start_idx = 0;
unsigned int val1 = 0;
- struct kgsl_context *k_ctxt;
- struct adreno_context *a_ctxt;
- unsigned int bad_rb_idx = 0;
int copy_rb_contents = 0;
unsigned int temp_rb_rptr;
+ struct kgsl_context *k_ctxt;
+ struct adreno_context *a_ctxt;
unsigned int size = rb->buffer_desc.size;
- unsigned int good_cmd_start_idx = 0;
+ unsigned int *temp_rb_buffer = ft_data->rb_buffer;
+ int *rb_size = &ft_data->rb_size;
+ unsigned int *bad_rb_buffer = ft_data->bad_rb_buffer;
+ int *bad_rb_size = &ft_data->bad_rb_size;
+ unsigned int *good_rb_buffer = ft_data->good_rb_buffer;
+ int *good_rb_size = &ft_data->good_rb_size;
+
+ /*
+ * If the start index from where commands need to be copied is invalid
+ * then no need to save off any commands
+ */
+ if (0xFFFFFFFF == ft_data->start_of_replay_cmds)
+ return;
+
+ k_ctxt = idr_find(&device->context_idr, ft_data->context_id);
+ if (k_ctxt) {
+ a_ctxt = k_ctxt->devctxt;
+ if (a_ctxt->flags & CTXT_FLAGS_PREAMBLE)
+ _turn_preamble_on_for_ib_seq(rb, rb_rptr);
+ }
+ k_ctxt = NULL;
/* Walk the rb from the context switch. Omit any commands
* for an invalid context. */
@@ -1307,9 +1193,11 @@
if (KGSL_CMD_IDENTIFIER == val1) {
/* Start is the NOP dword that comes before
* KGSL_CMD_IDENTIFIER */
- cmd_start_idx = bad_rb_idx - 1;
- if (copy_rb_contents)
- good_cmd_start_idx = good_rb_idx - 1;
+ cmd_start_idx = temp_rb_idx - 1;
+ if ((copy_rb_contents) && (good_rb_idx))
+ last_good_cmd_end_idx = good_rb_idx - 1;
+ if ((!copy_rb_contents) && (bad_rb_idx))
+ last_bad_cmd_end_idx = bad_rb_idx - 1;
}
/* check for context switch indicator */
@@ -1335,86 +1223,48 @@
!(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
!k_ctxt)) {
for (temp_idx = cmd_start_idx;
- temp_idx < bad_rb_idx;
+ temp_idx < temp_rb_idx;
temp_idx++)
- temp_rb_buffer[good_rb_idx++] =
- bad_rb_buffer[temp_idx];
- *last_valid_ctx_id = val2;
+ good_rb_buffer[good_rb_idx++] =
+ temp_rb_buffer[temp_idx];
+ ft_data->last_valid_ctx_id = val2;
copy_rb_contents = 1;
+ /* remove the good commands from bad buffer */
+ bad_rb_idx = last_bad_cmd_end_idx;
} else if (copy_rb_contents && k_ctxt &&
(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
- /* If we are changing to bad context then remove
- * the dwords we copied for this sequence from
- * the good buffer */
- good_rb_idx = good_cmd_start_idx;
+
+ /* If we are changing back to a bad context
+ * from good ctxt and were not copying commands
+ * to bad ctxt then copy over commands to
+ * the bad context */
+ for (temp_idx = cmd_start_idx;
+ temp_idx < temp_rb_idx;
+ temp_idx++)
+ bad_rb_buffer[bad_rb_idx++] =
+ temp_rb_buffer[temp_idx];
+ /* If we are changing to bad context then
+ * remove the dwords we copied for this
+ * sequence from the good buffer */
+ good_rb_idx = last_good_cmd_end_idx;
copy_rb_contents = 0;
}
}
}
if (copy_rb_contents)
- temp_rb_buffer[good_rb_idx++] = val1;
- /* Copy both good and bad commands for replay to the bad
- * buffer */
- bad_rb_buffer[bad_rb_idx++] = val1;
+ good_rb_buffer[good_rb_idx++] = val1;
+ else
+ bad_rb_buffer[bad_rb_idx++] = val1;
+
+ /* Copy both good and bad commands to temp buffer */
+ temp_rb_buffer[temp_rb_idx++] = val1;
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
}
- *rb_size = good_rb_idx;
+ *good_rb_size = good_rb_idx;
*bad_rb_size = bad_rb_idx;
-}
-
-int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
- struct adreno_recovery_data *rec_data)
-{
- int status;
- struct kgsl_device *device = rb->device;
- unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
- struct kgsl_context *context;
- struct adreno_context *adreno_context;
-
- context = idr_find(&device->context_idr, rec_data->context_id);
-
- /* Look for the command stream that is right after the global eop */
- status = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
- rec_data->global_eop + 1, false);
- if (status)
- goto done;
-
- if (context) {
- adreno_context = context->devctxt;
-
- if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
- if (rec_data->ib1) {
- status = _find_hanging_ib_sequence(rb, &rb_rptr,
- rec_data->ib1);
- if (status)
- goto copy_rb_contents;
- }
- _turn_preamble_on_for_ib_seq(rb, rb_rptr);
- } else {
- status = -EINVAL;
- }
- }
-
-copy_rb_contents:
- _copy_valid_rb_content(rb, rb_rptr, rec_data->rb_buffer,
- &rec_data->rb_size,
- rec_data->bad_rb_buffer,
- &rec_data->bad_rb_size,
- &rec_data->last_valid_ctx_id);
- /* If we failed to get the hanging IB sequence then we cannot execute
- * commands from the bad context or preambles not supported */
- if (status) {
- rec_data->bad_rb_size = 0;
- status = 0;
- }
- /* If there is no context then that means there are no commands for
- * good case */
- if (!context)
- rec_data->rb_size = 0;
-done:
- return status;
+ *rb_size = temp_rb_idx;
}
void
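[Orientation only, not part of the patch] After this rewrite, every dword walked from the ringbuffer is always copied into the temp buffer, and additionally into either the good or the bad buffer depending on whether the context that owns it is currently considered good. A toy mirror of that routing rule:

#include <stdio.h>

/* Toy mirror of the routing in adreno_ringbuffer_extract(): temp gets every
 * dword; good/bad get it depending on copy_rb_contents (1 while walking a
 * good context, 0 while walking the hanging one). */
static void route_dword(unsigned int val, int copy_rb_contents,
			unsigned int *temp, unsigned int *temp_idx,
			unsigned int *good, unsigned int *good_idx,
			unsigned int *bad, unsigned int *bad_idx)
{
	if (copy_rb_contents)
		good[(*good_idx)++] = val;
	else
		bad[(*bad_idx)++] = val;
	temp[(*temp_idx)++] = val;
}

int main(void)
{
	unsigned int temp[4], good[4], bad[4];
	unsigned int ti = 0, gi = 0, bi = 0;

	route_dword(0xCAFE0001, 1, temp, &ti, good, &gi, bad, &bi);
	route_dword(0xCAFE0002, 0, temp, &ti, good, &gi, bad, &bi);
	printf("temp=%u good=%u bad=%u\n", ti, gi, bi); /* temp=2 good=1 bad=1 */
	return 0;
}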
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index e87b506..fa03c05 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -27,7 +27,7 @@
struct kgsl_device;
struct kgsl_device_private;
-struct adreno_recovery_data;
+struct adreno_ft_data;
#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
struct kgsl_rbmemptrs {
@@ -114,8 +114,8 @@
void kgsl_cp_intrcallback(struct kgsl_device *device);
-int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
- struct adreno_recovery_data *rec_data);
+void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+ struct adreno_ft_data *ft_data);
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index e2869d4..2b0d5d9 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -3047,21 +3047,21 @@
kgsl_idle(device);
}
- KGSL_LOG_DUMP(device, "|%s| Dump Started\n", device->name);
- KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
+
+ if (device->pm_dump_enable) {
+
+ KGSL_LOG_DUMP(device,
+ "POWER: NAP ALLOWED = %d | START_STOP_SLEEP_WAKE = %d\n"
+ , pwr->nap_allowed, pwr->strtstp_sleepwake);
+
+ KGSL_LOG_DUMP(device,
+ "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
pwr->power_flags, pwr->active_pwrlevel);
- KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
- pwr->interval_timeout);
+ KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
+ pwr->interval_timeout);
- KGSL_LOG_DUMP(device, "POWER: NAP ALLOWED = %d | START_STOP_SLEEP_WAKE = %d\n",
- pwr->nap_allowed, pwr->strtstp_sleepwake);
-
- KGSL_LOG_DUMP(device, "GRP_CLK = %lu ",
- kgsl_get_clkrate(pwr->grp_clks[0]));
-
- KGSL_LOG_DUMP(device, "BUS CLK = %lu ",
- kgsl_get_clkrate(pwr->ebi1_clk));
+ }
/* Disable the idle timer so we don't get interrupted */
del_timer_sync(&device->idle_timer);
@@ -3089,7 +3089,7 @@
/* On a manual trigger, turn on the interrupts and put
the clocks to sleep. They will recover themselves
on the next event. For a hang, leave things as they
- are until recovery kicks in. */
+ are until fault tolerance kicks in. */
if (manual) {
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
@@ -3099,8 +3099,6 @@
kgsl_pwrctrl_sleep(device);
}
- KGSL_LOG_DUMP(device, "|%s| Dump Finished\n", device->name);
-
return 0;
}
EXPORT_SYMBOL(kgsl_postmortem_dump);
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 76998db..9dfda32 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -70,6 +70,20 @@
return 0;
}
+static int pm_enabled_set(void *data, u64 val)
+{
+ struct kgsl_device *device = data;
+ device->pm_dump_enable = val;
+ return 0;
+}
+
+static int pm_enabled_get(void *data, u64 *val)
+{
+ struct kgsl_device *device = data;
+ *val = device->pm_dump_enable;
+ return 0;
+}
+
DEFINE_SIMPLE_ATTRIBUTE(pm_regs_enabled_fops,
pm_regs_enabled_get,
@@ -79,6 +93,10 @@
pm_ib_enabled_get,
pm_ib_enabled_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(pm_enabled_fops,
+ pm_enabled_get,
+ pm_enabled_set, "%llu\n");
+
static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val)
{
*log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX);
@@ -105,6 +123,7 @@
KGSL_DEBUGFS_LOG(ctxt_log);
KGSL_DEBUGFS_LOG(mem_log);
KGSL_DEBUGFS_LOG(pwr_log);
+KGSL_DEBUGFS_LOG(ft_log);
static int memfree_hist_print(struct seq_file *s, void *unused)
{
@@ -166,6 +185,7 @@
device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
+ device->ft_log = KGSL_LOG_LEVEL_DEFAULT;
debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
&cmd_log_fops);
@@ -179,6 +199,8 @@
&pwr_log_fops);
debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
&memfree_hist_fops);
+ debugfs_create_file("log_level_ft", 0644, device->d_debugfs, device,
+ &ft_log_fops);
/* Create postmortem dump control files */
@@ -193,6 +215,9 @@
&pm_regs_enabled_fops);
debugfs_create_file("ib_enabled", 0644, pm_d_debugfs, device,
&pm_ib_enabled_fops);
+ device->pm_dump_enable = 0;
+ debugfs_create_file("enable", 0644, pm_d_debugfs, device,
+ &pm_enabled_fops);
}
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 66390fc..aca5660 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -24,9 +24,10 @@
#include "kgsl_pwrscale.h"
#include <linux/sync.h>
-#define KGSL_TIMEOUT_NONE 0
-#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF
-#define KGSL_TIMEOUT_PART 50 /* 50 msec */
+#define KGSL_TIMEOUT_NONE 0
+#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF
+#define KGSL_TIMEOUT_PART 50 /* 50 msec */
+#define KGSL_TIMEOUT_LONG_IB_DETECTION 2000 /* 2 sec*/
#define FIRST_TIMEOUT (HZ / 2)
@@ -46,7 +47,7 @@
#define KGSL_STATE_SLEEP 0x00000008
#define KGSL_STATE_SUSPEND 0x00000010
#define KGSL_STATE_HUNG 0x00000020
-#define KGSL_STATE_DUMP_AND_RECOVER 0x00000040
+#define KGSL_STATE_DUMP_AND_FT 0x00000040
#define KGSL_STATE_SLUMBER 0x00000080
#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK 0x1000000
@@ -185,7 +186,7 @@
wait_queue_head_t wait_queue;
struct workqueue_struct *work_queue;
struct device *parentdev;
- struct completion recovery_gate;
+ struct completion ft_gate;
struct dentry *d_debugfs;
struct idr context_idr;
struct early_suspend display_off;
@@ -211,6 +212,8 @@
int drv_log;
int mem_log;
int pwr_log;
+ int ft_log;
+ int pm_dump_enable;
struct kgsl_pwrscale pwrscale;
struct kobject pwrscale_kobj;
struct work_struct ts_expired_ws;
@@ -230,7 +233,7 @@
#define KGSL_DEVICE_COMMON_INIT(_dev) \
.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
.suspend_gate = COMPLETION_INITIALIZER((_dev).suspend_gate),\
- .recovery_gate = COMPLETION_INITIALIZER((_dev).recovery_gate),\
+ .ft_gate = COMPLETION_INITIALIZER((_dev).ft_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
.ts_expired_ws = __WORK_INITIALIZER((_dev).ts_expired_ws,\
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 9a1a431..cb206ac 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -22,6 +22,7 @@
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "kgsl_trace.h"
+#include "adreno.h"
#define KGSL_PAGETABLE_SIZE \
ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
@@ -403,11 +404,22 @@
{
unsigned int reg;
unsigned int ptbase;
+ struct kgsl_device *device;
+ struct adreno_device *adreno_dev;
+ unsigned int no_page_fault_log = 0;
- kgsl_regread(mmu->device, MH_MMU_PAGE_FAULT, &reg);
- kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
+ device = mmu->device;
+ adreno_dev = ADRENO_DEVICE(device);
- KGSL_MEM_CRIT(mmu->device,
+ kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
+ kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
+
+
+ if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
+ no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, reg);
+
+ if (!no_page_fault_log)
+ KGSL_MEM_CRIT(mmu->device,
"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
reg & ~(PAGE_SIZE - 1),
kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 93b7e5d..e3cea88 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -283,10 +283,16 @@
struct kgsl_iommu_device *iommu_dev;
unsigned int ptbase, fsr;
unsigned int pid;
-
struct _mem_entry prev, next;
unsigned int fsynr0, fsynr1;
int write;
+ struct kgsl_device *device;
+ struct adreno_device *adreno_dev;
+ unsigned int no_page_fault_log = 0;
+ unsigned int curr_context_id = 0;
+ unsigned int curr_global_ts = 0;
+ static struct adreno_context *curr_context;
+ static struct kgsl_context *context;
ret = get_iommu_unit(dev, &mmu, &iommu_unit);
if (ret)
@@ -298,6 +304,8 @@
goto done;
}
iommu = mmu->priv;
+ device = mmu->device;
+ adreno_dev = ADRENO_DEVICE(device);
ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
iommu_dev->ctx_id, TTBR0);
@@ -324,27 +332,55 @@
iommu_dev->ctx_id, fsr, fsynr0, fsynr1,
write ? "write" : "read");
- _check_if_freed(iommu_dev, addr, pid);
+ if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
+ no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
- KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- nearby memory ----\n");
+ if (!no_page_fault_log) {
+ KGSL_MEM_CRIT(iommu_dev->kgsldev,
+ "GPU PAGE FAULT: addr = %lX pid = %d\n",
+ addr, kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase));
+ KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
+ iommu_dev->ctx_id, fsr);
- _find_mem_entries(mmu, addr, ptbase, &prev, &next);
+ _check_if_freed(iommu_dev, addr, pid);
- if (prev.gpuaddr)
- _print_entry(iommu_dev->kgsldev, &prev);
- else
- KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+ KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- nearby memory ----\n");
- KGSL_LOG_DUMP(iommu_dev->kgsldev, " <- fault @ %8.8lX\n", addr);
+ _find_mem_entries(mmu, addr, ptbase, &prev, &next);
- if (next.gpuaddr != 0xFFFFFFFF)
- _print_entry(iommu_dev->kgsldev, &next);
- else
- KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+ if (prev.gpuaddr)
+ _print_entry(iommu_dev->kgsldev, &prev);
+ else
+ KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+
+ KGSL_LOG_DUMP(iommu_dev->kgsldev, " <- fault @ %8.8lX\n", addr);
+
+ if (next.gpuaddr != 0xFFFFFFFF)
+ _print_entry(iommu_dev->kgsldev, &next);
+ else
+ KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+
+ }
mmu->fault = 1;
iommu_dev->fault = 1;
+ kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
+ context = idr_find(&device->context_idr, curr_context_id);
+ if (context != NULL)
+ curr_context = context->devctxt;
+
+ kgsl_sharedmem_readl(&device->memstore, &curr_global_ts,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, eoptimestamp));
+
+ /*
+ * Store pagefault's timestamp in adreno context,
+ * this information will be used in GFT
+ */
+ curr_context->pagefault = 1;
+ curr_context->pagefault_ts = curr_global_ts;
+
trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
write ? "write" : "read");
@@ -355,7 +391,8 @@
* the GPU and trigger a snapshot. To stall the transaction return
* EBUSY error.
*/
- ret = -EBUSY;
+ if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+ ret = -EBUSY;
done:
return ret;
}
@@ -722,17 +759,23 @@
{
struct kgsl_iommu *iommu = mmu->priv;
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id];
- int i;
+ int i, j;
+ int found_ctx;
- if (data->iommu_ctx_count > KGSL_IOMMU_MAX_DEVS_PER_UNIT) {
- KGSL_CORE_ERR("Too many iommu devices defined for an "
- "IOMMU unit\n");
- return -EINVAL;
- }
-
- for (i = 0; i < data->iommu_ctx_count; i++) {
- if (!data->iommu_ctxs[i].iommu_ctx_name)
- continue;
+ for (j = 0; j < KGSL_IOMMU_MAX_DEVS_PER_UNIT; j++) {
+ found_ctx = 0;
+ for (i = 0; i < data->iommu_ctx_count; i++) {
+ if (j == data->iommu_ctxs[i].ctx_id) {
+ found_ctx = 1;
+ break;
+ }
+ }
+ if (!found_ctx)
+ break;
+ if (!data->iommu_ctxs[i].iommu_ctx_name) {
+ KGSL_CORE_ERR("Context name invalid\n");
+ return -EINVAL;
+ }
iommu_unit->dev[iommu_unit->dev_count].dev =
msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
@@ -741,12 +784,6 @@
"device %s\n", data->iommu_ctxs[i].iommu_ctx_name);
return -EINVAL;
}
- if (KGSL_IOMMU_CONTEXT_USER != data->iommu_ctxs[i].ctx_id &&
- KGSL_IOMMU_CONTEXT_PRIV != data->iommu_ctxs[i].ctx_id) {
- KGSL_CORE_ERR("Invalid context ID defined: %d\n",
- data->iommu_ctxs[i].ctx_id);
- return -EINVAL;
- }
iommu_unit->dev[iommu_unit->dev_count].ctx_id =
data->iommu_ctxs[i].ctx_id;
iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;
@@ -758,6 +795,10 @@
iommu_unit->dev_count++;
}
+ if (!j) {
+ KGSL_CORE_ERR("No ctxts initialized, user ctxt absent\n ");
+ return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
index 81a35e0..a7832e4 100644
--- a/drivers/gpu/msm/kgsl_log.h
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2011,2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -103,6 +103,15 @@
#define KGSL_PWR_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_FT_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->ft_log, fmt, ##args)
+#define KGSL_FT_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->ft_log, fmt, ##args)
+#define KGSL_FT_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->ft_log, fmt, ##args)
+#define KGSL_FT_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->ft_log, fmt, ##args)
+
/* Core error messages - these are for core KGSL functions that have
no device associated with them (such as memory) */
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 01b255c..ccaceb3 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -333,6 +333,35 @@
}
EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
+unsigned int
+kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, unsigned int pt_base,
+ unsigned int addr)
+{
+ struct kgsl_pagetable *pt;
+ unsigned int ret = 0;
+
+ if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
+ return 0;
+ spin_lock(&kgsl_driver.ptlock);
+ list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+ if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
+ if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
+ ret = 1;
+ break;
+ } else {
+ pt->fault_addr = (addr & ~(PAGE_SIZE-1));
+ ret = 0;
+ break;
+ }
+
+ }
+ }
+ spin_unlock(&kgsl_driver.ptlock);
+
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_mmu_log_fault_addr);
+
int kgsl_mmu_init(struct kgsl_device *device)
{
int status = 0;
@@ -438,6 +467,7 @@
pagetable->name = name;
pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
+ pagetable->fault_addr = 0xFFFFFFFF;
/*
* create a separate kgsl pool for IOMMU, global mappings can be mapped
@@ -693,6 +723,8 @@
{
struct gen_pool *pool;
int size;
+ unsigned int start_addr = 0;
+ unsigned int end_addr = 0;
if (memdesc->size == 0 || memdesc->gpuaddr == 0)
return 0;
@@ -704,10 +736,19 @@
size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+ start_addr = memdesc->gpuaddr;
+ end_addr = (memdesc->gpuaddr + size);
+
if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
spin_lock(&pagetable->lock);
pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
&pagetable->tlb_flags);
+
+ /* If buffer is unmapped 0 fault addr */
+ if ((pagetable->fault_addr >= start_addr) &&
+ (pagetable->fault_addr < end_addr))
+ pagetable->fault_addr = 0;
+
if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
spin_lock(&pagetable->lock);
/* Remove the statistics */
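[Illustration, not part of the patch] A small userspace mirror of the one-log-per-page check that kgsl_mmu_log_fault_addr() implements above: faults are compared at page granularity, and the remembered address is cleared again when the containing buffer is unmapped.

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int fault_addr = 0xFFFFFFFF;	/* mirrors pagetable->fault_addr */

/* Returns 1 when the fault falls in the same page as the last logged fault,
 * so the caller may skip the log message; otherwise records the new page. */
static int fault_already_logged(unsigned int addr)
{
	unsigned int page = addr & ~(PAGE_SIZE - 1);

	if (page == fault_addr)
		return 1;
	fault_addr = page;
	return 0;
}

int main(void)
{
	printf("%d\n", fault_already_logged(0x4000A010)); /* 0: first fault */
	printf("%d\n", fault_already_logged(0x4000A800)); /* 1: same page   */
	printf("%d\n", fault_already_logged(0x4000B000)); /* 0: new page    */
	return 0;
}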
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 0458a13..2d48e86 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -114,6 +114,7 @@
} stats;
const struct kgsl_mmu_pt_ops *pt_ops;
unsigned int tlb_flags;
+ unsigned int fault_addr;
void *priv;
};
@@ -211,6 +212,8 @@
uint32_t flags);
int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu,
unsigned int pt_base);
+unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu,
+ unsigned int pt_base, unsigned int addr);
int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
enum kgsl_deviceid id);
void kgsl_mmu_ptpool_destroy(void *ptpool);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index d9dbad8..c716731 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1081,7 +1081,7 @@
}
}
} else if (device->state & (KGSL_STATE_HUNG |
- KGSL_STATE_DUMP_AND_RECOVER)) {
+ KGSL_STATE_DUMP_AND_FT)) {
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
}
@@ -1120,7 +1120,7 @@
break;
case KGSL_STATE_INIT:
case KGSL_STATE_HUNG:
- case KGSL_STATE_DUMP_AND_RECOVER:
+ case KGSL_STATE_DUMP_AND_FT:
if (test_bit(KGSL_PWRFLAGS_CLK_ON,
&device->pwrctrl.power_flags))
break;
@@ -1144,9 +1144,9 @@
mutex_unlock(&device->mutex);
wait_for_completion(&device->hwaccess_gate);
mutex_lock(&device->mutex);
- } else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
+ } else if (device->state == KGSL_STATE_DUMP_AND_FT) {
mutex_unlock(&device->mutex);
- wait_for_completion(&device->recovery_gate);
+ wait_for_completion(&device->ft_gate);
mutex_lock(&device->mutex);
} else if (device->state == KGSL_STATE_SLUMBER)
kgsl_pwrctrl_wake(device);
@@ -1385,7 +1385,7 @@
return "SUSPEND";
case KGSL_STATE_HUNG:
return "HUNG";
- case KGSL_STATE_DUMP_AND_RECOVER:
+ case KGSL_STATE_DUMP_AND_FT:
return "DNR";
case KGSL_STATE_SLUMBER:
return "SLUMBER";
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index dffae70..02ada38 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -237,16 +237,14 @@
void kgsl_pwrscale_busy(struct kgsl_device *device)
{
if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->busy)
- if (device->requested_state != KGSL_STATE_SLUMBER)
- device->pwrscale.policy->busy(device,
- &device->pwrscale);
+ device->pwrscale.policy->busy(device,
+ &device->pwrscale);
}
void kgsl_pwrscale_idle(struct kgsl_device *device)
{
if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->idle)
- if (device->requested_state != KGSL_STATE_SLUMBER &&
- device->requested_state != KGSL_STATE_SLEEP)
+ if (device->state == KGSL_STATE_ACTIVE)
device->pwrscale.policy->idle(device,
&device->pwrscale);
}
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index c4647a1..42e79a0 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -572,10 +572,9 @@
/* Freeze the snapshot on a hang until it gets read */
device->snapshot_frozen = (hang) ? 1 : 0;
- /* log buffer info to aid in ramdump recovery */
- KGSL_DRV_ERR(device, "snapshot created at va %p pa %lx size %d\n",
- device->snapshot, __pa(device->snapshot),
- device->snapshot_size);
+ /* log buffer info to aid in ramdump fault tolerance */
+ KGSL_DRV_ERR(device, "snapshot created at pa %lx size %d\n",
+ __pa(device->snapshot), device->snapshot_size);
if (hang)
sysfs_notify(&device->snapshot_kobj, NULL, "timestamp");
return 0;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 9ed3d53..41f79be 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -693,8 +693,8 @@
spin_lock_irq(&client->buffer_lock);
client->use_wake_lock = false;
- wake_lock_destroy(&client->wake_lock);
spin_unlock_irq(&client->buffer_lock);
+ wake_lock_destroy(&client->wake_lock);
return 0;
}
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index 29cf0c1..44146a4 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -88,7 +88,7 @@
return 0;
of_node_put(np);
- ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_PTBL_SIZE, &spare,
+ ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &spare,
sizeof(spare), psize, sizeof(psize));
if (ret) {
pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
@@ -111,7 +111,7 @@
pinit.paddr = virt_to_phys(buf);
pinit.size = psize[0];
- ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_PTBL_INIT, &pinit,
+ ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &pinit,
sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
if (ret) {
pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
@@ -142,7 +142,7 @@
cfg.id = sec_id;
- ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
+ ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
&scm_ret, sizeof(scm_ret));
if (ret || scm_ret) {
pr_err("scm call IOMMU_SECURE_CFG failed\n");
@@ -167,7 +167,7 @@
map.info.va = va;
map.info.size = len;
- if (scm_call(SCM_SVC_CP, IOMMU_SECURE_MAP, &map, sizeof(map), &ret,
+ if (scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP, &map, sizeof(map), &ret,
sizeof(ret)))
return -EINVAL;
if (ret)
@@ -242,7 +242,7 @@
map.plist.size = SZ_1M;
}
- ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_MAP, &map, sizeof(map),
+ ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP, &map, sizeof(map),
&scm_ret, sizeof(scm_ret));
kfree(pa_list);
return ret;
@@ -260,7 +260,7 @@
mi.va = va;
mi.size = len;
- ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_UNMAP, &mi, sizeof(mi),
+ ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP, &mi, sizeof(mi),
&scm_ret, sizeof(scm_ret));
return ret;
}
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 52b7994..dce37e5 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -121,6 +121,7 @@
events->notified_index = 0;
events->bytes_read_no_event = 0;
events->current_event_data_size = 0;
+ events->wakeup_events_counter = 0;
}
static inline void dvb_dmxdev_flush_output(struct dvb_ringbuffer *buffer,
@@ -222,6 +223,10 @@
int new_write_index;
int data_event;
+ /* Check if the event is disabled */
+ if (events->event_mask.disable_mask & event->type)
+ return 0;
+
/* Check if we are adding an event that user already read its data */
if (events->bytes_read_no_event) {
data_event = 1;
@@ -241,7 +246,7 @@
if (data_event) {
if (res) {
/*
- * Data relevent to this event was fully
+ * Data relevant to this event was fully
* consumed already, discard event.
*/
events->bytes_read_no_event -= res;
@@ -266,6 +271,9 @@
events->queue[events->write_index] = *event;
events->write_index = new_write_index;
+ if (!(events->event_mask.no_wakeup_mask & event->type))
+ events->wakeup_events_counter++;
+
return 0;
}
@@ -280,6 +288,9 @@
events->notified_index =
dvb_dmxdev_advance_event_idx(events->notified_index);
+ if (!(events->event_mask.no_wakeup_mask & event->type))
+ events->wakeup_events_counter--;
+
return 0;
}
@@ -291,6 +302,13 @@
int data_event;
/*
+ * If data events are not enabled on this filter,
+ * there's nothing to update.
+ */
+ if (events->data_read_event_masked)
+ return 0;
+
+ /*
* Go through all events that were notified and
* remove them from the events queue if their respective
* data was read.
@@ -364,7 +382,7 @@
if (data_event) {
if (res) {
/*
- * Data relevent to this event was
+ * Data relevant to this event was
* fully consumed, remove it from the queue.
*/
bytes_read -= res;
@@ -616,6 +634,9 @@
}
dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
dvb_dmxdev_flush_events(&dmxdev->dvr_output_events);
+ dmxdev->dvr_output_events.event_mask.disable_mask = 0;
+ dmxdev->dvr_output_events.event_mask.no_wakeup_mask = 0;
+ dmxdev->dvr_output_events.event_mask.wakeup_threshold = 1;
dmxdev->dvr_feeds_count = 0;
dmxdev->dvr_buffer_mode = DMX_BUFFER_MODE_INTERNAL;
dmxdev->dvr_priv_buff_handle = NULL;
@@ -1429,24 +1450,58 @@
static int dvb_dmxdev_reuse_decoder_buf(struct dmxdev_filter *dmxdevfilter,
int cookie)
{
- if ((dmxdevfilter->type == DMXDEV_TYPE_PES) &&
- (dmxdevfilter->params.pes.output == DMX_OUT_DECODER)) {
- struct dmxdev_feed *feed;
- int ret = -ENODEV;
+ struct dmxdev_feed *feed;
- /* Only one feed should be in the list in case of decoder */
- feed = list_first_entry(&dmxdevfilter->feed.ts,
- struct dmxdev_feed, next);
+ if ((dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+ (dmxdevfilter->params.pes.output != DMX_OUT_DECODER) ||
+ (dmxdevfilter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_ES_DATA))
+ return -EPERM;
- if (feed->ts->reuse_decoder_buffer)
- ret = feed->ts->reuse_decoder_buffer(
- feed->ts,
- cookie);
+ /* Only one feed should be in the list in case of decoder */
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
- return ret;
- }
+ if (feed->ts->reuse_decoder_buffer)
+ return feed->ts->reuse_decoder_buffer(feed->ts, cookie);
- return -EPERM;
+ return -ENODEV;
+}
+
+static int dvb_dmxdev_set_event_mask(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_events_mask *event_mask)
+{
+ if (!event_mask ||
+ (event_mask->wakeup_threshold >= DMX_EVENT_QUEUE_SIZE))
+ return -EINVAL;
+
+ if (dmxdevfilter->state == DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ /*
+ * Overflow event is not allowed to be masked.
+ * This is because if overflow occurs, demux stops outputting data
+ * until user is notified. If user is using events to read the data,
+ * the overflow event must be always enabled or otherwise we would
+ * never recover from overflow state.
+ */
+ event_mask->disable_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
+ event_mask->no_wakeup_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
+
+ dmxdevfilter->events.event_mask = *event_mask;
+
+ return 0;
+}
+
+static int dvb_dmxdev_get_event_mask(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_events_mask *event_mask)
+{
+ if (!event_mask)
+ return -EINVAL;
+
+ *event_mask = dmxdevfilter->events.event_mask;
+
+ return 0;
}
static int dvb_dmxdev_ts_fullness_callback(
@@ -1708,11 +1763,13 @@
}
/*
- * Decoder filters have no data in the data buffer and their
- * events can be removed now from the queue.
+ * If no-data events are enabled on this filter,
+ * the events can be removed from the queue when
+ * user gets them.
+ * For filters with data events enabled, the event is removed
+ * from the queue only when the respective data is read.
*/
- if ((dmxdevfilter->type == DMXDEV_TYPE_PES) &&
- (dmxdevfilter->params.pes.output == DMX_OUT_DECODER))
+ if (dmxdevfilter->events.data_read_event_masked)
dmxdevfilter->events.read_index =
dvb_dmxdev_advance_event_idx(
dmxdevfilter->events.read_index);
@@ -2538,6 +2595,9 @@
(*secfilter)->filter_mask[2] = 0;
filter->todo = 0;
+ filter->events.data_read_event_masked =
+ filter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_SECTION;
ret = filter->feed.sec.feed->start_filtering(
filter->feed.sec.feed);
@@ -2558,6 +2618,21 @@
filter->params.pes.rec_chunk_size =
filter->buffer.size >> 2;
+ if (filter->params.pes.output == DMX_OUT_TS_TAP)
+ dmxdev->dvr_output_events.data_read_event_masked =
+ dmxdev->dvr_output_events.event_mask.disable_mask &
+ DMX_EVENT_NEW_REC_CHUNK;
+ else if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ filter->events.data_read_event_masked =
+ filter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_REC_CHUNK;
+ else if (filter->params.pes.output == DMX_OUT_TAP)
+ filter->events.data_read_event_masked =
+ filter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_PES;
+ else
+ filter->events.data_read_event_masked = 1;
+
ret = 0;
list_for_each_entry(feed, &filter->feed.ts, next) {
ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
@@ -2627,6 +2702,9 @@
dmxdevfilter->priv_buff_handle = NULL;
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
dvb_dmxdev_flush_events(&dmxdevfilter->events);
+ dmxdevfilter->events.event_mask.disable_mask = DMX_EVENT_NEW_ES_DATA;
+ dmxdevfilter->events.event_mask.no_wakeup_mask = 0;
+ dmxdevfilter->events.event_mask.wakeup_threshold = 1;
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
@@ -3223,6 +3301,24 @@
mutex_unlock(&dmxdevfilter->mutex);
break;
+ case DMX_SET_EVENTS_MASK:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_event_mask(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_GET_EVENTS_MASK:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_event_mask(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -3258,10 +3354,9 @@
if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
mask |= (POLLIN | POLLRDNORM);
- if (dmxdevfilter->events.notified_index !=
- dmxdevfilter->events.write_index) {
+ if (dmxdevfilter->events.wakeup_events_counter >=
+ dmxdevfilter->events.event_mask.wakeup_threshold)
mask |= POLLPRI;
- }
return mask;
}
@@ -3430,8 +3525,8 @@
if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
mask |= (POLLIN | POLLRDNORM);
- if (dmxdev->dvr_output_events.notified_index !=
- dmxdev->dvr_output_events.write_index)
+ if (dmxdev->dvr_output_events.wakeup_events_counter >=
+ dmxdev->dvr_output_events.event_mask.wakeup_threshold)
mask |= POLLPRI;
} else {
poll_wait(file, &dmxdev->dvr_input_buffer.queue, wait);
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index a55b4f0..1443de5 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -68,8 +68,8 @@
struct dmx_section_feed *feed;
};
-struct dmxdev_events_queue {
#define DMX_EVENT_QUEUE_SIZE 500 /* number of events */
+struct dmxdev_events_queue {
/*
* indices used to manage events queue.
* read_index advanced when relevent data is read
@@ -94,6 +94,22 @@
u32 current_event_data_size;
u32 current_event_start_offset;
+ /* current setting of the events masking */
+ struct dmx_events_mask event_mask;
+
+ /*
+ * indicates if an event used for data-reading from demux
+ * filter is enabled or not. These are events on which
+ * user may wait for before calling read() on the demux filter.
+ */
+ int data_read_event_masked;
+
+ /*
+ * holds the current number of pending events in the
+ * events queue that are considered as a wake-up source
+ */
+ u32 wakeup_events_counter;
+
struct dmx_filter_event queue[DMX_EVENT_QUEUE_SIZE];
};
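[Illustration, not part of the patch] The new per-filter event mask is configured with the DMX_SET_EVENTS_MASK ioctl added in dmxdev.c above. A hedged userspace sketch follows; the header and device node are assumptions (the MSM demux extensions are expected to live in linux/dvb/dmx.h and the filter to be opened through the usual demux device), and the event names are the ones referenced by the patch. Note that the driver keeps the buffer-overflow event enabled regardless of the mask, rejects thresholds at or above DMX_EVENT_QUEUE_SIZE, and refuses changes while the filter is running.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>	/* assumed to carry struct dmx_events_mask and
				 * the DMX_SET/GET_EVENTS_MASK ioctls */

int main(void)
{
	struct dmx_events_mask mask = {
		.disable_mask     = DMX_EVENT_NEW_ES_DATA, /* drop ES-data events entirely */
		.no_wakeup_mask   = DMX_EVENT_NEW_PES,     /* queue PES events without waking poll() */
		.wakeup_threshold = 4,	/* POLLPRI once 4 wake-up events are pending */
	};
	int fd = open("/dev/dvb/adapter0/demux0", O_RDWR); /* assumed node */

	if (fd < 0 || ioctl(fd, DMX_SET_EVENTS_MASK, &mask) < 0) {
		perror("DMX_SET_EVENTS_MASK");
		return 1;
	}
	close(fd);
	return 0;
}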
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index b136125..a786750 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -120,7 +120,6 @@
{"camss_csi_vfe_clk", -1},
{"iface_clk", -1},
{"bus_clk", -1},
- {"alt_bus_clk", -1},
};
static void msm_vfe40_init_qos_parms(struct vfe_device *vfe_dev)
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 691edc3..ddf4c67 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -59,6 +59,79 @@
false : true;
}
+static struct msm_cam_clk_info ispif_8960_clk_info[] = {
+ {"csi_pix_clk", 0},
+ {"csi_rdi_clk", 0},
+ {"csi_pix1_clk", 0},
+ {"csi_rdi1_clk", 0},
+ {"csi_rdi2_clk", 0},
+};
+
+static struct msm_cam_clk_info ispif_8974_clk_info_vfe0[] = {
+ {"camss_vfe_vfe_clk", -1},
+ {"camss_csi_vfe_clk", -1},
+};
+
+static struct msm_cam_clk_info ispif_8974_clk_info_vfe1[] = {
+ {"camss_vfe_vfe_clk1", -1},
+ {"camss_csi_vfe_clk1", -1},
+};
+
+static int msm_ispif_clk_enable(struct ispif_device *ispif,
+ enum msm_ispif_vfe_intf vfe_intf, int enable)
+{
+ int rc = 0;
+
+ if (enable)
+ pr_debug("enable clk for VFE%d\n", vfe_intf);
+ else
+ pr_debug("disable clk for VFE%d\n", vfe_intf);
+
+ if (ispif->csid_version < CSID_VERSION_V2) {
+ rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
+ ispif->ispif_clk[vfe_intf], 2, enable);
+ if (rc) {
+ pr_err("%s: cannot enable clock, error = %d\n",
+ __func__, rc);
+ goto end;
+ }
+ } else if (ispif->csid_version == CSID_VERSION_V2) {
+ rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
+ ispif->ispif_clk[vfe_intf],
+ ARRAY_SIZE(ispif_8960_clk_info),
+ enable);
+ if (rc) {
+ pr_err("%s: cannot enable clock, error = %d\n",
+ __func__, rc);
+ goto end;
+ }
+ } else if (ispif->csid_version >= CSID_VERSION_V3) {
+ if (vfe_intf == VFE0) {
+ rc = msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_8974_clk_info_vfe0,
+ ispif->ispif_clk[vfe_intf],
+ ARRAY_SIZE(ispif_8974_clk_info_vfe0), enable);
+ } else {
+ rc = msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_8974_clk_info_vfe1,
+ ispif->ispif_clk[vfe_intf],
+ ARRAY_SIZE(ispif_8974_clk_info_vfe1), enable);
+ }
+ if (rc) {
+ pr_err("%s: cannot enable clock, error = %d\n",
+ __func__, rc);
+ goto end;
+ }
+ } else {
+ pr_err("%s: unsupported version=%d\n", __func__,
+ ispif->csid_version);
+ goto end;
+ }
+
+end:
+ return rc;
+}
+
static int msm_ispif_intf_reset(struct ispif_device *ispif,
struct msm_ispif_param_data *params)
{
@@ -97,8 +170,8 @@
unsigned long flags;
spin_lock_irqsave(&ispif->auto_complete_lock, flags);
- ispif->wait_timeout = 0;
- init_completion(&ispif->reset_complete);
+ ispif->wait_timeout[params->vfe_intf] = 0;
+ init_completion(&ispif->reset_complete[params->vfe_intf]);
spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
if (params->vfe_intf == VFE0)
@@ -106,14 +179,15 @@
else
msm_camera_io_w(data, ispif->base +
ISPIF_RST_CMD_1_ADDR);
+
lrc = wait_for_completion_interruptible_timeout(
- &ispif->reset_complete, jiffes);
+ &ispif->reset_complete[params->vfe_intf], jiffes);
if (lrc < 0 || !lrc) {
pr_err("%s: wait timeout ret = %ld\n", __func__, lrc);
rc = -EIO;
spin_lock_irqsave(&ispif->auto_complete_lock, flags);
- ispif->wait_timeout = 1;
+ ispif->wait_timeout[params->vfe_intf] = 1;
spin_unlock_irqrestore(
&ispif->auto_complete_lock, flags);
}
@@ -129,8 +203,12 @@
unsigned long flags;
spin_lock_irqsave(&ispif->auto_complete_lock, flags);
- ispif->wait_timeout = 0;
- init_completion(&ispif->reset_complete);
+ ispif->wait_timeout[VFE0] = 0;
+ init_completion(&ispif->reset_complete[VFE0]);
+ if (ispif->csid_version >= CSID_VERSION_V3) {
+ ispif->wait_timeout[VFE1] = 0;
+ init_completion(&ispif->reset_complete[VFE1]);
+ }
spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
BUG_ON(!ispif);
@@ -139,22 +217,40 @@
msm_camera_io_w(ISPIF_RST_CMD_MASK, ispif->base + ISPIF_RST_CMD_ADDR);
- if (ispif->csid_version >= CSID_VERSION_V3)
- msm_camera_io_w_mb(ISPIF_RST_CMD_1_MASK, ispif->base +
- ISPIF_RST_CMD_1_ADDR);
-
lrc = wait_for_completion_interruptible_timeout(
- &ispif->reset_complete, jiffes);
+ &ispif->reset_complete[VFE0], jiffes);
if (lrc < 0 || !lrc) {
pr_err("%s: wait timeout ret = %ld\n", __func__, lrc);
rc = -EIO;
spin_lock_irqsave(&ispif->auto_complete_lock, flags);
- ispif->wait_timeout = 1;
+ ispif->wait_timeout[VFE0] = 1;
spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
+
+ goto end;
}
+ if (ispif->csid_version >= CSID_VERSION_V3) {
+ msm_camera_io_w_mb(ISPIF_RST_CMD_1_MASK, ispif->base +
+ ISPIF_RST_CMD_1_ADDR);
+
+ lrc = wait_for_completion_interruptible_timeout(
+ &ispif->reset_complete[VFE1], jiffes);
+
+ if (lrc < 0 || !lrc) {
+ pr_err("%s: wait timeout ret = %ld\n", __func__, lrc);
+ rc = -EIO;
+
+ spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+ ispif->wait_timeout[VFE1] = 1;
+ spin_unlock_irqrestore(&ispif->auto_complete_lock,
+ flags);
+ }
+
+ }
+
+end:
return rc;
}
@@ -181,20 +277,19 @@
}
if (ispif->csid_version <= CSID_VERSION_V2) {
- if (ispif->ispif_clk[intftype] == NULL) {
+ if (ispif->ispif_clk[vfe_intf][intftype] == NULL) {
CDBG("%s: ispif NULL clk\n", __func__);
return;
}
- rc = clk_set_rate(ispif->ispif_clk[intftype], csid);
+ rc = clk_set_rate(ispif->ispif_clk[vfe_intf][intftype], csid);
if (rc) {
pr_err("%s: clk_set_rate failed %d\n", __func__, rc);
return;
}
}
- data = msm_camera_io_r(ispif->base + ISPIF_INPUT_SEL_ADDR +
- (0x200 * vfe_intf));
+ data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_INPUT_SEL(vfe_intf));
switch (intftype) {
case PIX0:
data &= ~(BIT(1) | BIT(0));
@@ -218,8 +313,8 @@
break;
}
if (data)
- msm_camera_io_w_mb(data, ispif->base + ISPIF_INPUT_SEL_ADDR +
- (0x200 * vfe_intf));
+ msm_camera_io_w_mb(data, ispif->base +
+ ISPIF_VFE_m_INPUT_SEL(vfe_intf));
}
static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
@@ -236,19 +331,19 @@
switch (intftype) {
case PIX0:
- intf_addr = ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf);
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 0);
break;
case RDI0:
- intf_addr = ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf);
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 0);
break;
case PIX1:
- intf_addr = ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf);
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 1);
break;
case RDI1:
- intf_addr = ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf);
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 1);
break;
case RDI2:
- intf_addr = ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf);
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 2);
break;
default:
pr_err("%s: invalid intftype=%d\n", __func__, intftype);
@@ -280,23 +375,23 @@
switch (intftype) {
case PIX0:
data = msm_camera_io_r(ispif->base +
- ISPIF_PIX_0_STATUS_ADDR + (0x200 * vfe_intf));
+ ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0));
break;
case RDI0:
data = msm_camera_io_r(ispif->base +
- ISPIF_RDI_0_STATUS_ADDR + (0x200 * vfe_intf));
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0));
break;
case PIX1:
data = msm_camera_io_r(ispif->base +
- ISPIF_PIX_1_STATUS_ADDR + (0x200 * vfe_intf));
+ ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1));
break;
case RDI1:
data = msm_camera_io_r(ispif->base +
- ISPIF_RDI_1_STATUS_ADDR + (0x200 * vfe_intf));
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1));
break;
case RDI2:
data = msm_camera_io_r(ispif->base +
- ISPIF_RDI_2_STATUS_ADDR + (0x200 * vfe_intf));
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2));
break;
}
if ((data & 0xf) != 0xf)
@@ -330,14 +425,22 @@
BUG_ON(!params);
vfe_intf = params->vfe_intf;
+
+ rc = msm_ispif_clk_enable(ispif, vfe_intf, 1);
+ if (rc < 0) {
+ pr_err("%s: unable to enable clocks for VFE%d", __func__,
+ vfe_intf);
+ return rc;
+ }
+
if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
pr_err("%s: invalid interface type\n", __func__);
return -EINVAL;
}
- msm_camera_io_w(0x0, ispif->base + ISPIF_IRQ_MASK_ADDR);
- msm_camera_io_w(0x0, ispif->base + ISPIF_IRQ_MASK_1_ADDR);
- msm_camera_io_w_mb(0x0, ispif->base + ISPIF_IRQ_MASK_2_ADDR);
+ msm_camera_io_w(0x0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+ msm_camera_io_w(0x0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+ msm_camera_io_w_mb(0x0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
for (i = 0; i < params->num; i++) {
intftype = params->entries[i].intftype;
@@ -371,26 +474,29 @@
}
msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
- ISPIF_IRQ_MASK_ADDR);
+ ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
- ISPIF_IRQ_CLEAR_ADDR);
+ ISPIF_VFE_m_IRQ_CLEAR_0(vfe_intf));
msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
- ISPIF_IRQ_MASK_1_ADDR);
+ ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
- ISPIF_IRQ_CLEAR_1_ADDR);
+ ISPIF_VFE_m_IRQ_CLEAR_1(vfe_intf));
msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
- ISPIF_IRQ_MASK_2_ADDR);
+ ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
- ISPIF_IRQ_CLEAR_2_ADDR);
+ ISPIF_VFE_m_IRQ_CLEAR_2(vfe_intf));
msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ rc = msm_ispif_clk_enable(ispif, vfe_intf, 0);
+
return rc;
}
@@ -436,14 +542,12 @@
/* cmd for PIX0, PIX1, RDI0, RDI1 */
if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF) {
msm_camera_io_w_mb(ispif->applied_intf_cmd[vfe_intf].intf_cmd,
- ispif->base + ISPIF_INTF_CMD_ADDR +
- (0x200 * vfe_intf));
+ ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe_intf));
}
/* cmd for RDI2 */
if (ispif->applied_intf_cmd[vfe_intf].intf_cmd1 != 0xFFFFFFFF)
msm_camera_io_w_mb(ispif->applied_intf_cmd[vfe_intf].intf_cmd1,
- ispif->base + ISPIF_INTF_CMD_1_ADDR +
- (0x200 * vfe_intf));
+ ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe_intf));
}
static int msm_ispif_stop_immediately(struct ispif_device *ispif,
@@ -464,6 +568,7 @@
msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
cid_mask, params->vfe_intf, 0);
}
+
return rc;
}
@@ -472,6 +577,13 @@
{
int rc;
+ rc = msm_ispif_clk_enable(ispif, params->vfe_intf, 1);
+ if (rc < 0) {
+ pr_err("%s: unable to enable clocks for VFE%d", __func__,
+ params->vfe_intf);
+ return rc;
+ }
+
rc = msm_ispif_intf_reset(ispif, params);
if (rc) {
pr_err("%s: msm_ispif_intf_reset failed. rc=%d\n",
@@ -480,6 +592,9 @@
}
msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+
+ msm_ispif_clk_enable(ispif, params->vfe_intf, 0);
+
return rc;
}
@@ -489,11 +604,21 @@
int i, rc = 0;
uint16_t cid_mask = 0;
uint32_t intf_addr;
+ enum msm_ispif_vfe_intf vfe_intf;
BUG_ON(!ispif);
BUG_ON(!params);
- if (!msm_ispif_is_intf_valid(ispif->csid_version, params->vfe_intf)) {
+ vfe_intf = params->vfe_intf;
+
+ rc = msm_ispif_clk_enable(ispif, params->vfe_intf, 1);
+ if (rc < 0) {
+ pr_err("%s: unable to enable clocks for VFE%d", __func__,
+ params->vfe_intf);
+ return rc;
+ }
+
+ if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
pr_err("%s: invalid interface type\n", __func__);
return -EINVAL;
}
@@ -507,24 +632,19 @@
switch (params->entries[i].intftype) {
case PIX0:
- intf_addr = ISPIF_PIX_0_STATUS_ADDR +
- (0x200 * params->vfe_intf);
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
break;
case RDI0:
- intf_addr = ISPIF_RDI_0_STATUS_ADDR +
- (0x200 * params->vfe_intf);
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
break;
case PIX1:
- intf_addr = ISPIF_PIX_1_STATUS_ADDR +
- (0x200 * params->vfe_intf);
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
break;
case RDI1:
- intf_addr = ISPIF_RDI_1_STATUS_ADDR +
- (0x200 * params->vfe_intf);
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
break;
case RDI2:
- intf_addr = ISPIF_RDI_2_STATUS_ADDR +
- (0x200 * params->vfe_intf);
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
break;
default:
pr_err("%s: invalid intftype=%d\n", __func__,
@@ -539,8 +659,11 @@
/* disable CIDs in CID_MASK register */
msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
- cid_mask, params->vfe_intf, 0);
+ cid_mask, vfe_intf, 0);
}
+
+ msm_ispif_clk_enable(ispif, vfe_intf, 0);
+
return rc;
}
@@ -577,26 +700,26 @@
BUG_ON(!out);
out[VFE0].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
- ISPIF_IRQ_STATUS_ADDR);
+ ISPIF_VFE_m_IRQ_STATUS_0(VFE0));
msm_camera_io_w(out[VFE0].ispifIrqStatus0,
- ispif->base + ISPIF_IRQ_CLEAR_ADDR);
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE0));
out[VFE0].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
- ISPIF_IRQ_STATUS_1_ADDR);
+ ISPIF_VFE_m_IRQ_STATUS_1(VFE0));
msm_camera_io_w(out[VFE0].ispifIrqStatus1,
- ispif->base + ISPIF_IRQ_CLEAR_1_ADDR);
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE0));
out[VFE0].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
- ISPIF_IRQ_STATUS_2_ADDR);
+ ISPIF_VFE_m_IRQ_STATUS_2(VFE0));
msm_camera_io_w_mb(out[VFE0].ispifIrqStatus2,
- ispif->base + ISPIF_IRQ_CLEAR_2_ADDR);
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE0));
if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ) {
unsigned long flags;
spin_lock_irqsave(&ispif->auto_complete_lock, flags);
- if (ispif->wait_timeout == 0)
- complete(&ispif->reset_complete);
+ if (ispif->wait_timeout[VFE0] == 0)
+ complete(&ispif->reset_complete[VFE0]);
spin_unlock_irqrestore(
&ispif->auto_complete_lock, flags);
}
@@ -617,19 +740,29 @@
}
if (ispif->csid_version >= CSID_VERSION_V3) {
out[VFE1].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
- ISPIF_IRQ_STATUS_ADDR + 0x200);
+ ISPIF_VFE_m_IRQ_STATUS_0(VFE1));
msm_camera_io_w(out[VFE1].ispifIrqStatus0,
- ispif->base + ISPIF_IRQ_CLEAR_ADDR + 0x200);
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE1));
out[VFE1].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
- ISPIF_IRQ_STATUS_1_ADDR + 0x200);
+ ISPIF_VFE_m_IRQ_STATUS_1(VFE1));
msm_camera_io_w(out[VFE1].ispifIrqStatus1,
- ispif->base + ISPIF_IRQ_CLEAR_1_ADDR + 0x200);
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE1));
out[VFE1].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
- ISPIF_IRQ_STATUS_2_ADDR + 0x200);
+ ISPIF_VFE_m_IRQ_STATUS_2(VFE1));
msm_camera_io_w_mb(out[VFE1].ispifIrqStatus2,
- ispif->base + ISPIF_IRQ_CLEAR_2_ADDR + 0x200);
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE1));
+
+
+ if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ) {
+ unsigned long flags;
+ spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+ if (ispif->wait_timeout[VFE1] == 0)
+ complete(&ispif->reset_complete[VFE1]);
+ spin_unlock_irqrestore(
+ &ispif->auto_complete_lock, flags);
+ }
if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
pr_err("%s: VFE1 pix0 overflow.\n", __func__);
@@ -657,20 +790,6 @@
return IRQ_HANDLED;
}
-static struct msm_cam_clk_info ispif_8960_clk_info[] = {
- {"csi_pix_clk", 0},
- {"csi_rdi_clk", 0},
- {"csi_pix1_clk", 0},
- {"csi_rdi1_clk", 0},
- {"csi_rdi2_clk", 0},
-};
-static struct msm_cam_clk_info ispif_8974_clk_info[] = {
- {"camss_vfe_vfe_clk", -1},
- {"camss_csi_vfe_clk", -1},
- {"camss_vfe_vfe_clk1", -1},
- {"camss_csi_vfe_clk1", -1},
-};
-
static int msm_ispif_init(struct ispif_device *ispif,
uint32_t csid_version)
{
@@ -693,41 +812,27 @@
memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
ispif->csid_version = csid_version;
- if (ispif->csid_version < CSID_VERSION_V2) {
- rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
- ispif->ispif_clk, 2, 1);
- if (rc) {
- pr_err("%s: cannot enable clock, error = %d\n",
- __func__, rc);
- goto end;
- }
- } else if (ispif->csid_version == CSID_VERSION_V2) {
- rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
- ispif->ispif_clk, ARRAY_SIZE(ispif_8960_clk_info), 1);
- if (rc) {
- pr_err("%s: cannot enable clock, error = %d\n",
- __func__, rc);
- goto end;
- }
- } else if (ispif->csid_version >= CSID_VERSION_V3) {
- rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8974_clk_info,
- ispif->ispif_clk, ARRAY_SIZE(ispif_8974_clk_info), 1);
- if (rc) {
- pr_err("%s: cannot enable clock, error = %d\n",
- __func__, rc);
- goto end;
- }
- } else {
- pr_err("%s: unsupported version=%d\n", __func__,
- ispif->csid_version);
- goto end;
+ rc = msm_ispif_clk_enable(ispif, VFE0, 1);
+ if (rc < 0) {
+ pr_err("%s: unable to enable clocks for VFE0", __func__);
+ goto error_clk0;
}
+
+ if (ispif->csid_version >= CSID_VERSION_V3) {
+ rc = msm_ispif_clk_enable(ispif, VFE1, 1);
+ if (rc < 0) {
+ pr_err("%s: unable to enable clocks for VFE1",
+ __func__);
+ goto error_clk1;
+ }
+ }
+
ispif->base = ioremap(ispif->mem->start,
resource_size(ispif->mem));
if (!ispif->base) {
rc = -ENOMEM;
pr_err("%s: nomem\n", __func__);
- goto error_clk;
+ goto end;
}
rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
IRQF_TRIGGER_RISING, "ispif", ispif);
@@ -745,23 +850,21 @@
free_irq(ispif->irq->start, ispif);
error_irq:
iounmap(ispif->base);
-error_clk:
- if (ispif->csid_version < CSID_VERSION_V2) {
- msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
- ispif->ispif_clk, 2, 0);
- } else if (ispif->csid_version == CSID_VERSION_V2) {
- msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
- ispif->ispif_clk, ARRAY_SIZE(ispif_8960_clk_info), 0);
- } else if (ispif->csid_version >= CSID_VERSION_V3) {
- msm_cam_clk_enable(&ispif->pdev->dev, ispif_8974_clk_info,
- ispif->ispif_clk, ARRAY_SIZE(ispif_8974_clk_info), 0);
- }
+
end:
+ if (ispif->csid_version >= CSID_VERSION_V3)
+ msm_ispif_clk_enable(ispif, VFE1, 0);
+
+error_clk1:
+ msm_ispif_clk_enable(ispif, VFE0, 0);
+
+error_clk0:
return rc;
}
static void msm_ispif_release(struct ispif_device *ispif)
{
+ int i;
BUG_ON(!ispif);
if (ispif->ispif_state != ISPIF_POWER_UP) {
@@ -770,6 +873,9 @@
return;
}
+ for (i = 0; i < VFE_MAX; i++)
+ msm_ispif_clk_enable(ispif, i, 1);
+
/* make sure no streaming going on */
msm_ispif_reset(ispif);
@@ -777,16 +883,9 @@
iounmap(ispif->base);
- if (ispif->csid_version < CSID_VERSION_V2) {
- msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
- ispif->ispif_clk, 2, 0);
- } else if (ispif->csid_version == CSID_VERSION_V2) {
- msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
- ispif->ispif_clk, ARRAY_SIZE(ispif_8960_clk_info), 0);
- } else if (ispif->csid_version >= CSID_VERSION_V3) {
- msm_cam_clk_enable(&ispif->pdev->dev, ispif_8974_clk_info,
- ispif->ispif_clk, ARRAY_SIZE(ispif_8974_clk_info), 0);
- }
+ for (i = 0; i < VFE_MAX; i++)
+ msm_ispif_clk_enable(ispif, i, 0);
+
ispif->ispif_state = ISPIF_POWER_DOWN;
}
@@ -801,7 +900,6 @@
BUG_ON(!pcdata);
mutex_lock(&ispif->mutex);
-
switch (pcdata->cfg_type) {
case ISPIF_ENABLE_REG_DUMP:
ispif->enb_dump_reg = pcdata->reg_dump; /* save dump config */
@@ -903,6 +1001,7 @@
{
int rc;
struct ispif_device *ispif;
+ int i;
ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
if (!ispif) {
@@ -961,7 +1060,9 @@
ispif->ispif_state = ISPIF_POWER_DOWN;
ispif->open_cnt = 0;
spin_lock_init(&ispif->auto_complete_lock);
- ispif->wait_timeout = 0;
+ for (i = 0; i < VFE_MAX; i++)
+ ispif->wait_timeout[i] = 0;
+
return 0;
error:
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index f8c3cce..ef7a1bf 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -47,15 +47,15 @@
void __iomem *base;
struct mutex mutex;
uint8_t start_ack_pending;
- struct completion reset_complete;
+ struct completion reset_complete[VFE_MAX];
spinlock_t auto_complete_lock;
- uint8_t wait_timeout;
+ uint8_t wait_timeout[VFE_MAX];
uint32_t csid_version;
int enb_dump_reg;
uint32_t open_cnt;
struct ispif_sof_count sof_count[VFE_MAX];
struct ispif_intf_cmd applied_intf_cmd[VFE_MAX];
enum msm_ispif_state_t ispif_state;
- struct clk *ispif_clk[INTF_MAX];
+ struct clk *ispif_clk[VFE_MAX][INTF_MAX];
};
#endif
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
index cdbebea..afd91d1 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
@@ -14,32 +14,41 @@
#define __MSM_ISPIF_HWREG_V1_H__
/* common registers */
-#define ISPIF_RST_CMD_ADDR 0x0000
-#define ISPIF_RST_CMD_1_ADDR 0x0000 /* undefined */
-#define ISPIF_INTF_CMD_ADDR 0x0004
-#define ISPIF_INTF_CMD_1_ADDR 0x0030
-#define ISPIF_CTRL_ADDR 0x0008
-#define ISPIF_INPUT_SEL_ADDR 0x000C
-#define ISPIF_PIX_0_INTF_CID_MASK_ADDR 0x0010
-#define ISPIF_RDI_0_INTF_CID_MASK_ADDR 0x0014
-#define ISPIF_PIX_1_INTF_CID_MASK_ADDR 0x0038
-#define ISPIF_RDI_1_INTF_CID_MASK_ADDR 0x003C
-#define ISPIF_RDI_2_INTF_CID_MASK_ADDR 0x0044
-#define ISPIF_PIX_0_STATUS_ADDR 0x0024
-#define ISPIF_RDI_0_STATUS_ADDR 0x0028
-#define ISPIF_PIX_1_STATUS_ADDR 0x0060
-#define ISPIF_RDI_1_STATUS_ADDR 0x0064
-#define ISPIF_RDI_2_STATUS_ADDR 0x006C
-#define ISPIF_IRQ_MASK_ADDR 0x0100
-#define ISPIF_IRQ_CLEAR_ADDR 0x0104
-#define ISPIF_IRQ_STATUS_ADDR 0x0108
-#define ISPIF_IRQ_MASK_1_ADDR 0x010C
-#define ISPIF_IRQ_CLEAR_1_ADDR 0x0110
-#define ISPIF_IRQ_STATUS_1_ADDR 0x0114
-#define ISPIF_IRQ_MASK_2_ADDR 0x0118
-#define ISPIF_IRQ_CLEAR_2_ADDR 0x011C
-#define ISPIF_IRQ_STATUS_2_ADDR 0x0120
-#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x0124
+#define ISPIF_RST_CMD_ADDR 0x0000
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x0124
+
+#define ISPIF_VFE(m) (0x0)
+
+#define ISPIF_VFE_m_CTRL_0(m) (0x0008 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x0100 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x010C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x0118 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x0108 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x0114 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x0120 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x0104 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x0110 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x011C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INPUT_SEL(m) (0x000C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_0(m) (0x0004 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_1(m) (0x0030 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) (0x0010 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x0014 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n) (0x0290 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n) (0x0298 + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n) (0x029C + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) (0x0024 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x0028 + ISPIF_VFE(m) + 4*(n))
+
+/* Defines for compatibility with newer ISPIF versions */
+#define ISPIF_RST_CMD_1_ADDR (0x0000)
+#define ISPIF_VFE_m_PIX_INTF_n_CROP(m, n) (0x0000 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_THRESHOLD(m) (0x0000 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_OUTPUT_SEL(m) (0x0000 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_3D_DESKEW_SIZE(m) (0x0000 + ISPIF_VFE(m))
+
+
+
/*ISPIF RESET BITS*/
#define VFE_CLK_DOMAIN_RST BIT(31)
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
index 37b19f5..80b32d4 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
@@ -16,42 +16,34 @@
/* common registers */
#define ISPIF_RST_CMD_ADDR 0x008
#define ISPIF_RST_CMD_1_ADDR 0x00C
-#define ISPIF_INTF_CMD_ADDR 0x248
-#define ISPIF_INTF_CMD_1_ADDR 0x24C
-#define ISPIF_CTRL_ADDR 0x008
-#define ISPIF_INPUT_SEL_ADDR 0x244
-#define ISPIF_PIX_0_INTF_CID_MASK_ADDR 0x254
-#define ISPIF_RDI_0_INTF_CID_MASK_ADDR 0x264
-#define ISPIF_PIX_1_INTF_CID_MASK_ADDR 0x258
-#define ISPIF_RDI_1_INTF_CID_MASK_ADDR 0x268
-#define ISPIF_RDI_2_INTF_CID_MASK_ADDR 0x26C
-#define ISPIF_PIX_0_STATUS_ADDR 0x2C0
-#define ISPIF_RDI_0_STATUS_ADDR 0x2D0
-#define ISPIF_PIX_1_STATUS_ADDR 0x2C4
-#define ISPIF_RDI_1_STATUS_ADDR 0x2D4
-#define ISPIF_RDI_2_STATUS_ADDR 0x2D8
-#define ISPIF_IRQ_MASK_ADDR 0x208
-#define ISPIF_IRQ_CLEAR_ADDR 0x230
-#define ISPIF_IRQ_STATUS_ADDR 0x21C
-#define ISPIF_IRQ_MASK_1_ADDR 0x20C
-#define ISPIF_IRQ_CLEAR_1_ADDR 0x234
-#define ISPIF_IRQ_STATUS_1_ADDR 0x220
-#define ISPIF_IRQ_MASK_2_ADDR 0x210
-#define ISPIF_IRQ_CLEAR_2_ADDR 0x238
-#define ISPIF_IRQ_STATUS_2_ADDR 0x224
#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x01C
-/* new */
-#define ISPIF_VFE_m_CTRL_0_ADDR 0x200
-#define ISPIF_VFE_m_IRQ_MASK_0 0x208
-#define ISPIF_VFE_m_IRQ_MASK_1 0x20C
-#define ISPIF_VFE_m_IRQ_MASK_2 0x210
-#define ISPIF_VFE_m_IRQ_STATUS_0 0x21C
-#define ISPIF_VFE_m_IRQ_STATUS_1 0x220
-#define ISPIF_VFE_m_IRQ_STATUS_2 0x224
-#define ISPIF_VFE_m_IRQ_CLEAR_0 0x230
-#define ISPIF_VFE_m_IRQ_CLEAR_1 0x234
-#define ISPIF_VFE_m_IRQ_CLEAR_2 0x238
+#define ISPIF_VFE(m) ((m) * 0x200)
+
+#define ISPIF_VFE_m_CTRL_0(m) (0x200 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INPUT_SEL(m) (0x244 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) (0x254 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x264 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_CROP(m, n) (0x278 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_THRESHOLD(m) (0x288 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_OUTPUT_SEL(m) (0x28C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n) (0x290 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n) (0x298 + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n) (0x29C + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) (0x2C0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x2D0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_DESKEW_SIZE(m) (0x2E4 + ISPIF_VFE(m))
/*ISPIF RESET BITS*/
#define VFE_CLK_DOMAIN_RST BIT(31)
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 6418f21..fbc2b93 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -355,7 +355,7 @@
if (WARN_ON(!msm_subdev))
return -EINVAL;
- if (WARN_ON(!msm_v4l2_dev) && WARN_ON(!msm_v4l2_dev->dev))
+ if (WARN_ON(!msm_v4l2_dev) || WARN_ON(!msm_v4l2_dev->dev))
return -EIO;
return __msm_sd_register_subdev(&msm_subdev->sd);
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index ca5e646..8f63d33 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -563,6 +563,44 @@
return 0;
}
+static int msm_cpp_flush_frames(struct cpp_device *cpp_dev)
+{
+ struct v4l2_event v4l2_evt;
+ struct msm_queue_cmd *frame_qcmd;
+ struct msm_cpp_frame_info_t *process_frame;
+ struct msm_device_queue *queue;
+ struct msm_queue_cmd *event_qcmd;
+
+ do {
+ if (cpp_dev->realtime_q.len != 0) {
+ queue = &cpp_dev->realtime_q;
+ } else if (cpp_dev->offline_q.len != 0) {
+ queue = &cpp_dev->offline_q;
+ } else {
+ pr_debug("All frames flushed\n");
+ break;
+ }
+ frame_qcmd = msm_dequeue(queue, list_frame);
+ process_frame = frame_qcmd->command;
+ kfree(frame_qcmd);
+ event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_ATOMIC);
+ if (!event_qcmd) {
+ pr_err("Insufficient memory. return");
+ return -ENOMEM;
+ }
+ atomic_set(&event_qcmd->on_heap, 1);
+ event_qcmd->command = process_frame;
+ CPP_DBG("fid %d\n", process_frame->frame_id);
+ msm_enqueue(&cpp_dev->eventData_q, &event_qcmd->list_eventdata);
+
+ v4l2_evt.id = process_frame->inst_id;
+ v4l2_evt.type = V4L2_EVENT_CPP_FRAME_DONE;
+ v4l2_event_queue(cpp_dev->msm_sd.sd.devnode, &v4l2_evt);
+ } while ((cpp_dev->realtime_q.len != 0) ||
+ (cpp_dev->offline_q.len != 0));
+ return 0;
+}
+
static int msm_cpp_cfg(struct cpp_device *cpp_dev,
struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
{
@@ -755,7 +793,7 @@
rc = msm_cpp_cfg(cpp_dev, ioctl_ptr);
break;
case VIDIOC_MSM_CPP_FLUSH_QUEUE:
- rc = msm_cpp_send_frame_to_hardware(cpp_dev);
+ rc = msm_cpp_flush_frames(cpp_dev);
break;
case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD: {
struct msm_device_queue *queue = &cpp_dev->eventData_q;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 499b36c..9be4704 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -75,10 +75,6 @@
module_param(video_nonsecure_ion_heap, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(video_nonsecure_ion_heap, "ION heap for non-secure video buffer allocation");
-static int generate_es_events;
-module_param(generate_es_events, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(generate_es_events, "Generate new elementary stream data events");
-
/* Value of TS packet scramble bits field for even key */
static int mpq_sdmx_scramble_even = 0x2;
module_param(mpq_sdmx_scramble_even, int, S_IRUGO | S_IWUSR);
@@ -1224,13 +1220,6 @@
{
struct mpq_demux *mpq_demux = feed->demux->priv;
- if (!generate_es_events) {
- MPQ_DVB_ERR_PRINT(
- "%s: Cannot release decoder buffer when not working with new elementary stream data events\n",
- __func__);
- return -EPERM;
- }
-
if (cookie < 0) {
MPQ_DVB_ERR_PRINT("%s: invalid cookie parameter\n", __func__);
return -EINVAL;
@@ -2834,13 +2823,11 @@
__func__);
}
- if (generate_es_events) {
- mpq_dmx_prepare_es_event_data(
- &packet, &meta_data, feed_data,
- stream_buffer, &data);
+ mpq_dmx_prepare_es_event_data(
+ &packet, &meta_data, feed_data,
+ stream_buffer, &data);
- feed->data_ready_cb.ts(&feed->feed.ts, &data);
- }
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
feed_data->pending_pattern_len = 0;
mpq_streambuffer_get_data_rw_offset(
@@ -2975,15 +2962,13 @@
NULL,
&feed_data->frame_offset);
- if (generate_es_events) {
- mpq_dmx_prepare_es_event_data(
- &packet, &meta_data,
- feed_data,
- stream_buffer, &data);
+ mpq_dmx_prepare_es_event_data(
+ &packet, &meta_data,
+ feed_data,
+ stream_buffer, &data);
- feed->data_ready_cb.ts(
- &feed->feed.ts, &data);
- }
+ feed->data_ready_cb.ts(
+ &feed->feed.ts, &data);
} else {
MPQ_DVB_ERR_PRINT(
"%s: received PUSI"
@@ -4122,6 +4107,8 @@
int ret;
int pes_cnt = 0;
struct dmx_data_ready data_event;
+ struct dmx_data_ready data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
goto decoder_filter_check_overflow;
@@ -4249,15 +4236,11 @@
mpq_dmx_update_decoder_stat(mpq_demux);
mpq_streambuffer_pkt_write(sbuf, &packet, (u8 *)&meta_data);
- if (generate_es_events) {
- struct dmx_data_ready data;
- struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
- mpq_dmx_prepare_es_event_data(
- &packet, &meta_data, &mpq_feed->video_info,
- sbuf, &data);
- MPQ_DVB_DBG_PRINT("%s: Notify ES Event\n", __func__);
- feed->data_ready_cb.ts(&feed->feed.ts, &data);
- }
+ mpq_dmx_prepare_es_event_data(
+ &packet, &meta_data, &mpq_feed->video_info,
+ sbuf, &data);
+ MPQ_DVB_DBG_PRINT("%s: Notify ES Event\n", __func__);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
spin_unlock(&mpq_feed->video_info.video_buffer_lock);
}
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index f8460be..40e09b6 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1165,6 +1165,16 @@
sizeof(struct hfi_index_extradata_config);
break;
}
+ case HAL_PARAM_VENC_SLICE_DELIVERY_MODE:
+ {
+ struct hfi_enable *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE;
+ hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+ hfi->enable = ((struct hal_enable *) pdata)->enable;
+ pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ break;
+ }
case HAL_CONFIG_VPE_DEINTERLACE:
break;
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
@@ -1187,7 +1197,6 @@
case HAL_PARAM_VDEC_MB_QUANTIZATION:
case HAL_PARAM_VDEC_NUM_CONCEALED_MB:
case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING:
- case HAL_PARAM_VENC_SLICE_DELIVERY_MODE:
case HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING:
case HAL_CONFIG_BUFFER_COUNT_ACTUAL:
case HAL_CONFIG_VDEC_MULTI_STREAM:
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index be9458d..102e1ec 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -305,11 +305,213 @@
callback(RELEASE_RESOURCE_DONE, &cmd_done);
}
+static inline void copy_cap_prop(
+ struct hfi_capability_supported *in,
+ struct vidc_hal_session_init_done *sess_init_done)
+{
+ struct hal_capability_supported *out = NULL;
+ switch (in->capability_type) {
+ case HFI_CAPABILITY_FRAME_WIDTH:
+ out = &sess_init_done->width;
+ break;
+
+ case HFI_CAPABILITY_FRAME_HEIGHT:
+ out = &sess_init_done->height;
+ break;
+
+ case HFI_CAPABILITY_MBS_PER_FRAME:
+ out = &sess_init_done->mbs_per_frame;
+ break;
+
+ case HFI_CAPABILITY_MBS_PER_SECOND:
+ out = &sess_init_done->mbs_per_sec;
+ break;
+
+ case HFI_CAPABILITY_FRAMERATE:
+ out = &sess_init_done->frame_rate;
+ break;
+
+ case HFI_CAPABILITY_SCALE_X:
+ out = &sess_init_done->scale_x;
+ break;
+
+ case HFI_CAPABILITY_SCALE_Y:
+ out = &sess_init_done->scale_y;
+ break;
+
+ case HFI_CAPABILITY_BITRATE:
+ out = &sess_init_done->bitrate;
+ break;
+ }
+
+ if (in && out) {
+ out->capability_type =
+ (enum hal_capability)in->capability_type;
+ out->min = in->min;
+ out->max = in->max;
+ out->step_size = in->step_size;
+ }
+}
+
enum vidc_status hfi_process_sess_init_done_prop_read(
struct hfi_msg_sys_session_init_done_packet *pkt,
- struct msm_vidc_cb_cmd_done *cmddone)
+ struct vidc_hal_session_init_done *sess_init_done)
{
- return VIDC_ERR_NONE;
+ u32 rem_bytes, num_properties;
+ u8 *data_ptr;
+ u32 status = VIDC_ERR_NONE;
+ u32 prop_id, next_offset = 0;
+
+ rem_bytes = pkt->size - sizeof(struct
+ hfi_msg_sys_session_init_done_packet) + sizeof(u32);
+
+ if (rem_bytes == 0) {
+ dprintk(VIDC_ERR,
+ "hfi_msg_sys_session_init_done:missing_prop_info");
+ return VIDC_ERR_FAIL;
+ }
+
+ status = hfi_map_err_status((u32)pkt->error_type);
+
+ if (status)
+ return status;
+
+ data_ptr = (u8 *) &pkt->rg_property_data[0];
+ num_properties = pkt->num_properties;
+
+ while ((status == VIDC_ERR_NONE) && num_properties &&
+ (rem_bytes >= sizeof(u32))) {
+ prop_id = *((u32 *)data_ptr);
+ next_offset = sizeof(u32);
+
+ switch (prop_id) {
+ case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
+ {
+ struct hfi_capability_supported_info *prop =
+ (struct hfi_capability_supported_info *)
+ (data_ptr + next_offset);
+ u32 num_capabilities;
+ struct hfi_capability_supported *cap_ptr;
+
+ if ((rem_bytes - next_offset) < sizeof(*cap_ptr)) {
+ status = VIDC_ERR_BAD_PARAM;
+ break;
+ }
+
+ num_capabilities = prop->num_capabilities;
+ cap_ptr = &prop->rg_data[0];
+ next_offset += sizeof(u32);
+
+ while (num_capabilities &&
+ ((rem_bytes - next_offset) >= sizeof(u32))) {
+ copy_cap_prop(cap_ptr, sess_init_done);
+ cap_ptr++;
+ next_offset += sizeof(*cap_ptr);
+ num_capabilities--;
+ }
+ num_properties--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+ {
+ struct hfi_uncompressed_format_supported *prop =
+ (struct hfi_uncompressed_format_supported *)
+ (data_ptr + next_offset);
+
+ u32 num_format_entries;
+ char *fmt_ptr;
+ struct hfi_uncompressed_plane_info *plane_info;
+
+ if ((rem_bytes - next_offset) < sizeof(*prop)) {
+ status = VIDC_ERR_BAD_PARAM;
+ break;
+ }
+ num_format_entries = prop->format_entries;
+ next_offset = sizeof(*prop) - sizeof(u32);
+ fmt_ptr = (char *)&prop->rg_format_info[0];
+
+ while (num_format_entries) {
+ u32 bytes_to_skip;
+ plane_info =
+ (struct hfi_uncompressed_plane_info *) fmt_ptr;
+
+ if ((rem_bytes - next_offset) <
+ sizeof(*plane_info)) {
+ status = VIDC_ERR_BAD_PARAM;
+ break;
+ }
+ bytes_to_skip = sizeof(*plane_info) -
+ sizeof(struct
+ hfi_uncompressed_plane_constraints) +
+ plane_info->num_planes *
+ sizeof(struct
+ hfi_uncompressed_plane_constraints);
+
+ fmt_ptr += bytes_to_skip;
+ next_offset += bytes_to_skip;
+ num_format_entries--;
+ }
+ num_properties--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED:
+ {
+ struct hfi_properties_supported *prop =
+ (struct hfi_properties_supported *)
+ (data_ptr + next_offset);
+
+ next_offset += sizeof(*prop) - sizeof(u32)
+ + prop->num_properties * sizeof(u32);
+ num_properties--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
+ {
+ struct hfi_profile_level_supported *prop =
+ (struct hfi_profile_level_supported *)
+ (data_ptr + next_offset);
+
+ next_offset += sizeof(*prop) -
+ sizeof(struct hfi_profile_level) +
+ prop->profile_count *
+ sizeof(struct hfi_profile_level);
+ num_properties--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
+ {
+ next_offset +=
+ sizeof(struct hfi_nal_stream_format_supported);
+ num_properties--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT:
+ {
+ next_offset += sizeof(u32);
+ num_properties--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE:
+ {
+ next_offset += sizeof(u32);
+ num_properties--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH:
+ {
+ next_offset +=
+ sizeof(struct hfi_intra_refresh);
+ num_properties--;
+ break;
+ }
+ default:
+ dprintk(VIDC_DBG,
+ "%s default case - 0x%x", __func__, prop_id);
+ }
+ rem_bytes -= next_offset;
+ data_ptr += next_offset;
+ }
+ return status;
}
static void hfi_process_sess_get_prop_buf_req(
@@ -493,7 +695,7 @@
cmd_done.data = &session_init_done;
if (!cmd_done.status) {
cmd_done.status = hfi_process_sess_init_done_prop_read(
- pkt, &cmd_done);
+ pkt, &session_init_done);
}
cmd_done.size = sizeof(struct vidc_hal_session_init_done);
callback(SESSION_INIT_DONE, &cmd_done);
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 4f8c257..d782227 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -682,6 +682,13 @@
{
return 0;
}
+
+static int msm_v4l2_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+ return msm_vidc_enum_framesizes((void *)vidc_inst, fsize);
+}
static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = {
.vidioc_querycap = msm_v4l2_querycap,
.vidioc_enum_fmt_vid_cap_mplane = msm_v4l2_enum_fmt,
@@ -703,7 +710,8 @@
.vidioc_decoder_cmd = msm_v4l2_decoder_cmd,
.vidioc_encoder_cmd = msm_v4l2_encoder_cmd,
.vidioc_s_parm = msm_v4l2_s_parm,
- .vidioc_g_parm = msm_v4l2_g_parm
+ .vidioc_g_parm = msm_v4l2_g_parm,
+ .vidioc_enum_framesizes = msm_v4l2_enum_framesizes,
};
static const struct v4l2_ioctl_ops msm_v4l2_enc_ioctl_ops = {
@@ -1006,6 +1014,13 @@
goto err_mem_alloc;
}
for (i = 0; i < num_bus_pdata; i++) {
+ if (!res->has_ocmem &&
+ (!strcmp(bus_pdata_config_vector[i].name,
+ "qcom,enc-ocmem-ab-ib")
+ || !strcmp(bus_pdata_config_vector[i].name,
+ "qcom,dec-ocmem-ab-ib"))) {
+ continue;
+ }
res->bus_pdata[i].num_usecases = get_u32_array_num_elements(
pdev, bus_pdata_config_vector[i].name);
if (res->bus_pdata[i].num_usecases == 0) {
@@ -1201,6 +1216,9 @@
kres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
res->irq = kres ? kres->start : -1;
+ res->has_ocmem = of_property_read_bool(pdev->dev.of_node,
+ "qcom,has-ocmem");
+
rc = msm_vidc_load_freq_table(res);
if (rc) {
dprintk(VIDC_ERR, "Failed to load freq table: %d\n", rc);
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 5966d12..8f8e723 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -20,10 +20,6 @@
#include "msm_vidc_debug.h"
#define MSM_VDEC_DVC_NAME "msm_vdec_8974"
-#define DEFAULT_HEIGHT 720
-#define DEFAULT_WIDTH 1280
-#define MAX_SUPPORTED_WIDTH 3820
-#define MAX_SUPPORTED_HEIGHT 2160
#define MIN_NUM_OUTPUT_BUFFERS 4
#define MAX_NUM_OUTPUT_BUFFERS 6
@@ -248,7 +244,7 @@
return (MAX_SUPPORTED_WIDTH * MAX_SUPPORTED_HEIGHT * 3/2)/2;
}
-static const struct msm_vidc_format vdec_formats[] = {
+struct msm_vidc_format vdec_formats[] = {
{
.name = "YCbCr Semiplanar 4:2:0",
.description = "Y/CbCr 4:2:0",
@@ -582,7 +578,6 @@
int stride, scanlines;
int extra_idx = 0;
int rc = 0;
- int ret;
int i;
struct hal_buffer_requirements *buff_req_buffer;
if (!inst || !f || !inst->core || !inst->core->device) {
@@ -602,6 +597,12 @@
if (inst->in_reconfig == true) {
inst->prop.height = inst->reconfig_height;
inst->prop.width = inst->reconfig_width;
+ rc = msm_vidc_check_session_supported(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: session not supported\n", __func__);
+ goto exit;
+ }
}
f->fmt.pix_mp.height = inst->prop.height;
f->fmt.pix_mp.width = inst->prop.width;
@@ -612,10 +613,20 @@
frame_sz.height = inst->prop.height;
dprintk(VIDC_DBG, "width = %d, height = %d\n",
frame_sz.width, frame_sz.height);
- ret = msm_comm_try_set_prop(inst,
+ rc = msm_comm_try_set_prop(inst,
HAL_PARAM_FRAME_SIZE, &frame_sz);
- ret = ret || msm_comm_try_get_bufreqs(inst);
- if (ret || (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) {
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: Failed : Frame size setting\n", __func__);
+ goto exit;
+ }
+ rc = msm_comm_try_get_bufreqs(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: Failed : Buffer requirements\n", __func__);
+ goto exit;
+ }
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
for (i = 0; i < fmt->num_planes; ++i) {
f->fmt.pix_mp.plane_fmt[i].sizeimage =
fmt->get_frame_size(i,
@@ -680,6 +691,7 @@
f->type);
rc = -EINVAL;
}
+exit:
return rc;
}
int msm_vdec_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a)
@@ -717,7 +729,7 @@
}
int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
{
- const struct msm_vidc_format *fmt = NULL;
+ struct msm_vidc_format *fmt = NULL;
struct hal_frame_size frame_sz;
int extra_idx = 0;
int rc = 0;
@@ -787,6 +799,12 @@
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
inst->prop.width = f->fmt.pix_mp.width;
inst->prop.height = f->fmt.pix_mp.height;
+ rc = msm_vidc_check_session_supported(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: session not supported\n", __func__);
+ goto err_invalid_fmt;
+ }
fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
ARRAY_SIZE(vdec_formats),
f->fmt.pix_mp.pixelformat,
@@ -1161,6 +1179,10 @@
inst->fmts[CAPTURE_PORT] = &vdec_formats[0];
inst->prop.height = DEFAULT_HEIGHT;
inst->prop.width = DEFAULT_WIDTH;
+ inst->capability.height.min = MIN_SUPPORTED_HEIGHT;
+ inst->capability.height.max = DEFAULT_HEIGHT;
+ inst->capability.width.min = MIN_SUPPORTED_WIDTH;
+ inst->capability.width.max = DEFAULT_WIDTH;
inst->prop.fps = 30;
inst->prop.prev_time_stamp = 0;
return rc;
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 072f4ab..9aa8175 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -19,10 +19,7 @@
#include "msm_vidc_debug.h"
#define MSM_VENC_DVC_NAME "msm_venc_8974"
-#define DEFAULT_HEIGHT 720
-#define DEFAULT_WIDTH 1280
#define MIN_NUM_OUTPUT_BUFFERS 4
-#define MAX_NUM_OUTPUT_BUFFERS 8
#define MIN_BIT_RATE 64000
#define MAX_BIT_RATE 160000000
#define DEFAULT_BIT_RATE 64000
@@ -88,6 +85,27 @@
"High Latency",
};
+static const char *const mpeg_video_vidc_extradata[] = {
+ "Extradata none",
+ "Extradata MB Quantization",
+ "Extradata Interlace Video",
+ "Extradata VC1 Framedisp",
+ "Extradata VC1 Seqdisp",
+ "Extradata timestamp",
+ "Extradata S3D Frame Packing",
+ "Extradata Frame Rate",
+ "Extradata Panscan Window",
+ "Extradata Recovery point SEI",
+ "Extradata Closed Caption UD",
+ "Extradata AFD UD",
+ "Extradata Multislice info",
+ "Extradata number of concealed MB",
+ "Extradata metadata filler",
+ "Extradata input crop",
+ "Extradata digital zoom",
+ "Extradata aspect ratio",
+};
+
enum msm_venc_ctrl_cluster {
MSM_VENC_CTRL_CLUSTER_QP = 1,
MSM_VENC_CTRL_CLUSTER_INTRA_PERIOD,
@@ -439,6 +457,18 @@
.cluster = MSM_VENC_CTRL_CLUSTER_SLICING,
},
{
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE,
+ .name = "Slice delivery mode",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .minimum = 0,
+ .maximum = 1,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ .cluster = MSM_VENC_CTRL_CLUSTER_SLICING,
+ },
+ {
.id = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE,
.name = "Intra Refresh Mode",
.type = V4L2_CTRL_TYPE_MENU,
@@ -557,6 +587,36 @@
.qmenu = NULL,
.cluster = 0,
},
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA,
+ .name = "Extradata Type",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+ .maximum = V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
+ .default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_AFD_UD) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
+ (1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP) |
+ (1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM) |
+ (1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO)
+ ),
+ .qmenu = mpeg_video_vidc_extradata,
+ .step = 0,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -578,7 +638,7 @@
return sz;
}
-static const struct msm_vidc_format venc_formats[] = {
+static struct msm_vidc_format venc_formats[] = {
{
.name = "YCbCr Semiplanar 4:2:0",
.description = "Y/CbCr 4:2:0",
@@ -640,6 +700,9 @@
struct hal_buffer_count_actual new_buf_count;
enum hal_property property_id;
struct hfi_device *hdev;
+ struct hal_buffer_requirements *buff_req;
+ struct v4l2_ctrl *ctrl = NULL;
+ u32 extradata = 0;
if (!q || !q->drv_priv) {
dprintk(VIDC_ERR, "Invalid input, q = %p\n", q);
return -EINVAL;
@@ -655,13 +718,33 @@
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
*num_planes = 1;
- if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
- *num_buffers > MAX_NUM_OUTPUT_BUFFERS)
- *num_buffers = MIN_NUM_OUTPUT_BUFFERS;
+ buff_req = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
+ *num_buffers = buff_req->buffer_count_actual =
+ max(*num_buffers, buff_req->buffer_count_actual);
+ if (*num_buffers > VIDEO_MAX_FRAME) {
+ dprintk(VIDC_ERR,
+ "Failed : No of slices requested = %d"\
+ " Max supported slices = %d",
+ *num_buffers, VIDEO_MAX_FRAME);
+ rc = -EINVAL;
+ break;
+ }
+ ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
+ V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
+ if (ctrl)
+ extradata = v4l2_ctrl_g_ctrl(ctrl);
+ if (extradata)
+ *num_planes = *num_planes + 1;
+ inst->fmts[CAPTURE_PORT]->num_planes = *num_planes;
for (i = 0; i < *num_planes; i++) {
sizes[i] = inst->fmts[CAPTURE_PORT]->get_frame_size(
i, inst->prop.height, inst->prop.width);
}
+ property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
+ new_buf_count.buffer_type = HAL_BUFFER_OUTPUT;
+ new_buf_count.buffer_count_actual = *num_buffers;
+ rc = call_hfi_op(hdev, session_set_property, inst->session,
+ property_id, &new_buf_count);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
@@ -1342,6 +1425,25 @@
multi_slice_control.slice_size = ctrl->val;
pdata = &multi_slice_control;
break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE: {
+ temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
+ if ((temp_ctrl->val ==
+ V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) &&
+ (inst->fmts[CAPTURE_PORT]->fourcc ==
+ V4L2_PIX_FMT_H264 ||
+ inst->fmts[CAPTURE_PORT]->fourcc ==
+ V4L2_PIX_FMT_H264_NO_SC)) {
+ property_id = HAL_PARAM_VENC_SLICE_DELIVERY_MODE;
+ enable.enable = true;
+ } else {
+ dprintk(VIDC_WARN,
+ "Failed : slice delivery mode is valid "\
+ "only for H264 encoder and MB based slicing");
+ enable.enable = false;
+ }
+ pdata = &enable;
+ break;
+ }
case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE: {
struct v4l2_ctrl *air_mbs, *air_ref, *cir_mbs;
air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS);
@@ -1452,6 +1554,15 @@
inst->mode = VIDC_SECURE;
dprintk(VIDC_INFO, "Setting secure mode to :%d\n", inst->mode);
break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+ {
+ struct hal_extradata_enable extra;
+ property_id = HAL_PARAM_INDEX_EXTRADATA;
+ extra.index = msm_comm_get_hal_extradata_index(ctrl->val);
+ extra.enable = 1;
+ pdata = &extra;
+ break;
+ }
default:
rc = -ENOTSUPP;
break;
@@ -1676,7 +1787,7 @@
}
int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
{
- const struct msm_vidc_format *fmt = NULL;
+ struct msm_vidc_format *fmt = NULL;
struct hal_frame_size frame_sz;
int rc = 0;
int i;
@@ -1707,6 +1818,12 @@
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
inst->prop.width = f->fmt.pix_mp.width;
inst->prop.height = f->fmt.pix_mp.height;
+ rc = msm_vidc_check_session_supported(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: session not supported\n", __func__);
+ goto exit;
+ }
frame_sz.buffer_type = HAL_BUFFER_INPUT;
frame_sz.width = inst->prop.width;
frame_sz.height = inst->prop.height;
@@ -1768,6 +1885,7 @@
const struct msm_vidc_format *fmt = NULL;
int rc = 0;
int i;
+ int extra_idx = 0;
if (!inst || !f) {
dprintk(VIDC_ERR,
"Invalid input, inst = %p, format = %p\n", inst, f);
@@ -1788,6 +1906,16 @@
fmt->get_frame_size(i, inst->prop.height,
inst->prop.width);
}
+ extra_idx = EXTRADATA_IDX(fmt->num_planes);
+ if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+ f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+ inst->buff_req.buffer
+ [HAL_BUFFER_EXTRADATA_OUTPUT].buffer_size;
+ }
+ for (i = 0; i < fmt->num_planes; ++i) {
+ inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
+ f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
} else {
dprintk(VIDC_ERR,
"Buf type not recognized, type = %d\n", f->type);
@@ -1827,6 +1955,7 @@
int i;
struct vidc_buffer_addr_info buffer_info;
struct hfi_device *hdev;
+ int extra_idx = 0;
if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR, "%s invalid parameters", __func__);
@@ -1839,24 +1968,41 @@
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- for (i = 0; i < b->length; i++) {
- dprintk(VIDC_DBG,
- "device_addr = %ld, size = %d\n",
+ if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) {
+ dprintk(VIDC_ERR,
+ "Planes mismatch: needed: %d, allocated: %d\n",
+ inst->fmts[CAPTURE_PORT]->num_planes,
+ b->length);
+ rc = -EINVAL;
+ break;
+ }
+
+ for (i = 0; (i < b->length) && (i < VIDEO_MAX_PLANES); i++) {
+ dprintk(VIDC_DBG, "device_addr = 0x%lx, size = %d\n",
b->m.planes[i].m.userptr,
b->m.planes[i].length);
- buffer_info.buffer_size = b->m.planes[i].length;
- buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
- buffer_info.num_buffers = 1;
- buffer_info.align_device_addr =
- b->m.planes[i].m.userptr;
- buffer_info.extradata_size = 0;
- buffer_info.extradata_addr = 0;
- rc = call_hfi_op(hdev, session_set_buffers,
- (void *)inst->session, &buffer_info);
- if (rc)
- dprintk(VIDC_ERR,
- "vidc_hal_session_set_buffers failed");
}
+ buffer_info.buffer_size = b->m.planes[0].length;
+ buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+ buffer_info.num_buffers = 1;
+ buffer_info.align_device_addr =
+ b->m.planes[0].m.userptr;
+
+ extra_idx = EXTRADATA_IDX(b->length);
+ if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+ buffer_info.extradata_addr =
+ b->m.planes[extra_idx].m.userptr;
+ dprintk(VIDC_DBG, "extradata: 0x%lx\n",
+ b->m.planes[extra_idx].m.userptr);
+ buffer_info.extradata_size =
+ b->m.planes[extra_idx].length;
+ }
+
+ rc = call_hfi_op(hdev, session_set_buffers,
+ (void *)inst->session, &buffer_info);
+ if (rc)
+ dprintk(VIDC_ERR,
+ "vidc_hal_session_set_buffers failed");
break;
default:
dprintk(VIDC_ERR,
@@ -1869,8 +2015,7 @@
int msm_venc_release_buf(struct msm_vidc_inst *inst,
struct v4l2_buffer *b)
{
- int rc = 0;
- int i;
+ int i, rc = 0, extra_idx = 0;
struct vidc_buffer_addr_info buffer_info;
struct hfi_device *hdev;
@@ -1891,24 +2036,36 @@
switch (b->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
+ if (b->length !=
+ inst->fmts[CAPTURE_PORT]->num_planes) {
+ dprintk(VIDC_ERR,
+ "Planes mismatch: needed: %d, to release: %d\n",
+ inst->fmts[CAPTURE_PORT]->num_planes,
+ b->length);
+ rc = -EINVAL;
+ break;
+ }
for (i = 0; i < b->length; i++) {
dprintk(VIDC_DBG,
- "Release device_addr = %ld, size = %d, %d\n",
+ "Release device_addr = 0x%lx, size = %d, %d\n",
b->m.planes[i].m.userptr,
b->m.planes[i].length, inst->state);
- buffer_info.buffer_size = b->m.planes[i].length;
- buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
- buffer_info.num_buffers = 1;
- buffer_info.align_device_addr =
- b->m.planes[i].m.userptr;
- buffer_info.extradata_size = 0;
- buffer_info.extradata_addr = 0;
- buffer_info.response_required = false;
- rc = call_hfi_op(hdev, session_release_buffers,
+ }
+ buffer_info.buffer_size = b->m.planes[0].length;
+ buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+ buffer_info.num_buffers = 1;
+ buffer_info.align_device_addr =
+ b->m.planes[0].m.userptr;
+ extra_idx = EXTRADATA_IDX(b->length);
+ if (extra_idx && (extra_idx < VIDEO_MAX_PLANES))
+ buffer_info.extradata_addr =
+ b->m.planes[extra_idx].m.userptr;
+ buffer_info.response_required = false;
+ rc = call_hfi_op(hdev, session_release_buffers,
(void *)inst->session, &buffer_info);
- if (rc)
- dprintk(VIDC_ERR,
+ if (rc)
+ dprintk(VIDC_ERR,
"vidc_hal_session_release_buffers failed\n");
}
break;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 218987e..0fbfd72 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -299,6 +299,30 @@
return -EINVAL;
}
+
+int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
+{
+ struct msm_vidc_inst *inst = instance;
+ struct msm_vidc_core_capability *capability = NULL;
+
+ if (!inst || !fsize) {
+ dprintk(VIDC_ERR, "%s: invalid parameter: %p %p\n",
+ __func__, inst, fsize);
+ return -EINVAL;
+ }
+ if (!inst->core)
+ return -EINVAL;
+
+ capability = &inst->capability;
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = capability->width.min;
+ fsize->stepwise.max_width = capability->width.max;
+ fsize->stepwise.step_width = capability->width.step_size;
+ fsize->stepwise.min_height = capability->height.min;
+ fsize->stepwise.max_height = capability->height.max;
+ fsize->stepwise.step_height = capability->height.step_size;
+ return 0;
+}
static void *vidc_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write)
{
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index b71a816..4346a4e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -174,8 +174,8 @@
}
return &fmt[i];
}
-const struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
- const struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type)
+struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
+ struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type)
{
int i;
if (!fmt) {
@@ -350,7 +350,15 @@
struct msm_vidc_cb_cmd_done *response = data;
struct msm_vidc_inst *inst;
if (response) {
+ struct vidc_hal_session_init_done *session_init_done =
+ (struct vidc_hal_session_init_done *) response->data;
inst = (struct msm_vidc_inst *)response->session_id;
+
+ inst->capability.width = session_init_done->width;
+ inst->capability.height = session_init_done->height;
+ inst->capability.frame_rate =
+ session_init_done->frame_rate;
+ inst->capability.capability_set = true;
signal_session_msg_receipt(cmd, inst);
} else {
dprintk(VIDC_ERR,
@@ -394,8 +402,11 @@
inst->reconfig_height = event_notify->height;
inst->reconfig_width = event_notify->width;
inst->in_reconfig = true;
- v4l2_event_queue_fh(&inst->event_handler, &dqevent);
- wake_up(&inst->kernel_event_queue);
+ rc = msm_vidc_check_session_supported(inst);
+ if (!rc) {
+ v4l2_event_queue_fh(&inst->event_handler, &dqevent);
+ wake_up(&inst->kernel_event_queue);
+ }
return;
} else {
dprintk(VIDC_ERR,
@@ -949,7 +960,7 @@
dprintk(VIDC_WARN,
"Failed to scale DDR bus. Performance might be impacted\n");
}
- if (call_hfi_op(hdev, is_ocmem_present, hdev->hfi_device_data)) {
+ if (core->resources.has_ocmem) {
if (msm_comm_scale_bus(core, inst->session_type,
OCMEM_MEM))
dprintk(VIDC_WARN,
@@ -1112,9 +1123,11 @@
}
msm_comm_scale_clocks_and_bus(inst);
if (list_empty(&core->instances)) {
- if (inst->state != MSM_VIDC_CORE_INVALID)
- msm_comm_unset_ocmem(core);
- call_hfi_op(hdev, free_ocmem, hdev->hfi_device_data);
+ if (core->resources.has_ocmem) {
+ if (inst->state != MSM_VIDC_CORE_INVALID)
+ msm_comm_unset_ocmem(core);
+ call_hfi_op(hdev, free_ocmem, hdev->hfi_device_data);
+ }
dprintk(VIDC_DBG, "Calling vidc_hal_core_release\n");
rc = call_hfi_op(hdev, core_release, hdev->hfi_device_data);
if (rc) {
@@ -1126,7 +1139,10 @@
core->state = VIDC_CORE_UNINIT;
mutex_unlock(&core->lock);
call_hfi_op(hdev, unload_fw, hdev->hfi_device_data);
- msm_comm_unvote_buses(core, DDR_MEM|OCMEM_MEM);
+ if (core->resources.has_ocmem)
+ msm_comm_unvote_buses(core, DDR_MEM|OCMEM_MEM);
+ else
+ msm_comm_unvote_buses(core, DDR_MEM);
}
core_already_uninited:
change_inst_state(inst, MSM_VIDC_CORE_UNINIT);
@@ -1273,21 +1289,26 @@
inst, inst->state);
goto exit;
}
- ocmem_sz = get_ocmem_requirement(inst->prop.height, inst->prop.width);
- rc = msm_comm_scale_bus(inst->core, inst->session_type, OCMEM_MEM);
- if (!rc) {
- mutex_lock(&inst->core->sync_lock);
- rc = call_hfi_op(hdev, alloc_ocmem, hdev->hfi_device_data,
- ocmem_sz);
- mutex_unlock(&inst->core->sync_lock);
- if (rc) {
+ if (inst->core->resources.has_ocmem) {
+ ocmem_sz = get_ocmem_requirement(inst->prop.height,
+ inst->prop.width);
+ rc = msm_comm_scale_bus(inst->core, inst->session_type,
+ OCMEM_MEM);
+ if (!rc) {
+ mutex_lock(&inst->core->sync_lock);
+ rc = call_hfi_op(hdev, alloc_ocmem,
+ hdev->hfi_device_data,
+ ocmem_sz);
+ mutex_unlock(&inst->core->sync_lock);
+ if (rc) {
+ dprintk(VIDC_WARN,
+ "Failed to allocate OCMEM. Performance will be impacted\n");
+ msm_comm_unvote_buses(inst->core, OCMEM_MEM);
+ }
+ } else {
dprintk(VIDC_WARN,
- "Failed to allocate OCMEM. Performance will be impacted\n");
- msm_comm_unvote_buses(inst->core, OCMEM_MEM);
+ "Failed to vote for OCMEM BW. Performance will be impacted\n");
}
- } else {
- dprintk(VIDC_WARN,
- "Failed to vote for OCMEM BW. Performance will be impacted\n");
}
rc = call_hfi_op(hdev, session_load_res, (void *) inst->session);
if (rc) {
@@ -1779,6 +1800,13 @@
mutex_unlock(&inst->sync_lock);
} else {
int64_t time_usec = timeval_to_ns(&vb->v4l2_buf.timestamp);
+
+ rc = msm_vidc_check_session_supported(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: session not supported\n", __func__);
+ goto err_no_mem;
+ }
do_div(time_usec, NSEC_PER_USEC);
memset(&frame_data, 0 , sizeof(struct vidc_frame_data));
frame_data.alloc_len = vb->v4l2_planes[0].length;
@@ -2349,3 +2377,54 @@
hdev->hfi_device_data, type);
return rc;
}
+
+int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
+{
+ struct msm_vidc_core_capability *capability;
+ int rc = 0;
+ struct v4l2_event dqevent;
+
+ if (!inst) {
+ dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
+ return -EINVAL;
+ }
+ capability = &inst->capability;
+
+ if (inst->capability.capability_set) {
+ if (msm_vp8_low_tier &&
+ inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_VP8) {
+ capability->width.max = DEFAULT_WIDTH;
+ capability->height.max = DEFAULT_HEIGHT;
+ }
+ if (inst->prop.width < capability->width.min ||
+ inst->prop.width > capability->width.max ||
+ (inst->prop.width % capability->width.step_size != 0)) {
+ dprintk(VIDC_ERR,
+ "Unsupported width = %d range min(%u) - max(%u) step_size(%u)",
+ inst->prop.width, capability->width.min,
+ capability->width.max, capability->width.step_size);
+ rc = -ENOTSUPP;
+ }
+
+ if (inst->prop.height < capability->height.min ||
+ inst->prop.height > capability->height.max ||
+ (inst->prop.height %
+ capability->height.step_size != 0)) {
+ dprintk(VIDC_ERR,
+ "Unsupported height = %d range min(%u) - max(%u) step_size(%u)",
+ inst->prop.height, capability->height.min,
+ capability->height.max, capability->height.step_size);
+ rc = -ENOTSUPP;
+ }
+ }
+ if (rc) {
+ mutex_lock(&inst->sync_lock);
+ inst->state = MSM_VIDC_CORE_INVALID;
+ mutex_unlock(&inst->sync_lock);
+ dqevent.type = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
+ dqevent.id = 0;
+ v4l2_event_queue_fh(&inst->event_handler, &dqevent);
+ wake_up(&inst->kernel_event_queue);
+ }
+ return rc;
+}
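For reference, the width/height validation added in msm_vidc_check_session_supported() above is a plain range-plus-alignment check. A minimal standalone sketch of the same pattern (illustrative only, not part of this change; the limits and helper names below are made up):

/* Illustrative sketch: validate a dimension against a [min, max] range and an
 * alignment step, mirroring what msm_vidc_check_session_supported() does for
 * width and height. Limits here are made-up example values. */
#include <stdio.h>

struct cap_range { unsigned int min, max, step; };

static int check_dim(const char *name, unsigned int val, struct cap_range r)
{
	if (val < r.min || val > r.max || (val % r.step) != 0) {
		printf("unsupported %s=%u (range %u-%u, step %u)\n",
		       name, val, r.min, r.max, r.step);
		return -1;	/* the driver returns -ENOTSUPP here */
	}
	return 0;
}

int main(void)
{
	struct cap_range w = { 32, 1920, 16 };	/* example limits only */

	check_dim("width", 1920, w);	/* accepted */
	check_dim("width", 1930, w);	/* rejected: above max and unaligned */
	return 0;
}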
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 4f3deb6..862dfab 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -21,8 +21,8 @@
struct msm_vidc_core *get_vidc_core(int core_id);
const struct msm_vidc_format *msm_comm_get_pixel_fmt_index(
const struct msm_vidc_format fmt[], int size, int index, int fmt_type);
-const struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
- const struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type);
+struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
+ struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type);
struct buf_queue *msm_comm_get_vb2q(
struct msm_vidc_inst *inst, enum v4l2_buf_type type);
int msm_comm_try_state(struct msm_vidc_inst *inst, int state);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 62158b0..5948c7c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -18,7 +18,8 @@
int msm_vidc_debug = 0x3;
int msm_fw_debug = 0x18;
int msm_fw_debug_mode = 0x1;
-int msm_fw_low_power_mode = 0x0;
+int msm_fw_low_power_mode = 0x1;
+int msm_vp8_low_tier = 0x1;
struct debug_buffer {
char ptr[MAX_DBG_BUF_SIZE];
@@ -165,6 +166,11 @@
dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
goto failed_create_dir;
}
+ if (!debugfs_create_u32("vp8_low_tier", S_IRUGO | S_IWUSR,
+ parent, &msm_vp8_low_tier)) {
+ dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
+ goto failed_create_dir;
+ }
failed_create_dir:
return dir;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index fb06af6..ea6dd70 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -45,6 +45,7 @@
extern int msm_fw_debug;
extern int msm_fw_debug_mode;
extern int msm_fw_low_power_mode;
+extern int msm_vp8_low_tier;
#define dprintk(__level, __fmt, arg...) \
do { \
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 8238d42..1bfbaa6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -37,6 +37,14 @@
#define MSM_VIDC_VERSION KERNEL_VERSION(0, 0, 1);
#define MAX_DEBUGFS_NAME 50
#define DEFAULT_TIMEOUT 3
+#define DEFAULT_HEIGHT 1080
+#define DEFAULT_WIDTH 1920
+#define MIN_SUPPORTED_WIDTH 32
+#define MIN_SUPPORTED_HEIGHT 32
+#define MAX_SUPPORTED_WIDTH 3820
+#define MAX_SUPPORTED_HEIGHT 2160
+
+
#define V4L2_EVENT_VIDC_BASE 10
@@ -166,6 +174,13 @@
VIDC_SECURE,
};
+struct msm_vidc_core_capability {
+ struct hal_capability_supported width;
+ struct hal_capability_supported height;
+ struct hal_capability_supported frame_rate;
+ u32 capability_set;
+};
+
struct msm_vidc_core {
struct list_head list;
struct mutex sync_lock, lock;
@@ -189,7 +204,7 @@
void *session;
struct session_prop prop;
int state;
- const struct msm_vidc_format *fmts[MAX_PORT_NUM];
+ struct msm_vidc_format *fmts[MAX_PORT_NUM];
struct buf_queue bufq[MAX_PORT_NUM];
struct list_head pendingq;
struct list_head internalbufs;
@@ -212,6 +227,7 @@
struct msm_vidc_debug debug;
struct buf_count count;
enum msm_vidc_mode mode;
+ struct msm_vidc_core_capability capability;
};
extern struct msm_vidc_drv *vidc_driver;
@@ -238,4 +254,5 @@
void handle_cmd_response(enum command_response cmd, void *data);
int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
enum hal_ssr_trigger_type type);
+int msm_vidc_check_session_supported(struct msm_vidc_inst *inst);
#endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 54c0878..86b824b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -73,6 +73,7 @@
struct msm_bus_scale_pdata *bus_pdata;
struct iommu_set iommu_group_set;
struct buffer_usage_set buffer_usage_set;
+ bool has_ocmem;
struct platform_device *pdev;
};
diff --git a/drivers/media/platform/msm/vidc/q6_hfi.c b/drivers/media/platform/msm/vidc/q6_hfi.c
index d1948d1..afe1431 100644
--- a/drivers/media/platform/msm/vidc/q6_hfi.c
+++ b/drivers/media/platform/msm/vidc/q6_hfi.c
@@ -1105,14 +1105,6 @@
return 0;
}
-static int q6_hfi_is_ocmem_present(void *dev)
-{
- (void)dev;
-
- /* Q6 does not support ocmem */
- return 0;
-}
-
static int q6_hfi_iommu_get_domain_partition(void *dev, u32 flags,
u32 buffer_type, int *domain, int *partition)
{
@@ -1140,6 +1132,15 @@
if (!device)
return -EINVAL;
+ if (!device->resources.fw.cookie)
+ device->resources.fw.cookie = subsystem_get("adsp");
+
+ if (IS_ERR_OR_NULL(device->resources.fw.cookie)) {
+ dprintk(VIDC_ERR, "Failed to download firmware\n");
+ rc = -ENOMEM;
+ goto fail_subsystem_get;
+ }
+
/*Set Q6 to loaded state*/
apr_set_q6_state(APR_SUBSYS_LOADED);
@@ -1164,9 +1165,11 @@
fail_iommu_attach:
apr_deregister(device->apr);
+ device->apr = NULL;
fail_apr_register:
subsystem_put(device->resources.fw.cookie);
device->resources.fw.cookie = NULL;
+fail_subsystem_get:
return rc;
}
@@ -1176,8 +1179,17 @@
if (!device)
return;
- if (device->apr)
- apr_deregister(device->apr);
+
+ if (device->resources.fw.cookie) {
+ subsystem_put(device->resources.fw.cookie);
+ device->resources.fw.cookie = NULL;
+ }
+
+ if (device->apr) {
+ if (apr_deregister(device->apr))
+ dprintk(VIDC_ERR, "Failed to deregister APR");
+ device->apr = NULL;
+ }
}
static int q6_hfi_get_fw_info(void *dev, enum fw_info info)
@@ -1225,7 +1237,6 @@
hdev->unset_ocmem = q6_hfi_unset_ocmem;
hdev->alloc_ocmem = q6_hfi_alloc_ocmem;
hdev->free_ocmem = q6_hfi_free_ocmem;
- hdev->is_ocmem_present = q6_hfi_is_ocmem_present;
hdev->iommu_get_domain_partition = q6_hfi_iommu_get_domain_partition;
hdev->load_fw = q6_hfi_load_fw;
hdev->unload_fw = q6_hfi_unload_fw;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 424af64..995c655 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1941,8 +1941,11 @@
sizeof(clock[VCODEC_AHB_CLK].name));
strlcpy(clock[VCODEC_AXI_CLK].name, "bus_clk",
sizeof(clock[VCODEC_AXI_CLK].name));
- strlcpy(clock[VCODEC_OCMEM_CLK].name, "mem_clk",
- sizeof(clock[VCODEC_OCMEM_CLK].name));
+
+ if (res->has_ocmem) {
+ strlcpy(clock[VCODEC_OCMEM_CLK].name, "mem_clk",
+ sizeof(clock[VCODEC_OCMEM_CLK].name));
+ }
clock[VCODEC_CLK].count = res->load_freq_tbl_size;
memcpy((void *)clock[VCODEC_CLK].load_freq_tbl, res->load_freq_tbl,
@@ -1962,6 +1965,8 @@
}
for (i = 0; i < VCODEC_MAX_CLKS; i++) {
+ if (i == VCODEC_OCMEM_CLK && !res->has_ocmem)
+ continue;
cl = &device->resources.clock[i];
if (!cl->clk) {
cl->clk = devm_clk_get(&res->pdev->dev, cl->name);
@@ -1976,6 +1981,8 @@
if (i < VCODEC_MAX_CLKS) {
for (--i; i >= 0; i--) {
+ if (i == VCODEC_OCMEM_CLK && !res->has_ocmem)
+ continue;
cl = &device->resources.clock[i];
clk_put(cl->clk);
}
@@ -1991,8 +1998,12 @@
dprintk(VIDC_ERR, "Invalid args\n");
return;
}
- for (i = 0; i < VCODEC_MAX_CLKS; i++)
+
+ for (i = 0; i < VCODEC_MAX_CLKS; i++) {
+ if (i == VCODEC_OCMEM_CLK && !device->res->has_ocmem)
+ continue;
clk_put(device->resources.clock[i].clk);
+ }
}
static inline void venus_hfi_disable_clks(struct venus_hfi_device *device)
{
@@ -2003,7 +2014,9 @@
return;
}
- for (i = 0; i < VCODEC_MAX_CLKS; i++) {
+ for (i = VCODEC_CLK; i < VCODEC_MAX_CLKS; i++) {
+ if (i == VCODEC_OCMEM_CLK && !device->res->has_ocmem)
+ continue;
cl = &device->resources.clock[i];
clk_disable_unprepare(cl->clk);
}
@@ -2018,7 +2031,10 @@
dprintk(VIDC_ERR, "Invalid params: %p\n", device);
return -EINVAL;
}
- for (i = 0; i < VCODEC_MAX_CLKS; i++) {
+
+ for (i = VCODEC_CLK; i < VCODEC_MAX_CLKS; i++) {
+ if (i == VCODEC_OCMEM_CLK && !device->res->has_ocmem)
+ continue;
cl = &device->resources.clock[i];
rc = clk_prepare_enable(cl->clk);
if (rc) {
@@ -2151,18 +2167,27 @@
dprintk(VIDC_ERR, "Failed to register bus scale client\n");
goto err_init_bus;
}
- bus_info->ocmem_handle[MSM_VIDC_ENCODER] =
- msm_bus_scale_register_client(&device->res->bus_pdata[0]);
- if (!bus_info->ocmem_handle[MSM_VIDC_ENCODER]) {
- dprintk(VIDC_ERR, "Failed to register bus scale client\n");
- goto err_init_bus;
+
+ if (device->res->has_ocmem) {
+ bus_info->ocmem_handle[MSM_VIDC_ENCODER] =
+ msm_bus_scale_register_client(
+ &device->res->bus_pdata[0]);
+ if (!bus_info->ocmem_handle[MSM_VIDC_ENCODER]) {
+ dprintk(VIDC_ERR,
+ "Failed to register bus scale client\n");
+ goto err_init_bus;
+ }
+
+ bus_info->ocmem_handle[MSM_VIDC_DECODER] =
+ msm_bus_scale_register_client(
+ &device->res->bus_pdata[1]);
+ if (!bus_info->ocmem_handle[MSM_VIDC_DECODER]) {
+ dprintk(VIDC_ERR,
+ "Failed to register bus scale client\n");
+ goto err_init_bus;
+ }
}
- bus_info->ocmem_handle[MSM_VIDC_DECODER] =
- msm_bus_scale_register_client(&device->res->bus_pdata[1]);
- if (!bus_info->ocmem_handle[MSM_VIDC_DECODER]) {
- dprintk(VIDC_ERR, "Failed to register bus scale client\n");
- goto err_init_bus;
- }
+
return rc;
err_init_bus:
venus_hfi_deinit_bus(device);
@@ -2406,18 +2431,6 @@
return rc;
}
-static int venus_hfi_is_ocmem_present(void *dev)
-{
- struct venus_hfi_device *device = dev;
- if (!device) {
- dprintk(VIDC_ERR, "%s invalid device handle %p",
- __func__, device);
- return -EINVAL;
- }
-
- return device->resources.ocmem.buf ? 1 : 0;
-}
-
static void venus_hfi_deinit_ocmem(struct venus_hfi_device *device)
{
if (device->resources.ocmem.handle)
@@ -2451,7 +2464,9 @@
goto err_register_iommu_domain;
}
- venus_hfi_ocmem_init(device);
+ if (res->has_ocmem)
+ venus_hfi_ocmem_init(device);
+
return rc;
err_register_iommu_domain:
@@ -2464,7 +2479,8 @@
static void venus_hfi_deinit_resources(struct venus_hfi_device *device)
{
- venus_hfi_deinit_ocmem(device);
+ if (device->res->has_ocmem)
+ venus_hfi_deinit_ocmem(device);
venus_hfi_deregister_iommu_domains(device);
venus_hfi_deinit_bus(device);
venus_hfi_deinit_clocks(device);
@@ -2588,7 +2604,7 @@
}
}
- rc = scm_call(SCM_SVC_CP, TZBSP_MEM_PROTECT_VIDEO_VAR, &memprot,
+ rc = scm_call(SCM_SVC_MP, TZBSP_MEM_PROTECT_VIDEO_VAR, &memprot,
sizeof(memprot), &resp, sizeof(resp));
if (rc)
dprintk(VIDC_ERR,
@@ -2658,8 +2674,8 @@
}
if (device->resources.fw.cookie) {
venus_hfi_disable_clks(device);
- subsystem_put(device->resources.fw.cookie);
venus_hfi_iommu_detach(device);
+ subsystem_put(device->resources.fw.cookie);
device->resources.fw.cookie = NULL;
}
}
@@ -2839,7 +2855,6 @@
hdev->unset_ocmem = venus_hfi_unset_ocmem;
hdev->alloc_ocmem = venus_hfi_alloc_ocmem;
hdev->free_ocmem = venus_hfi_free_ocmem;
- hdev->is_ocmem_present = venus_hfi_is_ocmem_present;
hdev->iommu_get_domain_partition = venus_hfi_iommu_get_domain_partition;
hdev->load_fw = venus_hfi_load_fw;
hdev->unload_fw = venus_hfi_unload_fw;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 0c88866..5ad0bb5 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -1047,7 +1047,6 @@
int (*unset_ocmem)(void *dev);
int (*alloc_ocmem)(void *dev, unsigned long size);
int (*free_ocmem)(void *dev);
- int (*is_ocmem_present)(void *dev);
int (*iommu_get_domain_partition)(void *dev, u32 flags, u32 buffer_type,
int *domain_num, int *partition_num);
int (*load_fw)(void *dev);
diff --git a/drivers/media/platform/msm/wfd/wfd-ioctl.c b/drivers/media/platform/msm/wfd/wfd-ioctl.c
index 9fb7c6d..3d11400 100644
--- a/drivers/media/platform/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/platform/msm/wfd/wfd-ioctl.c
@@ -271,11 +271,15 @@
mmap_context.ion_client = wfd_dev->ion_client;
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
ENC_MMAP, &mmap_context);
- if (rc || !enc_mregion->paddr) {
+ if (rc) {
WFD_MSG_ERR("Failed to map input memory\n");
goto alloc_fail;
+ } else if (!enc_mregion->paddr) {
+ WFD_MSG_ERR("ENC_MMAP returned success" \
+ "but failed to map input memory\n");
+ rc = -EINVAL;
+ goto alloc_fail;
}
-
WFD_MSG_DBG("NOTE: enc paddr = [%p->%p], kvaddr = %p\n",
enc_mregion->paddr, (int8_t *)
enc_mregion->paddr + enc_mregion->size,
@@ -303,7 +307,7 @@
rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
MDP_MMAP, (void *)&mmap_context);
- if (rc || !mdp_mregion->paddr) {
+ if (rc) {
WFD_MSG_ERR(
"Failed to map to mdp, rc = %d, paddr = 0x%p\n",
rc, mdp_mregion->paddr);
@@ -311,6 +315,14 @@
mdp_mregion->paddr = NULL;
mdp_mregion->ion_handle = NULL;
goto mdp_mmap_fail;
+ } else if (!mdp_mregion->paddr) {
+ WFD_MSG_ERR("MDP_MMAP returned success" \
+ "but failed to map to MDP\n");
+ rc = -EINVAL;
+ mdp_mregion->kvaddr = NULL;
+ mdp_mregion->paddr = NULL;
+ mdp_mregion->ion_handle = NULL;
+ goto mdp_mmap_fail;
}
mdp_buf.inst = inst->mdp_inst;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index cfa5487..e8096d1 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -523,7 +523,7 @@
TI wl127x chips.
config TSIF
- depends on ARCH_MSM
+ depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064
tristate "TSIF (Transport Stream InterFace) support"
default n
---help---
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
index f7e8c9f..c37a4a4 100644
--- a/drivers/mtd/devices/msm_qpic_nand.c
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2264,19 +2264,22 @@
*/
bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
+ rc = sps_phy2h(bam.phys_addr, &nand_info->sps.bam_handle);
+ if (!rc)
+ goto init_sps_ep;
rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
if (rc) {
- pr_err("sps_register_bam_device() failed with %d\n", rc);
+ pr_err("%s: sps_register_bam_device() failed with %d\n",
+ __func__, rc);
goto out;
}
- pr_info("BAM device registered: bam_handle 0x%x\n",
- nand_info->sps.bam_handle);
-
+ pr_info("%s: BAM device registered: bam_handle 0x%x\n",
+ __func__, nand_info->sps.bam_handle);
+init_sps_ep:
rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
SPS_DATA_PROD_PIPE_INDEX);
if (rc)
- goto unregister_bam;
-
+ goto out;
rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
SPS_DATA_CONS_PIPE_INDEX);
if (rc)
@@ -2291,22 +2294,20 @@
msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
deinit_data_prod:
msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
-unregister_bam:
- sps_deregister_bam_device(nand_info->sps.bam_handle);
out:
return rc;
}
/*
- * This function de-registers BAM device, disconnects and frees its end points
- * for all the pipes.
+ * This function disconnects and frees the end points for all the pipes.
+ * Since the BAM is a shared resource, it is not deregistered as its handle
+ * might be in use with LCDC.
*/
static void msm_nand_bam_free(struct msm_nand_info *nand_info)
{
msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
- sps_deregister_bam_device(nand_info->sps.bam_handle);
}
/* This function enables DMA support for the NANDc in BAM mode. */
diff --git a/drivers/net/wireless/wcnss/wcnss_vreg.c b/drivers/net/wireless/wcnss/wcnss_vreg.c
index 0aa9677..3f0a887 100644
--- a/drivers/net/wireless/wcnss/wcnss_vreg.c
+++ b/drivers/net/wireless/wcnss/wcnss_vreg.c
@@ -28,8 +28,6 @@
static void __iomem *msm_wcnss_base;
-static struct msm_xo_voter *wlan_clock;
-static const char *id = "WLAN";
static LIST_HEAD(power_on_lock_list);
static DEFINE_MUTEX(list_lock);
static DEFINE_SEMAPHORE(wcnss_power_on_lock);
@@ -124,6 +122,7 @@
void __iomem *pmu_conf_reg;
void __iomem *spare_reg;
struct clk *clk;
+ struct clk *clk_rf = NULL;
if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
wcnss_phys_addr = MSM_PRONTO_PHYS;
@@ -136,6 +135,15 @@
pr_err("Couldn't get xo clock\n");
return PTR_ERR(clk);
}
+
+ if (!use_48mhz_xo) {
+ clk_rf = clk_get(dev, "rf_clk");
+ if (IS_ERR(clk_rf)) {
+ pr_err("Couldn't get rf_clk\n");
+ clk_put(clk);
+ return PTR_ERR(clk_rf);
+ }
+ }
} else {
wcnss_phys_addr = MSM_RIVA_PHYS;
pmu_offset = RIVA_PMU_OFFSET;
@@ -162,6 +170,7 @@
pr_err("clk enable failed\n");
goto fail;
}
+
/* NV bit is set to indicate that platform driver is capable
* of doing NV download. SSR should not set NV bit; during
* SSR NV bin is downloaded by WLAN driver.
@@ -205,40 +214,28 @@
clk_disable_unprepare(clk);
if (!use_48mhz_xo) {
- wlan_clock = msm_xo_get(MSM_XO_TCXO_A2, id);
- if (IS_ERR(wlan_clock)) {
- rc = PTR_ERR(wlan_clock);
- pr_err("Failed to get MSM_XO_TCXO_A2 voter (%d)\n",
- rc);
+ rc = clk_prepare_enable(clk_rf);
+ if (rc) {
+ pr_err("clk_rf enable failed\n");
goto fail;
}
-
- rc = msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_ON);
- if (rc < 0) {
- pr_err("Configuring MSM_XO_MODE_ON failed (%d)\n",
- rc);
- goto msm_xo_vote_fail;
- }
}
- } else {
- if (wlan_clock != NULL && !use_48mhz_xo) {
- rc = msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_OFF);
- if (rc < 0)
- pr_err("Configuring MSM_XO_MODE_OFF failed (%d)\n",
- rc);
- }
- }
-
+ } else if (clk_rf != NULL && !use_48mhz_xo)
+ clk_disable_unprepare(clk_rf);
/* Add some delay for XO to settle */
msleep(20);
clk_put(clk);
+
+ if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
+ if (!use_48mhz_xo)
+ clk_put(clk_rf);
+ }
+
return rc;
-
-msm_xo_vote_fail:
- msm_xo_put(wlan_clock);
-
fail:
+ if (clk_rf != NULL)
+ clk_put(clk_rf);
clk_put(clk);
return rc;
}
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index ee76304..c5ebfd2 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -48,6 +48,10 @@
module_param(has_48mhz_xo, int, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(has_48mhz_xo, "Is an external 48 MHz XO present");
+static int do_not_cancel_vote = WCNSS_CONFIG_UNSPECIFIED;
+module_param(do_not_cancel_vote, int, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(do_not_cancel_vote, "Do not cancel votes for wcnss");
+
static DEFINE_SPINLOCK(reg_spinlock);
#define MSM_RIVA_PHYS 0x03204000
@@ -477,6 +481,11 @@
static void wcnss_post_bootup(struct work_struct *work)
{
+ if (do_not_cancel_vote == 1) {
+ pr_info("%s: Keeping APPS vote for Iris & WCNSS\n", __func__);
+ return;
+ }
+
pr_info("%s: Cancel APPS vote for Iris & WCNSS\n", __func__);
/* Since WCNSS is up, cancel any APPS vote for Iris & WCNSS VREGs */
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index 42a0016..db7f7f0 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -119,7 +119,7 @@
module_param(ip4_rt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip4_rt_tbl_lcl,
"where ip4 rt tables reside 1-local; 0-system");
-static bool ip6_rt_tbl_lcl = 1;
+static bool ip6_rt_tbl_lcl;
module_param(ip6_rt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip6_rt_tbl_lcl,
"where ip6 rt tables reside 1-local; 0-system");
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
index b57585c..0ee7f27 100644
--- a/drivers/platform/msm/ipa/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -310,6 +310,8 @@
IPA_HOLB_TMR_VAL);
}
+ IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
+
return 0;
sps_connect_fail:
@@ -345,6 +347,7 @@
return result;
}
EXPORT_SYMBOL(ipa_connect);
+
/**
* ipa_disconnect() - low-level IPA client disconnect
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
@@ -420,6 +423,8 @@
ipa_disable_clks();
}
+ IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
return 0;
}
EXPORT_SYMBOL(ipa_disconnect);
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
index 1605ed2..51a950d 100644
--- a/drivers/platform/msm/ipa/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -18,6 +18,8 @@
#define IPA_MAX_MSG_LEN 4096
+#define IPA_DBG_CNTR_ON 127265
+#define IPA_DBG_CNTR_OFF 127264
const char *ipa_client_name[] = {
__stringify(IPA_CLIENT_HSIC1_PROD),
@@ -76,6 +78,19 @@
__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
};
+const char *ipa_event_name[] = {
+ __stringify(WLAN_CLIENT_CONNECT),
+ __stringify(WLAN_CLIENT_DISCONNECT),
+ __stringify(WLAN_CLIENT_POWER_SAVE_MODE),
+ __stringify(WLAN_CLIENT_NORMAL_MODE),
+ __stringify(SW_ROUTING_ENABLE),
+ __stringify(SW_ROUTING_DISABLE),
+ __stringify(WLAN_AP_CONNECT),
+ __stringify(WLAN_AP_DISCONNECT),
+ __stringify(WLAN_STA_CONNECT),
+ __stringify(WLAN_STA_DISCONNECT),
+};
+
static struct dentry *dent;
static struct dentry *dfile_gen_reg;
static struct dentry *dfile_ep_reg;
@@ -85,6 +100,8 @@
static struct dentry *dfile_ip4_flt;
static struct dentry *dfile_ip6_flt;
static struct dentry *dfile_stats;
+static struct dentry *dfile_dbg_cnt;
+static struct dentry *dfile_msg;
static char dbg_buff[IPA_MAX_MSG_LEN];
static s8 ep_reg_idx;
@@ -199,7 +216,10 @@
"IPA_ENDP_INIT_HDR_%u=0x%x\n"
"IPA_ENDP_INIT_MODE_%u=0x%x\n"
"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
- "IPA_ENDP_INIT_ROUTE_%u=0x%x\n",
+ "IPA_ENDP_INIT_ROUTE_%u=0x%x\n"
+ "IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+ "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+ "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n",
i, ipa_read_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_NAT_n_OFST_v2(i)),
i, ipa_read_reg(ipa_ctx->mmio,
@@ -209,7 +229,14 @@
i, ipa_read_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_AGGR_n_OFST_v2(i)),
i, ipa_read_reg(ipa_ctx->mmio,
- IPA_ENDP_INIT_ROUTE_n_OFST_v2(i)));
+ IPA_ENDP_INIT_ROUTE_n_OFST_v2(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_CTRL_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OFST(i))
+ );
}
*ppos = pos;
@@ -516,6 +543,10 @@
int nbytes;
int i;
int cnt = 0;
+ uint connect = 0;
+
+ for (i = 0; i < IPA_NUM_PIPES; i++)
+ connect |= (ipa_ctx->ep[i].valid << i);
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"sw_tx=%u\n"
@@ -523,13 +554,17 @@
"rx=%u\n"
"rx_repl_repost=%u\n"
"x_intr_repost=%u\n"
- "rx_q_len=%u\n",
+ "rx_q_len=%u\n"
+ "act_clnt=%u\n"
+ "con_clnt_bmap=0x%x\n",
ipa_ctx->stats.tx_sw_pkts,
ipa_ctx->stats.tx_hw_pkts,
ipa_ctx->stats.rx_pkts,
ipa_ctx->stats.rx_repl_repost,
ipa_ctx->stats.x_intr_repost,
- ipa_ctx->stats.rx_q_len);
+ ipa_ctx->stats.rx_q_len,
+ atomic_read(&ipa_ctx->ipa_active_clients),
+ connect);
cnt += nbytes;
for (i = 0; i < MAX_NUM_EXCP; i++) {
@@ -560,6 +595,64 @@
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
+static ssize_t ipa_write_dbg_cnt(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ u32 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtou32(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ if (option == 1)
+ ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_n_OFST(0),
+ IPA_DBG_CNTR_ON);
+ else
+ ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_n_OFST(0),
+ IPA_DBG_CNTR_OFF);
+
+ return count;
+}
+
+static ssize_t ipa_read_dbg_cnt(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_DEBUG_CNT_REG_0=0x%x\n",
+ ipa_read_reg(ipa_ctx->mmio,
+ IPA_DEBUG_CNT_REG_n_OFST(0)));
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_read_msg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < IPA_EVENT_MAX; i++) {
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "msg[%u:%27s] W:%u R:%u\n", i,
+ ipa_event_name[i],
+ ipa_ctx->stats.msg_w[i],
+ ipa_ctx->stats.msg_r[i]);
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
const struct file_operations ipa_gen_reg_ops = {
.read = ipa_read_gen_reg,
@@ -588,6 +681,15 @@
.read = ipa_read_stats,
};
+const struct file_operations ipa_msg_ops = {
+ .read = ipa_read_msg,
+};
+
+const struct file_operations ipa_dbg_cnt_ops = {
+ .read = ipa_read_dbg_cnt,
+ .write = ipa_write_dbg_cnt,
+};
+
void ipa_debugfs_init(void)
{
const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
@@ -656,6 +758,20 @@
goto fail;
}
+ dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
+ &ipa_dbg_cnt_ops);
+ if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
+ IPAERR("fail to create file for debug_fs dbg_cnt\n");
+ goto fail;
+ }
+
+ dfile_msg = debugfs_create_file("msg", read_only_mode, dent, 0,
+ &ipa_msg_ops);
+ if (!dfile_msg || IS_ERR(dfile_msg)) {
+ IPAERR("fail to create file for debug_fs msg\n");
+ goto fail;
+ }
+
return;
fail:
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
index 971cf5e..dabf86f 100644
--- a/drivers/platform/msm/ipa/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -900,6 +900,8 @@
*clnt_hdl = ipa_ep_idx;
+ IPADBG("client %d (ep: %d) connected\n", sys_in->client, ipa_ep_idx);
+
return 0;
fail_register_event:
@@ -936,6 +938,9 @@
ipa_ctx->ep[clnt_hdl].connect.desc.phys_base);
sps_free_endpoint(ipa_ctx->ep[clnt_hdl].ep_hdl);
memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+
+ IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
return 0;
}
EXPORT_SYMBOL(ipa_teardown_sys_pipe);
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index 45f7b09..d79504e 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -529,6 +529,8 @@
u32 rx_repl_repost;
u32 x_intr_repost;
u32 rx_q_len;
+ u32 msg_w[IPA_EVENT_MAX];
+ u32 msg_r[IPA_EVENT_MAX];
};
/**
diff --git a/drivers/platform/msm/ipa/ipa_intf.c b/drivers/platform/msm/ipa/ipa_intf.c
index 9876650..0f41d2c 100644
--- a/drivers/platform/msm/ipa/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_intf.c
@@ -268,6 +268,11 @@
return -EINVAL;
}
+ if (meta->msg_type >= IPA_EVENT_MAX) {
+ IPAERR("unsupported message type %d\n", meta->msg_type);
+ return -EINVAL;
+ }
+
msg = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL);
if (msg == NULL) {
IPAERR("fail to alloc ipa_msg container\n");
@@ -281,6 +286,7 @@
mutex_lock(&ipa_ctx->msg_lock);
list_add_tail(&msg->link, &ipa_ctx->msg_list);
mutex_unlock(&ipa_ctx->msg_lock);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.msg_w[meta->msg_type]);
wake_up(&ipa_ctx->msg_waitq);
@@ -424,6 +430,8 @@
msg->callback(msg->buff, msg->meta.msg_len,
msg->meta.msg_type);
}
+ IPA_STATS_INC_CNT(
+ ipa_ctx->stats.msg_r[msg->meta.msg_type]);
}
ret = -EAGAIN;
diff --git a/drivers/platform/msm/ipa/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_ram_mmap.h
index 7e12b6a..d120f37 100644
--- a/drivers/platform/msm/ipa/ipa_ram_mmap.h
+++ b/drivers/platform/msm/ipa/ipa_ram_mmap.h
@@ -19,17 +19,18 @@
*/
#define IPA_RAM_NAT_OFST 0
-#define IPA_RAM_NAT_SIZE 2048
-#define IPA_RAM_HDR_OFST 2048
-#define IPA_RAM_HDR_SIZE 440
+#define IPA_RAM_NAT_SIZE 0
+#define IPA_RAM_HDR_OFST (IPA_RAM_NAT_OFST + IPA_RAM_NAT_SIZE)
+#define IPA_RAM_HDR_SIZE 1288
#define IPA_RAM_V4_FLT_OFST (IPA_RAM_HDR_OFST + IPA_RAM_HDR_SIZE)
-#define IPA_RAM_V4_FLT_SIZE 1024
+#define IPA_RAM_V4_FLT_SIZE 1420
#define IPA_RAM_V4_RT_OFST (IPA_RAM_V4_FLT_OFST + IPA_RAM_V4_FLT_SIZE)
-#define IPA_RAM_V4_RT_SIZE 1024
+#define IPA_RAM_V4_RT_SIZE 2192
#define IPA_RAM_V6_FLT_OFST (IPA_RAM_V4_RT_OFST + IPA_RAM_V4_RT_SIZE)
-#define IPA_RAM_V6_FLT_SIZE 1024
+#define IPA_RAM_V6_FLT_SIZE 1228
#define IPA_RAM_V6_RT_OFST (IPA_RAM_V6_FLT_OFST + IPA_RAM_V6_FLT_SIZE)
-#define IPA_RAM_V6_RT_SIZE 1024
+#define IPA_RAM_V6_RT_SIZE 528
#define IPA_RAM_END_OFST (IPA_RAM_V6_RT_OFST + IPA_RAM_V6_RT_SIZE)
+#define IPA_RAM_V6_RT_SIZE_DDR 15764
#endif /* _IPA_RAM_MMAP_H_ */
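Since every offset in the new layout is defined off the previous region, the concrete byte positions are only implied. A quick sketch that recomputes them from the sizes above (values taken directly from this header):

/* Sketch: recompute the IPA local-RAM offsets implied by the sizes above.
 * Expected result: NAT 0, HDR 0, V4_FLT 1288, V4_RT 2708, V6_FLT 4900,
 * V6_RT 6128, END 6656. */
#include <stdio.h>

int main(void)
{
	const char *name[] = { "NAT", "HDR", "V4_FLT", "V4_RT", "V6_FLT", "V6_RT" };
	unsigned int size[] = { 0, 1288, 1420, 2192, 1228, 528 };
	unsigned int ofst = 0;
	int i;

	for (i = 0; i < 6; i++) {
		printf("%-7s ofst=%4u size=%4u\n", name[i], ofst, size[i]);
		ofst += size[i];
	}
	printf("END     ofst=%4u\n", ofst);	/* 6656 */
	return 0;
}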
diff --git a/drivers/platform/msm/ipa/ipa_reg.h b/drivers/platform/msm/ipa/ipa_reg.h
index 03d1972..ffa81dc 100644
--- a/drivers/platform/msm/ipa/ipa_reg.h
+++ b/drivers/platform/msm/ipa/ipa_reg.h
@@ -183,6 +183,27 @@
#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0x1ff
#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0
+#define IPA_DEBUG_CNT_REG_n_OFST(n) (0x00000340 + 0x4 * (n))
+#define IPA_DEBUG_CNT_REG_n_RMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_n_MAXn 15
+#define IPA_DEBUG_CNT_REG_n_DBG_CNT_REG_BMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_n_DBG_CNT_REG_SHFT 0x0
+
+#define IPA_DEBUG_CNT_CTRL_n_OFST(n) (0x00000380 + 0x4 * (n))
+#define IPA_DEBUG_CNT_CTRL_n_RMSK 0x1ff1f171
+#define IPA_DEBUG_CNT_CTRL_n_MAXn 15
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x1ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK 0x100
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT 0x8
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK 0x70
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT 0x4
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK 0x1
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT 0x0
+
+
#endif
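The debugfs change earlier in this patch writes the magic values IPA_DBG_CNTR_ON (127265) and IPA_DBG_CNTR_OFF (127264) into IPA_DEBUG_CNT_CTRL_0. Decoded against the field masks added above, 127265 is 0x1f121: source-pipe field 0x1f, product 1, type 2, enable 1; the OFF value differs only in the enable bit. The field semantics are inferred from the macro names, so treat that interpretation as an assumption; the decode itself is plain mask arithmetic:

/* Sketch: decode IPA_DBG_CNTR_ON/OFF (ipa_debugfs.c) against the
 * IPA_DEBUG_CNT_CTRL_n field masks above. Field meanings inferred from
 * the macro names only. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int on = 127265, off = 127264;

	assert(on == 0x1f121 && off == 0x1f120);
	printf("rule=%u pipe=0x%x product=%u type=%u en=%u/%u\n",
	       (on & 0x1ff00000) >> 20,	/* rule index: 0 */
	       (on & 0x1f000) >> 12,	/* source pipe: 0x1f */
	       (on & 0x100) >> 8,	/* product: 1 */
	       (on & 0x70) >> 4,	/* type: 2 */
	       on & 0x1, off & 0x1);	/* enable: 1 vs 0 */
	return 0;
}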
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
index 7d509c6..fcc5e58 100644
--- a/drivers/platform/msm/ipa/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -361,7 +361,8 @@
avail = IPA_RAM_V4_RT_SIZE;
size = sizeof(struct ipa_ip_v4_routing_init);
} else {
- avail = IPA_RAM_V6_RT_SIZE;
+ avail = ipa_ctx->ip6_rt_tbl_lcl ? IPA_RAM_V6_RT_SIZE :
+ IPA_RAM_V6_RT_SIZE_DDR;
size = sizeof(struct ipa_ip_v6_routing_init);
}
cmd = kmalloc(size, GFP_KERNEL);
diff --git a/drivers/platform/msm/ipa/teth_bridge.c b/drivers/platform/msm/ipa/teth_bridge.c
index 29253cd..5b26e41 100644
--- a/drivers/platform/msm/ipa/teth_bridge.c
+++ b/drivers/platform/msm/ipa/teth_bridge.c
@@ -29,7 +29,6 @@
#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge"
-#ifdef TETH_DEBUG
#define TETH_DBG(fmt, args...) \
pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args)
@@ -37,12 +36,6 @@
pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__)
#define TETH_DBG_FUNC_EXIT() \
pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__)
-#else
-#define TETH_DBG(fmt, args...)
-#define TETH_DBG_FUNC_ENTRY()
-#define TETH_DBG_FUNC_EXIT()
-#endif
-
#define TETH_ERR(fmt, args...) \
pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
@@ -62,6 +55,9 @@
#define ETHERTYPE_IPV4 0x0800
#define ETHERTYPE_IPV6 0x86DD
+#define TETH_AGGR_MAX_DATAGRAMS_DEFAULT 16
+#define TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT (8*1024)
+
struct mac_addresses_type {
u8 host_pc_mac_addr[ETH_ALEN];
bool host_pc_mac_addr_known;
@@ -905,7 +901,6 @@
int teth_bridge_init(ipa_notify_cb *usb_notify_cb_ptr, void **private_data_ptr)
{
int res = 0;
- struct ipa_rm_create_params bridge_prod_params;
TETH_DBG_FUNC_ENTRY();
if (usb_notify_cb_ptr == NULL) {
@@ -918,48 +913,34 @@
*private_data_ptr = NULL;
/* Build IPA Resource manager dependency graph */
- bridge_prod_params.name = IPA_RM_RESOURCE_BRIDGE_PROD;
- bridge_prod_params.reg_params.user_data = NULL;
- bridge_prod_params.reg_params.notify_cb = bridge_prod_notify_cb;
- res = ipa_rm_create_resource(&bridge_prod_params);
- if (res) {
- TETH_ERR("ipa_rm_create_resource() failed\n");
- goto bail;
- }
-
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
IPA_RM_RESOURCE_USB_CONS);
- if (res) {
+ if (res && res != -EEXIST) {
TETH_ERR("ipa_rm_add_dependency() failed\n");
goto bail;
}
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
IPA_RM_RESOURCE_A2_CONS);
- if (res) {
+ if (res && res != -EEXIST) {
TETH_ERR("ipa_rm_add_dependency() failed\n");
goto fail_add_dependency_1;
}
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_A2_CONS);
- if (res) {
+ if (res && res != -EEXIST) {
TETH_ERR("ipa_rm_add_dependency() failed\n");
goto fail_add_dependency_2;
}
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_A2_PROD,
IPA_RM_RESOURCE_USB_CONS);
- if (res) {
+ if (res && res != -EEXIST) {
TETH_ERR("ipa_rm_add_dependency() failed\n");
goto fail_add_dependency_3;
}
- init_completion(&teth_ctx->is_bridge_prod_up);
- init_completion(&teth_ctx->is_bridge_prod_down);
-
- /* The default link protocol is Ethernet */
- teth_ctx->link_protocol = TETH_LINK_PROTOCOL_ETHERNET;
goto bail;
fail_add_dependency_3:
@@ -1001,6 +982,20 @@
if (res == -EINPROGRESS)
wait_for_completion(&teth_ctx->is_bridge_prod_down);
+ /* Initialize statistics */
+ memset(&teth_ctx->stats, 0, sizeof(teth_ctx->stats));
+
+ /* Delete IPA Resource manager dependency graph */
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+ res |= ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+ IPA_RM_RESOURCE_A2_CONS);
+ res |= ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_A2_CONS);
+ res |= ipa_rm_delete_dependency(IPA_RM_RESOURCE_A2_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+ if (res)
+ TETH_ERR("Failed deleting ipa_rm dependency.\n");
bail:
TETH_DBG_FUNC_EXIT();
return res;
@@ -1084,9 +1079,11 @@
static void set_aggr_default_params(struct teth_aggr_params_link *params)
{
if (params->max_datagrams == 0)
- params->max_datagrams = 16;
+ params->max_datagrams =
+ TETH_AGGR_MAX_DATAGRAMS_DEFAULT;
if (params->max_transfer_size_byte == 0)
- params->max_transfer_size_byte = 16*1024;
+ params->max_transfer_size_byte =
+ TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT;
}
static void teth_set_bridge_mode(enum teth_link_protocol_type link_protocol)
@@ -1096,11 +1093,38 @@
memset(&teth_ctx->mac_addresses, 0, sizeof(teth_ctx->mac_addresses));
}
+int teth_bridge_set_aggr_params(struct teth_aggr_params *aggr_params)
+{
+ int res;
+
+ if (!aggr_params) {
+ TETH_ERR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ memcpy(&teth_ctx->aggr_params,
+ aggr_params,
+ sizeof(struct teth_aggr_params));
+ set_aggr_default_params(&teth_ctx->aggr_params.dl);
+ set_aggr_default_params(&teth_ctx->aggr_params.ul);
+
+ teth_ctx->aggr_params_known = true;
+ res = teth_set_aggregation();
+ if (res) {
+ TETH_ERR("Failed setting aggregation params\n");
+ res = -EFAULT;
+ }
+
+ return res;
+}
+EXPORT_SYMBOL(teth_bridge_set_aggr_params);
+
static long teth_bridge_ioctl(struct file *filp,
unsigned int cmd,
unsigned long arg)
{
int res = 0;
+ struct teth_aggr_params aggr_params;
TETH_DBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
@@ -1119,7 +1143,7 @@
case TETH_BRIDGE_IOC_SET_AGGR_PARAMS:
TETH_DBG("TETH_BRIDGE_IOC_SET_AGGR_PARAMS ioctl called\n");
- res = copy_from_user(&teth_ctx->aggr_params,
+ res = copy_from_user(&aggr_params,
(struct teth_aggr_params *)arg,
sizeof(struct teth_aggr_params));
if (res) {
@@ -1127,9 +1151,8 @@
res = -EFAULT;
break;
}
- set_aggr_default_params(&teth_ctx->aggr_params.dl);
- set_aggr_default_params(&teth_ctx->aggr_params.ul);
- teth_ctx->aggr_params_known = true;
+
+ res = teth_bridge_set_aggr_params(&aggr_params);
break;
case TETH_BRIDGE_IOC_GET_AGGR_PARAMS:
@@ -1201,12 +1224,10 @@
teth_ctx->aggr_caps->num_protocols = NUM_PROTOCOLS;
teth_ctx->aggr_caps->prot_caps[0].aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
- teth_ctx->aggr_caps->prot_caps[0].max_datagrams = 16;
- teth_ctx->aggr_caps->prot_caps[0].max_transfer_size_byte = 16*1024;
+ set_aggr_default_params(&teth_ctx->aggr_caps->prot_caps[0]);
teth_ctx->aggr_caps->prot_caps[1].aggr_prot = TETH_AGGR_PROTOCOL_TLP;
- teth_ctx->aggr_caps->prot_caps[1].max_datagrams = 16;
- teth_ctx->aggr_caps->prot_caps[1].max_transfer_size_byte = 16*1024;
+ set_aggr_default_params(&teth_ctx->aggr_caps->prot_caps[1]);
}
void teth_bridge_get_client_handles(u32 *producer_handle,
@@ -1287,7 +1308,7 @@
aggr_prot_to_str(teth_ctx->aggr_params.ul.aggr_prot,
aggr_str,
sizeof(aggr_str)-1);
- nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN,
+ nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
"Aggregation parameters for uplink:\n");
nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
" Aggregation protocol: %s\n",
@@ -1493,6 +1514,7 @@
int teth_bridge_driver_init(void)
{
int res;
+ struct ipa_rm_create_params bridge_prod_params;
TETH_DBG("Tethering bridge driver init\n");
teth_ctx = kzalloc(sizeof(*teth_ctx), GFP_KERNEL);
@@ -1535,6 +1557,22 @@
teth_ctx->comp_hw_bridge_in_progress = false;
teth_debugfs_init();
+
+ /* Create BRIDGE_PROD entity in IPA Resource Manager */
+ bridge_prod_params.name = IPA_RM_RESOURCE_BRIDGE_PROD;
+ bridge_prod_params.reg_params.user_data = NULL;
+ bridge_prod_params.reg_params.notify_cb = bridge_prod_notify_cb;
+ res = ipa_rm_create_resource(&bridge_prod_params);
+ if (res) {
+ TETH_ERR("ipa_rm_create_resource() failed\n");
+ goto fail_cdev_add;
+ }
+ init_completion(&teth_ctx->is_bridge_prod_up);
+ init_completion(&teth_ctx->is_bridge_prod_down);
+
+ /* The default link protocol is Ethernet */
+ teth_ctx->link_protocol = TETH_LINK_PROTOCOL_ETHERNET;
+
TETH_DBG("Tethering bridge driver init OK\n");
return 0;
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 37dfc1b..b34f9dc 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -555,14 +555,14 @@
return;
}
- dfile_info = debugfs_create_file("info", 0666, dent, 0,
+ dfile_info = debugfs_create_file("info", 0664, dent, 0,
&sps_info_ops);
if (!dfile_info || IS_ERR(dfile_info)) {
pr_err("sps:fail to create the file for debug_fs info.\n");
goto info_err;
}
- dfile_logging_option = debugfs_create_file("logging_option", 0666,
+ dfile_logging_option = debugfs_create_file("logging_option", 0664,
dent, 0, &sps_logging_option_ops);
if (!dfile_logging_option || IS_ERR(dfile_logging_option)) {
pr_err("sps:fail to create the file for debug_fs "
@@ -571,7 +571,7 @@
}
dfile_debug_level_option = debugfs_create_u8("debug_level_option",
- 0666, dent, &debug_level_option);
+ 0664, dent, &debug_level_option);
if (!dfile_debug_level_option || IS_ERR(dfile_debug_level_option)) {
pr_err("sps:fail to create the file for debug_fs "
"debug_level_option.\n");
@@ -579,14 +579,14 @@
}
dfile_print_limit_option = debugfs_create_u8("print_limit_option",
- 0666, dent, &print_limit_option);
+ 0664, dent, &print_limit_option);
if (!dfile_print_limit_option || IS_ERR(dfile_print_limit_option)) {
pr_err("sps:fail to create the file for debug_fs "
"print_limit_option.\n");
goto print_limit_option_err;
}
- dfile_reg_dump_option = debugfs_create_u8("reg_dump_option", 0666,
+ dfile_reg_dump_option = debugfs_create_u8("reg_dump_option", 0664,
dent, &reg_dump_option);
if (!dfile_reg_dump_option || IS_ERR(dfile_reg_dump_option)) {
pr_err("sps:fail to create the file for debug_fs "
@@ -594,28 +594,28 @@
goto reg_dump_option_err;
}
- dfile_testbus_sel = debugfs_create_u32("testbus_sel", 0666,
+ dfile_testbus_sel = debugfs_create_u32("testbus_sel", 0664,
dent, &testbus_sel);
if (!dfile_testbus_sel || IS_ERR(dfile_testbus_sel)) {
pr_err("sps:fail to create debug_fs file for testbus_sel.\n");
goto testbus_sel_err;
}
- dfile_bam_pipe_sel = debugfs_create_u32("bam_pipe_sel", 0666,
+ dfile_bam_pipe_sel = debugfs_create_u32("bam_pipe_sel", 0664,
dent, &bam_pipe_sel);
if (!dfile_bam_pipe_sel || IS_ERR(dfile_bam_pipe_sel)) {
pr_err("sps:fail to create debug_fs file for bam_pipe_sel.\n");
goto bam_pipe_sel_err;
}
- dfile_desc_option = debugfs_create_u32("desc_option", 0666,
+ dfile_desc_option = debugfs_create_u32("desc_option", 0664,
dent, &desc_option);
if (!dfile_desc_option || IS_ERR(dfile_desc_option)) {
pr_err("sps:fail to create debug_fs file for desc_option.\n");
goto desc_option_err;
}
- dfile_bam_addr = debugfs_create_file("bam_addr", 0666,
+ dfile_bam_addr = debugfs_create_file("bam_addr", 0664,
dent, 0, &sps_bam_addr_ops);
if (!dfile_bam_addr || IS_ERR(dfile_bam_addr)) {
pr_err("sps:fail to create the file for debug_fs "
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index bc6f289..a194c49 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -116,6 +116,8 @@
u8 revision1;
u8 revision2;
int battery_present;
+ bool new_battery;
+ bool last_soc_invalid;
/* platform data */
int r_sense_uohm;
unsigned int v_cutoff_uv;
@@ -135,6 +137,7 @@
int default_rbatt_mohm;
struct delayed_work calculate_soc_delayed_work;
+ struct work_struct recalc_work;
struct mutex bms_output_lock;
struct mutex last_ocv_uv_mutex;
@@ -144,7 +147,7 @@
bool use_ocv_thresholds;
bool ignore_shutdown_soc;
- int shutdown_soc_invalid;
+ bool shutdown_soc_invalid;
int shutdown_soc;
int shutdown_iavg_ma;
@@ -552,6 +555,100 @@
pr_err("cc reenable failed: %d\n", rc);
}
+static bool is_battery_charging(struct qpnp_bms_chip *chip)
+{
+ union power_supply_propval ret = {0,};
+
+ if (chip->batt_psy == NULL)
+ chip->batt_psy = power_supply_get_by_name("battery");
+ if (chip->batt_psy) {
+ /* if battery has been registered, use the status property */
+ chip->batt_psy->get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_STATUS, &ret);
+ return ret.intval == POWER_SUPPLY_STATUS_CHARGING;
+ }
+
+ /* Default to false if the battery power supply is not registered. */
+ pr_debug("battery power supply is not registered\n");
+ return false;
+}
+
+static bool is_batfet_open(struct qpnp_bms_chip *chip)
+{
+ union power_supply_propval ret = {0,};
+
+ if (chip->batt_psy == NULL)
+ chip->batt_psy = power_supply_get_by_name("battery");
+ if (chip->batt_psy) {
+ /* if battery has been registered, use the status property */
+ chip->batt_psy->get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_STATUS, &ret);
+ return ret.intval == POWER_SUPPLY_STATUS_FULL;
+ }
+
+ /* Default to true if the battery power supply is not registered. */
+ pr_debug("battery power supply is not registered\n");
+ return true;
+}
+
+static int get_simultaneous_batt_v_and_i(struct qpnp_bms_chip *chip,
+ int *ibat_ua, int *vbat_uv)
+{
+ struct qpnp_iadc_result i_result;
+ struct qpnp_vadc_result v_result;
+ enum qpnp_iadc_channels iadc_channel;
+ int rc;
+
+ iadc_channel = chip->use_external_rsense ?
+ EXTERNAL_RSENSE : INTERNAL_RSENSE;
+ rc = qpnp_iadc_vadc_sync_read(iadc_channel, &i_result,
+ VBAT_SNS, &v_result);
+ if (rc) {
+ pr_err("vadc read failed with rc: %d\n", rc);
+ return rc;
+ }
+ /*
+ * reverse the current read by the iadc, since the bms uses
+ * flipped battery current polarity.
+ */
+ *ibat_ua = -1 * (int)i_result.result_ua;
+ *vbat_uv = (int)v_result.physical;
+
+ return 0;
+}
+
+static int estimate_ocv(struct qpnp_bms_chip *chip)
+{
+ int ibat_ua, vbat_uv, ocv_est_uv;
+ int rc;
+ int rbatt_mohm = chip->default_rbatt_mohm + chip->r_conn_mohm;
+
+ rc = get_simultaneous_batt_v_and_i(chip, &ibat_ua, &vbat_uv);
+ if (rc) {
+ pr_err("simultaneous failed rc = %d\n", rc);
+ return rc;
+ }
+
+ ocv_est_uv = vbat_uv + (ibat_ua * rbatt_mohm) / 1000;
+ pr_debug("estimated pon ocv = %d\n", ocv_est_uv);
+ return ocv_est_uv;
+}
+
+static void reset_for_new_battery(struct qpnp_bms_chip *chip, int batt_temp)
+{
+ chip->last_ocv_uv = estimate_ocv(chip);
+ chip->last_soc = -EINVAL;
+ chip->soc_at_cv = -EINVAL;
+ chip->shutdown_soc_invalid = true;
+ chip->shutdown_soc = 0;
+ chip->shutdown_iavg_ma = 0;
+ chip->prev_pc_unusable = -EINVAL;
+ reset_cc(chip);
+ chip->last_cc_uah = INT_MIN;
+ chip->last_ocv_temp = batt_temp;
+ chip->last_soc_invalid = true;
+}
+
#define OCV_RAW_UNINITIALIZED 0xFFFF
static int read_soc_params_raw(struct qpnp_bms_chip *chip,
struct raw_soc_params *raw,
@@ -590,6 +687,12 @@
if (chip->prev_last_good_ocv_raw == OCV_RAW_UNINITIALIZED) {
convert_and_store_ocv(chip, raw, batt_temp);
pr_debug("PON_OCV_UV = %d\n", chip->last_ocv_uv);
+ } else if (chip->new_battery) {
+ /* if a new battery was inserted, estimate the ocv */
+ reset_for_new_battery(chip, batt_temp);
+ raw->cc = 0;
+ raw->last_good_ocv_uv = chip->last_ocv_uv;
+ chip->new_battery = false;
} else if (chip->prev_last_good_ocv_raw != raw->last_good_ocv_raw) {
convert_and_store_ocv(chip, raw, batt_temp);
/* forget the old cc value upon ocv */
@@ -1083,75 +1186,13 @@
pr_debug("rejecting shutdown soc = %d, soc = %d limit = %d\n",
chip->shutdown_soc, soc,
chip->shutdown_soc_valid_limit);
- chip->shutdown_soc_invalid = 1;
+ chip->shutdown_soc_invalid = true;
return 0;
}
return 1;
}
-static bool is_battery_charging(struct qpnp_bms_chip *chip)
-{
- union power_supply_propval ret = {0,};
-
- if (chip->batt_psy == NULL)
- chip->batt_psy = power_supply_get_by_name("battery");
- if (chip->batt_psy) {
- /* if battery has been registered, use the status property */
- chip->batt_psy->get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_STATUS, &ret);
- return ret.intval == POWER_SUPPLY_STATUS_CHARGING;
- }
-
- /* Default to false if the battery power supply is not registered. */
- pr_debug("battery power supply is not registered\n");
- return false;
-}
-
-static bool is_batfet_open(struct qpnp_bms_chip *chip)
-{
- union power_supply_propval ret = {0,};
-
- if (chip->batt_psy == NULL)
- chip->batt_psy = power_supply_get_by_name("battery");
- if (chip->batt_psy) {
- /* if battery has been registered, use the status property */
- chip->batt_psy->get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_STATUS, &ret);
- return ret.intval == POWER_SUPPLY_STATUS_FULL;
- }
-
- /* Default to true if the battery power supply is not registered. */
- pr_debug("battery power supply is not registered\n");
- return true;
-}
-
-static int get_simultaneous_batt_v_and_i(struct qpnp_bms_chip *chip,
- int *ibat_ua, int *vbat_uv)
-{
- struct qpnp_iadc_result i_result;
- struct qpnp_vadc_result v_result;
- enum qpnp_iadc_channels iadc_channel;
- int rc;
-
- iadc_channel = chip->use_external_rsense ?
- EXTERNAL_RSENSE : INTERNAL_RSENSE;
- rc = qpnp_iadc_vadc_sync_read(iadc_channel, &i_result,
- VBAT_SNS, &v_result);
- if (rc) {
- pr_err("vadc read failed with rc: %d\n", rc);
- return rc;
- }
- /*
- * reverse the current read by the iadc, since the bms uses
- * flipped battery current polarity.
- */
- *ibat_ua = -1 * (int)i_result.result_ua;
- *vbat_uv = (int)v_result.physical;
-
- return 0;
-}
-
static int bound_soc(int soc)
{
soc = max(0, soc);
@@ -1188,6 +1229,7 @@
pr_debug("forcing ocv to be %d due to bms reset mode\n", ocv_est_uv);
chip->last_ocv_uv = ocv_est_uv;
chip->last_soc = -EINVAL;
+ chip->last_soc_invalid = true;
reset_cc(chip);
chip->last_cc_uah = INT_MIN;
stop_ocv_updates(chip);
@@ -1475,6 +1517,11 @@
int shutdown_soc, new_calculated_soc, remaining_usable_charge_uah;
struct soc_params params;
+ if (!chip->battery_present) {
+ pr_debug("battery gone, reporting 0\n");
+ new_calculated_soc = 0;
+ goto done_calculating;
+ }
calculate_soc_params(chip, raw, ¶ms, batt_temp);
/* calculate remaining usable charge */
remaining_usable_charge_uah = params.ocv_charge_uah
@@ -1574,6 +1621,10 @@
}
chip->calculated_soc = new_calculated_soc;
+ if (chip->last_soc_invalid) {
+ chip->last_soc_invalid = false;
+ chip->last_soc = -EINVAL;
+ }
pr_debug("CC based calculated SOC = %d\n", chip->calculated_soc);
chip->first_time_calc_soc = 0;
get_current_time(&chip->last_recalc_time);
@@ -1639,6 +1690,15 @@
return soc;
}
+static void recalculate_work(struct work_struct *work)
+{
+ struct qpnp_bms_chip *chip = container_of(work,
+ struct qpnp_bms_chip,
+ recalc_work);
+
+ recalculate_soc(chip);
+}
+
static void calculate_soc_work(struct work_struct *work)
{
struct qpnp_bms_chip *chip = container_of(work,
@@ -1820,7 +1880,7 @@
pr_debug("Reported SOC = %d\n", chip->last_soc);
chip->t_soc_queried = now;
- return chip->last_soc;
+ return soc;
}
static int report_state_of_charge(struct qpnp_bms_chip *chip)
@@ -1872,8 +1932,14 @@
static void set_prop_bms_present(struct qpnp_bms_chip *chip, int present)
{
- if (chip->battery_present != present)
+ if (chip->battery_present != present) {
chip->battery_present = present;
+ if (present)
+ chip->new_battery = true;
+ /* a new battery was inserted or removed, so force a soc
+ * recalculation to update the SoC */
+ schedule_work(&chip->recalc_work);
+ }
}
static void qpnp_bms_external_power_changed(struct power_supply *psy)
@@ -1969,7 +2035,7 @@
u8 temp;
if (chip->ignore_shutdown_soc) {
- chip->shutdown_soc_invalid = 1;
+ chip->shutdown_soc_invalid = true;
chip->shutdown_soc = 0;
chip->shutdown_iavg_ma = 0;
} else {
@@ -2003,7 +2069,7 @@
if (chip->shutdown_soc == 0) {
pr_debug("No shutdown soc available\n");
- chip->shutdown_soc_invalid = 1;
+ chip->shutdown_soc_invalid = true;
chip->shutdown_iavg_ma = 0;
} else if (chip->shutdown_soc == SOC_ZERO) {
chip->shutdown_soc = 0;
@@ -2391,6 +2457,7 @@
"qpnp_low_voltage_lock");
INIT_DELAYED_WORK(&chip->calculate_soc_delayed_work,
calculate_soc_work);
+ INIT_WORK(&chip->recalc_work, recalculate_work);
read_shutdown_soc_and_iavg(chip);
@@ -2403,6 +2470,9 @@
chip->batt_psy->get_property(chip->batt_psy,
POWER_SUPPLY_PROP_PRESENT, &retval);
chip->battery_present = retval.intval;
+ pr_debug("present = %d\n", chip->battery_present);
+ } else {
+ chip->battery_present = 1;
}
calculate_soc_work(&(chip->calculate_soc_delayed_work.work));
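estimate_ocv(), added earlier in this file, compensates the measured battery voltage for the drop across the default battery resistance: with current in microamps and resistance in milliohms, dividing the product by 1000 yields microvolts. A worked example with made-up numbers:

/* Sketch of the unit arithmetic in estimate_ocv():
 *   uV + (uA * mOhm) / 1000 = uV
 * e.g. 3.70 V measured while discharging 0.5 A through 200 mOhm gives an
 * estimated open-circuit voltage of 3.80 V. Example values only. */
#include <stdio.h>

int main(void)
{
	int vbat_uv = 3700000;		/* 3.70 V, example */
	int ibat_ua = 500000;		/* 0.50 A, example (discharge positive) */
	int rbatt_mohm = 200;		/* 200 mOhm, example */
	int ocv_est_uv = vbat_uv + (ibat_ua * rbatt_mohm) / 1000;

	printf("estimated ocv = %d uV\n", ocv_est_uv);	/* 3800000 */
	return 0;
}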
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index eb0d6d4..180ca0d 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -525,6 +525,8 @@
if (chip->usb_present ^ usb_present) {
chip->usb_present = usb_present;
+ if (!usb_present)
+ qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
power_supply_set_present(chip->usb_psy,
chip->usb_present);
}
@@ -1015,17 +1017,15 @@
chip->usb_psy->get_property(chip->usb_psy,
POWER_SUPPLY_PROP_ONLINE, &ret);
- if (ret.intval && qpnp_chg_is_usb_chg_plugged_in(chip)) {
- chip->usb_psy->get_property(chip->usb_psy,
- POWER_SUPPLY_PROP_CURRENT_MAX, &ret);
- qpnp_chg_iusbmax_set(chip, ret.intval / 1000);
- if ((ret.intval / 1000) <= QPNP_CHG_I_MAX_MIN_MA)
+ /* Only honour requests while USB is present */
+ if (qpnp_chg_is_usb_chg_plugged_in(chip)) {
+ if (ret.intval <= 2) {
qpnp_chg_usb_suspend_enable(chip, 1);
- else
+ qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
+ } else {
+ qpnp_chg_iusbmax_set(chip, ret.intval / 1000);
qpnp_chg_usb_suspend_enable(chip, 0);
- } else {
- qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
- qpnp_chg_usb_suspend_enable(chip, 0);
+ }
}
pr_debug("end of power supply changed\n");
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 63acde1..21a936e 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -200,7 +200,6 @@
struct work_struct clock_off_w; /* work for actual clock off */
struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
struct mutex clk_mutex; /* mutex to guard against clock off/clock on */
- struct work_struct reset_bam_rx; /* work for reset bam rx endpoint */
struct work_struct disconnect_rx_endpoint; /* disconnect rx_endpoint */
bool tty_flush_receive;
enum uart_core_type uart_type;
@@ -219,6 +218,7 @@
/* BLSP UART required BUS Scaling data */
struct msm_bus_scale_pdata *bus_scale_table;
bool rx_discard_flush_issued;
+ int rx_count_callback;
};
#define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */
@@ -823,22 +823,6 @@
}
-/* Reset BAM RX Endpoint Pipe Index from workqueue context*/
-
-static void hsuart_reset_bam_rx_work(struct work_struct *w)
-{
- struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
- reset_bam_rx);
- struct uart_port *uport = &msm_uport->uport;
- struct msm_hs_rx *rx = &msm_uport->rx;
- struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
-
- sps_disconnect(sps_pipe_handle);
- msm_hs_spsconnect_rx(uport);
-
- msm_serial_hs_rx_tlet((unsigned long) &rx->tlet);
-}
-
/*
* termios : new ktermios
* oldtermios: old ktermios previous setting
@@ -1073,13 +1057,6 @@
if (!is_blsp_uart(msm_uport) && msm_uport->rx.flush != FLUSH_SHUTDOWN)
msm_uport->rx.flush = FLUSH_STOP;
- /* During uart port close, due to spurious rx stale interrupt,
- * the rx state machine is causing BUG_ON to be hit in
- * msm_hs_shutdown causing kernel panic.
- * Hence fixing the same by handling the rx state machine.
- */
- if (is_blsp_uart(msm_uport) && msm_uport->rx.flush == FLUSH_DATA_READY)
- msm_uport->rx.flush = FLUSH_SHUTDOWN;
}
/* Transmit the next chunk of data */
@@ -1178,10 +1155,6 @@
printk(KERN_ERR "Error: rx started in buffer state = %x",
buffer_pending);
- if (is_blsp_uart(msm_uport)) {
- /* Issue RX BAM Start IFC command */
- msm_hs_write(uport, UARTDM_CR_ADDR, START_RX_BAM_IFC);
- }
msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
@@ -1204,6 +1177,19 @@
/* Calling next DMOV API. Hence mb() here. */
mb();
+ if (is_blsp_uart(msm_uport)) {
+ /*
+ * RX-transfer will be automatically re-activated
+ * after last data of previous transfer was read.
+ */
+ data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
+ RX_DMRX_CYCLIC_EN);
+ msm_hs_write(uport, UARTDM_RX_TRANS_CTRL_ADDR, data);
+ /* Issue RX BAM Start IFC command */
+ msm_hs_write(uport, UARTDM_CR_ADDR, START_RX_BAM_IFC);
+ mb();
+ }
+
msm_uport->rx.flush = FLUSH_NONE;
if (is_blsp_uart(msm_uport)) {
@@ -1276,7 +1262,6 @@
{
int retval;
int rx_count;
- static int remaining_rx_count, bytes_pending;
unsigned long status;
unsigned long flags;
unsigned int error_f = 0;
@@ -1284,17 +1269,24 @@
struct msm_hs_port *msm_uport;
unsigned int flush;
struct tty_struct *tty;
+ struct sps_event_notify *notify;
+ struct msm_hs_rx *rx;
+ struct sps_pipe *sps_pipe_handle;
+ u32 sps_flags = SPS_IOVEC_FLAG_EOT;
msm_uport = container_of((struct tasklet_struct *)tlet_ptr,
struct msm_hs_port, rx.tlet);
uport = &msm_uport->uport;
tty = uport->state->port.tty;
+ notify = &msm_uport->notify;
+ rx = &msm_uport->rx;
status = msm_hs_read(uport, UARTDM_SR_ADDR);
spin_lock_irqsave(&uport->lock, flags);
- msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
+ if (!is_blsp_uart(msm_uport))
+ msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
/* overflow is not connect to data in a FIFO */
if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
@@ -1351,24 +1343,13 @@
if (flush >= FLUSH_DATA_INVALID)
goto out;
- rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
-
if (is_blsp_uart(msm_uport)) {
- if (rx_count > UARTDM_RX_BUF_SIZE) {
- if (bytes_pending) {
- rx_count = remaining_rx_count;
- bytes_pending = 0;
- } else {
- remaining_rx_count = rx_count -
- UARTDM_RX_BUF_SIZE;
- if (remaining_rx_count)
- bytes_pending = 1;
- rx_count = UARTDM_RX_BUF_SIZE;
- }
- }
+ rx_count = msm_uport->rx_count_callback;
+ } else {
+ rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
+ /* order the read of rx.buffer */
+ rmb();
}
- /* order the read of rx.buffer */
- rmb();
if (0 != (uport->read_status_mask & CREAD)) {
retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
@@ -1382,9 +1363,17 @@
/* order the read of rx.buffer and the start of next rx xfer */
wmb();
- if (!msm_uport->rx.buffer_pending)
- msm_hs_start_rx_locked(uport);
-
+ if (!msm_uport->rx.buffer_pending) {
+ if (is_blsp_uart(msm_uport)) {
+ msm_uport->rx.flush = FLUSH_NONE;
+ sps_pipe_handle = rx->prod.pipe_handle;
+ /* Queue transfer request to SPS */
+ sps_transfer_one(sps_pipe_handle, rx->rbuffer,
+ UARTDM_RX_BUF_SIZE, msm_uport, sps_flags);
+ } else {
+ msm_hs_start_rx_locked(uport);
+ }
+ }
out:
if (msm_uport->rx.buffer_pending) {
if (hs_serial_debug_mask)
@@ -1502,7 +1491,10 @@
struct msm_hs_port *msm_uport =
(struct msm_hs_port *)
((struct sps_event_notify *)notify)->user;
+ struct uart_port *uport;
+ unsigned long flags;
+ uport = &(msm_uport->uport);
msm_uport->notify = *notify;
pr_debug("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
__func__, notify->event_id,
@@ -1510,8 +1502,12 @@
notify->data.transfer.iovec.size,
notify->data.transfer.iovec.flags);
- if (msm_uport->rx.flush == FLUSH_NONE)
+ if (msm_uport->rx.flush == FLUSH_NONE) {
+ spin_lock_irqsave(&uport->lock, flags);
+ msm_uport->rx_count_callback = notify->data.transfer.iovec.size;
+ spin_unlock_irqrestore(&uport->lock, flags);
tasklet_schedule(&msm_uport->rx.tlet);
+ }
}
/*
@@ -1691,6 +1687,8 @@
int ret;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
struct circ_buf *tx_buf = &uport->state->xmit;
+ struct msm_hs_rx *rx = &msm_uport->rx;
+ struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
mutex_lock(&msm_uport->clk_mutex);
spin_lock_irqsave(&uport->lock, flags);
@@ -1717,26 +1715,26 @@
switch (msm_uport->clk_req_off_state) {
case CLK_REQ_OFF_START:
msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
- if (is_blsp_uart(msm_uport)) {
- /* Stale interrupt when RX-FIFO is empty
- * will fire if STALE_IRQ_EMPTY bit is set
- * for UART Core v1.4
- */
- msm_hs_write(uport, UARTDM_BCR_ADDR,
- UARTDM_BCR_STALE_IRQ_EMPTY);
+
+ if (!is_blsp_uart(msm_uport)) {
+ msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
+ /*
+ * Before returning, make sure the device writel has
+ * completed. Hence mb() is required here.
+ */
+ mb();
}
- msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
- /*
- * Before returning make sure that device writel completed.
- * Hence mb() requires here.
- */
- mb();
spin_unlock_irqrestore(&uport->lock, flags);
mutex_unlock(&msm_uport->clk_mutex);
return 0; /* RXSTALE flush not complete - retry */
case CLK_REQ_OFF_RXSTALE_ISSUED:
case CLK_REQ_OFF_FLUSH_ISSUED:
spin_unlock_irqrestore(&uport->lock, flags);
+ if (is_blsp_uart(msm_uport)) {
+ msm_uport->clk_req_off_state =
+ CLK_REQ_OFF_RXSTALE_FLUSHED;
+ sps_disconnect(sps_pipe_handle);
+ }
mutex_unlock(&msm_uport->clk_mutex);
return 0; /* RXSTALE flush not complete - retry */
case CLK_REQ_OFF_RXSTALE_FLUSHED:
@@ -1846,24 +1844,13 @@
mb();
if (msm_uport->clk_req_off_state ==
- CLK_REQ_OFF_RXSTALE_ISSUED) {
+ CLK_REQ_OFF_RXSTALE_ISSUED)
msm_uport->clk_req_off_state =
- CLK_REQ_OFF_FLUSH_ISSUED;
+ CLK_REQ_OFF_FLUSH_ISSUED;
- if (is_blsp_uart(msm_uport)) {
- /* Reset BCR Register for UARTDM Core v14*/
- msm_hs_write(uport, UARTDM_BCR_ADDR, 0x0);
- }
- }
-
- if (rx->flush == FLUSH_NONE) {
+ if (!is_blsp_uart(msm_uport) && (rx->flush == FLUSH_NONE)) {
rx->flush = FLUSH_DATA_READY;
- if (is_blsp_uart(msm_uport)) {
- queue_work(msm_uport->hsuart_wq,
- &msm_uport->reset_bam_rx);
- } else {
- msm_dmov_flush(msm_uport->dma_rx_channel, 1);
- }
+ msm_dmov_flush(msm_uport->dma_rx_channel, 1);
}
}
/* tx ready interrupt */
@@ -1993,8 +1980,14 @@
mb();
}
hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
- if (msm_uport->rx.flush == FLUSH_SHUTDOWN)
+ if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+ if (is_blsp_uart(msm_uport)) {
+ spin_unlock_irqrestore(&uport->lock, flags);
+ msm_hs_spsconnect_rx(uport);
+ spin_lock_irqsave(&uport->lock, flags);
+ }
msm_hs_start_rx_locked(uport);
+ }
if (msm_uport->rx.flush == FLUSH_STOP)
msm_uport->rx.flush = FLUSH_IGNORE;
msm_uport->clk_state = MSM_HS_CLK_ON;
@@ -2265,9 +2258,10 @@
tx->command_ptr->dst_row_addr =
msm_uport->uport.mapbase + UARTDM_TF_ADDR;
+
+ msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
}
- msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
/* Enable reading the current CTS, no harm even if CTS is ignored */
msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
@@ -2378,7 +2372,10 @@
}
/* Set up Uart Receive */
- msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
+ if (is_blsp_uart(msm_uport))
+ msm_hs_write(uport, UARTDM_RFWR_ADDR, 32);
+ else
+ msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
@@ -2975,9 +2972,6 @@
INIT_WORK(&msm_uport->clock_off_w, hsuart_clock_off_work);
- /* Init work for Reset Rx bam endpoints */
- INIT_WORK(&msm_uport->reset_bam_rx, hsuart_reset_bam_rx_work);
-
/* Init work for sps_disconnect in stop_rx_locked */
INIT_WORK(&msm_uport->disconnect_rx_endpoint,
hsuart_disconnect_rx_endpoint_work);
diff --git a/drivers/tty/serial/msm_serial_hs_hwreg.h b/drivers/tty/serial/msm_serial_hs_hwreg.h
index 9fa4f55..cdd0450 100644
--- a/drivers/tty/serial/msm_serial_hs_hwreg.h
+++ b/drivers/tty/serial/msm_serial_hs_hwreg.h
@@ -68,6 +68,14 @@
*/
#define UARTDM_BCR_STALE_IRQ_EMPTY 0x2
+/* TRANSFER_CONTROL Register for UARTDM Core v1.4 */
+#define UARTDM_RX_TRANS_CTRL_ADDR 0xcc
+
+/* TRANSFER_CONTROL Register bits */
+#define RX_STALE_AUTO_RE_EN 0x1
+#define RX_TRANS_AUTO_RE_ACTIVATE 0x2
+#define RX_DMRX_CYCLIC_EN 0x4
+
/* write only register */
#define UARTDM_CSR_115200 0xFF
#define UARTDM_CSR_57600 0xEE
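For reference, a minimal sketch (not part of the patch) of the value the three new TRANSFER_CONTROL bits combine to when BLSP RX is re-armed in msm_hs_start_rx_locked() above:

	/* Illustrative only: combined TRANSFER_CONTROL value for cyclic BAM RX */
	u32 rx_trans_ctrl = RX_STALE_AUTO_RE_EN |	/* 0x1 */
			    RX_TRANS_AUTO_RE_ACTIVATE |	/* 0x2 */
			    RX_DMRX_CYCLIC_EN;		/* 0x4 */
	/* rx_trans_ctrl == 0x7, written to UARTDM_RX_TRANS_CTRL_ADDR (0xcc) */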
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index e5aca6f..b48785c 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -535,6 +535,7 @@
static int dwc3_msm_dbm_ep_unconfig(u8 usb_ep)
{
u8 dbm_ep;
+ u32 data;
dev_dbg(context->dev, "%s\n", __func__);
@@ -548,10 +549,18 @@
context->ep_num_mapping[dbm_ep] = 0;
- dwc3_msm_write_reg(context->base, DBM_EP_CFG(dbm_ep), 0);
+ data = dwc3_msm_read_reg(context->base, DBM_EP_CFG(dbm_ep));
+ data &= (~0x1);
+ dwc3_msm_write_reg(context->base, DBM_EP_CFG(dbm_ep), data);
/* Reset the dbm endpoint */
dwc3_msm_dbm_ep_soft_reset(dbm_ep, true);
+ /*
+ * A 10 usec delay is required before deasserting the DBM endpoint
+ * reset, according to the hardware programming guide.
+ */
+ udelay(10);
+ dwc3_msm_dbm_ep_soft_reset(dbm_ep, false);
return 0;
}
@@ -888,6 +897,8 @@
}
(*new_ep_ops) = (*ep->ops);
new_ep_ops->queue = dwc3_msm_ep_queue;
+ new_ep_ops->disable = ep->ops->disable;
+
ep->ops = new_ep_ops;
/*
@@ -1328,24 +1339,27 @@
dwc3_msm_write_readback(msm->base, SS_PHY_PARAM_CTRL_1, 0x07, 0x5);
}
-static void dwc3_msm_block_reset(void)
+static void dwc3_msm_block_reset(bool core_reset)
{
+
struct dwc3_msm *mdwc = context;
int ret = 0;
- ret = dwc3_msm_link_clk_reset(1);
- if (ret)
- return;
+ if (core_reset) {
+ ret = dwc3_msm_link_clk_reset(1);
+ if (ret)
+ return;
- usleep_range(1000, 1200);
- ret = dwc3_msm_link_clk_reset(0);
- if (ret)
- return;
+ usleep_range(1000, 1200);
+ ret = dwc3_msm_link_clk_reset(0);
+ if (ret)
+ return;
- usleep_range(10000, 12000);
+ usleep_range(10000, 12000);
- /* Reinitialize QSCRATCH registers after block reset */
- dwc3_msm_qscratch_reg_init(mdwc);
+ /* Reinitialize QSCRATCH registers after block reset */
+ dwc3_msm_qscratch_reg_init(mdwc);
+ }
/* Reset the DBM */
dwc3_msm_dbm_soft_reset(1);
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 01fad76..282f49e 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -206,6 +206,20 @@
return ret;
}
dwc3_otg_notify_host_mode(otg, on);
+
+ /*
+ * Perform USB hardware RESET (both core reset and DBM reset)
+ * when moving from host to peripheral. This is required for
+ * peripheral mode to work.
+ */
+ if (ext_xceiv && ext_xceiv->otg_capability &&
+ ext_xceiv->ext_block_reset)
+ ext_xceiv->ext_block_reset(true);
+
+ /* re-init core and OTG registers as block reset clears these */
+ dwc3_post_host_reset_core_init(dwc);
+ if (ext_xceiv && !ext_xceiv->otg_capability)
+ dwc3_otg_reset(dotg);
}
return 0;
@@ -253,7 +267,6 @@
{
struct dwc3_otg *dotg = container_of(otg, struct dwc3_otg, otg);
struct dwc3_ext_xceiv *ext_xceiv = dotg->ext_xceiv;
- struct dwc3 *dwc = dotg->dwc;
if (!otg->gadget)
return -EINVAL;
@@ -262,20 +275,11 @@
dev_dbg(otg->phy->dev, "%s: turn on gadget %s\n",
__func__, otg->gadget->name);
- /*
- * Hardware reset is required to support below scenarios:
- * 1. Host <-> peripheral switching
- * 2. Once an endpoint is configured in DBM (BAM) mode, it
- * can be unconfigured only after RESET
- */
+ /* Core reset is not required when starting the peripheral.
+ * Only a DBM reset is needed, so perform just the DBM reset here. */
if (ext_xceiv && ext_xceiv->otg_capability &&
ext_xceiv->ext_block_reset)
- ext_xceiv->ext_block_reset();
-
- /* re-init core and OTG registers as block reset clears these */
- dwc3_post_host_reset_core_init(dwc);
- if (ext_xceiv && !ext_xceiv->otg_capability)
- dwc3_otg_reset(dotg);
+ ext_xceiv->ext_block_reset(false);
dwc3_otg_set_peripheral_regs(dotg);
usb_gadget_vbus_connect(otg->gadget);
diff --git a/drivers/usb/dwc3/dwc3_otg.h b/drivers/usb/dwc3/dwc3_otg.h
index d3b1b4a..c2fab53 100644
--- a/drivers/usb/dwc3/dwc3_otg.h
+++ b/drivers/usb/dwc3/dwc3_otg.h
@@ -109,7 +109,7 @@
void (*notify_ext_events)(struct usb_otg *otg,
enum dwc3_ext_events ext_event);
/* for block reset USB core */
- void (*ext_block_reset)(void);
+ void (*ext_block_reset)(bool core_reset);
};
/* for external transceiver driver */
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 5b97148..d4bdf99 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -77,8 +77,8 @@
#include "f_rndis.c"
#include "rndis.c"
#include "f_qc_ecm.c"
-#include "u_bam_data.c"
#include "f_mbim.c"
+#include "u_bam_data.c"
#include "f_ecm.c"
#include "f_qc_rndis.c"
#include "u_ether.c"
@@ -865,10 +865,18 @@
fmbim_cleanup();
}
+
+/* mbim transport string */
+static char mbim_transports[MAX_XPORT_STR_LEN];
+
static int mbim_function_bind_config(struct android_usb_function *f,
struct usb_configuration *c)
{
- return mbim_bind_config(c, 0);
+ char *trans;
+
+ pr_debug("%s: mbim transport is %s", __func__, mbim_transports);
+ trans = strim(mbim_transports);
+ return mbim_bind_config(c, 0, trans);
}
static int mbim_function_ctrlrequest(struct android_usb_function *f,
@@ -878,12 +886,34 @@
return mbim_ctrlrequest(cdev, c);
}
+static ssize_t mbim_transports_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", mbim_transports);
+}
+
+static ssize_t mbim_transports_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ strlcpy(mbim_transports, buf, sizeof(mbim_transports));
+ return size;
+}
+
+static DEVICE_ATTR(mbim_transports, S_IRUGO | S_IWUSR, mbim_transports_show,
+ mbim_transports_store);
+
+static struct device_attribute *mbim_function_attributes[] = {
+ &dev_attr_mbim_transports,
+ NULL
+};
+
static struct android_usb_function mbim_function = {
.name = "usb_mbim",
.cleanup = mbim_function_cleanup,
.bind_config = mbim_function_bind_config,
.init = mbim_function_init,
.ctrlrequest = mbim_function_ctrlrequest,
+ .attributes = mbim_function_attributes,
};
#ifdef CONFIG_SND_PCM
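A minimal userspace sketch of how the new mbim_transports attribute might be used; the sysfs path and the "BAM2BAM_IPA" transport string are assumptions, only the attribute name comes from this patch:

	/* Hypothetical example: select the IPA transport before enabling usb_mbim.
	 * The path assumes the usual android_usb function-device layout and that
	 * str_to_xport() accepts "BAM2BAM_IPA"; adjust both to the actual target.
	 */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/android_usb/f_usb_mbim/mbim_transports", "w");

		if (!f)
			return 1;
		fputs("BAM2BAM_IPA", f);
		fclose(f);
		return 0;
	}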
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index 893f315..52e3126 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -84,6 +84,7 @@
wait_queue_head_t read_wq;
wait_queue_head_t write_wq;
+ enum transport_type xport;
u8 port_num;
struct data_port bam_port;
struct mbim_notify_port not_port;
@@ -143,6 +144,9 @@
#define NTB_OUT_SIZE (0x1000)
#define NDP_IN_DIVISOR (0x4)
+#define NTB_DEFAULT_IN_SIZE_IPA (0x2000)
+#define NTB_OUT_SIZE_IPA (0x2000)
+
#define FORMATS_SUPPORTED USB_CDC_NCM_NTB16_SUPPORTED
static struct usb_cdc_ncm_ntb_parameters ntb_parameters = {
@@ -659,26 +663,49 @@
return 0;
}
+int mbim_configure_params(void)
+{
+ struct teth_aggr_params aggr_params;
+ int ret = 0;
+
+ aggr_params.dl.aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
+ aggr_params.dl.max_datagrams = ntb_parameters.wNtbOutMaxDatagrams;
+ aggr_params.dl.max_transfer_size_byte = ntb_parameters.dwNtbInMaxSize;
+
+ aggr_params.ul.aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
+ aggr_params.ul.max_datagrams = ntb_parameters.wNtbOutMaxDatagrams;
+ aggr_params.ul.max_transfer_size_byte = ntb_parameters.dwNtbOutMaxSize;
+
+ ret = teth_bridge_set_aggr_params(&aggr_params);
+ if (ret)
+ pr_err("%s: teth_bridge_set_aggr_params failed\n", __func__);
+
+ return ret;
+}
+
static int mbim_bam_connect(struct f_mbim *dev)
{
int ret;
u8 src_connection_idx, dst_connection_idx;
struct usb_gadget *gadget = dev->cdev->gadget;
+ enum peer_bam bam_name = (dev->xport == USB_GADGET_XPORT_BAM2BAM_IPA) ?
+ IPA_P_BAM : A2_P_BAM;
pr_info("dev:%p portno:%d\n", dev, dev->port_num);
- src_connection_idx = usb_bam_get_connection_idx(gadget->name, A2_P_BAM,
- USB_TO_PEER_PERIPHERAL, dev->port_num);
- dst_connection_idx = usb_bam_get_connection_idx(gadget->name, A2_P_BAM,
- PEER_PERIPHERAL_TO_USB, dev->port_num);
+ src_connection_idx = usb_bam_get_connection_idx(gadget->name, bam_name,
+ USB_TO_PEER_PERIPHERAL, dev->port_num);
+ dst_connection_idx = usb_bam_get_connection_idx(gadget->name, bam_name,
+ PEER_PERIPHERAL_TO_USB, dev->port_num);
if (src_connection_idx < 0 || dst_connection_idx < 0) {
pr_err("%s: usb_bam_get_connection_idx failed\n", __func__);
return ret;
}
ret = bam_data_connect(&dev->bam_port, dev->port_num,
- USB_GADGET_XPORT_BAM2BAM, src_connection_idx,
- dst_connection_idx, USB_FUNC_MBIM);
+ dev->xport, src_connection_idx, dst_connection_idx,
+ USB_FUNC_MBIM);
+
if (ret) {
pr_err("bam_data_setup failed: err:%d\n",
ret);
@@ -1603,7 +1630,8 @@
* Context: single threaded during gadget setup
* Returns zero on success, else negative errno.
*/
-int mbim_bind_config(struct usb_configuration *c, unsigned portno)
+int mbim_bind_config(struct usb_configuration *c, unsigned portno,
+ char *xport_name)
{
struct f_mbim *mbim = NULL;
int status = 0;
@@ -1662,6 +1690,19 @@
mbim->function.disable = mbim_disable;
mbim->function.suspend = mbim_suspend;
mbim->function.resume = mbim_resume;
+ mbim->xport = str_to_xport(xport_name);
+
+ if (mbim->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ /* Use BAM2BAM by default if not IPA */
+ mbim->xport = USB_GADGET_XPORT_BAM2BAM;
+ } else {
+ /* For IPA, use a limit of 16 datagrams */
+ ntb_parameters.wNtbOutMaxDatagrams = 16;
+ /* For IPA this is proven to give maximum throughput */
+ ntb_parameters.dwNtbInMaxSize =
+ cpu_to_le32(NTB_DEFAULT_IN_SIZE_IPA);
+ ntb_parameters.dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE_IPA);
+ }
INIT_LIST_HEAD(&mbim->cpkt_req_q);
INIT_LIST_HEAD(&mbim->cpkt_resp_q);
diff --git a/drivers/usb/gadget/f_qdss.c b/drivers/usb/gadget/f_qdss.c
index 6518095..cece500 100644
--- a/drivers/usb/gadget/f_qdss.c
+++ b/drivers/usb/gadget/f_qdss.c
@@ -410,7 +410,6 @@
clear_eps(f);
clear_desc(c->cdev->gadget, f);
- msm_dwc3_restart_usb_session();
}
static void qdss_eps_disable(struct usb_function *f)
@@ -467,13 +466,13 @@
qdss->usb_connected = 0;
spin_unlock_irqrestore(&qdss->lock, flags);
+ /* cancel all active xfers */
+ qdss_eps_disable(f);
+
status = uninit_data(qdss->data);
if (status)
pr_err("%s: uninit_data error\n", __func__);
- /*cancell all active xfers*/
- qdss_eps_disable(f);
-
schedule_work(&qdss->disconnect_w);
}
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 801d24d..7ac5b64 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -687,6 +687,12 @@
rndis_reset_cmplt_type *resp;
rndis_resp_t *r;
struct rndis_params *params = rndis_per_dev_params + configNr;
+ u32 length;
+ u8 *xbuf;
+
+ /* drain the response queue */
+ while ((xbuf = rndis_get_next_response(configNr, &length)))
+ rndis_free_response(configNr, xbuf);
r = rndis_add_response(configNr, sizeof(rndis_reset_cmplt_type));
if (!r)
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 83f885a..eec9e37 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -183,6 +183,8 @@
int ret;
if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ if (d->func_type == USB_FUNC_MBIM)
+ teth_bridge_disconnect();
if (d->func_type == USB_FUNC_ECM)
ecm_ipa_disconnect(d->ipa_params.priv);
ret = usb_bam_disconnect_ipa(&d->ipa_params);
@@ -195,13 +197,28 @@
{
struct bam_data_port *port = container_of(w, struct bam_data_port,
connect_w);
+ struct teth_bridge_connect_params connect_params;
struct bam_data_ch_info *d = &port->data_ch;
+ ipa_notify_cb usb_notify_cb;
+ void *priv;
u32 sps_params;
int ret;
pr_debug("%s: Connect workqueue started", __func__);
if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ if (d->func_type == USB_FUNC_MBIM) {
+ ret = teth_bridge_init(&usb_notify_cb, &priv);
+ if (ret) {
+ pr_err("%s:teth_bridge_init() failed\n",
+ __func__);
+ return;
+ }
+ d->ipa_params.notify = usb_notify_cb;
+ d->ipa_params.priv = priv;
+ d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ }
+
d->ipa_params.client = IPA_CLIENT_USB_CONS;
d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
if (d->func_type == USB_FUNC_ECM) {
@@ -227,6 +244,23 @@
__func__, ret);
return;
}
+
+ if (d->func_type == USB_FUNC_MBIM) {
+ connect_params.ipa_usb_pipe_hdl =
+ d->ipa_params.prod_clnt_hdl;
+ connect_params.usb_ipa_pipe_hdl =
+ d->ipa_params.cons_clnt_hdl;
+ connect_params.tethering_mode =
+ TETH_TETHERING_MODE_MBIM;
+ ret = teth_bridge_connect(&connect_params);
+ if (ret) {
+ pr_err("%s:teth_bridge_connect() failed\n",
+ __func__);
+ return;
+ }
+ mbim_configure_params();
+ }
+
if (d->func_type == USB_FUNC_ECM) {
ret = ecm_ipa_connect(d->ipa_params.cons_clnt_hdl,
d->ipa_params.prod_clnt_hdl,
@@ -417,6 +451,7 @@
d->dst_connection_idx = dst_connection_idx;
d->trans = trans;
+ d->func_type = func;
if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
d->ipa_params.src_pipe = &(d->src_pipe_idx);
@@ -425,8 +460,6 @@
d->ipa_params.dst_idx = dst_connection_idx;
}
- d->func_type = func;
-
queue_work(bam_data_wq, &port->connect_w);
return 0;
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 66363eb..521ace0 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -117,6 +117,8 @@
return -ENOMEM;
}
+ hcd_to_bus(hcd)->skip_resume = true;
+
hcd->irq = platform_get_irq(pdev, 0);
if (hcd->irq < 0) {
dev_err(&pdev->dev, "Unable to get IRQ resource\n");
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index faa5625..6727996 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -1057,6 +1057,8 @@
return -ENOMEM;
}
+ hcd_to_bus(hcd)->skip_resume = true;
+
hcd->irq = platform_get_irq(pdev, 0);
if (hcd->irq < 0) {
dev_err(&pdev->dev, "Unable to get IRQ resource\n");
diff --git a/drivers/usb/host/ehci-msm72k.c b/drivers/usb/host/ehci-msm72k.c
index 76cd977..bab330c 100644
--- a/drivers/usb/host/ehci-msm72k.c
+++ b/drivers/usb/host/ehci-msm72k.c
@@ -681,6 +681,8 @@
if (!hcd)
return -ENOMEM;
+ hcd_to_bus(hcd)->skip_resume = true;
+
hcd->irq = platform_get_irq(pdev, 0);
if (hcd->irq < 0) {
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/hbm.c b/drivers/usb/host/hbm.c
index d48a631..1a0c0aa 100644
--- a/drivers/usb/host/hbm.c
+++ b/drivers/usb/host/hbm.c
@@ -39,6 +39,7 @@
#define PIPE_PRODUCER 1
#define MAX_PIPE_NUM 16
#define HBM_QH_MAP_PIPE 0xffffffc0
+#define QTD_CERR_MASK 0xfffff3ff
struct hbm_msm {
u32 *base;
@@ -257,6 +258,7 @@
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct list_head qtd_list;
+ struct ehci_qtd *qtd;
INIT_LIST_HEAD(&qtd_list);
@@ -272,5 +274,11 @@
if (!qh_urb_transaction(ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
+
+ /* set the error counter (CERR) in the qTD token to zero */
+ qtd = list_entry(qtd_list.next, struct ehci_qtd, qtd_list);
+ if (qtd != NULL)
+ qtd->hw_token &= QTD_CERR_MASK;
+
return hbm_submit_async(ehci, urb, &qtd_list, mem_flags);
}
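As background for the new QTD_CERR_MASK: in the EHCI qTD token the 2-bit error counter (CERR) occupies bits [11:10], so the constant is simply the complement of that field. A quick illustrative check (not part of the patch):

	/* Illustrative only: CERR is the 2-bit field at bits [11:10] of the qTD token */
	#define QTD_TOKEN_CERR_SHIFT	10
	#define QTD_TOKEN_CERR_FIELD	(0x3 << QTD_TOKEN_CERR_SHIFT)	/* 0x00000C00 */
	/* (u32)~QTD_TOKEN_CERR_FIELD == 0xFFFFF3FF == QTD_CERR_MASK */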
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 7760d28..5750e0d 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -3651,7 +3651,7 @@
cmd_buf.vmid_idx = MSM_OTG_VMID_IDX;
cmd_buf.mem_type = MSM_OTG_MEM_TYPE;
- ret = scm_call(SCM_SVC_CP, MSM_OTG_CMD_ID, &cmd_buf,
+ ret = scm_call(SCM_SVC_MP, MSM_OTG_CMD_ID, &cmd_buf,
sizeof(cmd_buf), NULL, 0);
if (ret)
diff --git a/drivers/video/msm/Kconfig b/drivers/video/msm/Kconfig
index 1c59a68..590723a 100644
--- a/drivers/video/msm/Kconfig
+++ b/drivers/video/msm/Kconfig
@@ -41,6 +41,9 @@
bool
default n
+config FB_MSM_MDSS_COMMON
+ bool
+
choice
prompt "MDP HW version"
default FB_MSM_MDP22
@@ -88,6 +91,7 @@
config FB_MSM_MDSS
bool "MDSS HW"
+ select FB_MSM_MDSS_COMMON
---help---
The Mobile Display Sub System (MDSS) driver supports devices which
contain MDSS hardware block.
@@ -103,6 +107,10 @@
endchoice
+config FB_MSM_QPIC
+ bool
+ select FB_MSM_MDSS_COMMON
+
config FB_MSM_EBI2
bool
default n
@@ -124,7 +132,7 @@
default n
config FB_MSM_OVERLAY
- depends on FB_MSM_MDP40 && ANDROID_PMEM
+ depends on FB_MSM_MDP40
bool "MDP4 overlay support"
default n
@@ -950,6 +958,21 @@
---help---
Support for EBI2 panel auto detect
+config FB_MSM_QPIC_ILI_QVGA_PANEL
+ bool "Qpic MIPI ILI QVGA Panel"
+ select FB_MSM_QPIC
+ ---help---
+ Support for MIPI ILI QVGA (240x320) panel
+ ILI TECHNOLOGY 9341
+ with on-chip full display RAM
+ use parallel interface
+
+config FB_MSM_QPIC_PANEL_DETECT
+ bool "Qpic Panel Detect"
+ select FB_MSM_QPIC_ILI_QVGA_PANEL
+ ---help---
+ Support for Qpic panel auto detect
+
if FB_MSM_MDSS
source "drivers/video/msm/mdss/Kconfig"
endif
diff --git a/drivers/video/msm/Makefile b/drivers/video/msm/Makefile
index e49e2ba..67c6b48 100644
--- a/drivers/video/msm/Makefile
+++ b/drivers/video/msm/Makefile
@@ -1,4 +1,4 @@
-ifeq ($(CONFIG_FB_MSM_MDSS),y)
+ifeq ($(CONFIG_FB_MSM_MDSS_COMMON),y)
obj-y += mdss/
else
obj-y := msm_fb.o
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 77eb9c2..63a842d 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -1609,7 +1609,6 @@
outpdw(MDP_BASE + 0x0014, 0x0); /* start DMA */
} else if (term == MDP_OVERLAY0_TERM) {
mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- mdp_lut_enable();
outpdw(MDP_BASE + 0x0004, 0);
} else if (term == MDP_OVERLAY1_TERM) {
mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index e50055f..4dd4245 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -29,7 +29,6 @@
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include <linux/file.h>
-#include <linux/android_pmem.h>
#include <linux/major.h>
#include <asm/system.h>
#include <asm/mach-types.h>
@@ -3109,9 +3108,6 @@
{
struct file *file;
int put_needed, ret = 0, fb_num;
-#ifdef CONFIG_ANDROID_PMEM
- unsigned long vstart;
-#endif
*p_need = 0;
if (img->flags & MDP_BLIT_SRC_GEM) {
@@ -3146,13 +3142,6 @@
return mdp4_overlay_iommu_map_buf(img->memory_id, pipe, plane,
start, len, srcp_ihdl);
#endif
-#ifdef CONFIG_ANDROID_PMEM
- if (!get_pmem_file(img->memory_id, start, &vstart,
- len, srcp_file))
- return 0;
- else
- return -EINVAL;
-#endif
}
#ifdef CONFIG_FB_MSM_MIPI_DSI
diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/msm/mdp_ppp.c
index 415b575..2e795fd 100644
--- a/drivers/video/msm/mdp_ppp.c
+++ b/drivers/video/msm/mdp_ppp.c
@@ -22,7 +22,6 @@
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include <linux/file.h>
-#include <linux/android_pmem.h>
#include <linux/major.h>
#include "linux/proc_fs.h"
@@ -548,40 +547,8 @@
format == MDP_Y_CRCB_H2V2) ? 2 : (format == MDP_Y_CBCR_H2V1 || \
format == MDP_Y_CRCB_H2V1) ? 1 : 1)
-#ifdef CONFIG_ANDROID_PMEM
-static void get_len(struct mdp_img *img, struct mdp_rect *rect, uint32_t bpp,
- uint32_t *len0, uint32_t *len1)
-{
- *len0 = IMG_LEN(rect->h, img->width, rect->w, bpp);
- if (IS_PSEUDOPLNR(img->format))
- *len1 = *len0/Y_TO_CRCB_RATIO(img->format);
- else
- *len1 = 0;
-}
-
-static void flush_imgs(struct mdp_blit_req *req, int src_bpp, int dst_bpp,
- struct file *p_src_file, struct file *p_dst_file)
-{
- uint32_t src0_len, src1_len;
-
- if (!(req->flags & MDP_BLIT_NON_CACHED)) {
- /* flush src images to memory before dma to mdp */
- get_len(&req->src, &req->src_rect, src_bpp,
- &src0_len, &src1_len);
-
- flush_pmem_file(p_src_file,
- req->src.offset, src0_len);
-
- if (IS_PSEUDOPLNR(req->src.format))
- flush_pmem_file(p_src_file,
- req->src.offset + src0_len, src1_len);
- }
-
-}
-#else
static void flush_imgs(struct mdp_blit_req *req, int src_bpp, int dst_bpp,
struct file *p_src_file, struct file *p_dst_file) { }
-#endif
static void mdp_start_ppp(struct msm_fb_data_type *mfd, MDPIBUF *iBuf,
struct mdp_blit_req *req, struct file *p_src_file, struct file *p_dst_file)
@@ -1286,9 +1253,6 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
#endif
-#ifdef CONFIG_ANDROID_PMEM
- unsigned long vstart;
-#endif
if (req->flags & MDP_MEMORY_ID_TYPE_FB) {
file = fget_light(img->memory_id, &put_needed);
@@ -1321,21 +1285,10 @@
return -EINVAL;
#endif
-#ifdef CONFIG_ANDROID_PMEM
- if (!get_pmem_file(img->memory_id, start, &vstart, len, srcp_file))
- return ret;
- else
- return -EINVAL;
-#endif
}
void put_img(struct file *p_src_file, struct ion_handle *p_ihdl)
{
-#ifdef CONFIG_ANDROID_PMEM
- if (p_src_file)
- put_pmem_file(p_src_file);
-#endif
-
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
if (!IS_ERR_OR_NULL(p_ihdl))
ion_free(ppp_display_iclient, p_ihdl);
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 17987d4..a710fef 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -7,7 +7,10 @@
mdss-mdp-objs += mdss_mdp_wb.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
+
+ifeq ($(CONFIG_FB_MSM_MDSS),y)
obj-$(CONFIG_DEBUG_FS) += mdss_debug.o
+endif
mdss-dsi-objs := mdss_dsi.o mdss_dsi_host.o
mdss-dsi-objs += mdss_dsi_panel.o
@@ -23,3 +26,7 @@
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp.o
obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
+
+mdss-qpic-objs := mdss_qpic.o mdss_fb.o mdss_qpic_panel.o
+obj-$(CONFIG_FB_MSM_QPIC) += mdss-qpic.o
+obj-$(CONFIG_FB_MSM_QPIC_ILI_QVGA_PANEL) += qpic_panel_ili_qvga.o
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
index 4a06be5..b365f36 100644
--- a/drivers/video/msm/mdss/mdss_dsi.h
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -252,7 +252,7 @@
struct dsi_panel_cmds_list {
struct dsi_cmd_desc *buf;
- char size;
+ u32 size;
char ctrl_state;
};
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 3d632c7..4e74858 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -16,7 +16,6 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
-#include <linux/android_pmem.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/debugfs.h>
@@ -730,19 +729,6 @@
.fb_mmap = mdss_fb_mmap,
};
-static u32 mdss_fb_line_length(u32 fb_index, u32 xres, int bpp)
-{
- /* The adreno GPU hardware requires that the pitch be aligned to
- 32 pixels for color buffers, so for the cases where the GPU
- is writing directly to fb0, the framebuffer pitch
- also needs to be 32 pixel aligned */
-
- if (fb_index == 0)
- return ALIGN(xres, 32) * bpp;
- else
- return xres * bpp;
-}
-
static int mdss_fb_alloc_fbmem(struct msm_fb_data_type *mfd)
{
void *virt = NULL;
@@ -753,16 +739,8 @@
size = PAGE_ALIGN(mfd->fbi->fix.line_length * yres);
if (mfd->index == 0) {
- int dom;
- virt = allocate_contiguous_memory(size, MEMTYPE_EBI1, SZ_1M, 0);
- if (!virt) {
- pr_err("unable to alloc fbmem size=%u\n", size);
- return -ENOMEM;
- }
- phys = memory_pool_node_paddr(virt);
- dom = mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
- msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K,
- 0, &(mfd->iova));
+ if (mdss_mdp_alloc_fb_mem(mfd, size, (u32 *)&phys, &virt))
+ return -ENOMEM;
pr_info("allocating %u bytes at %p (%lx phys) for fb %d\n",
size, virt, phys, mfd->index);
} else {
@@ -921,7 +899,7 @@
var->xres *= 2;
fix->type = panel_info->is_3d_panel;
- fix->line_length = mdss_fb_line_length(mfd->index, var->xres, bpp);
+ fix->line_length = mdss_mdp_fb_stride(mfd->index, var->xres, bpp);
var->yres = panel_info->yres;
var->xres_virtual = var->xres;
@@ -1425,7 +1403,7 @@
return -EINVAL;
}
- mfd->fbi->fix.line_length = mdss_fb_line_length(mfd->index, var->xres,
+ mfd->fbi->fix.line_length = mdss_mdp_fb_stride(mfd->index, var->xres,
var->bits_per_pixel / 8);
if (mfd->panel_reconfig || (mfd->fb_imgType != old_imgType)) {
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 977fc63..f9b05fc 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -46,6 +46,8 @@
#include <mach/msm_bus_board.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
+#include <mach/memory.h>
+#include <mach/msm_memtypes.h>
#include "mdss.h"
#include "mdss_fb.h"
@@ -126,6 +128,39 @@
char *prop_name);
static int mdss_mdp_parse_dt_smp(struct platform_device *pdev);
+int mdss_mdp_alloc_fb_mem(struct msm_fb_data_type *mfd,
+ u32 size, u32 *phys, void **virt)
+{
+ int dom;
+ void *fb_virt;
+ u32 fb_phys;
+ fb_virt = allocate_contiguous_memory(size, MEMTYPE_EBI1, SZ_1M, 0);
+ if (!fb_virt) {
+ pr_err("unable to alloc fbmem size=%u\n", size);
+ return -ENOMEM;
+ }
+ fb_phys = memory_pool_node_paddr(fb_virt);
+ dom = mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
+ msm_iommu_map_contig_buffer(fb_phys, dom, 0, size, SZ_4K,
+ 0, &(mfd->iova));
+ *phys = fb_phys;
+ *virt = fb_virt;
+ return 0;
+}
+
+u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+ /* The adreno GPU hardware requires that the pitch be aligned to
+ 32 pixels for color buffers, so for the cases where the GPU
+ is writing directly to fb0, the framebuffer pitch
+ also needs to be 32 pixel aligned */
+
+ if (fb_index == 0)
+ return ALIGN(xres, 32) * bpp;
+ else
+ return xres * bpp;
+}
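A short worked example of the stride rule above; the resolution and bpp are made-up values, not taken from this patch:

	/* Illustrative only: fb0 pitch is padded to a 32-pixel multiple */
	u32 s0 = mdss_mdp_fb_stride(0, 1080, 4);	/* ALIGN(1080, 32) * 4 == 1088 * 4 == 4352 */
	u32 s1 = mdss_mdp_fb_stride(1, 1080, 4);	/* 1080 * 4 == 4320, no extra alignment */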
+
static inline int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
{
struct mdss_hw *hw;
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index efd93c0..43456ca 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -420,4 +420,7 @@
int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id);
+int mdss_mdp_alloc_fb_mem(struct msm_fb_data_type *mfd,
+ u32 size, u32 *phys, void **virt);
+u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp);
#endif /* MDSS_MDP_H */
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 8515782..e850321 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -71,7 +71,7 @@
}
}
if (flags & MDSS_MDP_PERF_UPDATE_BUS) {
- bus_ab_quota = bus_ab_quota << MDSS_MDP_BUS_FACTOR_SHIFT;
+ bus_ab_quota = bus_ib_quota << MDSS_MDP_BUS_FACTOR_SHIFT;
bus_ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR(bus_ib_quota);
bus_ib_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 98a8202..c6b3472 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -564,6 +564,8 @@
struct ion_client *iclient = mdss_get_ionclient();
struct mdss_panel_data *pdata;
int ret = 0, off;
+ int mdss_mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
+ int mdss_v2_intf_off = 0;
off = 0;
@@ -576,9 +578,13 @@
mdss_mdp_ctl_write(ctl, 0, MDSS_MDP_LM_BORDER_COLOR);
off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+ if (mdss_mdp_rev == MDSS_MDP_HW_REV_102)
+ mdss_v2_intf_off = 0xEC00;
+
/* wait for 1 VSYNC for the pipe to be unstaged */
msleep(20);
- MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+ MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN -
+ mdss_v2_intf_off, 0);
ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_FINISH,
NULL);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 911e29f..1c0d306 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -12,7 +12,6 @@
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
-#include <linux/android_pmem.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/file.h>
@@ -310,7 +309,6 @@
data->srcp_file = NULL;
} else if (data->srcp_file) {
pr_debug("pmem buf=0x%x\n", data->addr);
- put_pmem_file(data->srcp_file);
data->srcp_file = NULL;
} else if (!IS_ERR_OR_NULL(data->srcp_ihdl)) {
pr_debug("ion hdl=%p buf=0x%x\n", data->srcp_ihdl, data->addr);
@@ -414,10 +412,6 @@
pr_err("failed to map ion handle (%d)\n", ret);
return ret;
}
- } else {
- unsigned long vstart;
- ret = get_pmem_file(img->memory_id, start, &vstart, len,
- &data->srcp_file);
}
if (!ret && (img->offset < data->len)) {
diff --git a/drivers/video/msm/mdss/mdss_qpic.c b/drivers/video/msm/mdss/mdss_qpic.c
new file mode 100644
index 0000000..647504f
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_qpic.c
@@ -0,0 +1,708 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/bootmem.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/sps.h>
+#include <mach/clk.h>
+#include <mach/hardware.h>
+
+#include "mdss_fb.h"
+#include "mdss_qpic.h"
+
+static int mdss_qpic_probe(struct platform_device *pdev);
+static int mdss_qpic_remove(struct platform_device *pdev);
+
+struct qpic_data_type *qpic_res;
+
+/* for tuning */
+static u32 use_bam = true;
+static u32 use_irq;
+static u32 use_vsync;
+
+static const struct of_device_id mdss_qpic_dt_match[] = {
+ { .compatible = "qcom,mdss_qpic",},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdss_qpic_dt_match);
+
+static struct platform_driver mdss_qpic_driver = {
+ .probe = mdss_qpic_probe,
+ .remove = mdss_qpic_remove,
+ .shutdown = NULL,
+ .driver = {
+ /*
+ * Simulate mdp hw
+ */
+ .name = "mdp",
+ .of_match_table = mdss_qpic_dt_match,
+ },
+};
+
+int qpic_on(struct msm_fb_data_type *mfd)
+{
+ int ret;
+ ret = mdss_qpic_panel_on(qpic_res->panel_data);
+ return ret;
+}
+
+int qpic_off(struct msm_fb_data_type *mfd)
+{
+ int ret;
+ ret = mdss_qpic_panel_off(qpic_res->panel_data);
+ return ret;
+}
+
+static void mdss_qpic_pan_display(struct msm_fb_data_type *mfd)
+{
+
+ struct fb_info *fbi;
+ u32 offset, fb_offset, size;
+ int bpp;
+
+ if (!mfd) {
+ pr_err("%s: mfd is NULL!", __func__);
+ return;
+ }
+
+ fbi = mfd->fbi;
+
+ bpp = fbi->var.bits_per_pixel / 8;
+ offset = fbi->var.xoffset * bpp +
+ fbi->var.yoffset * fbi->fix.line_length;
+
+ if (offset > fbi->fix.smem_len) {
+ pr_err("invalid fb offset=%u total length=%u\n",
+ offset, fbi->fix.smem_len);
+ return;
+ }
+ fb_offset = (u32)fbi->fix.smem_start + offset;
+
+ mdss_qpic_panel_on(qpic_res->panel_data);
+ size = fbi->var.xres * fbi->var.yres * bpp;
+
+ qpic_send_frame(0, 0, fbi->var.xres, fbi->var.yres,
+ (u32 *)fb_offset, size);
+}
+
+int mdss_mdp_alloc_fb_mem(struct msm_fb_data_type *mfd,
+ u32 size, u32 *phys, void **virt)
+{
+ if (!qpic_res->res_init)
+ return -EINVAL;
+ if (!qpic_res->fb_virt) {
+ qpic_res->fb_virt = (void *)dmam_alloc_coherent(
+ &qpic_res->pdev->dev,
+ size + QPIC_MAX_CMD_BUF_SIZE,
+ &qpic_res->fb_phys,
+ GFP_KERNEL);
+ pr_err("%s size=%d vir_addr=%x phys_addr=%x",
+ __func__, size, (int)qpic_res->fb_virt,
+ (int)qpic_res->fb_phys);
+ if (!qpic_res->fb_virt)
+ return -ENOMEM;
+ qpic_res->cmd_buf_virt = qpic_res->fb_virt + size;
+ qpic_res->cmd_buf_phys = qpic_res->fb_phys + size;
+ }
+ *phys = qpic_res->fb_phys;
+ *virt = qpic_res->fb_virt;
+ return 0;
+}
+
+u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+ return xres * bpp;
+}
+
+int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
+{
+ mfd->on_fnc = qpic_on;
+ mfd->off_fnc = qpic_off;
+ mfd->dma_fnc = mdss_qpic_pan_display;
+ return 0;
+}
+
+u32 mdss_get_panel_framerate(struct msm_fb_data_type *mfd)
+{
+ return qpic_panel_get_framerate();
+}
+
+int qpic_register_panel(struct mdss_panel_data *pdata)
+{
+ struct platform_device *mdss_fb_dev = NULL;
+ int rc;
+
+ mdss_fb_dev = platform_device_alloc("mdss_fb", pdata->panel_info.pdest);
+ if (!mdss_fb_dev) {
+ pr_err("unable to allocate mdss_fb device\n");
+ return -ENOMEM;
+ }
+
+ mdss_fb_dev->dev.platform_data = pdata;
+
+ rc = platform_device_add(mdss_fb_dev);
+ if (rc) {
+ platform_device_put(mdss_fb_dev);
+ pr_err("unable to probe mdss_fb device (%d)\n", rc);
+ return rc;
+ }
+
+ qpic_res->panel_data = pdata;
+
+ return rc;
+}
+
+int qpic_init_sps(struct platform_device *pdev,
+ struct qpic_sps_endpt *end_point)
+{
+ int rc = 0;
+ struct sps_pipe *pipe_handle;
+ struct sps_connect *sps_config = &end_point->config;
+ struct sps_register_event *sps_event = &end_point->bam_event;
+ struct sps_bam_props bam = {0};
+ u32 bam_handle = 0;
+
+ if (qpic_res->sps_init)
+ return 0;
+ bam.phys_addr = qpic_res->qpic_phys + 0x4000;
+ bam.virt_addr = qpic_res->qpic_base + 0x4000;
+ bam.irq = qpic_res->irq - 4;
+ bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
+
+ rc = sps_phy2h(bam.phys_addr, &bam_handle);
+ if (rc)
+ rc = sps_register_bam_device(&bam, &bam_handle);
+ if (rc) {
+ pr_err("%s bam_handle is NULL", __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ pipe_handle = sps_alloc_endpoint();
+ if (!pipe_handle) {
+ pr_err("sps_alloc_endpoint() failed\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = sps_get_config(pipe_handle, sps_config);
+ if (rc) {
+ pr_err("sps_get_config() failed %d\n", rc);
+ goto free_endpoint;
+ }
+
+ /* WRITE CASE: source - system memory; destination - BAM */
+ sps_config->source = SPS_DEV_HANDLE_MEM;
+ sps_config->destination = bam_handle;
+ sps_config->mode = SPS_MODE_DEST;
+ sps_config->dest_pipe_index = 6;
+
+ sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
+ sps_config->lock_group = 0;
+ /*
+ * The descriptor FIFO is cyclic, so it must hold one more entry than
+ * the maximum number of descriptors outstanding at any time. 64
+ * entries of sizeof(struct sps_iovec) are allocated here.
+ */
+ sps_config->desc.size = (64) *
+ sizeof(struct sps_iovec);
+ sps_config->desc.base = dmam_alloc_coherent(&pdev->dev,
+ sps_config->desc.size,
+ &sps_config->desc.phys_base,
+ GFP_KERNEL);
+ if (!sps_config->desc.base) {
+ pr_err("dmam_alloc_coherent() failed for size %x\n",
+ sps_config->desc.size);
+ rc = -ENOMEM;
+ goto free_endpoint;
+ }
+ memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+ rc = sps_connect(pipe_handle, sps_config);
+ if (rc) {
+ pr_err("sps_connect() failed %d\n", rc);
+ goto free_endpoint;
+ }
+
+ init_completion(&end_point->completion);
+ sps_event->mode = SPS_TRIGGER_WAIT;
+ sps_event->options = SPS_O_EOT;
+ sps_event->xfer_done = &end_point->completion;
+ sps_event->user = (void *)qpic_res;
+
+ rc = sps_register_event(pipe_handle, sps_event);
+ if (rc) {
+ pr_err("sps_register_event() failed %d\n", rc);
+ goto sps_disconnect;
+ }
+
+ end_point->handle = pipe_handle;
+ qpic_res->sps_init = true;
+ goto out;
+sps_disconnect:
+ sps_disconnect(pipe_handle);
+free_endpoint:
+ sps_free_endpoint(pipe_handle);
+out:
+ return rc;
+}
+
+void mdss_qpic_reset(void)
+{
+ u32 time_end;
+
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_RESET, 1 << 0);
+ /* wait 100 us after reset as suggested by hw */
+ usleep(100);
+ time_end = (u32)ktime_to_ms(ktime_get()) + QPIC_MAX_VSYNC_WAIT_TIME;
+ while (((QPIC_INP(QPIC_REG_QPIC_LCDC_STTS) & (1 << 8)) == 0)) {
+ if ((u32)ktime_to_ms(ktime_get()) > time_end) {
+ pr_err("%s reset not finished", __func__);
+ break;
+ }
+ /* wait 100 us before the next poll (interval chosen by experiment) */
+ usleep(100);
+ }
+}
+
+void qpic_interrupt_en(u32 en)
+{
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+ if (en) {
+ if (!qpic_res->irq_ena) {
+ qpic_res->irq_ena = true;
+ enable_irq(qpic_res->irq);
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN,
+ (1 << 0) | (1 << 2));
+ }
+ } else {
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
+ disable_irq(qpic_res->irq);
+ qpic_res->irq_ena = false;
+ }
+}
+
+static irqreturn_t qpic_irq_handler(int irq, void *ptr)
+{
+ u32 data;
+ data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+ return 0;
+}
+
+int qpic_flush_buffer_bam(u32 cmd, u32 len, u32 *param, u32 is_cmd)
+{
+ int ret = 0;
+ u32 phys_addr, cfg2, block_len , flags;
+ if (is_cmd) {
+ memcpy((u8 *)qpic_res->cmd_buf_virt, param, len);
+ invalidate_caches((unsigned long)qpic_res->cmd_buf_virt,
+ len,
+ (unsigned long)qpic_res->cmd_buf_phys);
+ phys_addr = qpic_res->cmd_buf_phys;
+ } else {
+ phys_addr = (u32)param;
+ }
+
+ cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+ cfg2 &= ~0xFF;
+ cfg2 |= cmd;
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+ block_len = 0x7FF0;
+ while (len > 0) {
+ if (len <= 0x7FF0) {
+ flags = SPS_IOVEC_FLAG_EOT;
+ block_len = len;
+ } else {
+ flags = 0;
+ }
+ ret = sps_transfer_one(qpic_res->qpic_endpt.handle,
+ phys_addr, block_len, NULL, flags);
+ if (ret)
+ pr_err("failed to submit command %x ret %d\n",
+ cmd, ret);
+ phys_addr += block_len;
+ len -= block_len;
+ }
+ ret = wait_for_completion_interruptible_timeout(
+ &qpic_res->qpic_endpt.completion,
+ msecs_to_jiffies(100 * 4));
+ if (ret <= 0)
+ pr_err("%s timeout %x", __func__, ret);
+ else
+ ret = 0;
+ return ret;
+}
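As an illustration of the chunking above (the frame size is an assumption, not from this patch): a 240x320 frame at 32 bpp is 307200 bytes, which the loop splits into nine 0x7FF0-byte descriptors plus a final 12432-byte descriptor carrying SPS_IOVEC_FLAG_EOT, ten descriptors in total and well within the 64-entry FIFO sized in qpic_init_sps():

	/* Illustrative only: descriptor count for a hypothetical 307200-byte frame */
	u32 len = 240 * 320 * 4;	/* 307200 bytes */
	u32 ndesc = 0;

	while (len > 0) {
		u32 block = (len <= 0x7FF0) ? len : 0x7FF0;

		ndesc++;		/* only the last block sets SPS_IOVEC_FLAG_EOT */
		len -= block;
	}
	/* ndesc == 10: nine blocks of 32752 bytes plus one of 12432 bytes */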
+
+int qpic_flush_buffer_sw(u32 cmd, u32 len, u32 *param, u32 is_cmd)
+{
+ u32 bytes_left, space, data, cfg2, time_end;
+ int i, ret = 0;
+ if ((len <= (sizeof(u32) * 4)) && (is_cmd)) {
+ len >>= 2;/* len in dwords */
+ data = 0;
+ for (i = 0; i < len; i++)
+ data |= param[i] << (8 * i);
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT, len);
+ QPIC_OUTP(QPIC_REG_LCD_DEVICE_CMD0 + (4 * cmd), data);
+ return 0;
+ }
+ if ((len & 0x1) != 0) {
+ pr_err("%s: number of bytes needs be even", __func__);
+ len = (len + 1) & (~0x1);
+ }
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+ cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+ cfg2 |= (1 << 24); /* transparent mode */
+ cfg2 &= ~0xFF;
+ cfg2 |= cmd;
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_SOF, 0x0);
+ bytes_left = len;
+ while (bytes_left > 0) {
+ time_end = (u32)ktime_to_ms(ktime_get()) +
+ QPIC_MAX_VSYNC_WAIT_TIME;
+ while (1) {
+ data = QPIC_INP(QPIC_REG_QPIC_LCDC_STTS);
+ data &= 0x3F;
+ if (data == 0)
+ break;
+ /* wait 10 us before the next poll (interval chosen by experiment) */
+ usleep(10);
+ if (ktime_to_ms(ktime_get()) > time_end) {
+ pr_err("%s time out", __func__);
+ ret = -EBUSY;
+ goto exit_send_cmd_sw;
+ }
+ }
+ space = (16 - data);
+
+ while ((space > 0) && (bytes_left > 0)) {
+ /* write to fifo */
+ if (bytes_left >= 4) {
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
+ param[0]);
+ param++;
+ bytes_left -= 4;
+ space--; /* one FIFO slot consumed per word written */
+ } else if (bytes_left == 2) {
+ QPIC_OUTPW(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
+ *(u16 *)param);
+ bytes_left -= 2;
+ }
+ }
+ }
+ /* finished */
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_EOF, 0x0);
+
+ time_end = (u32)ktime_to_ms(ktime_get()) + QPIC_MAX_VSYNC_WAIT_TIME;
+ while (1) {
+ data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
+ if (data & (1 << 2))
+ break;
+ /* wait 10 us before the next poll (interval chosen by experiment) */
+ usleep(10);
+ if (ktime_to_ms(ktime_get()) > time_end) {
+ pr_err("%s wait for eof time out", __func__);
+ ret = -EBUSY;
+ goto exit_send_cmd_sw;
+ }
+ }
+exit_send_cmd_sw:
+ cfg2 &= ~(1 << 24);
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+ return ret;
+}
+
+int qpic_flush_buffer(u32 cmd, u32 len, u32 *param, u32 is_cmd)
+{
+ if (use_bam) {
+ if (is_cmd)
+ return qpic_flush_buffer_sw(cmd, len, param, is_cmd);
+ else
+ return qpic_flush_buffer_bam(cmd, len, param, is_cmd);
+ } else {
+ return qpic_flush_buffer_sw(cmd, len, param, is_cmd);
+ }
+}
+
+int mdss_qpic_init(void)
+{
+ int ret = 0;
+ u32 data;
+ mdss_qpic_reset();
+
+ pr_info("%s version=%x", __func__, QPIC_INP(QPIC_REG_LCDC_VERSION));
+ data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
+ /* clear vsync wait, bam mode = 0 */
+ data &= ~(3 << 0);
+ data &= ~(0x1f << 3);
+ data |= (1 << 3); /* threshold */
+ data |= (1 << 8); /* lcd_en */
+ data &= ~(0x1f << 9);
+ data |= (1 << 9); /* threshold */
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
+
+ if (use_irq && !qpic_res->irq_requested) {
+ ret = devm_request_irq(&qpic_res->pdev->dev,
+ qpic_res->irq, qpic_irq_handler,
+ IRQF_DISABLED, "QPIC", qpic_res);
+ if (ret) {
+ pr_err("qpic request_irq() failed!\n");
+ use_irq = false;
+ }
+ qpic_res->irq_requested = true;
+ }
+
+ qpic_interrupt_en(use_irq);
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG0, 0x02108501);
+ data = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+ data &= ~(0xFFF);
+ data |= 0x200; /* XRGB */
+ data |= 0x2C;
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, data);
+
+ if (use_bam) {
+ qpic_init_sps(qpic_res->pdev, &qpic_res->qpic_endpt);
+ data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
+ data |= (1 << 1);
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
+ }
+ /* TE enable */
+ if (use_vsync) {
+ data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
+ data |= (1 << 0);
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
+ }
+
+ return ret;
+}
+
+static int mdss_qpic_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int rc = 0;
+
+ if (!pdev->dev.of_node) {
+ pr_err("qpic driver only supports device tree probe\n");
+ return -ENOTSUPP;
+ }
+
+ if (!qpic_res)
+ qpic_res = devm_kzalloc(&pdev->dev,
+ sizeof(*qpic_res), GFP_KERNEL);
+
+ if (!qpic_res)
+ return -ENOMEM;
+
+ if (qpic_res->res_init) {
+ pr_err("qpic already initialized\n");
+ return -EINVAL;
+ }
+
+ pdev->id = 0;
+
+ qpic_res->pdev = pdev;
+ platform_set_drvdata(pdev, qpic_res);
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "qpic_base");
+ if (!res) {
+ pr_err("unable to get QPIC reg base address\n");
+ rc = -ENOMEM;
+ goto probe_done;
+ }
+
+ qpic_res->qpic_reg_size = resource_size(res);
+ qpic_res->qpic_base = devm_ioremap(&pdev->dev, res->start,
+ qpic_res->qpic_reg_size);
+ if (unlikely(!qpic_res->qpic_base)) {
+ pr_err("unable to map MDSS QPIC base\n");
+ rc = -ENOMEM;
+ goto probe_done;
+ }
+ qpic_res->qpic_phys = res->start;
+ pr_info("MDSS QPIC HW Base phy_Address=0x%x virt=0x%x\n",
+ (int) res->start,
+ (int) qpic_res->qpic_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ pr_err("unable to get QPIC irq\n");
+ rc = -ENOMEM;
+ goto probe_done;
+ }
+
+ qpic_res->irq = res->start;
+ qpic_res->res_init = true;
+probe_done:
+ return rc;
+}
+
+static int mdss_qpic_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int __init mdss_qpic_driver_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mdss_qpic_driver);
+ if (ret)
+ pr_err("mdss_qpic_register_driver() failed!\n");
+ return ret;
+}
+
+module_init(mdss_qpic_driver_init);
+
+int mdss_mdp_pa_config(struct mdss_mdp_ctl *ctl,
+ struct mdp_pa_cfg_data *config, u32 *copyback)
+{
+ return 0;
+}
+
+int mdss_mdp_pcc_config(struct mdss_mdp_ctl *ctl,
+ struct mdp_pcc_cfg_data *cfg_ptr, u32 *copyback)
+{
+ return 0;
+}
+
+int mdss_mdp_igc_lut_config(struct mdss_mdp_ctl *ctl,
+ struct mdp_igc_lut_data *config, u32 *copyback)
+{
+ return 0;
+}
+
+int mdss_mdp_argc_config(struct mdss_mdp_ctl *ctl,
+ struct mdp_pgc_lut_data *config, u32 *copyback)
+{
+ return 0;
+}
+
+int mdss_mdp_hist_lut_config(struct mdss_mdp_ctl *ctl,
+ struct mdp_hist_lut_data *config, u32 *copyback)
+{
+ return 0;
+}
+
+int mdss_mdp_dither_config(struct mdss_mdp_ctl *ctl,
+ struct mdp_dither_cfg_data *config, u32 *copyback)
+{
+ return 0;
+}
+
+int mdss_mdp_gamut_config(struct mdss_mdp_ctl *ctl,
+ struct mdp_gamut_cfg_data *config, u32 *copyback)
+{
+ return 0;
+}
+
+int mdss_mdp_histogram_start(struct mdss_mdp_ctl *ctl,
+ struct mdp_histogram_start_req *req)
+{
+ return 0;
+}
+
+int mdss_mdp_histogram_stop(struct mdss_mdp_ctl *ctl, u32 block)
+{
+ return 0;
+}
+
+int mdss_mdp_hist_collect(struct mdss_mdp_ctl *ctl,
+ struct mdp_histogram_data *hist,
+ u32 *hist_data_addr)
+{
+ return 0;
+}
+
+int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
+{
+ return 0;
+}
+
+void mdss_mdp_clk_ctrl(int enable, int isr)
+{
+}
+
+int mdss_mdp_copy_splash_screen(struct mdss_panel_data *pdata)
+{
+ return 0;
+}
+
+void mdss_mdp_footswitch_ctrl_splash(int on)
+{
+}
+
+int msm_fb_writeback_init(struct fb_info *info)
+{
+ return 0;
+}
+EXPORT_SYMBOL(msm_fb_writeback_init);
+
+int msm_fb_writeback_start(struct fb_info *info)
+{
+ return 0;
+}
+EXPORT_SYMBOL(msm_fb_writeback_start);
+
+int msm_fb_writeback_queue_buffer(struct fb_info *info,
+ struct msmfb_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL(msm_fb_writeback_queue_buffer);
+
+int msm_fb_writeback_dequeue_buffer(struct fb_info *info,
+ struct msmfb_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL(msm_fb_writeback_dequeue_buffer);
+
+int msm_fb_writeback_stop(struct fb_info *info)
+{
+ return 0;
+}
+EXPORT_SYMBOL(msm_fb_writeback_stop);
+
+int msm_fb_writeback_terminate(struct fb_info *info)
+{
+ return 0;
+}
+EXPORT_SYMBOL(msm_fb_writeback_terminate);
+
+int msm_fb_get_iommu_domain(struct fb_info *info, int domain)
+{
+ return 0;
+}
+EXPORT_SYMBOL(msm_fb_get_iommu_domain);
diff --git a/drivers/video/msm/mdss/mdss_qpic.h b/drivers/video/msm/mdss/mdss_qpic.h
new file mode 100644
index 0000000..086e8c8
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_qpic.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_QPIC_H
+#define MDSS_QPIC_H
+
+#include <linux/list.h>
+#include <mach/scm-io.h>
+#include <mach/sps.h>
+
+#include "mdss_panel.h"
+
+#define QPIC_REG_QPIC_LCDC_CTRL 0x22000
+#define QPIC_REG_LCDC_VERSION 0x22004
+#define QPIC_REG_QPIC_LCDC_IRQ_EN 0x22008
+#define QPIC_REG_QPIC_LCDC_IRQ_STTS 0x2200C
+#define QPIC_REG_QPIC_LCDC_IRQ_CLR 0x22010
+#define QPIC_REG_QPIC_LCDC_STTS 0x22014
+#define QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT 0x22018
+#define QPIC_REG_QPIC_LCDC_CFG0 0x22020
+#define QPIC_REG_QPIC_LCDC_CFG1 0x22024
+#define QPIC_REG_QPIC_LCDC_CFG2 0x22028
+#define QPIC_REG_QPIC_LCDC_RESET 0x2202C
+#define QPIC_REG_QPIC_LCDC_FIFO_SOF 0x22100
+#define QPIC_REG_LCD_DEVICE_CMD0 0x23000
+#define QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0 0x22140
+#define QPIC_REG_QPIC_LCDC_FIFO_EOF 0x22180
+
+#define QPIC_OUTP(off, data) \
+ writel_relaxed((data), qpic_res->qpic_base + (off))
+#define QPIC_OUTPW(off, data) \
+ writew_relaxed((data), qpic_res->qpic_base + (off))
+#define QPIC_INP(off) \
+ readl_relaxed(qpic_res->qpic_base + (off))
+
+#define QPIC_MAX_VSYNC_WAIT_TIME 500 /* in ms */
+#define QPIC_MAX_CMD_BUF_SIZE 512
+
+int mdss_qpic_init(void);
+int qpic_flush_buffer(u32 cmd, u32 len, u32 *param, u32 is_cmd);
+
+u32 msm_qpic_get_bam_hdl(struct sps_bam_props *bam);
+int mdss_qpic_panel_on(struct mdss_panel_data *pdata);
+int mdss_qpic_panel_off(struct mdss_panel_data *pdata);
+int qpic_register_panel(struct mdss_panel_data *pdata);
+int ili9341_on(void);
+
+/* Structure that defines an SPS end point for a BAM pipe. */
+struct qpic_sps_endpt {
+ struct sps_pipe *handle;
+ struct sps_connect config;
+ struct sps_register_event bam_event;
+ struct completion completion;
+};
+
+struct qpic_data_type {
+ u32 rev;
+ struct platform_device *pdev;
+ size_t qpic_reg_size;
+ u32 qpic_phys;
+ char __iomem *qpic_base;
+ u32 irq;
+ u32 irq_ena;
+ u32 res_init;
+ void *fb_virt;
+ u32 fb_phys;
+ void *cmd_buf_virt;
+ u32 cmd_buf_phys;
+ struct qpic_sps_endpt qpic_endpt;
+ u32 sps_init;
+ u32 irq_requested;
+ struct mdss_panel_data *panel_data;
+};
+
+u32 qpic_send_frame(
+ u32 x_start,
+ u32 y_start,
+ u32 x_end,
+ u32 y_end,
+ u32 *data,
+ u32 total_bytes);
+
+u32 qpic_panel_get_framerate(void);
+
+#endif /* MDSS_QPIC_H */
diff --git a/drivers/video/msm/mdss/mdss_qpic_panel.c b/drivers/video/msm/mdss/mdss_qpic_panel.c
new file mode 100644
index 0000000..9825abc
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_qpic_panel.c
@@ -0,0 +1,246 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+
+#include <mach/sps.h>
+
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_qpic.h"
+#include "mdss_qpic_panel.h"
+
+static u32 panel_is_on;
+static u32 panel_refresh_rate;
+static int (*qpic_panel_on)(void);
+static void (*qpic_panel_off)(void);
+static int (*qpic_panel_init)(struct platform_device *pdev,
+ struct device_node *np);
+
+u32 qpic_panel_get_framerate(void)
+{
+ return panel_refresh_rate;
+}
+
+u32 qpic_send_panel_cmd(u32 cmd, u32 *val, u32 length)
+{
+ u32 ret;
+ u32 cmd_index;
+ u32 size;
+ u32 buffer[LCDC_INTERNAL_BUFFER_SIZE];
+ u32 i;
+
+ cmd_index = LCDC_EXTRACT_OP_CMD(cmd);
+ size = LCDC_EXTRACT_OP_SIZE(cmd);
+ if (size == INV_SIZE)
+ size = length;
+ /* short commands and pixel-data commands need no conversion */
+ if ((cmd == OP_WRITE_MEMORY_CONTINUE) ||
+ (cmd == OP_WRITE_MEMORY_START)) {
+ ret = qpic_flush_buffer(cmd_index, size, val, false);
+ } else {
+ if (size > LCDC_INTERNAL_BUFFER_SIZE)
+ size = LCDC_INTERNAL_BUFFER_SIZE;
+ /* encoding fixup: expand each source byte into its own 32-bit word */
+ for (i = 0; i < size; i += sizeof(u32)) {
+ buffer[i] = (val[(i>>2)] >> 0) & 0xff;
+ buffer[i+1] = (val[(i>>2)] >> 8) & 0xff;
+ buffer[i+2] = (val[(i>>2)] >> 16) & 0xff;
+ buffer[i+3] = (val[(i>>2)] >> 24) & 0xff;
+ }
+ ret = qpic_flush_buffer(cmd_index,
+ size * sizeof(u32), buffer, true);
+ }
+ return ret;
+}
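A worked example of the byte expansion above, using a made-up parameter value: each source byte ends up in its own 32-bit element of the command buffer:

	/* Illustrative only: expansion of one 32-bit parameter word */
	u32 val0 = 0x2A1B0C0D;
	u32 buf[4];

	buf[0] = (val0 >>  0) & 0xff;	/* 0x0D */
	buf[1] = (val0 >>  8) & 0xff;	/* 0x0C */
	buf[2] = (val0 >> 16) & 0xff;	/* 0x1B */
	buf[3] = (val0 >> 24) & 0xff;	/* 0x2A */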
+
+u32 qpic_panel_set_cmd_only(u32 command)
+{
+ u32 param;
+ return qpic_send_panel_cmd(command, &param, 0);
+}
+
+/* write a frame of pixels to a MIPI screen */
+u32 qpic_send_frame(u32 x_start,
+ u32 y_start,
+ u32 x_end,
+ u32 y_end,
+ u32 *data,
+ u32 total_bytes)
+{
+ u32 param;
+ u32 status;
+ u32 start_0_7;
+ u32 end_0_7;
+ u32 start_8_15;
+ u32 end_8_15;
+
+ /* convert to 16 bit representation */
+ x_start = x_start & 0xffff;
+ y_start = y_start & 0xffff;
+ x_end = x_end & 0xffff;
+ y_end = y_end & 0xffff;
+
+ /* set column/page */
+ start_0_7 = x_start & 0xff;
+ end_0_7 = x_end & 0xff;
+ start_8_15 = (x_start >> 8) & 0xff;
+ end_8_15 = (x_end >> 8) & 0xff;
+ param = (start_8_15 << 0) | (start_0_7 << 8) |
+ (end_8_15 << 16) | (end_0_7 << 24U);
+ status = qpic_send_panel_cmd(OP_SET_COLUMN_ADDRESS, &param, 0);
+ if (status) {
+ pr_err("Failed to set column address");
+ return status;
+ }
+
+ start_0_7 = y_start & 0xff;
+ end_0_7 = y_end & 0xff;
+ start_8_15 = (y_start >> 8) & 0xff;
+ end_8_15 = (y_end >> 8) & 0xff;
+ param = (start_8_15 << 0) | (start_0_7 << 8) |
+ (end_8_15 << 16) | (end_0_7 << 24U);
+ status = qpic_send_panel_cmd(OP_SET_PAGE_ADDRESS, &param, 0);
+ if (status) {
+ pr_err("Failed to set page address");
+ return status;
+ }
+
+ status = qpic_send_panel_cmd(OP_WRITE_MEMORY_START,
+ &(data[0]), total_bytes);
+ if (status) {
+ pr_err("Failed to start memory write");
+ return status;
+ }
+ return 0;
+}
+
+int mdss_qpic_panel_on(struct mdss_panel_data *pdata)
+{
+ int rc = 0;
+
+ if (panel_is_on)
+ return 0;
+ mdss_qpic_init();
+
+ if (qpic_panel_on)
+ rc = qpic_panel_on();
+ if (rc)
+ return rc;
+ panel_is_on = true;
+ return 0;
+}
+
+int mdss_qpic_panel_off(struct mdss_panel_data *pdata)
+{
+ if (qpic_panel_off)
+ qpic_panel_off();
+ panel_is_on = false;
+ return 0;
+}
+
+static int mdss_panel_parse_dt(struct platform_device *pdev,
+ struct mdss_panel_data *panel_data)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 res[6], tmp;
+ int rc;
+
+ rc = of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2);
+ if (rc) {
+ pr_err("%s:%d, panel resolution not specified\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ panel_data->panel_info.xres = (!rc ? res[0] : 240);
+ panel_data->panel_info.yres = (!rc ? res[1] : 320);
+ rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp);
+ if (rc) {
+ pr_err("%s:%d, panel bpp not specified\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ panel_data->panel_info.bpp = (!rc ? tmp : 24);
+ of_property_read_u32(np, "qcom,refresh_rate", &panel_refresh_rate);
+
+ panel_data->panel_info.type = EBI2_PANEL;
+ panel_data->panel_info.pdest = DISPLAY_1;
+
+ if (qpic_panel_init)
+ rc = qpic_panel_init(pdev, np);
+
+ return rc;
+}
+
+static int __devinit mdss_qpic_panel_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ static struct mdss_panel_data vendor_pdata;
+ static const char *panel_name;
+
+ pr_debug("%s:%d, debug info id=%d", __func__, __LINE__, pdev->id);
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ panel_name = of_get_property(pdev->dev.of_node, "label", NULL);
+ if (!panel_name)
+ pr_info("%s:%d, panel name not specified\n",
+ __func__, __LINE__);
+ else
+ pr_info("%s: Panel Name = %s\n", __func__, panel_name);
+
+ /* select panel according to label */
+ qpic_panel_init = ili9341_init;
+ qpic_panel_on = ili9341_on;
+ qpic_panel_off = ili9341_off;
+
+ rc = mdss_panel_parse_dt(pdev, &vendor_pdata);
+ if (rc)
+ return rc;
+
+ rc = qpic_register_panel(&vendor_pdata);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static const struct of_device_id mdss_qpic_panel_match[] = {
+ {.compatible = "qcom,mdss-qpic-panel"},
+ {}
+};
+
+static struct platform_driver this_driver = {
+ .probe = mdss_qpic_panel_probe,
+ .driver = {
+ .name = "qpic_panel",
+ .of_match_table = mdss_qpic_panel_match,
+ },
+};
+
+static int __init mdss_qpic_panel_init(void)
+{
+ return platform_driver_register(&this_driver);
+}
+MODULE_DEVICE_TABLE(of, mdss_qpic_panel_match);
+module_init(mdss_qpic_panel_init);
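The command layer above is the whole interface a QPIC panel driver needs: qpic_panel_set_cmd_only() for zero-argument DCS commands and qpic_send_frame() for pixel data. A minimal sketch of a caller follows; the frame-buffer pointer, its byte count and the example_push_qvga_frame() name are illustrative assumptions, not symbols introduced by this patch.

static int example_push_qvga_frame(u32 *fb, u32 fb_bytes)
{
	u32 ret;

	/* make sure the panel is scanning out before streaming pixels */
	ret = qpic_panel_set_cmd_only(OP_SET_DISPLAY_ON);
	if (ret)
		return -EIO;

	/* full 240x320 window; pixel data goes out via WRITE_MEMORY_START */
	ret = qpic_send_frame(0, 0, 239, 319, fb, fb_bytes);
	if (ret)
		pr_err("example: frame push failed %d\n", ret);

	return ret ? -EIO : 0;
}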
diff --git a/drivers/video/msm/mdss/mdss_qpic_panel.h b/drivers/video/msm/mdss/mdss_qpic_panel.h
new file mode 100644
index 0000000..76f7dc2
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_qpic_panel.h
@@ -0,0 +1,114 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_QPIC_PANEL_H
+#define MDSS_QPIC_PANEL_H
+
+#include <linux/list.h>
+#include <mach/sps.h>
+
+#include "mdss_panel.h"
+
+#define LCDC_INTERNAL_BUFFER_SIZE 30
+
+/**
+ Macros for coding MIPI commands
+*/
+/* Size of argument to MIPI command is variable */
+#define INV_SIZE 0xFFFF
+/* MIPI {command, argument size} tuple */
+#define OP_SIZE_PAIR(op, size) ((op<<16) | size)
+/* extract size from command identifier */
+#define LCDC_EXTRACT_OP_SIZE(op_identifier) ((op_identifier&0xFFFF))
+/* extract command id from command identifier */
+#define LCDC_EXTRACT_OP_CMD(op_identifier) (((op_identifier>>16)&0xFFFF))
+
+
+/* MIPI standard definitions */
+#define LCDC_ADDRESS_MODE_ORDER_BOTTOM_TO_TOP 0x80
+#define LCDC_ADDRESS_MODE_ORDER_RIGHT_TO_LEFT 0x40
+#define LCDC_ADDRESS_MODE_ORDER_REVERSE 0x20
+#define LCDC_ADDRESS_MODE_ORDER_REFRESH_BOTTOM_TO_TOP 0x10
+#define LCDC_ADDRESS_MODE_ORDER_BGER_RGB 0x08
+#define LCDC_ADDRESS_MODE_ORDER_REFERESH_RIGHT_TO_LEFT 0x04
+#define LCDC_ADDRESS_MODE_FLIP_HORIZONTAL 0x02
+#define LCDC_ADDRESS_MODE_FLIP_VERTICAL 0x01
+
+#define LCDC_PIXEL_FORMAT_3_BITS_PER_PIXEL 0x1
+#define LCDC_PIXEL_FORMAT_8_BITS_PER_PIXEL 0x2
+#define LCDC_PIXEL_FORMAT_12_BITS_PER_PIXEL 0x3
+#define LCDC_PIXEL_FORMAT_16_BITS_PER_PIXEL 0x5
+#define LCDC_PIXEL_FORMAT_18_BITS_PER_PIXEL 0x6
+#define LCDC_PIXEL_FORMAT_24_BITS_PER_PIXEL 0x7
+
+#define LCDC_CREATE_PIXEL_FORMAT(dpi_format, dbi_format) \
+ (dbi_format | (dpi_format<<4))
+
+#define POWER_MODE_IDLE_ON 0x40
+#define POWER_MODE_PARTIAL_ON 0x20
+#define POWER_MODE_SLEEP_ON 0x10
+#define POWER_MODE_NORMAL_ON 0x08
+#define POWER_MODE_DISPLAY_ON 0x04
+
+#define LCDC_DISPLAY_MODE_SCROLLING_ON 0x80
+#define LCDC_DISPLAY_MODE_INVERSION_ON 0x20
+#define LCDC_DISPLAY_MODE_GAMMA_MASK 0x07
+
+/**
+ * LCDC MIPI Type B supported commands
+ */
+enum {
+ OP_ENTER_IDLE_MODE = OP_SIZE_PAIR(0x39, 0),
+ OP_ENTER_INVERT_MODE = OP_SIZE_PAIR(0x21, 0),
+ OP_ENTER_NORMAL_MODE = OP_SIZE_PAIR(0x13, 0),
+ OP_ENTER_PARTIAL_MODE = OP_SIZE_PAIR(0x12, 0),
+ OP_ENTER_SLEEP_MODE = OP_SIZE_PAIR(0x10, 0),
+ OP_EXIT_INVERT_MODE = OP_SIZE_PAIR(0x20, 0),
+ OP_EXIT_SLEEP_MODE = OP_SIZE_PAIR(0x11, 0),
+ OP_EXIT_IDLE_MODE = OP_SIZE_PAIR(0x38, 0),
+ OP_GET_ADDRESS_MODE = OP_SIZE_PAIR(0x0B, 1),
+ OP_GET_BLUE_CHANNEL = OP_SIZE_PAIR(0x08, 1),
+ OP_GET_DIAGNOSTIC_RESULT = OP_SIZE_PAIR(0x0F, 2),
+ OP_GET_DISPLAY_MODE = OP_SIZE_PAIR(0x0D, 1),
+ OP_GET_GREEN_CHANNEL = OP_SIZE_PAIR(0x07, 1),
+ OP_GET_PIXEL_FORMAT = OP_SIZE_PAIR(0x0C, 1),
+ OP_GET_POWER_MODE = OP_SIZE_PAIR(0x0A, 1),
+ OP_GET_RED_CHANNEL = OP_SIZE_PAIR(0x06, 1),
+ OP_GET_SCANLINE = OP_SIZE_PAIR(0x45, 2),
+ OP_GET_SIGNAL_MODE = OP_SIZE_PAIR(0x0E, 1),
+ OP_NOP = OP_SIZE_PAIR(0x00, 0),
+ OP_READ_DDB_CONTINUE = OP_SIZE_PAIR(0xA8, INV_SIZE),
+ OP_READ_DDB_START = OP_SIZE_PAIR(0xA1, INV_SIZE),
+ OP_READ_MEMORY_CONTINUE = OP_SIZE_PAIR(0x3E, INV_SIZE),
+ OP_READ_MEMORY_START = OP_SIZE_PAIR(0x2E, INV_SIZE),
+ OP_SET_ADDRESS_MODE = OP_SIZE_PAIR(0x36, 1),
+ OP_SET_COLUMN_ADDRESS = OP_SIZE_PAIR(0x2A, 4),
+ OP_SET_DISPLAY_OFF = OP_SIZE_PAIR(0x28, 0),
+ OP_SET_DISPLAY_ON = OP_SIZE_PAIR(0x29, 0),
+ OP_SET_GAMMA_CURVE = OP_SIZE_PAIR(0x26, 1),
+ OP_SET_PAGE_ADDRESS = OP_SIZE_PAIR(0x2B, 4),
+ OP_SET_PARTIAL_COLUMNS = OP_SIZE_PAIR(0x31, 4),
+ OP_SET_PARTIAL_ROWS = OP_SIZE_PAIR(0x30, 4),
+ OP_SET_PIXEL_FORMAT = OP_SIZE_PAIR(0x3A, 1),
+ OP_SOFT_RESET = OP_SIZE_PAIR(0x01, 0),
+ OP_WRITE_MEMORY_CONTINUE = OP_SIZE_PAIR(0x3C, INV_SIZE),
+ OP_WRITE_MEMORY_START = OP_SIZE_PAIR(0x2C, INV_SIZE),
+};
+
+u32 qpic_panel_set_cmd_only(u32 command);
+u32 qpic_send_panel_cmd(u32 cmd, u32 *val, u32 length);
+int ili9341_on(void);
+void ili9341_off(void);
+int ili9341_init(struct platform_device *pdev,
+ struct device_node *np);
+#endif /* MDSS_QPIC_PANEL_H */
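For reference, a short sketch of how the {command, size} encoding defined in this header unpacks; the values shown are simply what the macros produce for OP_SET_COLUMN_ADDRESS, and the example_decode_op() wrapper is only for illustration.

static void example_decode_op(void)
{
	u32 op  = OP_SET_COLUMN_ADDRESS;	/* OP_SIZE_PAIR(0x2A, 4) */
	u32 cmd = LCDC_EXTRACT_OP_CMD(op);	/* 0x2A */
	u32 len = LCDC_EXTRACT_OP_SIZE(op);	/* 4 argument bytes */

	/*
	 * INV_SIZE (0xFFFF) in the size field means the caller supplies
	 * the length itself, as qpic_send_panel_cmd() does for the
	 * WRITE_MEMORY_* commands.
	 */
	pr_debug("cmd 0x%x takes %u bytes\n", cmd, len);
}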
diff --git a/drivers/video/msm/mdss/qpic_panel_ili_qvga.c b/drivers/video/msm/mdss/qpic_panel_ili_qvga.c
new file mode 100644
index 0000000..4459c6b
--- /dev/null
+++ b/drivers/video/msm/mdss/qpic_panel_ili_qvga.c
@@ -0,0 +1,226 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/memory.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+#include "mdss_qpic.h"
+#include "mdss_qpic_panel.h"
+
+enum {
+ OP_ILI9341_TEARING_EFFECT_LINE_ON = OP_SIZE_PAIR(0x35, 1),
+ OP_ILI9341_INTERFACE_CONTROL = OP_SIZE_PAIR(0xf6, 3),
+ OP_ILI9341_WRITE_CTRL_DISPLAY = OP_SIZE_PAIR(0x53, 1),
+ OP_ILI9341_POWER_CONTROL_A = OP_SIZE_PAIR(0xcb, 5),
+ OP_ILI9341_POWER_CONTROL_B = OP_SIZE_PAIR(0xcf, 3),
+ OP_ILI9341_DRIVER_TIMING_CONTROL_A = OP_SIZE_PAIR(0xe8, 3),
+ OP_ILI9341_DRIVER_TIMING_CONTROL_B = OP_SIZE_PAIR(0xea, 3),
+ OP_ILI9341_POWER_ON_SEQUENCE_CONTROL = OP_SIZE_PAIR(0xed, 4),
+ OP_ILI9341_PUMP_RATIO_CONTROL = OP_SIZE_PAIR(0xf7, 1),
+ OP_ILI9341_POWER_CONTROL_1 = OP_SIZE_PAIR(0xc0, 1),
+ OP_ILI9341_POWER_CONTROL_2 = OP_SIZE_PAIR(0xc1, 1),
+ OP_ILI9341_VCOM_CONTROL_1 = OP_SIZE_PAIR(0xc5, 2),
+ OP_ILI9341_VCOM_CONTROL_2 = OP_SIZE_PAIR(0xc7, 1),
+ OP_ILI9341_MEMORY_ACCESS_CONTROL = OP_SIZE_PAIR(0x36, 1),
+ OP_ILI9341_FRAME_RATE_CONTROL = OP_SIZE_PAIR(0xb1, 2),
+ OP_ILI9341_DISPLAY_FUNCTION_CONTROL = OP_SIZE_PAIR(0xb6, 4),
+ OP_ILI9341_ENABLE_3G = OP_SIZE_PAIR(0xf2, 1),
+ OP_ILI9341_COLMOD_PIXEL_FORMAT_SET = OP_SIZE_PAIR(0x3a, 1),
+ OP_ILI9341_GAMMA_SET = OP_SIZE_PAIR(0x26, 1),
+ OP_ILI9341_POSITIVE_GAMMA_CORRECTION = OP_SIZE_PAIR(0xe0, 15),
+ OP_ILI9341_NEGATIVE_GAMMA_CORRECTION = OP_SIZE_PAIR(0xe1, 15),
+ OP_ILI9341_READ_DISPLAY_ID = OP_SIZE_PAIR(0x04, 4),
+ OP_ILI9341_READ_DISPLAY_POWER_MODE = OP_SIZE_PAIR(0x0a, 2),
+ OP_ILI9341_READ_DISPLAY_MADCTL = OP_SIZE_PAIR(0x0b, 2),
+};
+
+static int rst_gpio;
+static int cs_gpio;
+static int ad8_gpio;
+static int te_gpio;
+struct regulator *vdd_vreg;
+struct regulator *avdd_vreg;
+
+int ili9341_init(struct platform_device *pdev,
+ struct device_node *np)
+{
+ int rc = 0;
+ rst_gpio = of_get_named_gpio(np, "qcom,rst-gpio", 0);
+ cs_gpio = of_get_named_gpio(np, "qcom,cs-gpio", 0);
+ ad8_gpio = of_get_named_gpio(np, "qcom,ad8-gpio", 0);
+ te_gpio = of_get_named_gpio(np, "qcom,te-gpio", 0);
+ if (!gpio_is_valid(rst_gpio)) {
+ pr_err("%s: reset gpio not specified\n" , __func__);
+ return -EINVAL;
+ }
+ if (!gpio_is_valid(cs_gpio)) {
+ pr_err("%s: cs gpio not specified\n", __func__);
+ return -EINVAL;
+ }
+ if (!gpio_is_valid(ad8_gpio)) {
+ pr_err("%s: ad8 gpio not specified\n", __func__);
+ return -EINVAL;
+ }
+ if (!gpio_is_valid(te_gpio)) {
+ pr_err("%s: te gpio not specified\n", __func__);
+ return -EINVAL;
+ }
+ vdd_vreg = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(vdd_vreg)) {
+ pr_err("%s could not get vdd,", __func__);
+ return -ENODEV;
+ }
+ avdd_vreg = devm_regulator_get(&pdev->dev, "avdd");
+ if (IS_ERR(avdd_vreg)) {
+ pr_err("%s could not get avdd,", __func__);
+ return -ENODEV;
+ }
+ rc = regulator_set_voltage(vdd_vreg, 1800000, 1800000);
+ if (rc) {
+ pr_err("vdd_vreg->set_voltage failed, rc=%d\n", rc);
+ return -EINVAL;
+ }
+ rc = regulator_set_voltage(avdd_vreg, 2700000, 2700000);
+ if (rc) {
+ pr_err("vdd_vreg->set_voltage failed, rc=%d\n", rc);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ili9341_panel_power_on(void)
+{
+ int rc;
+ rc = regulator_enable(vdd_vreg);
+ if (rc) {
+ pr_err("enable vdd failed, rc=%d\n", rc);
+ return -ENODEV;
+ }
+ rc = regulator_enable(avdd_vreg);
+ if (rc) {
+ pr_err("enable avdd failed, rc=%d\n", rc);
+ return -ENODEV;
+ }
+
+ if (gpio_request(rst_gpio, "disp_rst_n")) {
+ pr_err("%s request reset gpio failed\n", __func__);
+ return -EINVAL;
+ }
+ if (gpio_request(cs_gpio, "disp_cs_n")) {
+ gpio_free(rst_gpio);
+ pr_err("%s request cs gpio failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (gpio_request(ad8_gpio, "disp_ad8_n")) {
+ gpio_free(cs_gpio);
+ gpio_free(rst_gpio);
+ pr_err("%s request ad8 gpio failed\n", __func__);
+ return -EINVAL;
+ }
+ if (gpio_request(te_gpio, "disp_te_n")) {
+ gpio_free(ad8_gpio);
+ gpio_free(cs_gpio);
+ gpio_free(rst_gpio);
+ pr_err("%s request te gpio failed\n", __func__);
+ return -EINVAL;
+ }
+ /* wait for 20 ms after enabling the gpios, as suggested by hw */
+ msleep(20);
+ return 0;
+}
+
+static void ili9341_panel_power_off(void)
+{
+ gpio_free(ad8_gpio);
+ gpio_free(cs_gpio);
+ gpio_free(rst_gpio);
+ gpio_free(te_gpio);
+ regulator_disable(vdd_vreg);
+ regulator_disable(avdd_vreg);
+}
+
+int ili9341_on(void)
+{
+ u32 param[20];
+ int ret;
+ ret = ili9341_panel_power_on();
+ if (ret)
+ return ret;
+ qpic_panel_set_cmd_only(OP_SOFT_RESET);
+ /* wait for 120 ms after reset as panel spec suggests */
+ msleep(120);
+ qpic_panel_set_cmd_only(OP_SET_DISPLAY_OFF);
+ /* wait for 20 ms after display off */
+ msleep(20);
+
+ /* set memory access control */
+ param[0] = ((0x48)<<0) | ((0x00)<<8) | ((0x00)<<16) | ((0x00U)<<24U);
+ qpic_send_panel_cmd(OP_ILI9341_MEMORY_ACCESS_CONTROL, param, 0);
+ /* wait for 20 ms after command sent as panel spec suggests */
+ msleep(20);
+
+ /* set COLMOD: Pixel Format Set */
+ param[0] = ((0x66)<<0) | ((0x00)<<8) | ((0x00)<<16) | ((0x00U)<<24U);
+ qpic_send_panel_cmd(OP_ILI9341_COLMOD_PIXEL_FORMAT_SET, param, 0);
+ /* wait for 20 ms after command sent as panel spec suggests */
+ msleep(20);
+
+ /* set interface */
+ param[0] = ((0x01)<<0) | ((0x00)<<8) | ((0x00)<<16) | ((0x00U)<<24U);
+ qpic_send_panel_cmd(OP_ILI9341_INTERFACE_CONTROL, &param[0], 0);
+ /* wait for 20 ms after command sent */
+ msleep(20);
+
+ /* exit sleep mode */
+ qpic_panel_set_cmd_only(OP_EXIT_SLEEP_MODE);
+ /* wait for 20 ms after command sent as panel spec suggests */
+ msleep(20);
+
+ /* normal mode */
+ qpic_panel_set_cmd_only(OP_ENTER_NORMAL_MODE);
+ /* wait for 20 ms after command sent as panel spec suggests */
+ msleep(20);
+
+ /* display on */
+ qpic_panel_set_cmd_only(OP_SET_DISPLAY_ON);
+ /* wait for 20 ms after command sent as panel spec suggests */
+ msleep(20);
+
+ /* tearing effect */
+ param[0] = ((0x00)<<0) | ((0x00)<<8) | ((0x00)<<16) | ((0x00U)<<24U);
+ qpic_send_panel_cmd(OP_ILI9341_TEARING_EFFECT_LINE_ON, param, 0);
+ /* wait for 20 ms after command sent as panel spec suggests */
+ msleep(20);
+
+ return 0;
+}
+
+void ili9341_off(void)
+{
+ ili9341_panel_power_off();
+}
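The probe in mdss_qpic_panel.c currently wires in the ili9341 hooks regardless of the DT label it reads. A hypothetical sketch of how a second panel could be selected by that label is shown below; my_panel_init/on/off and the "example,qvga-panel" label are assumptions and not part of this patch.

	/* inside mdss_qpic_panel_probe(), after reading "label" */
	if (panel_name && !strcmp(panel_name, "example,qvga-panel")) {
		qpic_panel_init = my_panel_init;
		qpic_panel_on   = my_panel_on;
		qpic_panel_off  = my_panel_off;
	} else {
		/* default to the ILI9341 QVGA panel shipped with this patch */
		qpic_panel_init = ili9341_init;
		qpic_panel_on   = ili9341_on;
		qpic_panel_off  = ili9341_off;
	}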
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 57bf9f2..797d4a3 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -38,7 +38,6 @@
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/console.h>
-#include <linux/android_pmem.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/sync.h>
@@ -1747,9 +1746,12 @@
mutex_lock(&mfd->sync_mutex);
if (mfd->is_committing) {
mutex_unlock(&mfd->sync_mutex);
- ret = wait_for_completion_timeout(&mfd->commit_comp,
+ ret = wait_for_completion_interruptible_timeout(
+ &mfd->commit_comp,
msecs_to_jiffies(WAIT_FENCE_TIMEOUT));
if (ret <= 0)
+ ret = -ERESTARTSYS;
+ else if (!ret)
pr_err("%s wait for commit_comp timeout %d %d",
__func__, ret, mfd->is_committing);
} else {
@@ -3502,17 +3504,17 @@
static int msmfb_handle_buf_sync_ioctl(struct msm_fb_data_type *mfd,
struct mdp_buf_sync *buf_sync)
{
- int i, fence_cnt = 0, ret;
+ int i, fence_cnt = 0, ret = 0;
int acq_fen_fd[MDP_MAX_FENCE_FD];
struct sync_fence *fence;
- if ((buf_sync->acq_fen_fd_cnt == 0) ||
- (buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
+ if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
(mfd->timeline == NULL))
return -EINVAL;
- ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
- buf_sync->acq_fen_fd_cnt * sizeof(int));
+ if (buf_sync->acq_fen_fd_cnt)
+ ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
+ buf_sync->acq_fen_fd_cnt * sizeof(int));
if (ret) {
pr_err("%s:copy_from_user failed", __func__);
return ret;
@@ -3531,6 +3533,10 @@
fence_cnt = i;
if (ret)
goto buf_sync_err_1;
+ mfd->acq_fen_cnt = fence_cnt;
+ if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
+ msm_fb_wait_for_fence(mfd);
+
mfd->cur_rel_sync_pt = sw_sync_pt_create(mfd->timeline,
mfd->timeline_value + 2);
if (mfd->cur_rel_sync_pt == NULL) {
@@ -3557,7 +3563,6 @@
pr_err("%s:copy_to_user failed", __func__);
goto buf_sync_err_2;
}
- mfd->acq_fen_cnt = buf_sync->acq_fen_fd_cnt;
mutex_unlock(&mfd->sync_mutex);
return ret;
buf_sync_err_2:
diff --git a/drivers/video/msm/msm_fb_def.h b/drivers/video/msm/msm_fb_def.h
index dcd648b..34cf427 100644
--- a/drivers/video/msm/msm_fb_def.h
+++ b/drivers/video/msm/msm_fb_def.h
@@ -34,7 +34,6 @@
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/console.h>
-#include <linux/android_pmem.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 281c72a..235248c 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -25,12 +25,12 @@
* struct devfreq_dev_status - Data given from devfreq user device to
* governors. Represents the performance
* statistics.
- * @total_time The total time represented by this instance of
+ * @total_time: The total time represented by this instance of
* devfreq_dev_status
- * @busy_time The time that the device was working among the
+ * @busy_time: The time that the device was working among the
* total_time.
- * @current_frequency The operating frequency.
- * @private_data An entry not specified by the devfreq framework.
+ * @current_frequency: The operating frequency.
+ * @private_data: An entry not specified by the devfreq framework.
* A device and a specific governor may have their
* own protocol with private_data. However, because
* this is governor-specific, a governor using this
@@ -54,23 +54,27 @@
/**
* struct devfreq_dev_profile - Devfreq's user device profile
- * @initial_freq The operating frequency when devfreq_add_device() is
+ * @initial_freq: The operating frequency when devfreq_add_device() is
* called.
- * @polling_ms The polling interval in ms. 0 disables polling.
- * @target The device should set its operating frequency at
+ * @polling_ms: The polling interval in ms. 0 disables polling.
+ * @target: The device should set its operating frequency at
* freq or lowest-upper-than-freq value. If freq is
* higher than any operable frequency, set maximum.
* Before returning, target function should set
* freq at the current frequency.
* The "flags" parameter's possible values are
* explained above with "DEVFREQ_FLAG_*" macros.
- * @get_dev_status The device should provide the current performance
+ * @get_dev_status: The device should provide the current performance
* status to devfreq, which is used by governors.
- * @exit An optional callback that is called when devfreq
+ * @get_cur_freq: The device should provide the current frequency
+ * at which it is operating.
+ * @exit: An optional callback that is called when devfreq
* is removing the devfreq object due to error or
* from devfreq_remove_device() call. If the user
* has registered devfreq->nb at a notifier-head,
* this is the time to unregister it.
+ * @freq_table: Optional list of frequencies to support statistics.
+ * @max_state: The size of freq_table.
*/
struct devfreq_dev_profile {
unsigned long initial_freq;
@@ -79,63 +83,63 @@
int (*target)(struct device *dev, unsigned long *freq, u32 flags);
int (*get_dev_status)(struct device *dev,
struct devfreq_dev_status *stat);
+ int (*get_cur_freq)(struct device *dev, unsigned long *freq);
void (*exit)(struct device *dev);
+
+ unsigned int *freq_table;
+ unsigned int max_state;
};
/**
* struct devfreq_governor - Devfreq policy governor
- * @name Governor's name
- * @get_target_freq Returns desired operating frequency for the device.
+ * @node: list node - contains registered devfreq governors
+ * @name: Governor's name
+ * @get_target_freq: Returns desired operating frequency for the device.
* Basically, get_target_freq will run
* devfreq_dev_profile.get_dev_status() to get the
* status of the device (load = busy_time / total_time).
* If no_central_polling is set, this callback is called
* only with update_devfreq() notified by OPP.
- * @init Called when the devfreq is being attached to a device
- * @exit Called when the devfreq is being removed from a
- * device. Governor should stop any internal routines
- * before return because related data may be
- * freed after exit().
- * @no_central_polling Do not use devfreq's central polling mechanism.
- * When this is set, devfreq will not call
- * get_target_freq with devfreq_monitor(). However,
- * devfreq will call get_target_freq with
- * devfreq_update() notified by OPP framework.
+ * @event_handler: Callback used by the devfreq core framework to notify
+ * governors of events. Events include per-device governor
+ * init and exit, OPP changes originating outside devfreq,
+ * and suspend/resume of the per-device devfreq during
+ * device idle.
*
* Note that the callbacks are called with devfreq->lock locked by devfreq.
*/
struct devfreq_governor {
+ struct list_head node;
+
const char name[DEVFREQ_NAME_LEN];
int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
- int (*init)(struct devfreq *this);
- void (*exit)(struct devfreq *this);
- const bool no_central_polling;
+ int (*event_handler)(struct devfreq *devfreq,
+ unsigned int event, void *data);
};
/**
* struct devfreq - Device devfreq structure
- * @node list node - contains the devices with devfreq that have been
+ * @node: list node - contains the devices with devfreq that have been
* registered.
- * @lock a mutex to protect accessing devfreq.
- * @dev device registered by devfreq class. dev.parent is the device
+ * @lock: a mutex to protect accessing devfreq.
+ * @dev: device registered by devfreq class. dev.parent is the device
* using devfreq.
- * @profile device-specific devfreq profile
- * @governor method how to choose frequency based on the usage.
- * @nb notifier block used to notify devfreq object that it should
+ * @profile: device-specific devfreq profile
+ * @governor: method how to choose frequency based on the usage.
+ * @governor_name: devfreq governor name for use with this devfreq
+ * @nb: notifier block used to notify devfreq object that it should
* reevaluate operable frequencies. Devfreq users may use
* devfreq.nb to the corresponding register notifier call chain.
- * @polling_jiffies interval in jiffies.
- * @previous_freq previously configured frequency value.
- * @next_polling the number of remaining jiffies to poll with
- * "devfreq_monitor" executions to reevaluate
- * frequency/voltage of the device. Set by
- * profile's polling_ms interval.
- * @data Private data of the governor. The devfreq framework does not
+ * @work: delayed work for load monitoring.
+ * @previous_freq: previously configured frequency value.
+ * @data: Private data of the governor. The devfreq framework does not
* touch this.
- * @being_removed a flag to mark that this object is being removed in
- * order to prevent trying to remove the object multiple times.
- * @min_freq Limit minimum frequency requested by user (0: none)
- * @max_freq Limit maximum frequency requested by user (0: none)
+ * @min_freq: Limit minimum frequency requested by user (0: none)
+ * @max_freq: Limit maximum frequency requested by user (0: none)
+ * @stop_polling: devfreq polling status of a device.
+ * @total_trans: Number of devfreq transitions
+ * @trans_table: Statistics of devfreq transitions
+ * @time_in_state: Statistics of devfreq states
+ * @last_stat_updated: The last time stat updated
*
* This structure stores the devfreq information for a given device.
*
@@ -152,26 +156,33 @@
struct device dev;
struct devfreq_dev_profile *profile;
const struct devfreq_governor *governor;
+ char governor_name[DEVFREQ_NAME_LEN];
struct notifier_block nb;
+ struct delayed_work work;
- unsigned long polling_jiffies;
unsigned long previous_freq;
- unsigned int next_polling;
void *data; /* private data for governors */
- bool being_removed;
-
unsigned long min_freq;
unsigned long max_freq;
+ bool stop_polling;
+
+ /* information for device frequency transitions */
+ unsigned int total_trans;
+ unsigned int *trans_table;
+ unsigned long *time_in_state;
+ unsigned long last_stat_updated;
};
#if defined(CONFIG_PM_DEVFREQ)
extern struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
- const struct devfreq_governor *governor,
+ const char *governor_name,
void *data);
extern int devfreq_remove_device(struct devfreq *devfreq);
+extern int devfreq_suspend_device(struct devfreq *devfreq);
+extern int devfreq_resume_device(struct devfreq *devfreq);
/* Helper functions for devfreq user device driver with OPP. */
extern struct opp *devfreq_recommended_opp(struct device *dev,
@@ -181,23 +192,13 @@
extern int devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq);
-#ifdef CONFIG_DEVFREQ_GOV_POWERSAVE
-extern const struct devfreq_governor devfreq_powersave;
-#endif
-#ifdef CONFIG_DEVFREQ_GOV_PERFORMANCE
-extern const struct devfreq_governor devfreq_performance;
-#endif
-#ifdef CONFIG_DEVFREQ_GOV_USERSPACE
-extern const struct devfreq_governor devfreq_userspace;
-#endif
#ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND
-extern const struct devfreq_governor devfreq_simple_ondemand;
/**
* struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
* and devfreq_add_device
- * @ upthreshold If the load is over this value, the frequency jumps.
+ * @upthreshold: If the load is over this value, the frequency jumps.
* Specify 0 to use the default. Valid value = 0 to 100.
- * @ downdifferential If the load is under upthreshold - downdifferential,
+ * @downdifferential: If the load is under upthreshold - downdifferential,
* the governor may consider slowing the frequency down.
* Specify 0 to use the default. Valid value = 0 to 100.
* downdifferential < upthreshold must hold.
@@ -214,7 +215,7 @@
#else /* !CONFIG_PM_DEVFREQ */
static struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
- struct devfreq_governor *governor,
+ const char *governor_name,
void *data)
{
return NULL;
@@ -225,6 +226,16 @@
return 0;
}
+static int devfreq_suspend_device(struct devfreq *devfreq)
+{
+ return 0;
+}
+
+static int devfreq_resume_device(struct devfreq *devfreq)
+{
+ return 0;
+}
+
static struct opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags)
{
@@ -243,11 +254,6 @@
return -EINVAL;
}
-#define devfreq_powersave NULL
-#define devfreq_performance NULL
-#define devfreq_userspace NULL
-#define devfreq_simple_ondemand NULL
-
#endif /* CONFIG_PM_DEVFREQ */
#endif /* __LINUX_DEVFREQ_H__ */
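With the reworked interface above, a devfreq user registers by governor name and may supply a frequency table to enable the new trans_stat statistics. A minimal sketch under assumed my_* names and example rates (not taken from this patch):

static unsigned int my_freq_table[] = { 100000000, 200000000, 400000000 };

static int my_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* clamp *freq to a supported rate, program the clock, then
	 * write back the rate that was actually set */
	return 0;
}

static int my_get_dev_status(struct device *dev,
			     struct devfreq_dev_status *stat)
{
	stat->busy_time		= 50;		/* e.g. from a hw counter */
	stat->total_time	= 100;
	stat->current_frequency	= 200000000;
	return 0;
}

static struct devfreq_dev_profile my_profile = {
	.initial_freq	= 200000000,
	.polling_ms	= 100,
	.target		= my_target,
	.get_dev_status	= my_get_dev_status,
	.freq_table	= my_freq_table,	/* enables trans_stat */
	.max_state	= ARRAY_SIZE(my_freq_table),
};

static struct devfreq *my_devfreq;

static int my_probe(struct device *dev)
{
	/* governors are now looked up by name, not by exported symbol */
	my_devfreq = devfreq_add_device(dev, &my_profile,
					"simple_ondemand", NULL);
	return IS_ERR(my_devfreq) ? PTR_ERR(my_devfreq) : 0;
}

A driver's runtime-PM callbacks can then pair devfreq_suspend_device() and devfreq_resume_device() with the device's own idle transitions.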
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index 2cea256..6e50578 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -189,28 +189,28 @@
/* Events associated with each demux filter */
enum dmx_event {
/* New PES packet is ready to be consumed */
- DMX_EVENT_NEW_PES,
+ DMX_EVENT_NEW_PES = 0x00000001,
/* New section is ready to be consumed */
- DMX_EVENT_NEW_SECTION,
+ DMX_EVENT_NEW_SECTION = 0x00000002,
/* New recording chunk is ready to be consumed */
- DMX_EVENT_NEW_REC_CHUNK,
+ DMX_EVENT_NEW_REC_CHUNK = 0x00000004,
/* New PCR value is ready */
- DMX_EVENT_NEW_PCR,
+ DMX_EVENT_NEW_PCR = 0x00000008,
/* Overflow */
- DMX_EVENT_BUFFER_OVERFLOW,
+ DMX_EVENT_BUFFER_OVERFLOW = 0x00000010,
/* Section was dropped due to CRC error */
- DMX_EVENT_SECTION_CRC_ERROR,
+ DMX_EVENT_SECTION_CRC_ERROR = 0x00000020,
/* End-of-stream, no more data from this filter */
- DMX_EVENT_EOS,
+ DMX_EVENT_EOS = 0x00000040,
/* New Elementary Stream data is ready */
- DMX_EVENT_NEW_ES_DATA
+ DMX_EVENT_NEW_ES_DATA = 0x00000080
};
/* Flags passed in filter events */
@@ -552,7 +552,6 @@
int handle;
};
-
struct dmx_decoder_buffers {
/*
* Specify if linear buffer support is requested. If set, buffers_num
@@ -587,6 +586,35 @@
__u32 key_ladder_id;
};
+struct dmx_events_mask {
+ /*
+ * Bitmask of events to be disabled (dmx_event).
+ * Disabled events will not be notified to the user.
+ * By default all events are enabled except for
+ * DMX_EVENT_NEW_ES_DATA.
+ * Overflow event can't be disabled.
+ */
+ __u32 disable_mask;
+
+ /*
+ * Bitmask of events that will not wake up the user
+ * when poll() is called with the POLLPRI flag.
+ * An event meant to serve as a wake-up source must not be
+ * set in disable_mask, otherwise it cannot wake up the
+ * caller.
+ * By default all enabled events act as wake-up events.
+ * The overflow event cannot be removed as a wake-up source.
+ */
+ __u32 no_wakeup_mask;
+
+ /*
+ * Number of ready wake-up events which will trigger
+ * a wake-up when user calls poll with POLLPRI flag.
+ * Default is set to 1.
+ */
+ __u32 wakeup_threshold;
+};
+
#define DMX_START _IO('o', 41)
#define DMX_STOP _IO('o', 42)
#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params)
@@ -611,6 +639,8 @@
#define DMX_SET_DECODER_BUFFER _IOW('o', 63, struct dmx_decoder_buffers)
#define DMX_REUSE_DECODER_BUFFER _IO('o', 64)
#define DMX_SET_SECURE_MODE _IOW('o', 65, struct dmx_secure_mode)
+#define DMX_SET_EVENTS_MASK _IOW('o', 66, struct dmx_events_mask)
+#define DMX_GET_EVENTS_MASK _IOR('o', 67, struct dmx_events_mask)
#endif /*_DVBDMX_H_*/
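Because dmx_event values are now distinct bits, the new ioctls take OR-ed event masks directly. A small userspace sketch, assuming an already opened demux file descriptor and purely illustrative mask choices:

#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

static int example_set_demux_events(int demux_fd)
{
	struct dmx_events_mask em = {
		/* never deliver section events on this filter */
		.disable_mask	  = DMX_EVENT_NEW_SECTION,
		/* keep PCR events enabled, but don't let them wake POLLPRI */
		.no_wakeup_mask	  = DMX_EVENT_NEW_PCR,
		/* one pending wake-up event is enough to wake the poller */
		.wakeup_threshold = 1,
	};

	return ioctl(demux_fd, DMX_SET_EVENTS_MASK, &em);
}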
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a2b59d7..b767ec8 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1074,6 +1074,73 @@
#define WLAN_HT_SMPS_CONTROL_STATIC 1
#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
+#define VHT_MCS_SUPPORTED_SET_SIZE 8
+
+struct ieee80211_vht_capabilities {
+ __le32 vht_capabilities_info;
+ u8 vht_supported_mcs_set[VHT_MCS_SUPPORTED_SET_SIZE];
+} __packed;
+
+struct ieee80211_vht_operation {
+ u8 vht_op_info_chwidth;
+ u8 vht_op_info_chan_center_freq_seg1_idx;
+ u8 vht_op_info_chan_center_freq_seg2_idx;
+ __le16 vht_basic_mcs_set;
+} __packed;
+
+/**
+ * struct ieee80211_vht_mcs_info - VHT MCS information
+ * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams
+ * @rx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can receive. Rate expressed in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest RX data rate supported.
+ * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
+ * @tx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can transmit. Rate expressed in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest TX data rate supported.
+ */
+struct ieee80211_vht_mcs_info {
+ __le16 rx_mcs_map;
+ __le16 rx_highest;
+ __le16 tx_mcs_map;
+ __le16 tx_highest;
+} __packed;
+
+#define IEEE80211_VHT_MCS_ZERO_TO_SEVEN_SUPPORT 0
+#define IEEE80211_VHT_MCS_ZERO_TO_EIGHT_SUPPORT 1
+#define IEEE80211_VHT_MCS_ZERO_TO_NINE_SUPPORT 2
+#define IEEE80211_VHT_MCS_NOT_SUPPORTED 3
+
+/* 802.11ac VHT Capabilities */
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
+#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
+#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
+#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
+#define IEEE80211_VHT_CAP_TXSTBC 0x00000080
+#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100
+#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
+#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
+#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
+#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
+#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
+#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX 0x00006000
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENTION_MAX 0x00030000
+#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
+#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
+#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
+#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT 0x00800000
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
+#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
+#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
@@ -1334,6 +1401,9 @@
WLAN_EID_DSE_REGISTERED_LOCATION = 58,
WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
WLAN_EID_EXT_CHANSWITCH_ANN = 60,
+
+ WLAN_EID_VHT_CAPABILITY = 191,
+ WLAN_EID_VHT_OPERATION = 192,
};
/* Action category code */
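The VHT definitions above are consumed by the cfg80211/mac80211 hunks later in this patch. As a rough illustration, a single-stream 80 MHz device might encode its capabilities as follows (the capability selection and MCS map are examples only, not values taken from the patch):

static const struct ieee80211_vht_mcs_info example_vht_mcs = {
	/* 2 bits per stream: MCS 0-9 on stream 1, remaining streams unsupported */
	.rx_mcs_map = cpu_to_le16(0xfffe),
	.tx_mcs_map = cpu_to_le16(0xfffe),
};

static const u32 example_vht_cap =
	IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
	IEEE80211_VHT_CAP_SHORT_GI_80 |
	IEEE80211_VHT_CAP_RXLDPC;

A driver would copy such values into the vht_cap member that struct ieee80211_supported_band gains further down in this series, after which mac80211 appends the capability IE via ieee80211_ie_build_vht_cap().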
diff --git a/include/linux/msm_ipa.h b/include/linux/msm_ipa.h
index b377a6c..5151654 100644
--- a/include/linux/msm_ipa.h
+++ b/include/linux/msm_ipa.h
@@ -173,6 +173,7 @@
WLAN_AP_DISCONNECT,
WLAN_STA_CONNECT,
WLAN_STA_DISCONNECT,
+ IPA_EVENT_MAX
};
/**
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index 4e62b4f..b7d393f 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -20,6 +20,8 @@
#define KGSL_CONTEXT_TRASH_STATE 0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS 0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS 0x00000080
+#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
+
#define KGSL_CONTEXT_INVALID 0xffffffff
diff --git a/include/linux/msm_vidc_dec.h b/include/linux/msm_vidc_dec.h
index cc864f0..35279bf 100644
--- a/include/linux/msm_vidc_dec.h
+++ b/include/linux/msm_vidc_dec.h
@@ -282,7 +282,8 @@
VDEC_CODECTYPE_MPEG1 = 0x9,
VDEC_CODECTYPE_MPEG2 = 0xa,
VDEC_CODECTYPE_VC1 = 0xb,
- VDEC_CODECTYPE_VC1_RCV = 0xc
+ VDEC_CODECTYPE_VC1_RCV = 0xc,
+ VDEC_CODECTYPE_HEVC = 0xd,
};
enum vdec_mpeg2_profile {
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index ce5ddf3..bde9078 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -1783,6 +1783,9 @@
* @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE
* @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n
* @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n
+ * @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as
+ * defined in 802.11ac
+ * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the VHT capabilities IE
* @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
* @__NL80211_BAND_ATTR_AFTER_LAST: internal use
*/
@@ -1796,6 +1799,9 @@
NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
+ NL80211_BAND_ATTR_VHT_MCS_SET,
+ NL80211_BAND_ATTR_VHT_CAPA,
+
/* keep last */
__NL80211_BAND_ATTR_AFTER_LAST,
NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 2a4e5fa..214e0eb 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -48,6 +48,14 @@
struct srcu_notifier_head *opp_get_notifier(struct device *dev);
+#ifdef CONFIG_OF
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
#else
static inline unsigned long opp_get_voltage(struct opp *opp)
{
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index a1d0445..74b09cb 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -402,6 +402,7 @@
#define V4L2_PIX_FMT_DIVX_311 v4l2_fourcc('D', 'I', 'V', '3') /* DIVX311 */
#define V4L2_PIX_FMT_DIVX v4l2_fourcc('D', 'I', 'V', 'X') /* DIVX */
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* ON2 VP8 stream */
+#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* for HEVC stream */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -1851,6 +1852,8 @@
};
#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_GOB \
(V4L2_CID_MPEG_MSM_VIDC_BASE+27)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE+28)
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index dcdfc2b..6071e91 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -32,7 +32,7 @@
struct page **pages;
unsigned int nr_pages;
phys_addr_t phys_addr;
- void *caller;
+ const void *caller;
};
/*
@@ -62,7 +62,7 @@
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, int node, void *caller);
+ pgprot_t prot, int node, const void *caller);
extern void vfree(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
@@ -85,14 +85,15 @@
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
- unsigned long flags, void *caller);
+ unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
unsigned long flags,
unsigned long start, unsigned long end,
- void *caller);
+ const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
+extern struct vm_struct *find_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
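find_vm_area() becomes part of the public vmalloc API here (it was previously static in mm/vmalloc.c, as the hunk below shows). A minimal sketch of a caller; the allocation itself is purely illustrative.

static void example_dump_vmalloc_area(void)
{
	void *buf = vmalloc(4 * PAGE_SIZE);
	struct vm_struct *area;

	if (!buf)
		return;

	/* the caller handles any locking needed to keep the result valid */
	area = find_vm_area(buf);
	if (area)
		pr_info("vmalloc area %p spans %lu bytes\n",
			area->addr, area->size);

	vfree(buf);
}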
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index c53d604..f632ad6 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -8,6 +8,7 @@
enum core_id {
MSM_VIDC_CORE_0 = 0,
+ MSM_VIDC_CORE_1, /* for Q6 core */
MSM_VIDC_CORES_MAX,
};
@@ -45,6 +46,7 @@
int msm_vidc_dqevent(void *instance, struct v4l2_event *event);
int msm_vidc_wait(void *instance);
int msm_vidc_s_parm(void *instance, struct v4l2_streamparm *a);
+int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
#endif
struct msm_vidc_interlace_payload {
unsigned int format;
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 34b3139..54d9583 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -9,6 +9,8 @@
#define ISP_VERSION_40 40
#define ISP_VERSION_32 32
#define ISP_NATIVE_BUF_BIT 0x10000
+#define ISP0_BIT 0x20000
+#define ISP1_BIT 0x40000
#define ISP_STATS_STREAM_BIT 0x80000000
enum ISP_START_PIXEL_PATTERN {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1645608..275dc10 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -211,6 +211,22 @@
};
/**
+ * struct ieee80211_sta_vht_cap - STA's VHT capabilities
+ *
+ * This structure describes the essential parameters needed
+ * to advertise 802.11ac VHT capabilities for an STA.
+ *
+ * @vht_supported: is VHT supported by the STA
+ * @cap: VHT capabilities map as described in 802.11ac spec
+ * @vht_mcs: Supported VHT MCS rates
+ */
+struct ieee80211_sta_vht_cap {
+ bool vht_supported;
+ u32 cap; /* use IEEE80211_VHT_CAP_ */
+ struct ieee80211_vht_mcs_info vht_mcs;
+};
+
+/**
* struct ieee80211_supported_band - frequency band definition
*
* This structure describes a frequency band a wiphy
@@ -233,6 +249,7 @@
int n_channels;
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
};
/*
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ae34bf5..d1e73a4 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -61,6 +61,7 @@
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
.clock_base =
{
{
@@ -1619,8 +1620,6 @@
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
- raw_spin_lock_init(&cpu_base->lock);
-
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 07a3858..5065adb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1280,7 +1280,7 @@
struct vm_struct *vmlist;
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, void *caller)
+ unsigned long flags, const void *caller)
{
vm->flags = flags;
vm->addr = (void *)va->va_start;
@@ -1306,7 +1306,7 @@
}
static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, void *caller)
+ unsigned long flags, const void *caller)
{
setup_vmalloc_vm(vm, va, flags, caller);
insert_vmalloc_vmlist(vm);
@@ -1314,7 +1314,7 @@
static struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long flags, unsigned long start,
- unsigned long end, int node, gfp_t gfp_mask, void *caller)
+ unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
struct vmap_area *va;
struct vm_struct *area;
@@ -1375,7 +1375,7 @@
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end,
- void *caller)
+ const void *caller)
{
return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
caller);
@@ -1397,13 +1397,21 @@
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
- void *caller)
+ const void *caller)
{
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-1, GFP_KERNEL, caller);
}
-static struct vm_struct *find_vm_area(const void *addr)
+/**
+ * find_vm_area - find a continuous kernel virtual area
+ * @addr: base address
+ *
+ * Search for the kernel VM area starting at @addr, and return it.
+ * It is up to the caller to do all required locking to keep the returned
+ * pointer valid.
+ */
+struct vm_struct *find_vm_area(const void *addr)
{
struct vmap_area *va;
@@ -1568,9 +1576,9 @@
static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot,
- int node, void *caller);
+ int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
- pgprot_t prot, int node, void *caller)
+ pgprot_t prot, int node, const void *caller)
{
const int order = 0;
struct page **pages;
@@ -1643,7 +1651,7 @@
*/
void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, int node, void *caller)
+ pgprot_t prot, int node, const void *caller)
{
struct vm_struct *area;
void *addr;
@@ -1704,7 +1712,7 @@
*/
static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot,
- int node, void *caller)
+ int node, const void *caller)
{
return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
gfp_mask, prot, node, caller);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index db8fae5..1982c90 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1474,6 +1474,8 @@
struct ieee80211_sta_ht_cap *ht_cap,
struct ieee80211_channel *channel,
enum nl80211_channel_type channel_type);
+u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+ u32 cap);
/* internal work items */
void ieee80211_work_init(struct ieee80211_local *local);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1633648..c80361f 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -673,7 +673,7 @@
int result, i;
enum ieee80211_band band;
int channels, max_bitrates;
- bool supp_ht;
+ bool supp_ht, supp_vht;
static const u32 cipher_suites[] = {
/* keep WEP first, it may be removed below */
WLAN_CIPHER_SUITE_WEP40,
@@ -706,6 +706,7 @@
channels = 0;
max_bitrates = 0;
supp_ht = false;
+ supp_vht = false;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
struct ieee80211_supported_band *sband;
@@ -723,6 +724,7 @@
if (max_bitrates < sband->n_bitrates)
max_bitrates = sband->n_bitrates;
supp_ht = supp_ht || sband->ht_cap.ht_supported;
+ supp_vht = supp_vht || sband->vht_cap.vht_supported;
}
local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
@@ -798,6 +800,10 @@
if (supp_ht)
local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
+ if (supp_vht)
+ local->scan_ies_len +=
+ 2 + sizeof(struct ieee80211_vht_capabilities);
+
if (!local->ops->hw_scan) {
/* For hw_scan, driver needs to set these up. */
local->hw.wiphy->max_scan_ssids = 4;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 32f7a3b..fc73b1d 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1031,6 +1031,10 @@
pos += noffset - offset;
}
+ if (sband->vht_cap.vht_supported)
+ pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
+ sband->vht_cap.cap);
+
return pos - buffer;
}
@@ -1611,6 +1615,27 @@
return pos;
}
+u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+ u32 cap)
+{
+ __le32 tmp;
+
+ *pos++ = WLAN_EID_VHT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_vht_capabilities);
+ memset(pos, 0, sizeof(struct ieee80211_vht_capabilities));
+
+ /* capability flags */
+ tmp = cpu_to_le32(cap);
+ memcpy(pos, &tmp, sizeof(u32));
+ pos += sizeof(u32);
+
+ /* VHT MCS set */
+ memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs));
+ pos += sizeof(vht_cap->vht_mcs);
+
+ return pos;
+}
+
u8 *ieee80211_ie_build_ht_info(u8 *pos,
struct ieee80211_sta_ht_cap *ht_cap,
struct ieee80211_channel *channel,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 68a6b17..d82b7e3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -811,6 +811,15 @@
dev->wiphy.bands[band]->ht_cap.ampdu_density);
}
+ /* add VHT info */
+ if (dev->wiphy.bands[band]->vht_cap.vht_supported &&
+ (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET,
+ sizeof(dev->wiphy.bands[band]->vht_cap.vht_mcs),
+ &dev->wiphy.bands[band]->vht_cap.vht_mcs) ||
+ nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA,
+ dev->wiphy.bands[band]->vht_cap.cap)))
+ goto nla_put_failure;
+
/* add frequencies */
nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
if (!nl_freqs)
diff --git a/sound/soc/msm/apq8064.c b/sound/soc/msm/apq8064.c
index fb77c8d..cafc5c3 100644
--- a/sound/soc/msm/apq8064.c
+++ b/sound/soc/msm/apq8064.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -41,8 +41,9 @@
#define MSM_SLIM_0_RX_MAX_CHANNELS 2
#define MSM_SLIM_0_TX_MAX_CHANNELS 4
-#define BTSCO_RATE_8KHZ 8000
-#define BTSCO_RATE_16KHZ 16000
+#define SAMPLE_RATE_8KHZ 8000
+#define SAMPLE_RATE_16KHZ 16000
+#define SAMPLE_RATE_48KHZ 48000
#define BOTTOM_SPK_AMP_POS 0x1
#define BOTTOM_SPK_AMP_NEG 0x2
@@ -67,6 +68,7 @@
enum {
SLIM_1_RX_1 = 145, /* BT-SCO and USB TX */
SLIM_1_TX_1 = 146, /* BT-SCO and USB RX */
+ SLIM_1_TX_2 = 147, /* USB RX */
SLIM_3_RX_1 = 151, /* External echo-cancellation ref */
SLIM_3_RX_2 = 152, /* External echo-cancellation ref */
SLIM_3_TX_1 = 153, /* HDMI RX */
@@ -90,8 +92,11 @@
static int msm_slim_0_tx_ch = 1;
static int msm_slim_3_rx_ch = 1;
-static int msm_btsco_rate = BTSCO_RATE_8KHZ;
+static int msm_slim_1_rate = SAMPLE_RATE_8KHZ;
static int msm_btsco_ch = 1;
+static int msm_slim_1_rx_ch = 1;
+static int msm_slim_1_tx_ch = 1;
+
static int hdmi_rate_variable;
static int rec_mode = INCALL_REC_MONO;
@@ -651,11 +656,14 @@
SOC_ENUM_SINGLE_EXT(2, hdmi_rate),
};
-static const char *btsco_rate_text[] = {"8000", "16000"};
-static const struct soc_enum msm_btsco_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
+static const char * const slim1_rate_text[] = {"8000", "16000", "48000"};
+static const struct soc_enum msm_slim_1_rate_enum[] = {
+ SOC_ENUM_SINGLE_EXT(3, slim1_rate_text),
};
-
+static const char * const slim1_tx_ch_text[] = {"One", "Two"};
+static const struct soc_enum msm_slim_1_tx_ch_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, slim1_tx_ch_text),
+};
static int msm_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -671,7 +679,7 @@
msm_slim_0_rx_ch = ucontrol->value.integer.value[0] + 1;
pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__,
- msm_slim_0_rx_ch);
+ msm_slim_0_rx_ch);
return 1;
}
@@ -694,6 +702,27 @@
return 1;
}
+static int msm_slim_1_tx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_slim_1_tx_ch = %d\n", __func__,
+ msm_slim_1_tx_ch);
+
+ ucontrol->value.integer.value[0] = msm_slim_1_tx_ch - 1;
+ return 0;
+}
+
+static int msm_slim_1_tx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_slim_1_tx_ch = ucontrol->value.integer.value[0] + 1;
+
+ pr_debug("%s: msm_slim_1_tx_ch = %d\n", __func__,
+ msm_slim_1_tx_ch);
+
+ return 1;
+}
+
static int msm_slim_3_rx_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -713,31 +742,35 @@
return 1;
}
-static int msm_btsco_rate_get(struct snd_kcontrol *kcontrol,
+static int msm_slim_1_rate_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- pr_debug("%s: msm_btsco_rate = %d", __func__,
- msm_btsco_rate);
- ucontrol->value.integer.value[0] = msm_btsco_rate;
+ pr_debug("%s: msm_slim_1_rate = %d", __func__,
+ msm_slim_1_rate);
+
+ ucontrol->value.integer.value[0] = msm_slim_1_rate;
return 0;
}
-static int msm_btsco_rate_put(struct snd_kcontrol *kcontrol,
+static int msm_slim_1_rate_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
switch (ucontrol->value.integer.value[0]) {
case 8000:
- msm_btsco_rate = BTSCO_RATE_8KHZ;
+ msm_slim_1_rate = SAMPLE_RATE_8KHZ;
break;
case 16000:
- msm_btsco_rate = BTSCO_RATE_16KHZ;
+ msm_slim_1_rate = SAMPLE_RATE_16KHZ;
+ break;
+ case 48000:
+ msm_slim_1_rate = SAMPLE_RATE_48KHZ;
break;
default:
- msm_btsco_rate = BTSCO_RATE_8KHZ;
+ msm_slim_1_rate = SAMPLE_RATE_8KHZ;
break;
}
- pr_debug("%s: msm_btsco_rate = %d\n", __func__,
- msm_btsco_rate);
+ pr_debug("%s: msm_slim_1_rate = %d\n", __func__,
+ msm_slim_1_rate);
return 0;
}
@@ -780,8 +813,10 @@
msm_slim_0_rx_ch_get, msm_slim_0_rx_ch_put),
SOC_ENUM_EXT("SLIM_0_TX Channels", msm_enum[2],
msm_slim_0_tx_ch_get, msm_slim_0_tx_ch_put),
- SOC_ENUM_EXT("Internal BTSCO SampleRate", msm_btsco_enum[0],
- msm_btsco_rate_get, msm_btsco_rate_put),
+ SOC_ENUM_EXT("SLIM_1_TX Channels", msm_slim_1_tx_ch_enum[0],
+ msm_slim_1_tx_ch_get, msm_slim_1_tx_ch_put),
+ SOC_ENUM_EXT("SLIM_1 SampleRate", msm_slim_1_rate_enum[0],
+ msm_slim_1_rate_get, msm_slim_1_rate_put),
SOC_SINGLE_EXT("Incall Rec Mode", SND_SOC_NOPM, 0, 1, 0,
msm_incall_rec_mode_get, msm_incall_rec_mode_put),
SOC_ENUM_EXT("SLIM_3_RX Channels", msm_enum[1],
@@ -1001,7 +1036,7 @@
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
- unsigned int rx_ch = SLIM_1_RX_1, tx_ch = SLIM_1_TX_1;
+ unsigned int rx_ch = SLIM_1_RX_1, tx_ch[2] = {SLIM_1_TX_1, SLIM_1_TX_2};
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
pr_debug("%s: APQ BT/USB TX -> SLIMBUS_1_RX -> MDM TX shared ch %d\n",
@@ -1015,10 +1050,11 @@
goto end;
}
} else {
- pr_debug("%s: MDM RX -> SLIMBUS_1_TX -> APQ BT/USB Rx shared ch %d\n",
- __func__, tx_ch);
+ pr_debug("%s: MDM RX ->SLIMBUS_1_TX ->APQ BT/USB Rx shared ch %d %d\n",
+ __func__, tx_ch[0], tx_ch[1]);
- ret = snd_soc_dai_set_channel_map(cpu_dai, 1, &tx_ch, 0, 0);
+ ret = snd_soc_dai_set_channel_map(cpu_dai, msm_slim_1_tx_ch,
+ tx_ch, 0, 0);
if (ret < 0) {
pr_err("%s: Erorr %d setting SLIM_1 TX channel map\n",
__func__, ret);
@@ -1358,16 +1394,46 @@
struct snd_pcm_hw_params *params)
{
struct snd_interval *rate = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_RATE);
+ SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval *channels = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_CHANNELS);
+ SNDRV_PCM_HW_PARAM_CHANNELS);
- rate->min = rate->max = msm_btsco_rate;
+ rate->min = rate->max = msm_slim_1_rate;
channels->min = channels->max = msm_btsco_ch;
return 0;
}
+static int msm_slim_1_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ rate->min = rate->max = msm_slim_1_rate;
+ channels->min = channels->max = msm_slim_1_rx_ch;
+
+ return 0;
+}
+
+static int msm_slim_1_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ rate->min = rate->max = msm_slim_1_rate;
+ channels->min = channels->max = msm_slim_1_tx_ch;
+
+ return 0;
+}
+
static int msm_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
@@ -1728,20 +1794,18 @@
.codec_name = "snd-soc-dummy",
},
{
- .name = "VoLTE",
- .stream_name = "VoLTE",
- .cpu_dai_name = "VoLTE",
- .platform_name = "msm-pcm-voice",
+ .name = "VoLTE Stub",
+ .stream_name = "VoLTE Stub",
+ .cpu_dai_name = "VOLTE_STUB",
+ .platform_name = "msm-pcm-hostless",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
- SND_SOC_DPCM_TRIGGER_POST},
+ SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
- /* this dainlink has playback support */
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_VOLTE,
},
{
.name = "MSM8960 LowLatency",
@@ -1800,6 +1864,21 @@
.ignore_pmdown_time = 1, /* this dailink has playback support */
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA8,
},
+ {
+ .name = "Voice2 Stub",
+ .stream_name = "Voice2 Stub",
+ .cpu_dai_name = "VOICE2_STUB",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ /* this dai link has playback support */
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
/* Backend DAI Links */
{
.name = LPASS_BE_SLIMBUS_0_RX,
@@ -1971,7 +2050,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_SLIMBUS_1_RX,
- .be_hw_params_fixup = msm_btsco_be_hw_params_fixup,
+ .be_hw_params_fixup = msm_slim_1_rx_be_hw_params_fixup,
.ops = &msm_slimbus_1_be_ops,
.ignore_pmdown_time = 1, /* this dainlink has playback support */
@@ -1985,7 +2064,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_SLIMBUS_1_TX,
- .be_hw_params_fixup = msm_btsco_be_hw_params_fixup,
+ .be_hw_params_fixup = msm_slim_1_tx_be_hw_params_fixup,
.ops = &msm_slimbus_1_be_ops,
},
/* Ultrasound TX Back End DAI Link */
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index e00e409..734bd39 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -550,8 +550,8 @@
},
{
.playback = {
- .stream_name = "SGLTE Playback",
- .aif_name = "SGLTE_DL",
+ .stream_name = "Voice2 Playback",
+ .aif_name = "VOICE2_DL",
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
@@ -560,8 +560,8 @@
.rate_max = 48000,
},
.capture = {
- .stream_name = "SGLTE Capture",
- .aif_name = "SGLTE_UL",
+ .stream_name = "Voice2 Capture",
+ .aif_name = "VOICE2_UL",
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
@@ -570,7 +570,7 @@
.rate_max = 48000,
},
.ops = &msm_fe_dai_ops,
- .name = "SGLTE",
+ .name = "Voice2",
},
{
.playback = {
@@ -626,6 +626,54 @@
.ops = &msm_fe_dai_ops,
.name = "LSM",
},
+ {
+ .playback = {
+ .stream_name = "VoLTE Stub Playback",
+ .aif_name = "VOLTE_STUB_DL",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .capture = {
+ .stream_name = "VoLTE Stub Capture",
+ .aif_name = "VOLTE_STUB_UL",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &msm_fe_dai_ops,
+ .name = "VOLTE_STUB",
+ },
+ {
+ .playback = {
+ .stream_name = "Voice2 Stub Playback",
+ .aif_name = "VOICE2_STUB_DL",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .capture = {
+ .stream_name = "Voice2 Stub Capture",
+ .aif_name = "VOICE2_STUB_UL",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &msm_fe_dai_ops,
+ .name = "VOICE2_STUB",
+ },
};
static __devinit int msm_fe_dai_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index 6cf74d5..c5cb560 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -733,8 +733,8 @@
session_id = voc_get_session_id(VOICE_SESSION_NAME);
else if (val == MSM_FRONTEND_DAI_VOLTE)
session_id = voc_get_session_id(VOLTE_SESSION_NAME);
- else if (val == MSM_FRONTEND_DAI_SGLTE)
- session_id = voc_get_session_id(SGLTE_SESSION_NAME);
+ else if (val == MSM_FRONTEND_DAI_VOICE2)
+ session_id = voc_get_session_id(VOICE2_SESSION_NAME);
else
session_id = voc_get_session_id(VOIP_SESSION_NAME);
@@ -1768,12 +1768,18 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("SGLTE", MSM_BACKEND_DAI_PRI_I2S_RX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -1789,8 +1795,8 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("SGLTE", MSM_BACKEND_DAI_SEC_I2S_RX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
@@ -1807,8 +1813,8 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("SGLTE", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
@@ -1825,11 +1831,17 @@
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_INT_BT_SCO_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("SGLTE", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
@@ -1846,8 +1858,14 @@
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_MI2S_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
- SOC_SINGLE_EXT("SGLTE", MSM_BACKEND_DAI_MI2S_RX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_MI2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
@@ -1864,11 +1882,17 @@
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("SGLTE", MSM_BACKEND_DAI_AFE_PCM_RX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
@@ -1885,11 +1909,17 @@
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AUXPCM_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AUXPCM_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("SGLTE", MSM_BACKEND_DAI_AUXPCM_RX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AUXPCM_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
@@ -1906,8 +1936,8 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("SGLTE", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
@@ -1927,8 +1957,14 @@
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
- SOC_SINGLE_EXT("SGLTE", MSM_BACKEND_DAI_HDMI_RX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
@@ -1939,18 +1975,36 @@
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_EXTPROC_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_EXTPROC_RX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_EXTPROC_RX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
};
static const struct snd_kcontrol_new slimbus_1_rx_mixer_controls[] = {
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_1_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
};
static const struct snd_kcontrol_new slimbus_3_rx_mixer_controls[] = {
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
};
static const struct snd_kcontrol_new tx_voice_mixer_controls[] = {
@@ -2004,30 +2058,30 @@
msm_routing_put_voice_mixer),
};
-static const struct snd_kcontrol_new tx_sglte_mixer_controls[] = {
- SOC_SINGLE_EXT("PRI_TX_SGLTE", MSM_BACKEND_DAI_PRI_I2S_TX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+static const struct snd_kcontrol_new tx_voice2_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_TX_Voice2", MSM_BACKEND_DAI_PRI_I2S_TX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("SEC_TX_SGLTE", MSM_BACKEND_DAI_SEC_I2S_TX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("SEC_TX_Voice2", MSM_BACKEND_DAI_SEC_I2S_TX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("MI2S_TX_SGLTE", MSM_BACKEND_DAI_MI2S_TX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("MI2S_TX_Voice2", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("SLIM_0_TX_SGLTE", MSM_BACKEND_DAI_SLIMBUS_0_TX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("SLIM_0_TX_Voice2", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_SGLTE",
- MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_SGLTE, 1, 0,
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_Voice2",
+ MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0,
msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("AFE_PCM_TX_SGLTE", MSM_BACKEND_DAI_AFE_PCM_TX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("AFE_PCM_TX_Voice2", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("AUX_PCM_TX_SGLTE", MSM_BACKEND_DAI_AUXPCM_TX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("AUX_PCM_TX_Voice2", MSM_BACKEND_DAI_AUXPCM_TX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
- SOC_SINGLE_EXT("SEC_AUX_PCM_TX_SGLTE", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
- MSM_FRONTEND_DAI_SGLTE, 1, 0, msm_routing_get_voice_mixer,
+ SOC_SINGLE_EXT("SEC_AUX_PCM_TX_Voice2", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new tx_voip_mixer_controls[] = {
@@ -2087,6 +2141,66 @@
msm_routing_put_voice_stub_mixer),
};
+static const struct snd_kcontrol_new tx_volte_stub_mixer_controls[] = {
+ SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_EXTPROC_TX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("SLIM_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("PRIMARY_I2S_TX", MSM_BACKEND_DAI_PRI_I2S_TX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("SECONDARY_I2S_TX", MSM_BACKEND_DAI_SEC_I2S_TX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new tx_voice2_stub_mixer_controls[] = {
+ SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_EXTPROC_TX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("SLIM_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("PRIMARY_I2S_TX", MSM_BACKEND_DAI_PRI_I2S_TX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("SECONDARY_I2S_TX", MSM_BACKEND_DAI_SEC_I2S_TX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+};
+
static const struct snd_kcontrol_new sbus_0_rx_port_mixer_controls[] = {
SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
@@ -2553,8 +2667,8 @@
SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("VoLTE_DL", "VoLTE Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("VoLTE_UL", "VoLTE Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SGLTE_DL", "SGLTE Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SGLTE_UL", "SGLTE Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("VOICE2_DL", "Voice2 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("VOICE2_UL", "Voice2 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("VOIP_UL", "VoIP Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("SLIM0_DL_HL", "SLIMBUS0_HOSTLESS Playback",
0, 0, 0, 0),
@@ -2622,6 +2736,12 @@
0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("VOICE_STUB_DL", "VOICE_STUB Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("VOICE_STUB_UL", "VOICE_STUB Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("VOLTE_STUB_DL", "VOLTE_STUB Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("VOLTE_STUB_UL", "VOLTE_STUB Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("VOICE2_STUB_DL",
+ "VOICE2_STUB Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("VOICE2_STUB_UL",
+ "VOICE2_STUB Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("STUB_RX", "Stub Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("STUB_TX", "Stub Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
@@ -2712,9 +2832,9 @@
SND_SOC_DAPM_MIXER("VoLTE_Tx Mixer",
SND_SOC_NOPM, 0, 0, tx_volte_mixer_controls,
ARRAY_SIZE(tx_volte_mixer_controls)),
- SND_SOC_DAPM_MIXER("SGLTE_Tx Mixer",
- SND_SOC_NOPM, 0, 0, tx_sglte_mixer_controls,
- ARRAY_SIZE(tx_sglte_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Voice2_Tx Mixer",
+ SND_SOC_NOPM, 0, 0, tx_voice2_mixer_controls,
+ ARRAY_SIZE(tx_voice2_mixer_controls)),
SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
int_bt_sco_rx_mixer_controls, ARRAY_SIZE(int_bt_sco_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("INTERNAL_FM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -2758,7 +2878,12 @@
ARRAY_SIZE(sbus_3_rx_port_mixer_controls)),
SND_SOC_DAPM_MIXER("MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
mi2s_rx_port_mixer_controls, ARRAY_SIZE(mi2s_rx_port_mixer_controls)),
-
+ SND_SOC_DAPM_MIXER("VoLTE Stub Tx Mixer", SND_SOC_NOPM, 0, 0,
+ tx_volte_stub_mixer_controls,
+ ARRAY_SIZE(tx_volte_stub_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Voice2 Stub Tx Mixer", SND_SOC_NOPM, 0, 0,
+ tx_voice2_stub_mixer_controls,
+ ARRAY_SIZE(tx_voice2_stub_mixer_controls)),
/* Virtual Pins to force backends ON atm */
SND_SOC_DAPM_OUTPUT("BE_OUT"),
SND_SOC_DAPM_INPUT("BE_IN"),
@@ -2890,7 +3015,7 @@
{"PRI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"PRI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
- {"PRI_RX_Voice Mixer", "SGLTE", "SGLTE_DL"},
+ {"PRI_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"PRI_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"PRI_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
{"PRI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
@@ -2898,49 +3023,49 @@
{"SEC_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"SEC_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
- {"SEC_RX_Voice Mixer", "SGLTE", "SGLTE_DL"},
+ {"SEC_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"SEC_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SEC_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"SEC_I2S_RX", NULL, "SEC_RX_Voice Mixer"},
{"SLIM_0_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"SLIM_0_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
- {"SLIM_0_RX_Voice Mixer", "SGLTE", "SGLTE_DL"},
+ {"SLIM_0_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"SLIM_0_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SLIM_0_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"SLIMBUS_0_RX", NULL, "SLIM_0_RX_Voice Mixer"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
- {"INTERNAL_BT_SCO_RX_Voice Mixer", "SGLTE", "SGLTE_DL"},
+ {"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX_Voice Mixer"},
{"AFE_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"AFE_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
- {"AFE_PCM_RX_Voice Mixer", "SGLTE", "SGLTE_DL"},
+ {"AFE_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"AFE_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"AFE_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"PCM_RX", NULL, "AFE_PCM_RX_Voice Mixer"},
{"AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
- {"AUX_PCM_RX_Voice Mixer", "SGLTE", "SGLTE_DL"},
+ {"AUX_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"AUX_PCM_RX", NULL, "AUX_PCM_RX_Voice Mixer"},
{"SEC_AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"SEC_AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
- {"SEC_AUX_PCM_RX_Voice Mixer", "SGLTE", "SGLTE_DL"},
+ {"SEC_AUX_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"SEC_AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SEC_AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"SEC_AUX_PCM_RX", NULL, "SEC_AUX_PCM_RX_Voice Mixer"},
{"HDMI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"HDMI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
- {"HDMI_RX_Voice Mixer", "SGLTE", "SGLTE_DL"},
+ {"HDMI_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"HDMI_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"HDMI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"HDMI", NULL, "HDMI_RX_Voice Mixer"},
@@ -2963,15 +3088,15 @@
{"VoLTE_Tx Mixer", "AUX_PCM_TX_VoLTE", "AUX_PCM_TX"},
{"VoLTE_Tx Mixer", "SEC_AUX_PCM_TX_VoLTE", "SEC_AUX_PCM_TX"},
{"VoLTE_UL", NULL, "VoLTE_Tx Mixer"},
- {"SGLTE_Tx Mixer", "PRI_TX_SGLTE", "PRI_I2S_TX"},
- {"SGLTE_Tx Mixer", "SEC_TX_SGLTE", "SEC_I2S_TX"},
- {"SGLTE_Tx Mixer", "MI2S_TX_SGLTE", "MI2S_TX"},
- {"SGLTE_Tx Mixer", "SLIM_0_TX_SGLTE", "SLIMBUS_0_TX"},
- {"SGLTE_Tx Mixer", "INTERNAL_BT_SCO_TX_SGLTE", "INT_BT_SCO_TX"},
- {"SGLTE_Tx Mixer", "AFE_PCM_TX_SGLTE", "PCM_TX"},
- {"SGLTE_Tx Mixer", "AUX_PCM_TX_SGLTE", "AUX_PCM_TX"},
- {"SGLTE_Tx Mixer", "SEC_AUX_PCM_TX_SGLTE", "SEC_AUX_PCM_TX"},
- {"SGLTE_UL", NULL, "SGLTE_Tx Mixer"},
+ {"Voice2_Tx Mixer", "PRI_TX_Voice2", "PRI_I2S_TX"},
+ {"Voice2_Tx Mixer", "SEC_TX_Voice2", "SEC_I2S_TX"},
+ {"Voice2_Tx Mixer", "MI2S_TX_Voice2", "MI2S_TX"},
+ {"Voice2_Tx Mixer", "SLIM_0_TX_Voice2", "SLIMBUS_0_TX"},
+ {"Voice2_Tx Mixer", "INTERNAL_BT_SCO_TX_Voice2", "INT_BT_SCO_TX"},
+ {"Voice2_Tx Mixer", "AFE_PCM_TX_Voice2", "PCM_TX"},
+ {"Voice2_Tx Mixer", "AUX_PCM_TX_Voice2", "AUX_PCM_TX"},
+ {"Voice2_Tx Mixer", "SEC_AUX_PCM_TX_Voice2", "SEC_AUX_PCM_TX"},
+ {"VOICE2_UL", NULL, "Voice2_Tx Mixer"},
{"Voip_Tx Mixer", "PRI_TX_Voip", "PRI_I2S_TX"},
{"Voip_Tx Mixer", "SEC_TX_Voip", "SEC_I2S_TX"},
{"Voip_Tx Mixer", "MI2S_TX_Voip", "MI2S_TX"},
@@ -3017,17 +3142,53 @@
{"Voice Stub Tx Mixer", "AFE_PCM_TX", "PCM_TX"},
{"VOICE_STUB_UL", NULL, "Voice Stub Tx Mixer"},
+ {"VoLTE Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"},
+ {"VoLTE Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+ {"VoLTE Stub Tx Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"VoLTE Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"},
+ {"VoLTE Stub Tx Mixer", "MI2S_TX", "MI2S_TX"},
+ {"VoLTE Stub Tx Mixer", "SLIM_3_TX", "SLIMBUS_3_TX"},
+ {"VoLTE Stub Tx Mixer", "PRIMARY_I2S_TX", "PRI_I2S_TX"},
+ {"VoLTE Stub Tx Mixer", "SECONDARY_I2S_TX", "SEC_I2S_TX"},
+ {"VoLTE Stub Tx Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"VOLTE_STUB_UL", NULL, "VoLTE Stub Tx Mixer"},
+
+ {"Voice2 Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"},
+ {"Voice2 Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+ {"Voice2 Stub Tx Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"Voice2 Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"},
+ {"Voice2 Stub Tx Mixer", "MI2S_TX", "MI2S_TX"},
+ {"Voice2 Stub Tx Mixer", "SLIM_3_TX", "SLIMBUS_3_TX"},
+ {"Voice2 Stub Tx Mixer", "PRIMARY_I2S_TX", "PRI_I2S_TX"},
+ {"Voice2 Stub Tx Mixer", "SECONDARY_I2S_TX", "SEC_I2S_TX"},
+ {"Voice2 Stub Tx Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"VOICE2_STUB_UL", NULL, "Voice2 Stub Tx Mixer"},
+
{"STUB_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"STUB_RX Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+ {"STUB_RX Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
{"STUB_RX", NULL, "STUB_RX Mixer"},
{"SLIMBUS_1_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"SLIMBUS_1_RX Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+ {"SLIMBUS_1_RX Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Mixer"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"INTERNAL_BT_SCO_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+ {"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
{"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"MI2S_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+ {"MI2S_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
{"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
{"HDMI_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"HDMI_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+ {"HDMI_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
{"AFE_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"AFE_PCM_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+ {"AFE_PCM_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
{"SLIMBUS_3_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"SLIMBUS_3_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"},
+ {"SLIMBUS_3_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"},
{"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX_Voice Mixer"},
{"SLIMBUS_1_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
diff --git a/sound/soc/msm/msm-pcm-routing.h b/sound/soc/msm/msm-pcm-routing.h
index b571483..c42e155 100644
--- a/sound/soc/msm/msm-pcm-routing.h
+++ b/sound/soc/msm/msm-pcm-routing.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -69,8 +69,10 @@
MSM_FRONTEND_DAI_AFE_TX,
MSM_FRONTEND_DAI_VOICE_STUB,
MSM_FRONTEND_DAI_VOLTE,
- MSM_FRONTEND_DAI_SGLTE,
+ MSM_FRONTEND_DAI_VOICE2,
MSM_FRONTEND_DAI_DTMF_RX,
+ MSM_FRONTEND_DAI_VOLTE_STUB,
+ MSM_FRONTEND_DAI_VOICE2_STUB,
MSM_FRONTEND_DAI_MAX,
};
diff --git a/sound/soc/msm/msm-pcm-voice.c b/sound/soc/msm/msm-pcm-voice.c
index ac5bc34..26e6ae6 100644
--- a/sound/soc/msm/msm-pcm-voice.c
+++ b/sound/soc/msm/msm-pcm-voice.c
@@ -59,9 +59,10 @@
return false;
}
-static int is_sglte(struct msm_voice *psglte)
+static int is_voice2(struct msm_voice *pvoice2)
{
- if (psglte == &voice_info[SGLTE_SESSION_INDEX])
+ if (pvoice2 == &voice_info[VOICE2_SESSION_INDEX])
return true;
else
return false;
@@ -100,15 +101,15 @@
if (!strncmp("VoLTE", substream->pcm->id, 5)) {
voice = &voice_info[VOLTE_SESSION_INDEX];
pr_debug("%s: Open VoLTE Substream Id=%s\n",
- __func__, substream->pcm->id);
- } else if (!strncmp("SGLTE", substream->pcm->id, 5)) {
- voice = &voice_info[SGLTE_SESSION_INDEX];
- pr_debug("%s: Open SGLTE Substream Id=%s\n",
- __func__, substream->pcm->id);
+ __func__, substream->pcm->id);
+ } else if (!strncmp("Voice2", substream->pcm->id, 6)) {
+ voice = &voice_info[VOICE2_SESSION_INDEX];
+ pr_debug("%s: Open Voice2 Substream Id=%s\n",
+ __func__, substream->pcm->id);
} else {
voice = &voice_info[VOICE_SESSION_INDEX];
pr_debug("%s: Open VOICE Substream Id=%s\n",
- __func__, substream->pcm->id);
+ __func__, substream->pcm->id);
}
mutex_lock(&voice->lock);
@@ -174,8 +175,8 @@
pr_debug("end voice call\n");
if (is_volte(prtd))
session_id = voc_get_session_id(VOLTE_SESSION_NAME);
- else if (is_sglte(prtd))
- session_id = voc_get_session_id(SGLTE_SESSION_NAME);
+ else if (is_voice2(prtd))
+ session_id = voc_get_session_id(VOICE2_SESSION_NAME);
else
session_id = voc_get_session_id(VOICE_SESSION_NAME);
voc_end_voice_call(session_id);
@@ -201,8 +202,8 @@
if (prtd->playback_start && prtd->capture_start) {
if (is_volte(prtd))
session_id = voc_get_session_id(VOLTE_SESSION_NAME);
- else if (is_sglte(prtd))
- session_id = voc_get_session_id(SGLTE_SESSION_NAME);
+ else if (is_voice2(prtd))
+ session_id = voc_get_session_id(VOICE2_SESSION_NAME);
else
session_id = voc_get_session_id(VOICE_SESSION_NAME);
voc_start_voice_call(session_id);
@@ -233,8 +234,8 @@
pr_debug("%s: cmd = %d\n", __func__, cmd);
if (is_volte(prtd))
session_id = voc_get_session_id(VOLTE_SESSION_NAME);
- else if (is_sglte(prtd))
- session_id = voc_get_session_id(SGLTE_SESSION_NAME);
+ else if (is_voice2(prtd))
+ session_id = voc_get_session_id(VOICE2_SESSION_NAME);
else
session_id = voc_get_session_id(VOICE_SESSION_NAME);
@@ -308,19 +309,20 @@
return 0;
}
-static int msm_sglte_volume_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int msm_voice2_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = 0;
return 0;
}
-static int msm_sglte_volume_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int msm_voice2_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
int volume = ucontrol->value.integer.value[0];
pr_debug("%s: volume: %d\n", __func__, volume);
- voc_set_rx_vol_index(voc_get_session_id(SGLTE_SESSION_NAME),
+
+ voc_set_rx_vol_index(voc_get_session_id(VOICE2_SESSION_NAME),
RX_PATH, volume);
return 0;
}
@@ -401,21 +403,21 @@
return 0;
}
-static int msm_sglte_mute_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int msm_voice2_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = 0;
return 0;
}
-static int msm_sglte_mute_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int msm_voice2_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
int mute = ucontrol->value.integer.value[0];
pr_debug("%s: mute=%d\n", __func__, mute);
- voc_set_tx_mute(voc_get_session_id(SGLTE_SESSION_NAME), TX_PATH, mute);
+ voc_set_tx_mute(voc_get_session_id(VOICE2_SESSION_NAME), TX_PATH, mute);
return 0;
}
@@ -460,22 +462,22 @@
return 0;
}
-static int msm_sglte_rx_device_mute_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int msm_voice2_rx_device_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] =
- voc_get_rx_device_mute(voc_get_session_id(SGLTE_SESSION_NAME));
+ voc_get_rx_device_mute(voc_get_session_id(VOICE2_SESSION_NAME));
return 0;
}
-static int msm_sglte_rx_device_mute_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int msm_voice2_rx_device_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
int mute = ucontrol->value.integer.value[0];
pr_debug("%s: mute=%d\n", __func__, mute);
- voc_set_rx_device_mute(voc_get_session_id(SGLTE_SESSION_NAME), mute);
+ voc_set_rx_device_mute(voc_get_session_id(VOICE2_SESSION_NAME), mute);
return 0;
}
@@ -502,7 +504,7 @@
voc_set_tty_mode(voc_get_session_id(VOICE_SESSION_NAME), tty_mode);
- voc_set_tty_mode(voc_get_session_id(SGLTE_SESSION_NAME), tty_mode);
+ voc_set_tty_mode(voc_get_session_id(VOICE2_SESSION_NAME), tty_mode);
return 0;
}
static int msm_voice_widevoice_put(struct snd_kcontrol *kcontrol,
@@ -514,7 +516,7 @@
voc_set_widevoice_enable(voc_get_session_id(VOICE_SESSION_NAME),
wv_enable);
- voc_set_widevoice_enable(voc_get_session_id(SGLTE_SESSION_NAME),
+ voc_set_widevoice_enable(voc_get_session_id(VOICE2_SESSION_NAME),
wv_enable);
return 0;
}
@@ -536,9 +538,9 @@
pr_debug("%s: st enable=%d\n", __func__, st_enable);
voc_set_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
- MODULE_ID_VOICE_MODULE_ST, st_enable);
- voc_set_pp_enable(voc_get_session_id(SGLTE_SESSION_NAME),
- MODULE_ID_VOICE_MODULE_ST, st_enable);
+ MODULE_ID_VOICE_MODULE_ST, st_enable);
+ voc_set_pp_enable(voc_get_session_id(VOICE2_SESSION_NAME),
+ MODULE_ID_VOICE_MODULE_ST, st_enable);
return 0;
}
@@ -560,9 +562,9 @@
pr_debug("%s: fens enable=%d\n", __func__, fens_enable);
voc_set_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
- MODULE_ID_VOICE_MODULE_FENS, fens_enable);
- voc_set_pp_enable(voc_get_session_id(SGLTE_SESSION_NAME),
- MODULE_ID_VOICE_MODULE_FENS, fens_enable);
+ MODULE_ID_VOICE_MODULE_FENS, fens_enable);
+ voc_set_pp_enable(voc_get_session_id(VOICE2_SESSION_NAME),
+ MODULE_ID_VOICE_MODULE_FENS, fens_enable);
return 0;
}
@@ -605,13 +607,13 @@
SOC_SINGLE_EXT("VoLTE Topology Disable", SND_SOC_NOPM, 0, 1, 0,
msm_volte_topology_disable_get,
msm_volte_topology_disable_put),
- SOC_SINGLE_EXT("SGLTE Rx Device Mute", SND_SOC_NOPM, 0, 1, 0,
- msm_sglte_rx_device_mute_get,
- msm_sglte_rx_device_mute_put),
- SOC_SINGLE_EXT("SGLTE Tx Mute", SND_SOC_NOPM, 0, 1, 0,
- msm_sglte_mute_get, msm_sglte_mute_put),
- SOC_SINGLE_EXT("SGLTE Rx Volume", SND_SOC_NOPM, 0, 5, 0,
- msm_sglte_volume_get, msm_sglte_volume_put),
+ SOC_SINGLE_EXT("Voice2 Rx Device Mute", SND_SOC_NOPM, 0, 1, 0,
+ msm_voice2_rx_device_mute_get,
+ msm_voice2_rx_device_mute_put),
+ SOC_SINGLE_EXT("Voice2 Tx Mute", SND_SOC_NOPM, 0, 1, 0,
+ msm_voice2_mute_get, msm_voice2_mute_put),
+ SOC_SINGLE_EXT("Voice2 Rx Volume", SND_SOC_NOPM, 0, 5, 0,
+ msm_voice2_volume_get, msm_voice2_volume_put),
};
static struct snd_pcm_ops msm_pcm_ops = {
@@ -674,7 +676,7 @@
memset(&voice_info, 0, sizeof(voice_info));
mutex_init(&voice_info[VOICE_SESSION_INDEX].lock);
mutex_init(&voice_info[VOLTE_SESSION_INDEX].lock);
- mutex_init(&voice_info[SGLTE_SESSION_INDEX].lock);
+ mutex_init(&voice_info[VOICE2_SESSION_INDEX].lock);
return platform_driver_register(&msm_pcm_driver);
}
diff --git a/sound/soc/msm/msm-pcm-voice.h b/sound/soc/msm/msm-pcm-voice.h
index 6eb2060..207e7a7 100644
--- a/sound/soc/msm/msm-pcm-voice.h
+++ b/sound/soc/msm/msm-pcm-voice.h
@@ -16,7 +16,7 @@
enum {
VOICE_SESSION_INDEX,
VOLTE_SESSION_INDEX,
- SGLTE_SESSION_INDEX,
+ VOICE2_SESSION_INDEX,
VOICE_SESSION_INDEX_MAX,
};
diff --git a/sound/soc/msm/msm8226.c b/sound/soc/msm/msm8226.c
index 235b527..4dc6505 100644
--- a/sound/soc/msm/msm8226.c
+++ b/sound/soc/msm/msm8226.c
@@ -86,6 +86,7 @@
static struct mutex cdc_mclk_mutex;
static struct clk *codec_clk;
static int clk_users;
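+/* External 5V speaker supply enable GPIO; negative when not available */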
+static int vdd_spkr_gpio = -1;
static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
bool dapm)
@@ -149,6 +150,30 @@
return 0;
}
+static int msm8226_vdd_spkr_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ pr_debug("%s: event = %d\n", __func__, event);
+
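+ /* Drive the external 5V speaker supply GPIO: high on PRE_PMU, low on POST_PMD */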
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (vdd_spkr_gpio >= 0) {
+ gpio_direction_output(vdd_spkr_gpio, 1);
+ pr_debug("%s: Enabled 5V external supply for speaker\n",
+ __func__);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (vdd_spkr_gpio >= 0) {
+ gpio_direction_output(vdd_spkr_gpio, 0);
+ pr_debug("%s: Disabled 5V external supply for speaker\n",
+ __func__);
+ }
+ break;
+ }
+ return 0;
+}
+
static const struct snd_soc_dapm_widget msm8226_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0,
@@ -165,6 +190,9 @@
SND_SOC_DAPM_MIC("Digital Mic4", NULL),
SND_SOC_DAPM_MIC("Digital Mic5", NULL),
SND_SOC_DAPM_MIC("Digital Mic6", NULL),
+
+ SND_SOC_DAPM_SUPPLY("EXT_VDD_SPKR", SND_SOC_NOPM, 0, 0,
+ msm8226_vdd_spkr_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
};
static const char *const slim0_rx_ch_text[] = {"One", "Two"};
@@ -982,6 +1010,45 @@
.ops = &msm8226_be_ops,
.ignore_suspend = 1,
},
+ /* Incall Record Uplink BACK END DAI Link */
+ {
+ .name = LPASS_BE_INCALL_RECORD_TX,
+ .stream_name = "Voice Uplink Capture",
+ .cpu_dai_name = "msm-dai-q6-dev.32772",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ignore_suspend = 1,
+ },
+ /* Incall Record Downlink BACK END DAI Link */
+ {
+ .name = LPASS_BE_INCALL_RECORD_RX,
+ .stream_name = "Voice Downlink Capture",
+ .cpu_dai_name = "msm-dai-q6-dev.32771",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ignore_suspend = 1,
+ },
+ /* Incall Music BACK END DAI Link */
+ {
+ .name = LPASS_BE_VOICE_PLAYBACK_TX,
+ .stream_name = "Voice Farend Playback",
+ .cpu_dai_name = "msm-dai-q6-dev.32773",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ignore_suspend = 1,
+ },
};
struct snd_soc_card snd_soc_card_msm8226 = {
@@ -1065,19 +1132,44 @@
goto err;
}
+ vdd_spkr_gpio = of_get_named_gpio(pdev->dev.of_node,
+ "qcom,cdc-vdd-spkr-gpios", 0);
+ if (vdd_spkr_gpio < 0) {
+ dev_err(&pdev->dev,
+ "Looking up %s property in node %s failed %d\n",
+ "qcom, cdc-vdd-spkr-gpios",
+ pdev->dev.of_node->full_name, vdd_spkr_gpio);
+ } else {
+ ret = gpio_request(vdd_spkr_gpio, "TAPAN_CODEC_VDD_SPKR");
+ if (ret) {
+ /* GPIO to enable EXT VDD exists, but failed request */
+ dev_err(card->dev,
+ "%s: Failed to request tapan vdd spkr gpio %d\n",
+ __func__, vdd_spkr_gpio);
+ goto err;
+ }
+ }
+
ret = msm8226_prepare_codec_mclk(card);
if (ret)
- goto err;
+ goto err_vdd_spkr;
ret = snd_soc_register_card(card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
ret);
- goto err;
+ goto err_vdd_spkr;
}
mutex_init(&cdc_mclk_mutex);
return 0;
+
+err_vdd_spkr:
+ if (vdd_spkr_gpio >= 0) {
+ gpio_free(vdd_spkr_gpio);
+ vdd_spkr_gpio = -1;
+ }
+
err:
if (pdata->mclk_gpio > 0) {
dev_dbg(&pdev->dev, "%s free gpio %d\n",
@@ -1095,6 +1187,8 @@
struct msm8226_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
gpio_free(pdata->mclk_gpio);
+ gpio_free(vdd_spkr_gpio);
+ vdd_spkr_gpio = -1;
snd_soc_unregister_card(card);
return 0;
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index ae9468f..2bd5c88 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -1105,9 +1105,9 @@
.be_id = MSM_FRONTEND_DAI_VOLTE,
},
{
- .name = "SGLTE",
- .stream_name = "SGLTE",
- .cpu_dai_name = "SGLTE",
+ .name = "Voice2",
+ .stream_name = "Voice2",
+ .cpu_dai_name = "Voice2",
.platform_name = "msm-pcm-voice",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
@@ -1118,7 +1118,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_SGLTE,
+ .be_id = MSM_FRONTEND_DAI_VOICE2,
},
{
.name = "MSM8960 LowLatency",
diff --git a/sound/soc/msm/msm8960.c b/sound/soc/msm/msm8960.c
index f987eb4..8c0d1a9 100644
--- a/sound/soc/msm/msm8960.c
+++ b/sound/soc/msm/msm8960.c
@@ -1361,9 +1361,9 @@
.be_id = MSM_FRONTEND_DAI_VOLTE,
},
{
- .name = "SGLTE",
- .stream_name = "SGLTE",
- .cpu_dai_name = "SGLTE",
+ .name = "Voice2",
+ .stream_name = "Voice2",
+ .cpu_dai_name = "Voice2",
.platform_name = "msm-pcm-voice",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
@@ -1373,7 +1373,7 @@
.ignore_pmdown_time = 1,/* this dainlink has playback support */
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_SGLTE,
+ .be_id = MSM_FRONTEND_DAI_VOICE2,
},
{
.name = "MSM8960 LowLatency",
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 0dbe3f7..50a0113 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -2104,8 +2104,7 @@
sizeof(struct msm8974_asoc_mach_data), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "Can't allocate msm8974_asoc_mach_data\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
ret = msm8974_dtparse_auxpcm(pdev, &pdata);
diff --git a/sound/soc/msm/qdsp6/q6voice.c b/sound/soc/msm/qdsp6/q6voice.c
index f04947c..17f2d03 100644
--- a/sound/soc/msm/qdsp6/q6voice.c
+++ b/sound/soc/msm/qdsp6/q6voice.c
@@ -31,11 +31,6 @@
#define CMD_STATUS_SUCCESS 0
#define CMD_STATUS_FAIL 1
-#define VOC_PATH_PASSIVE 0
-#define VOC_PATH_FULL 1
-#define VOC_PATH_VOLTE_PASSIVE 2
-#define VOC_PATH_SGLTE_PASSIVE 3
-
#define CAL_BUFFER_SIZE 4096
#define NUM_CVP_CAL_BLOCKS 75
#define NUM_CVS_CAL_BLOCKS 15
@@ -173,9 +168,9 @@
else if (!strncmp(name, "VoLTE session", 13))
session_id =
common.voice[VOC_PATH_VOLTE_PASSIVE].session_id;
- else if (!strncmp(name, "SGLTE session", 13))
+ else if (!strncmp(name, "Voice2 session", 14))
session_id =
- common.voice[VOC_PATH_SGLTE_PASSIVE].session_id;
+ common.voice[VOC_PATH_VOICE2_PASSIVE].session_id;
else
session_id = common.voice[VOC_PATH_FULL].session_id;
@@ -216,9 +211,9 @@
return (session_id == common.voice[VOC_PATH_VOLTE_PASSIVE].session_id);
}
-static bool is_sglte_session(u16 session_id)
+static bool is_voice2_session(u16 session_id)
{
- return (session_id == common.voice[VOC_PATH_SGLTE_PASSIVE].session_id);
+ return (session_id == common.voice[VOC_PATH_VOICE2_PASSIVE].session_id);
}
/* Only for memory allocated in the voice driver */
@@ -373,9 +368,9 @@
pr_err("%s: apr_mvm is NULL.\n", __func__);
return -EINVAL;
}
- pr_debug("%s: VoLTE/SGLTE command to MVM\n", __func__);
+ pr_debug("%s: VoLTE/Voice2 command to MVM\n", __func__);
if (is_volte_session(v->session_id) ||
- is_sglte_session(v->session_id)) {
+ is_voice2_session(v->session_id)) {
mvm_handle = voice_get_mvm_handle(v);
mvm_voice_ctl_cmd.hdr.hdr_field = APR_HDR_FIELD(
@@ -451,7 +446,7 @@
if (!mvm_handle) {
if (is_voice_session(v->session_id) ||
is_volte_session(v->session_id) ||
- is_sglte_session(v->session_id)) {
+ is_voice2_session(v->session_id)) {
mvm_session_cmd.hdr.hdr_field = APR_HDR_FIELD(
APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(APR_HDR_SIZE),
@@ -471,7 +466,7 @@
strlcpy(mvm_session_cmd.mvm_session.name,
"default volte voice",
sizeof(mvm_session_cmd.mvm_session.name) - 1);
- } else if (is_sglte_session(v->session_id)) {
+ } else if (is_voice2_session(v->session_id)) {
strlcpy(mvm_session_cmd.mvm_session.name,
"default modem voice2",
sizeof(mvm_session_cmd.mvm_session.name));
@@ -538,7 +533,7 @@
if (!cvs_handle) {
if (is_voice_session(v->session_id) ||
is_volte_session(v->session_id) ||
- is_sglte_session(v->session_id)) {
+ is_voice2_session(v->session_id)) {
pr_debug("%s: creating CVS passive session\n",
__func__);
@@ -559,7 +554,7 @@
strlcpy(cvs_session_cmd.cvs_session.name,
"default volte voice",
sizeof(cvs_session_cmd.cvs_session.name) - 1);
- } else if (is_sglte_session(v->session_id)) {
+ } else if (is_voice2_session(v->session_id)) {
strlcpy(cvs_session_cmd.cvs_session.name,
"default modem voice2",
sizeof(cvs_session_cmd.cvs_session.name));
diff --git a/sound/soc/msm/qdsp6/q6voice.h b/sound/soc/msm/qdsp6/q6voice.h
index 8df6c38..0bae384 100644
--- a/sound/soc/msm/qdsp6/q6voice.h
+++ b/sound/soc/msm/qdsp6/q6voice.h
@@ -1314,12 +1314,12 @@
#define VOICE_SESSION_NAME "Voice session"
#define VOIP_SESSION_NAME "VoIP session"
#define VOLTE_SESSION_NAME "VoLTE session"
-#define SGLTE_SESSION_NAME "SGLTE session"
+#define VOICE2_SESSION_NAME "Voice2 session"
#define VOC_PATH_PASSIVE 0
#define VOC_PATH_FULL 1
#define VOC_PATH_VOLTE_PASSIVE 2
-#define VOC_PATH_SGLTE_PASSIVE 3
+#define VOC_PATH_VOICE2_PASSIVE 3
uint16_t voc_get_session_id(char *name);
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 916be0b..5b18311 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -485,7 +485,7 @@
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- case SNDRV_PCM_FMTBIT_SPECIAL:
+ case SNDRV_PCM_FORMAT_SPECIAL:
dai_data->port_config.i2s.bit_width = 16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
@@ -561,7 +561,7 @@
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- case SNDRV_PCM_FMTBIT_SPECIAL:
+ case SNDRV_PCM_FORMAT_SPECIAL:
dai_data->port_config.slim_sch.bit_width = 16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
@@ -1351,7 +1351,7 @@
ctrl = &mi2s_config_controls[3];
}
- if (!ctrl) {
+ if (ctrl) {
kcontrol = snd_ctl_new1(ctrl,
&mi2s_dai_data->rx_dai.mi2s_dai_data);
rc = snd_ctl_add(dai->card->snd_card, kcontrol);
@@ -1371,7 +1371,7 @@
ctrl = &mi2s_config_controls[4];
}
- if (!ctrl) {
+ if (ctrl) {
rc = snd_ctl_add(dai->card->snd_card,
snd_ctl_new1(ctrl,
&mi2s_dai_data->tx_dai.mi2s_dai_data));
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index b32daaa..b1db277 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -992,6 +992,8 @@
if (tmp >= 0 && tmp < AFE_MAX_PORTS)
copps_list[i] =
atomic_read(&this_adm.copp_id[tmp]);
+ else
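+ /* skip invalid indices; the debug print below indexes copp_id[] with tmp */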
+ continue;
pr_debug("%s: port_id[%#x]: %d, index: %d act coppid[0x%x]\n",
__func__, i, port_id[i], tmp,
atomic_read(&this_adm.copp_id[tmp]));