Merge "ASoC: msm8974: support amplifier for speaker path"
diff --git a/Documentation/ata/ahci_msm.txt b/Documentation/ata/ahci_msm.txt
new file mode 100644
index 0000000..2f84834
--- /dev/null
+++ b/Documentation/ata/ahci_msm.txt
@@ -0,0 +1,322 @@
+Introduction
+============
+The SATA host controller developed for Qualcomm SoCs supports SATA
+storage devices that connect to the SoC through a standard SATA cable
+interface. The MSM Advanced Host Controller Interface (AHCI) driver
+interfaces with the generic Linux AHCI driver and communicates with
+the AHCI controller to move data between system memory and SATA
+devices (persistent storage).
+
+Hardware description
+====================
+Serial Advanced Technology Attachment (SATA) is a communication
+protocol designed to transfer data between a computer and storage
+devices (Hard Disk Drives (HDD), Solid State Drives (SSD), etc.).
+First generation (Gen1) SATA interfaces communicate at a serial
+rate of 1.5Gb/s and use low-voltage differential signaling on
+serial links. With 8b/10b encoding, the effective data throughput
+for a Gen1 interface is 150MB/s (1.5Gb/s x 8/10 = 1.2Gb/s = 150MB/s).
+
+The SATA host controller in Qualcomm chipsets adheres to the AHCI 1.3
+specification which describes the interface between system software
+and the host controller, as well as the functional behavior needed
+for software to communicate with the SATA host controller.
+
+The SATA PHY hardware macro in Qualcomm chipsets adheres to the
+SATA 3.0 specification with a Gen1 serial interface. The PHY
+serializes and de-serializes data exchanged with the SATA HDD. It can
+also detect a SATA HDD during hot swap and raise an interrupt to the
+CPU through the AHCI controller to signal insertion or removal.
+
+The following figure shows the SATA architecture block diagram as
+implemented in MSM chipsets.
+
+ +---------+
+ |SATA Disk|
+ | Drive |
+ +---------+
+ ^ ^
+ Tx | | Rx
+ v v
++--------------+ +--------------+ +-----------+
+| System Memory| | SATA PHY | | CPU |
++--------------+ +--------------+ +-----------+
+ ^ ^ ^ ^ ^
+ | | | | |
+ | v v | |
+ | +------------------+(Interrupt)|
+ | | SATA CONTROLLER |-----+ |
+ | +------------------+ |
+ | ^ ^ |
+ | | | |
+ v v v v
+ <--------------------------------------------------------->
+< System Fabric (Control and Data) >
+ <--------------------------------------------------------->
+
+Some controller capabilities:
+- Supports 64-bit addressing
+- Supports native command queueing (up to 32 commands)
+- Supports First-party DMA to move data to and from system memory
+- ATA-7 command set compliant
+- Port multiplier support for some chipsets
+- Supports aggressive power management (partial, slumber modes)
+- Supports asynchronous notification
+
+Software description
+====================
+The SATA driver uses the generic interface to read/write data to
+the Hard Disk Drive (HDD). It uses the following components in Linux
+to interface with the generic block layer, which in turn interfaces
+with the file system or user processes.
+
+1) AHCI platform Driver (includes MSM-specific glue driver)
+2) LIBAHCI
+3) LIBATA
+4) SCSI
+
+The AHCI platform driver registers as the device driver for the
+platform device registered during SoC board initialization. It is
+responsible for platform-specific tasks such as PHY configuration,
+clock initialization and claiming memory resources. It also
+implements certain functionality that deviates from the standard
+specification.
+
+Library "LIBAHCI" implements software layer functionality described
+in the standard AHCI specification. It interfaces with the LIBATA
+framework to execute the SATA command set. It converts ATA task files
+into AHCI command descriptors and passes them to the controller for
+execution. It handles controller interrupts and sends command
+completion events to the upper layers. It implements a controller-
+specific reset and recovery mechanism in case of errors. It implements
+link power management policies - partial, slumber modes, runtime power
+management and platform power management. It abstracts the low-level
+controller details from the LIBATA framework.
+
+"LIBATA" is a helper library for implementing ATA and SATA command
+protocol as described in standard ATA and SATA specifications. It
+builds read/write requests from SCSI commands and pass them to the
+low-level controller driver (LLD). It handshakes with the SATA
+device using standard commands to understand capabilities and carry
+out device configurations. It interfaces with the SCSI layer to manage
+underlying disks. It manages different devices connected to each host
+port using a port multiplier. Also, it manages the link PHY component,
+the interconnect interface and any external interface (cables, etc.)
+that follow the SATA electrical specification.
+
+The SCSI layer is a helper library for translating generic block layer
+commands to SCSI commands and passing them on to the LIBATA framework.
+It handles generic tasks such as device scan, media change, and
+hot-plug detection. This layer handles all types of SCSI devices,
+and SATA storage devices are one class of SCSI devices. It also provides
+the IOCTL interface to manage disks from userspace.
+
+Following is the logical code flow:
+
+ +------------------------+
+ | File System (ext4 etc.)|
+ +------------------------+
+ ^
+ |
+ v
+ +------------------------+
+ | Generic Block Layer |
+ +------------------------+
+ ^
+ |
+ v
+ +------------------------+
+ | SCSI Layer |
+ +------------------------+
+ ^
+ |
+ v
+ +------------------------+
+ | LIBATA library |
+ +------------------------+
+ ^
+ |
+ v
+ +------------------------+
+ | LIBAHCI library |
+ +------------------------+
+ ^
+ |
+ v
+ +------------------------+
+ | AHCI platform driver + |
+ | MSM AHCI glue driver |
+ +------------------------+
+
+Design
+======
+The MSM AHCI driver acts as a glue driver for the Linux
+AHCI controller driver. It provides the following functionality:
+- Registers as a driver for the msm_sata device, which has an
+  AHCI-compliant controller and PHY as resources.
+- Registers an AHCI platform device in the probe function, providing
+  the AHCI platform data (see the sketch after this list).
+- AHCI platform data consists of the following callbacks:
+ - init
+ o PHY resource acquisition
+ o Clock and voltage regulator initialization
+ o PHY calibration
+ - exit
+ o PHY power down
+ o Clock and voltage regulator turn off
+ - suspend
+ - resume
+ o Sequence described in the next section.
+- The Linux AHCI platform driver then probes the AHCI device and
+ initializes it according to the standard AHCI specification.
+- The SATA drive is detected as part of scsi_scan_host() called by
+ LIBAHCI after controller initialization.
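+
+The following is a minimal, illustrative sketch (not the actual driver
+source) of how a glue driver might provide the AHCI platform data. It
+assumes the struct ahci_platform_data callback signatures from
+include/linux/ahci_platform.h of this kernel generation; the msm_sata_*
+names are hypothetical:
+
+	#include <linux/ahci_platform.h>
+	#include <linux/err.h>
+	#include <linux/platform_device.h>
+
+	static int msm_sata_init(struct device *dev, void __iomem *addr)
+	{
+		/* Acquire PHY resources, enable clocks and voltage
+		 * regulators, and calibrate the PHY before LIBAHCI
+		 * touches the controller registers at 'addr'.
+		 */
+		return 0;
+	}
+
+	static void msm_sata_exit(struct device *dev)
+	{
+		/* Power down the PHY and turn off clocks/regulators. */
+	}
+
+	static struct ahci_platform_data msm_ahci_pdata = {
+		.init = msm_sata_init,
+		.exit = msm_sata_exit,
+		/* .suspend/.resume: see the Power Management section */
+	};
+
+	static int msm_sata_probe(struct platform_device *pdev)
+	{
+		struct platform_device *ahci;
+
+		/* Register the "ahci" platform device that the generic
+		 * Linux AHCI platform driver binds to.
+		 */
+		ahci = platform_device_register_data(&pdev->dev, "ahci", -1,
+				&msm_ahci_pdata, sizeof(msm_ahci_pdata));
+		return IS_ERR(ahci) ? PTR_ERR(ahci) : 0;
+	}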
+
+Power Management
+================
+Various power modes are supported by this driver.
+
+Platform suspend/resume:
+During suspend:
+- PHY analog blocks are powered down
+- Controller and PHY are kept in Power-on-Reset (POR) mode
+- Clocks and voltage regulators are gated
+
+During resume:
+- Clocks and voltage regulators are ungated
+- PHY is powered up and calibrated to functional mode
+- Controller is re-initialized to process commands.
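+
+The suspend and resume steps above might map onto the glue driver's
+callbacks roughly as follows. This is a simplified sketch, not the
+actual driver source: it assumes a single PHY clock and voltage
+regulator (the names are hypothetical) and uses the standard clk and
+regulator consumer APIs:
+
+	#include <linux/clk.h>
+	#include <linux/device.h>
+	#include <linux/regulator/consumer.h>
+
+	/* Acquired during init (not shown). */
+	static struct clk *phy_clk;
+	static struct regulator *phy_vreg;
+
+	static int msm_sata_suspend(struct device *dev)
+	{
+		/* PHY analog blocks are powered down and the controller
+		 * and PHY are left in POR mode; then gate the clock and
+		 * the voltage regulator.
+		 */
+		clk_disable_unprepare(phy_clk);
+		regulator_disable(phy_vreg);
+		return 0;
+	}
+
+	static int msm_sata_resume(struct device *dev)
+	{
+		int ret;
+
+		ret = regulator_enable(phy_vreg);	/* ungate regulator */
+		if (ret)
+			return ret;
+
+		ret = clk_prepare_enable(phy_clk);	/* ungate clock */
+		if (ret) {
+			regulator_disable(phy_vreg);
+			return ret;
+		}
+
+		/* Re-calibrate the PHY here; LIBAHCI then re-initializes
+		 * the controller so it can process commands again.
+		 */
+		return 0;
+	}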
+
+Runtime suspend/resume:
+- Execute the same steps as in platform suspend/resume.
+- Runtime suspend/resume is disabled by default due to regressions
+  in hot-plug detection (specification limitation). Users can
+  enable runtime power management with the following shell commands:
+
+ # cd /sys/devices/platform/msm_sata.0/ahci.0/
+ # echo auto > ./power/control
+ # echo auto > ./ata1/power/control
+ # echo auto > ./ata1/host0/target0:0:0/0:0:0:0/power/control
+
+  Note: The device will be runtime-suspended only when the user unmounts
+ all the partitions.
+
+Link power management (defined by AHCI 1.3 specification):
+- Automatic low power mode transitions are supported.
+- AHCI supports two power modes: partial and slumber.
+- Software uses the Interface Communication Control (ICC) bits in the
+  AHCI register space to enable the automatic partial/slumber states.
+- Partial mode:
+ - Software asserts automatic partial mode when the use
+ case demands low latency resume.
+  - Upon receiving the partial mode signal, the PHY disables byte clocks
+    and re-enables them during resume, giving a low resume latency.
+- Slumber mode:
+ - Software asserts automatic slumber mode when the use
+ case demands low power consumption and can withstand
+ high resume latencies.
+  - Upon receiving the slumber mode signal, the PHY disables byte
+    clocks and some internal circuitry. Upon resume, the PHY
+    enables byte clocks and reacquires the PLL lock.
+- Once the software enables partial/slumber modes, the transition
+  into these modes is automatic and is handled by hardware without
+ software intervention while the controller is idle with no outstanding
+ commands to process.
+
+- The Linux AHCI link power management defines three modes:
+ - max_performance (default mode)
+    Doesn't allow partial/slumber transitions when the host is idle.
+  - medium_power (Partial mode)
+    The following shell commands are used to enable this mode:
+
+ # cd /sys/devices/platform/msm_sata.0/ahci.0/
+ # echo medium_power > ./ata1/host0/scsi_host/host0/link_power_management_policy
+
+ - min_power (Slumber mode)
+    The following shell commands are used to enable this mode:
+
+ # cd /sys/devices/platform/msm_sata.0/ahci.0/
+ # echo min_power > ./ata1/host0/scsi_host/host0/link_power_management_policy
+
+SMP/multi-core
+==============
+The MSM AHCI driver hooks only the init, exit, suspend and resume
+callbacks into the AHCI driver. These callbacks are serialized by
+design, so the driver is inherently SMP safe.
+
+Security
+========
+None.
+
+Performance
+===========
+The theoretical performance with Gen1 SATA PHY is 150MB/s (8b/10b encoding).
+The performance is dependent on various factors, mainly:
+- Capabilities of the external SATA hard disk connected to the MSM SATA port
+- Various system bus frequencies and system loads
+- System memory capabilities
+- Benchmark test applications that collect performance numbers
+
+One example of the maximum performance achieved in a specific system
+configuration follows:
+
+Benchmark: Iozone sequential performance
+Block size: 128K
+File size: 1GB
+Platform: APQ8064 V2 CDP
+CPU Governor: Performance
+
+SanDisk SSD (i100 64GB):
+Read - 135MB/s
+Write - 125MB/s
+
+Western Digital HDD (WD20EURS 2TB):
+Read - 121MB/s
+Write - 98MB/s
+
+Interface
+=========
+The MSM AHCI controller driver provides function pointers as the
+required interface to the Linux AHCI controller driver. The main
+routines implemented are init, exit, suspend, and resume for handling
+MSM-specific initialization, freeing of resources on exit, and
+MSM-specific power management tweaks during suspend power collapse.
+
+Driver parameters
+=================
+None.
+
+Config options
+==============
+Config option SATA_AHCI_MSM in drivers/ata/Kconfig enables this driver.
+
+Dependencies
+============
+The MSM AHCI controller driver is dependent on Linux AHCI driver,
+Linux ATA framework, Linux SCSI framework and Linux generic block layer.
+
+While configuring the kernel, the following options should be set:
+
+- CONFIG_BLOCK
+- CONFIG_SCSI
+- CONFIG_ATA
+- CONFIG_SATA_AHCI_PLATFORM
+
+User space utilities
+====================
+Any user space component that can mount a block device can be used to
+read/write data into persistent storage. However, at the time of this
+writing the author is not aware of any utilities that can manage the
+hardware from userspace.
+
+Other
+=====
+None.
+
+Known issues
+============
+None.
+
+To do
+=====
+- Device tree support.
+- MSM bus frequency voting support.
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 6aed1ce..b4ae5e6 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -198,57 +198,74 @@
The CPUfreq governor "interactive" is designed for latency-sensitive,
interactive workloads. This governor sets the CPU speed depending on
-usage, similar to "ondemand" and "conservative" governors. However,
-the governor is more aggressive about scaling the CPU speed up in
-response to CPU-intensive activity.
-
-Sampling the CPU load every X ms can lead to under-powering the CPU
-for X ms, leading to dropped frames, stuttering UI, etc. Instead of
-sampling the cpu at a specified rate, the interactive governor will
-check whether to scale the cpu frequency up soon after coming out of
-idle. When the cpu comes out of idle, a timer is configured to fire
-within 1-2 ticks. If the cpu is very busy between exiting idle and
-when the timer fires then we assume the cpu is underpowered and ramp
-to MAX speed.
-
-If the cpu was not sufficiently busy to immediately ramp to MAX speed,
-then governor evaluates the cpu load since the last speed adjustment,
-choosing the highest value between that longer-term load or the
-short-term load since idle exit to determine the cpu speed to ramp to.
+usage, similar to "ondemand" and "conservative" governors, but with a
+different set of configurable behaviors.
The tuneable values for this governor are:
+target_loads: CPU load values used to adjust speed to influence the
+current CPU load toward that value. In general, the lower the target
+load, the more often the governor will raise CPU speeds to bring load
+below the target. The format is a single target load, optionally
+followed by pairs of CPU speeds and CPU loads to target at or above
+those speeds. Colons can be used between the speeds and associated
+target loads for readability. For example:
+
+ 85 1000000:90 1700000:99
+
+targets CPU load 85% below speed 1GHz, 90% at or above 1GHz, until
+1.7GHz and above, at which load 99% is targeted. If speeds are
+specified these must appear in ascending order. Higher target load
+values are typically specified for higher speeds, that is, target load
+values also usually appear in an ascending order. The default is
+target load 90% for all speeds.
+
min_sample_time: The minimum amount of time to spend at the current
-frequency before ramping down. This is to ensure that the governor has
-seen enough historic cpu load data to determine the appropriate
-workload. Default is 80000 uS.
+frequency before ramping down. Default is 80000 uS.
hispeed_freq: An intermediate "hi speed" at which to initially ramp
when CPU load hits the value specified in go_hispeed_load. If load
stays high for the amount of time specified in above_hispeed_delay,
-then speed may be bumped higher. Default is maximum speed.
+then speed may be bumped higher. Default is the maximum speed
+allowed by the policy at governor initialization time.
-go_hispeed_load: The CPU load at which to ramp to the intermediate "hi
-speed". Default is 85%.
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
-above_hispeed_delay: Once speed is set to hispeed_freq, wait for this
-long before bumping speed higher in response to continued high load.
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
Default is 20000 uS.
-timer_rate: Sample rate for reevaluating cpu load when the system is
-not idle. Default is 20000 uS.
+timer_rate: Sample rate for reevaluating CPU load when the CPU is not
+idle. A deferrable timer is used, such that the CPU will not be woken
+from idle to service this timer until something else needs to run.
+(The maximum time to allow deferring this timer when not running at
+minimum speed is configurable via timer_slack.) Default is 20000 uS.
-input_boost: If non-zero, boost speed of all CPUs to hispeed_freq on
-touchscreen activity. Default is 0.
+timer_slack: Maximum additional time to defer handling the governor
+sampling timer beyond timer_rate when running at speeds above the
+minimum. For platforms that consume additional power at idle when
+CPUs are running at speeds greater than minimum, this places an upper
+bound on how long the timer will be deferred prior to re-evaluating
+load and dropping speed. For example, if timer_rate is 20000uS and
+timer_slack is 10000uS then timers will be deferred for up to 30msec
+when not at lowest speed. A value of -1 means defer timers
+indefinitely at all speeds. Default is 80000 uS.
boost: If non-zero, immediately boost speed of all CPUs to at least
hispeed_freq until zero is written to this attribute. If zero, allow
CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
-boostpulse: Immediately boost speed of all CPUs to hispeed_freq for
-min_sample_time, after which speeds are allowed to drop below
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
hispeed_freq according to load as usual.
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual. Default is 80000 uS.
+
3. The Governor Interface in the CPUfreq Core
=============================================
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index eb3986e..7851d53 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -6,7 +6,7 @@
The timer is attached to a GIC to deliver its two per-processor
interrupts (one for the secure mode, one for the non-secure mode).
-** Timer node properties:
+** CP15 Timer node properties:
- compatible : Should be "arm,armv7-timer"
@@ -21,3 +21,52 @@
interrupts = <1 13 0xf08 1 14 0xf08>;
clock-frequency = <100000000>;
};
+
+** Memory mapped timer node properties:
+
+- compatible : Should at least contain "arm,armv7-timer-mem".
+
+- clock-frequency : The frequency of the main counter, in Hz. Optional.
+
+- reg : The control frame base address.
+
+Note that #address-cells, #size-cells, and ranges shall be present to ensure
+the CPU can address the frame's registers.
+
+Each timer node has up to 8 frame sub-nodes with the following properties:
+
+- frame-number: 0 to 7.
+
+- interrupts : Interrupt list for physical and virtual timers in that order.
+ The virtual timer interrupt is optional.
+
+- reg : The first and second view base addresses in that order. The second view
+ base address is optional.
+
+- status : "disabled" indicates the frame is not available for use. Optional.
+
+Example:
+
+ timer@f0000000 {
+ compatible = "arm,armv7-timer-mem";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xf0000000 0x1000>;
+ clock-frequency = <50000000>;
+
+ frame0@f0001000 {
+			frame-number = <0>;
+ interrupts = <0 13 0x8>,
+ <0 14 0x8>;
+ reg = <0xf0001000 0x1000>,
+ <0xf0002000 0x1000>;
+ };
+
+ frame1@f0003000 {
+			frame-number = <1>;
+ interrupts = <0 15 0x8>;
+ reg = <0xf0003000 0x1000>;
+ status = "disabled";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt b/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt
index 3a29004..203730f 100644
--- a/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt
+++ b/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt
@@ -47,6 +47,22 @@
3 (SUPER_TURBO voltage): 1275000 uV
- vdd-apc-supply: Regulator to supply VDD APC power
+
+Optional properties:
+- vdd-mx-supply: Regulator to supply memory power as dependency
+ of VDD APC.
+- qcom,vdd-mx-vmax: The maximum voltage in uV for vdd-mx-supply. This
+ is required when vdd-mx-supply is present.
+- qcom,vdd-mx-vmin-method: The method to determine the minimum voltage for
+ vdd-mx-supply, which can be one of following
+ choices compared with VDD APC:
+ 0 => equal to the voltage(vmin) of VDD APC
+ 1 => equal to PVS corner ceiling voltage
+ 2 => equal to slow speed corner ceiling
+ 3 => equal to qcom,vdd-mx-vmax
+ This is required when vdd-mx-supply is present.
+
+
Example:
apc_vreg_corner: regulator@f9018000 {
status = "okay";
@@ -65,5 +81,8 @@
qcom,pvs-corner-ceiling-nom = <975000 1075000 1200000 1200000>;
qcom,pvs-corner-ceiling-fast = <900000 1000000 1140000 1140000>;
vdd-apc-supply = <&pm8226_s2>;
+ vdd-mx-supply = <&pm8226_l3_ao>;
+ qcom,vdd-mx-vmax = <1350000>;
+ qcom,vdd-mx-vmin-method = <1>;
};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
index 4d441ba..6b2f962 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
@@ -87,6 +87,10 @@
(In Bytes).
qcom,prio-rd: Read priority for a BIMC bus master (Can be 0/1/2)
qcom,prio-wr: Write priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio0: Priority low signal for a NoC bus master
+ (Can be 0/1/2).
+qcom,prio1: Priority high signal for a NoC bus master
+ (Can be 0/1/2)
Example:
@@ -149,7 +153,7 @@
- qcom,msm-bus,name: String representing the client-name
- qcom,msm-bus,num-cases: Total number of usecases
-- qcom,msm-bus,active-only: Context flag for requests in active or
+- qcom,msm-bus,active-only: Boolean context flag for requests in active or
dual (active & sleep) contex
- qcom,msm-bus,num-paths: Total number of master-slave pairs
- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing:
@@ -160,7 +164,7 @@
qcom,msm-bus,name = "client-name";
qcom,msm-bus,num-cases = <3>;
- qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,active-only;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors =
<22 512 0 0>, <26 512 0 0>,
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt b/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
index 5d1fafb..31600ca 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
@@ -19,9 +19,55 @@
Typically the sensor closest to CPU0.
- qcom,poll-ms: Sampling interval to read sensor, in ms.
- qcom,limit-temp: Threshold temperature to start stepping CPU down, in degC.
-- qcom,temp-hysteresis: Degrees below threshold temperature to step CPU up.
+- qcom,temp-hysteresis: Degrees C below threshold temperature to step CPU up.
- qcom,freq-step: Number of frequency steps to take on each CPU mitigation.
+Optional properties
+
+- qcom,core-limit-temp: Threshold temperature to start shutting down cores
+ in degC
+- qcom,core-temp-hysterisis: Degrees C below the core limit temperature at which
+			the cores will be brought back online in sequence.
+- qcom,core-control-mask: The cpu mask that will be used to determine if a
+ core can be controlled or not. A mask of 0 indicates
+ the feature is disabled.
+- qcom,vdd-restriction-temp: Threshold temperature in degC; when the temperature
+			is below it, vdd restriction is enabled, which sets a
+			higher voltage on key voltage rails.
+- qcom,vdd-restriction-temp-hysteresis: Threshold temperature in degC; when the
+			temperature is above it, vdd restriction on key rails
+			is disabled.
+- qcom,pmic-sw-mode-temp: Threshold temperature to disable auto mode on the
+ rail, in degC. If this property exists,
+ qcom,pmic-sw-mode-temp-hysteresis and
+ qcom,pmic-sw-mode-regs need to exist, otherwise return error.
+- qcom,pmic-sw-mode-temp-hysteresis: Degrees C below the threshold temperature to
+ enable auto mode on the rail, in degC. If this property exists,
+ qcom,pmic-sw-mode-temp and qcom,pmic-sw-mode-regs need to
+ exist, otherwise return error.
+- qcom,pmic-sw-mode-regs: Array of regulator names for which auto mode will be
+			disabled/enabled based on the threshold. If this
+			property exists, qcom,pmic-sw-mode-temp and
+			qcom,pmic-sw-mode-temp-hysteresis need to exist,
+			otherwise return error. Also, if this property is
+			defined, <consumer_supply_name>-supply =
+			<&phandle_of_regulator> must be defined as well.
+- <consumer_supply_name>-supply = <&phandle_of_regulator>: consumer_supply_name
+			is the name defined in the thermal driver.
+			phandle_of_regulator is defined by the regulator device tree node.
+
+Optional child nodes
+- qcom,<vdd restriction child node name>: Define the name of the child node.
+			If this property exists, qcom,vdd-rstr-reg, qcom,levels,
+ qcom,min-level and qcom,freq-req need to exist, otherwise
+ we return an error.
+- qcom,vdd-rstr-reg: Name of the rail
+- qcom,levels: Array of the level values. Unit is corner voltage for voltage request
+ or kHz for frequency request.
+- qcom,min-level: Request this level as minimum level when disabling voltage
+ restriction. Unit is corner voltage for voltage request
+ or kHz for frequency request.
+- qcom,freq-req: Flag to determine if we should restrict frequency on this rail
+ instead of voltage.
+
Example:
qcom,msm-thermal {
@@ -31,4 +77,20 @@
qcom,limit-temp = <60>;
qcom,temp-hysteresis = <10>;
qcom,freq-step = <2>;
+ qcom,core-limit-temp = <90>;
+ qcom,core-temp-hysterisis = <10>;
+ qcom,core-control-mask = <7>;
+ qcom,pmic-sw-mode-temp = <90>;
+ qcom,pmic-sw-mode-temp-hysteresis = <80>;
+ qcom,pmic-sw-mode-regs = "vdd_dig";
+ qcom,vdd-restriction-temp = <5>;
+ qcom,vdd-restriction-temp-hysteresis = <10>;
+		vdd_dig-supply = <&pm8841_s2_floor_corner>;
+
+		qcom,vdd-dig-rstr {
+ qcom,vdd-rstr-reg = "vdd_dig";
+ qcom,levels = <5 7 7>; /* Nominal, Super Turbo, Super Turbo */
+ qcom,min-level = <1>; /* No Request */
+ };
};
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
index a665431..c7a19ef 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
@@ -29,7 +29,7 @@
qcom,wdt@f9017000 {
compatible = "qcom,msm-watchdog";
reg = <0xf9017000 0x1000>;
- interrupts = <0 3 0 0 4 0>;
+ interrupts = <0 3 0>, <0 4 0>;
qcom,bark-time = <11000>;
qcom,pet-time = <10000>;
qcom,ipi-ping;
diff --git a/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt b/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
index c741514..82e7e2a 100644
--- a/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
+++ b/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
@@ -25,6 +25,7 @@
- qcom,saw-turns-off-pll: Version of SAW2.1 or can turn off the HFPLL, when
doing power collapse and so the core need to switch to Global PLL before
PC.
+- qcom,pc-resets-timer: Indicates that the timer gets reset during power collapse.
Example:
diff --git a/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt b/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt
index 88d69e0..86d863c 100644
--- a/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt
+++ b/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt
@@ -5,12 +5,26 @@
Required properties:
- compatible: Should be "qca,ar3002"
- qca,bt-reset-gpio: GPIO pin to bring BT Controller out of reset
+ - qca,bt-vdd-io-supply: Bluetooth VDD IO regulator handle
+ - qca,bt-vdd-pa-supply: Bluetooth VDD PA regulator handle
Optional properties:
- None
+ - qca,bt-vdd-ldo-supply: Bluetooth VDD LDO regulator handle. Kept under optional
+   parameters as some chipsets do not require an LDO or may use the same supply
+   as vddio.
+ - qca,bt-chip-pwd-supply: Chip power down gpio. It is required when the bluetooth
+   module and other modules like wifi co-exist in a single chip and share a
+   common gpio to bring the chip out of reset.
+ - qca,bt-vdd-io-voltage-level: min and max voltages for the vdd io regulator
+ - qca,bt-vdd-pa-voltage-level: min and max voltages for the vdd pa regulator
+ - qca,bt-vdd-ldo-voltage-level: min and max voltages for the vdd ldo regulator
Example:
bt-ar3002 {
compatible = "qca,ar3002";
qca,bt-reset-gpio = <&pm8941_gpios 34 0>;
+ qca,bt-vdd-io-supply = <&pm8941_s3>;
+ qca,bt-vdd-pa-supply = <&pm8941_l19>;
+ qca,bt-chip-pwd-supply = <&ath_chip_pwd_l>;
+		qca,bt-vdd-io-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <2900000 2900000>;
};
diff --git a/Documentation/devicetree/bindings/cache/msm_cache_erp.txt b/Documentation/devicetree/bindings/cache/msm_cache_erp.txt
index 400b299..8d00cc2 100644
--- a/Documentation/devicetree/bindings/cache/msm_cache_erp.txt
+++ b/Documentation/devicetree/bindings/cache/msm_cache_erp.txt
@@ -7,6 +7,17 @@
- interrupt-names: Should contain the interrupt names "l1_irq" and
"l2_irq"
+Optional properties:
+- reg: A set of I/O regions to be dumped in the event of a hardware fault being
+  detected. If this property is present, the "reg-names" property must be
+ present as well.
+- reg-names: Human-readable names assigned to the I/O regions defined by the
+ "reg" property. The names can be completely arbitrary, since they are
+ intended to be human-read during failure analysis, and because the set of I/O
+ regions of interest may vary with the overall system design. This property
+ shall only be present if the "reg" property is present, and must contain as
+ many elements as the "reg" property.
+
Example:
qcom,cache_erp {
compatible = "qcom,cache_erp";
@@ -14,3 +25,17 @@
interrupt-names = "l1_irq", "l2_irq";
};
+Example with "reg" property defined:
+ qcom,cache_erp@f9012000 {
+ reg = <0xf9012000 0x80>,
+ <0xf9089000 0x80>,
+ <0xf9099000 0x80>;
+
+ reg-names = "l2_saw",
+ "krait0_saw",
+ "krait1_saw";
+
+ compatible = "qcom,cache_erp";
+ interrupts = <1 9 0>, <0 2 0>;
+ interrupt-names = "l1_irq", "l2_irq";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
index b9a71f6..7eb65d2 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -9,7 +9,7 @@
- qcom,ce-hw-instance : should contain crypto HW instance.
- qcom,msm_bus,name: Should be "qcedev-noc"
- qcom,msm_bus,num_cases: Depends on the use cases for bus scaling
- - qcom,msm_bus,active-only: Default vector index
+ - qcom,msm_bus,active-only: Boolean flag for context of request (active/dual)
- qcom,msm_bus,num_paths: The paths for source and destination ports
- qcom,msm_bus,vectors: Vectors for bus topology.
@@ -31,7 +31,6 @@
qcom,ce-hw-shared;
qcom,msm-bus,name = "qcedev-noc";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<56 512 0 0>,
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 59f9879..79dc287 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -9,7 +9,7 @@
- qcom,ce-hw-instance : should contain crypto HW instance.
- qcom,msm_bus,name: Should be "qcrypto-noc"
- qcom,msm_bus,num_cases: Depends on the use cases for bus scaling
- - qcom,msm_bus,active-only: Default vector index
+ - qcom,msm_bus,active-only: Boolean flag for context of request (active/dual)
- qcom,msm_bus,num_paths: The paths for source and destination ports
- qcom,msm_bus,vectors: Vectors for bus topology.
@@ -30,7 +30,6 @@
qcom,ce-hw-shared;
qcom,msm-bus,name = "qcrypto-noc";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<56 512 0 0>,
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
index 5c426f2..ce4972a 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
@@ -17,12 +17,6 @@
- label: A string used to describe the controller used.
- qcom,supply-names: A list of strings that lists the names of the
regulator supplies.
-- qcom,supply-type: A list of strings that list the type of supply(ies)
- mentioned above. This list maps in the order of
- the supply names listed above.
- regulator = supply with controlled output
- switch = supply without controlled output. i.e.
- voltage switch
- qcom,supply-min-voltage-level: A list that specifies minimum voltage level
of supply(ies) mentioned above. This list maps
in the order of the supply names listed above.
@@ -44,7 +38,6 @@
vddio-supply = <&pm8226_l8>;
vdda-supply = <&pm8226_l4>;
qcom,supply-names = "vdd", "vddio", "vdda";
- qcom,supply-type = "regulator", "regulator", "regulator";
qcom,supply-min-voltage-level = <2800000 1800000 1200000>;
qcom,supply-max-voltage-level = <2800000 1800000 1200000>;
qcom,supply-peak-current = <150000 100000 100000>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
index 0422b57..a3f3a06 100644
--- a/Documentation/devicetree/bindings/fb/mdss-mdp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
@@ -108,13 +108,19 @@
- qcom,mdss-rot-block-size: The size of a memory block (in pixels) to be used
by the rotator. If this property is not specified,
then a default value of 128 pixels would be used.
-
+- qcom,mdss-has-bwc: Boolean property to indicate the presence of bandwidth
+ compression feature in the rotator.
+- qcom,mdss-has-decimation: Boolean property to indicate the presence of
+ decimation feature in fetch.
Optional subnodes:
Child nodes representing the frame buffer virtual devices.
Subnode properties:
- compatible : Must be "qcom,mdss-fb"
- cell-index : Index representing frame buffer
+- qcom,mdss-mixer-swap: A boolean property that indicates if the mixer muxes
+ need to be swapped based on the target panel.
+ By default the property is not defined.
@@ -141,6 +147,8 @@
qcom,mdss-pipe-dma-fetch-id = <10 13>;
qcom,mdss-smp-data = <22 4096>;
qcom,mdss-rot-block-size = <64>;
+ qcom,mdss-has-bwc;
+ qcom,mdss-has-decimation;
qcom,mdss-ctl-off = <0x00000600 0x00000700 0x00000800
0x00000900 0x0000A00>;
@@ -157,6 +165,7 @@
mdss_fb0: qcom,mdss_fb_primary {
cell-index = <0>;
compatible = "qcom,mdss-fb";
+ qcom,mdss-mixer-swap;
};
};
diff --git a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
index a2b66f7..8579ec0 100644
--- a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
+++ b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
@@ -12,15 +12,12 @@
- core-vcc-supply: phandle to the HDMI vcc regulator device tree node.
- qcom,hdmi-tx-supply-names: a list of strings that map in order
to the list of supplies.
-- qcom,hdmi-tx-supply-type: a type of supply(ies) mentioned above.
- 0 = supply with controlled output
- 1 = supply without controlled output. i.e. voltage switch
- qcom,hdmi-tx-min-voltage-level: specifies minimum voltage level
of supply(ies) mentioned above.
- qcom,hdmi-tx-max-voltage-level: specifies maximum voltage level
of supply(ies) mentioned above.
-- qcom,hdmi-tx-op-mode: specifies optimum operating mode of
- supply(ies) mentioned above.
+- qcom,hdmi-tx-peak-current: specifies the peak current that will be
+ drawn from the supply(ies) mentioned above.
- qcom,hdmi-tx-cec: gpio for Consumer Electronics Control (cec) line.
- qcom,hdmi-tx-ddc-clk: gpio for Display Data Channel (ddc) clock line.
@@ -56,10 +53,9 @@
core-vdda-supply = <&pm8941_l12>;
core-vcc-supply = <&pm8941_s3>;
qcom,hdmi-tx-supply-names = "hpd-gdsc", "hpd-5v", "core-vdda", "core-vcc";
- qcom,hdmi-tx-supply-type = <1 1 0 0>;
qcom,hdmi-tx-min-voltage-level = <0 0 1800000 1800000>;
qcom,hdmi-tx-max-voltage-level = <0 0 1800000 1800000>;
- qcom,hdmi-tx-op-mode = <0 0 1800000 0>;
+ qcom,hdmi-tx-peak-current = <0 0 1800000 0>;
qcom,hdmi-tx-cec = <&msmgpio 31 0>;
qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 0004302..436dfc7 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -97,7 +97,6 @@
/* Bus Scale Settings */
qcom,msm-bus,name = "grp3d";
qcom,msm-bus,num-cases = <6>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>, <89 604 0 0>,
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
index fbe8ffa..418447d 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
@@ -16,7 +16,10 @@
- interrupt-names : Should contain "eoc-int-en-set".
- qcom,adc-bit-resolution : Bit resolution of the ADC.
- qcom,adc-vdd-reference : Voltage reference used by the ADC.
-- qcom,rsense : Internal rsense resistor used for current measurements.
+
+Optional properties:
+- qcom,rsense : Use this property when an external rsense should be used
+		for current calculation; specify its value in nano-ohms.
Channel node
NOTE: Atleast one Channel node is required.
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
index b31ec30..d24139b 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
@@ -20,6 +20,7 @@
- synaptics,y-flip : modify orientation of the y axis
- synaptics,panel-x : panel x dimension
- synaptics,panel-y : panel y dimension
+ - synaptics,fw-image-name : name of firmware .img file in /etc/firmware
Example:
i2c@f9927000 { /* BLSP1 QUP5 */
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp.txt b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
index 4f31f07..a221433 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
@@ -68,8 +68,30 @@
- qcom,default-state: default state of the led, should be "on" or "off"
- qcom,turn-off-delay-ms: delay in millisecond for turning off the led when its default-state is "on". Value is being ignored in case default-state is "off".
+MPP LED is an LED controlled through a Multi Purpose Pin.
+
+Optional properties for MPP LED:
+- linux,default-trigger: trigger the led from external modules such as display
+- qcom,default-state: default state of the led, should be "on" or "off"
+- qcom,source-sel: select power source, default 1 (enabled)
+- qcom,mode-ctrl: select operation mode, default 0x60 = Mode Sink
+
Example:
+ qcom,leds@a200 {
+ status = "okay";
+ qcom,led_mpp_3 {
+ label = "mpp";
+ linux,name = "wled-backlight";
+			linux,default-trigger = "none";
+ qcom,default-state = "on";
+ qcom,max-current = <40>;
+ qcom,id = <6>;
+ qcom,source-sel = <1>;
+ qcom,mode-ctrl = <0x10>;
+ };
+ };
+
qcom,leds@d000 {
status = "okay";
qcom,rgb_pwm {
@@ -151,3 +173,4 @@
linux,name = "led:wled_backlight";
};
};
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index 2caa959..ac60e38 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -6,6 +6,8 @@
- qcom,hfi : supported Host-Firmware Interface, one of:
- "venus"
- "q6"
+- qcom,max-hw-load: The maximum load the hardware can support expressed in units
+ of macroblocks per second.
Optional properties:
- reg : offset and length of the register set for the device.
@@ -40,8 +42,6 @@
(enum hal_buffer) to its corresponding TZ usage. The TZ usages are defined
as "enum cp_mem_usage" in include/linux/msm_ion.h
- qcom,has-ocmem: indicate the target has ocmem if this property exists
-- qcom,max-hw-load: The maximum load the hardware can support expressed in units
- of macroblocks per second.
Example:
diff --git a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
index b99b716..caead84 100644
--- a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
+++ b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
@@ -92,7 +92,6 @@
qcom,msm-bus,name = "sdcc2";
qcom,msm-bus,num-cases = <7>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
<81 512 6656 13312>, /* 13 MB/s*/
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 87281f7..013d56e 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -155,7 +155,6 @@
qcom,cpu-dma-latency-us = <200>;
qcom,msm-bus,name = "sdhc2";
qcom,msm-bus,num-cases = <7>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
<81 512 6656 13312>, /* 13 MB/s*/
diff --git a/Documentation/devicetree/bindings/pil/pil-pronto.txt b/Documentation/devicetree/bindings/pil/pil-pronto.txt
index ad35985..199862f 100644
--- a/Documentation/devicetree/bindings/pil/pil-pronto.txt
+++ b/Documentation/devicetree/bindings/pil/pil-pronto.txt
@@ -14,6 +14,8 @@
- vdd_pronto_pll-supply: regulator to supply pronto pll.
- qcom,firmware-name: Base name of the firmware image. Ex. "wcnss"
- qcom,gpio-err-fatal: GPIO used by the wcnss to indicate error fatal to the Apps.
+- qcom,gpio-proxy-unvote: GPIO used by the wcnss to trigger proxy unvoting in
+ the Apps
- qcom,gpio-force-stop: GPIO used by the Apps to force the wcnss to shutdown.
Example:
@@ -30,6 +32,7 @@
/* GPIO input from wcnss */
qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_4_in 0 0>;
+	qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_4_in 2 0>;
/* GPIO output to wcnss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_4_out 0 0>;
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
index 70f8b55..4cbff52 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
@@ -14,6 +14,9 @@
- interrupts: The lpass watchdog interrupt
- vdd_cx-supply: Reference to the regulator that supplies the vdd_cx domain.
- qcom,firmware-name: Base name of the firmware image. Ex. "lpass"
+- qcom,gpio-err-fatal: GPIO used by the lpass to indicate error fatal to the apps.
+- qcom,gpio-force-stop: GPIO used by the apps to force the lpass to shutdown.
+- qcom,gpio-proxy-unvote: GPIO used by the lpass to indicate apps clock is ready.
Optional properties:
- vdd_pll-supply: Reference to the regulator that supplies the PLL's rail.
@@ -29,4 +32,11 @@
interrupts = <0 194 1>;
vdd_cx-supply = <&pm8841_s2>;
qcom,firmware-name = "lpass";
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
};
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index acd0ae3..2d20794 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -17,6 +17,8 @@
- vdd_mx-supply: Reference to the regulator that supplies the memory rail.
- qcom,firmware-name: Base name of the firmware image. Ex. "mdsp"
- qcom,gpio-err-fatal: GPIO used by the modem to indicate error fatal to the apps.
+- qcom,gpio-proxy-unvote: GPIO used by the modem to trigger proxy unvoting in
+ the apps.
- qcom,gpio-force-stop: GPIO used by the apps to force the modem to shutdown.
Optional properties:
@@ -46,8 +48,9 @@
qcom,pil-self-auth;
/* GPIO inputs from mss */
- gpio_err_fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
/* GPIO output to mss */
- gpio_force_stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
};
diff --git a/Documentation/devicetree/bindings/power/qpnp-bms.txt b/Documentation/devicetree/bindings/power/qpnp-bms.txt
index 5b22752..6d093f0 100644
--- a/Documentation/devicetree/bindings/power/qpnp-bms.txt
+++ b/Documentation/devicetree/bindings/power/qpnp-bms.txt
@@ -26,12 +26,9 @@
That is to say,
if (abs(shutdown-soc - current-soc) < limit)
then use old SoC.
-- qcom,adjust-soc-low-threshold : The low threshold for the "flat portion"
- of the charging curve. The BMS will not adjust SoC
- based on voltage during this time.
-- qcom,adjust-soc-high-threshold : The high threshold for the "flat
- portion" of the charging curve. The BMS will not
- adjust SoC based on voltage during this time.
+- qcom,adjust-soc-low-threshold : The low threshold for when the BMS algorithm
+ starts adjusting. If the estimated SoC is not below
+ this percentage, do not adjust.
- qcom,ocv-voltage-low-threshold-uv : The low voltage threshold for the
"flat portion" of the discharge curve. The bms will not
accept new ocvs between these thresholds.
@@ -59,6 +56,14 @@
- qcom,batt-type: Type of battery used. This is an integer that corresponds
to the enum defined in
include/linux/mfd/pm8xxx/batterydata-lib.h
+- qcom,high-ocv-correction-limit-uv: how much the bms will correct OCV when
+ voltage is above the flat portion of the discharge
+ curve.
+- qcom,low-ocv-correction-limit-uv: how much the bms will correct OCV when
+ voltage is below the flat portion of the discharge
+ curve.
+- qcom,hold-soc-est: if the voltage-based estimated SoC is above this percent,
+ the BMS will clamp SoC to be at least 1.
Parent node optional properties:
- qcom,ignore-shutdown-soc: A boolean that controls whether BMS will
@@ -107,14 +112,16 @@
qcom,shutdown-soc-valid-limit = <20>;
qcom,ocv-voltage-low-threshold-uv = <3650000>;
qcom,ocv-voltage-high-threshold-uv = <3750000>;
- qcom,adjust-soc-low-threshold = <25>;
- qcom,adjust-soc-high-threshold = <45>;
+ qcom,adjust-soc-low-threshold = <15>;
qcom,low-soc-calculate-soc-threshold = <15>;
qcom,low-voltage-threshold = <3420000>;
qcom,low-soc-calculate-soc-ms = <5000>;
qcom,calculate-soc-ms = <20000>;
qcom,chg-term-ua = <100000>;
qcom,batt-type = <0>;
+ qcom,low-ocv-correction-limit-uv = <100>;
+ qcom,high-ocv-correction-limit-uv = <50>;
+ qcom,hold-soc-est = <3>;
qcom,bms-iadc@3800 {
reg = <0x3800 0x100>;
diff --git a/Documentation/devicetree/bindings/power/qpnp-charger.txt b/Documentation/devicetree/bindings/power/qpnp-charger.txt
index f5465a4..fced0d7 100644
--- a/Documentation/devicetree/bindings/power/qpnp-charger.txt
+++ b/Documentation/devicetree/bindings/power/qpnp-charger.txt
@@ -7,12 +7,12 @@
Each of these peripherals are implemented as subnodes in the example at the
end of this file.
-- qcom,chg-chgr: Supports charging control and status
+- qcom,chgr: Supports charging control and status
reporting.
-- qcom,chg-bat-if: Battery status reporting such as presence,
+- qcom,bat-if: Battery status reporting such as presence,
temperature reporting and voltage collapse
protection.
-- qcom,chg-buck: Charger buck configuration and status
+- qcom,buck: Charger buck configuration and status
reporting with regards to several regulation
loops such as vdd, ibat etc.
- qcom,usb-chgpth: USB charge path detection and input current
@@ -23,38 +23,39 @@
settings, comparator override features etc.
Parent node required properties:
-- qcom,chg-vddmax-mv: Target voltage of battery in mV.
-- qcom,chg-vddsafe-mv: Maximum Vdd voltage in mV.
-- qcom,chg-vinmin-mv: Minimum input voltage in mV.
-- qcom,chg-ibatmax-ma: Maximum battery charge current in mA
-- qcom,chg-ibatsafe-ma: Safety battery current setting
-- qcom,chg-thermal-mitigation: Array of ibatmax values for different
+- qcom,vddmax-mv: Target voltage of battery in mV.
+- qcom,vddsafe-mv: Maximum Vdd voltage in mV.
+- qcom,vinmin-mv: Minimum input voltage in mV.
+- qcom,ibatmax-ma: Maximum battery charge current in mA
+- qcom,ibatsafe-ma: Safety battery current setting
+- qcom,thermal-mitigation: Array of ibatmax values for different
system thermal mitigation level.
Parent node optional properties:
-- qcom,chg-ibatterm-ma: Current at which charging is terminated when
+- qcom,ibatterm-ma: Current at which charging is terminated when
the analog end of charge option is selected.
-- qcom,chg-maxinput-usb-ma: Maximum input current USB.
-- qcom,chg-maxinput-dc-ma: Maximum input current DC.
-- qcom,chg-vbatdet-delta-mv: Battery charging resume delta.
-- qcom,chg-charging-disabled: Set this property to disable charging
+- qcom,maxinput-usb-ma: Maximum input current USB.
+- qcom,maxinput-dc-ma: Maximum input current DC.
+- qcom,vbatdet-delta-mv: Battery charging resume delta.
+- qcom,charging-disabled: Set this property to disable charging
by default. This can then be overriden
writing the the module parameter
"charging_disabled".
-- qcom,chg-use-default-batt-values: Set this flag to force reporting of
+- qcom,use-default-batt-values: Set this flag to force reporting of
battery temperature of 250 decidegree
Celsius, state of charge to be 50%
and disable charging.
-- qcom,chg-warm-bat-decidegc: Warm battery temperature in decidegC.
-- qcom,chg-cool-bat-decidegc: Cool battery temperature in decidegC.
+- qcom,warm-bat-decidegc: Warm battery temperature in decidegC.
+- qcom,cool-bat-decidegc: Cool battery temperature in decidegC.
Note that if both warm and cool battery
temperatures are set, the corresponding
ibatmax and bat-mv properties are
required to be set.
-- qcom,chg-ibatmax-cool-ma: Maximum cool battery charge current.
-- qcom,chg-ibatmax-warm-ma: Maximum warm battery charge current.
-- qcom,chg-warm-bat-mv: Warm temperature battery target voltage.
-- qcom,chg-cool-bat-mv: Cool temperature battery target voltage.
+- qcom,ibatmax-cool-ma: Maximum cool battery charge current.
+- qcom,ibatmax-warm-ma: Maximum warm battery charge current.
+- qcom,warm-bat-mv: Warm temperature battery target voltage.
+- qcom,cool-bat-mv: Cool temperature battery target voltage.
+- qcom,tchg-mins: Maximum total software initialized charge time.
Sub node required structure:
- A qcom,chg node must be a child of an SPMI node that has specified
@@ -77,13 +78,13 @@
qcom,usb-chgpth:
- usbin-valid
- qcom,chg-chgr:
+ qcom,chgr:
- chg-done
- chg-failed
The following interrupts are available:
- qcom,chg-chgr:
+ qcom,chgr:
- chg-done: Triggers on charge completion.
- chg-failed: Notifies of charge failures.
- fast-chg-on: Notifies of fast charging state.
@@ -98,7 +99,7 @@
setting, can be used as
battery alarm.
- qcom,chg-buck:
+ qcom,buck:
- vdd-loop: VDD loop change interrupt.
- ibat-loop: Ibat loop change interrupt.
- ichg-loop: Charge current loop change.
@@ -107,7 +108,7 @@
- vref-ov: Reference overvoltage interrupt.
- vbat-ov: Battery overvoltage interrupt.
- qcom,chg-bat-if:
+ qcom,bat-if:
- psi: PMIC serial interface interrupt.
- vcp-on: Voltage collapse protection
status interrupt.
@@ -140,22 +141,22 @@
#address-cells = <1>;
#size-cells = <1>;
- qcom,chg-vddmax-mv = <4200>;
- qcom,chg-vddsafe-mv = <4200>;
- qcom,chg-vinmin-mv = <4200>;
- qcom,chg-ibatmax-ma = <1500>;
- qcom,chg-ibatterm-ma = <200>;
- qcom,chg-ibatsafe-ma = <1500>;
- qcom,chg-thermal-mitigation = <1500 700 600 325>;
- qcom,chg-cool-bat-degc = <10>;
- qcom,chg-cool-bat-mv = <4100>;
- qcom,chg-ibatmax-warm-ma = <350>;
- qcom,chg-warm-bat-degc = <45>;
- qcom,chg-warm-bat-mv = <4100>;
- qcom,chg-ibatmax-cool-ma = <350>;
- qcom,chg-vbatdet-delta-mv = <60>;
+ qcom,vddmax-mv = <4200>;
+ qcom,vddsafe-mv = <4200>;
+ qcom,vinmin-mv = <4200>;
+ qcom,ibatmax-ma = <1500>;
+ qcom,ibatterm-ma = <200>;
+ qcom,ibatsafe-ma = <1500>;
+ qcom,thermal-mitigation = <1500 700 600 325>;
+ qcom,cool-bat-degc = <10>;
+ qcom,cool-bat-mv = <4100>;
+ qcom,ibatmax-warm-ma = <350>;
+ qcom,warm-bat-degc = <45>;
+ qcom,warm-bat-mv = <4100>;
+ qcom,ibatmax-cool-ma = <350>;
+ qcom,vbatdet-delta-mv = <60>;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
reg = <0x1000 0x100>;
interrupts = <0x0 0x10 0x0>,
<0x0 0x10 0x1>,
@@ -176,7 +177,7 @@
"vbat-det-lo";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
reg = <0x1100 0x100>;
interrupts = <0x0 0x11 0x0>,
<0x0 0x11 0x1>,
@@ -195,7 +196,7 @@
"vbat-ov";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
reg = <0x1200 0x100>;
interrupts = <0x0 0x12 0x0>,
<0x0 0x12 0x1>,
@@ -210,7 +211,7 @@
"batt-pres";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
reg = <0x1300 0x100>;
interrupts = <0 0x13 0x0>,
<0 0x13 0x1>,
@@ -221,7 +222,7 @@
"chg-gone";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
reg = <0x1400 0x100>;
interrupts = <0x0 0x14 0x0>,
<0x0 0x14 0x1>;
@@ -230,7 +231,7 @@
"coarse-det-dc";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
reg = <0x1500 0x100>;
interrupts = <0x0 0x15 0x0>,
<0x0 0x15 0x1>;
@@ -239,7 +240,7 @@
"boost-pwr-ok";
};
- qcom,chg-misc@1600 {
+ qcom,misc@1600 {
reg = <0x1600 0x100>;
};
};
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
index 30d34f6..eb62ea1 100644
--- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -11,6 +11,9 @@
Optional properties:
- parent-supply: phandle to the parent supply/regulator node
+ - qcom,retain-mems: For Oxili GDSCs only: Presence currently denotes a hardware
+ requirement to assert the forced memory retention signals
+ in the core's clock branch control register.
Example:
gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
diff --git a/Documentation/devicetree/bindings/regulator/krait-regulator.txt b/Documentation/devicetree/bindings/regulator/krait-regulator.txt
index aaa731e..6a02e86 100644
--- a/Documentation/devicetree/bindings/regulator/krait-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/krait-regulator.txt
@@ -13,6 +13,8 @@
register base
- reg-names: "apcs_gcc" -string to identify the area where
the APCS GCC registers reside.
+- qcom,pfm-threshold The power coeff threshold in abstract power units below which
+ pmic will be made to operate in PFM mode.
Optional properties:
- qcom,use-phase-switching indicates whether the driver should add/shed phases on the PMIC
@@ -51,6 +53,7 @@
reg-names = "apcs_gcc";
compatible = "qcom,krait-pdn";
qcom,use-phase-switching;
+ qcom,pfm-threshold = <376975>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-regulator.txt
index 2116888..041928d 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-regulator.txt
@@ -16,12 +16,17 @@
the spmi-slave-container property
Optional properties:
+- interrupts: List of interrupts used by the regulator.
+- interrupt-names: List of strings defining the names of the
+ interrupts in the 'interrupts' property 1-to-1.
+ Supported values are "ocp" for voltage switch
+ type regulators. If an OCP interrupt is
+ specified, then the voltage switch will be
+ toggled off and back on when OCP triggers in
+ order to handle high in-rush current.
- qcom,system-load: Load in uA present on regulator that is not
captured by any consumer request
- qcom,enable-time: Time in us to delay after enabling the regulator
-- qcom,ocp-enable-time: Time to delay in us between enabling a switch and
- subsequently enabling over current protection
- (OCP) for the switch
- qcom,auto-mode-enable: 1 = Enable automatic hardware selection of
regulator mode (HPM vs LPM); not available on
boost type regulators
@@ -30,11 +35,18 @@
so that it acts like a switch and simply outputs
its input voltage
0 = Do not enable bypass mode
-- qcom,ocp-enable: 1 = Enable over current protection (OCP) for
- voltage switch type regulators so that they
- latch off automatically when over current is
- detected
+- qcom,ocp-enable: 1 = Allow over current protection (OCP) to be
+ enabled for voltage switch type regulators so
+ that they latch off automatically when over
+ current is detected. OCP is enabled when in
+ HPM or auto mode.
0 = Disable OCP
+- qcom,ocp-max-retries: Maximum number of times to try toggling a voltage
+ switch off and back on as a result of
+ consecutive over current events.
+- qcom,ocp-retry-delay: Time to delay in milliseconds between each
+ voltage switch toggle after an over current
+ event takes place.
- qcom,pull-down-enable: 1 = Enable output pull down resistor when the
regulator is disabled
0 = Disable pull down resistor
@@ -76,6 +88,16 @@
1 = 0.25 uA
2 = 0.55 uA
3 = 0.75 uA
+- qcom,hpm-enable: 1 = Enable high power mode (HPM), also referred
+ to as NPM. HPM consumes more ground current
+ than LPM, but it can source significantly higher
+ load current. HPM is not available on boost
+ type regulators. For voltage switch type
+ regulators, HPM implies that over current
+ protection and soft start are active all the
+ time. This configuration can be overwritten
+ by changing the regulator's mode dynamically.
+ 0 = Do not enable HPM
- qcom,force-type: Override the type and subtype register values. Useful for some
regulators that have invalid types advertised by the hardware.
The format is two unsigned integers of the form <type subtype>.
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index cc4491b..be08a1a 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -389,15 +389,25 @@
qcom,msm-cpudai-auxpcm-data = <0>, <0>;
qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
- qcom,msm-auxpcm-rx {
+ qcom,msm-prim-auxpcm-rx {
qcom,msm-auxpcm-dev-id = <4106>;
compatible = "qcom,msm-auxpcm-dev";
};
- qcom,msm-auxpcm-tx {
+ qcom,msm-prim-auxpcm-tx {
qcom,msm-auxpcm-dev-id = <4107>;
compatible = "qcom,msm-auxpcm-dev";
};
+
+ qcom,msm-sec-auxpcm-rx {
+ qcom,msm-auxpcm-dev-id = <4108>;
+ compatible = "qcom,msm-auxpcm-dev";
+ };
+
+ qcom,msm-sec-auxpcm-tx {
+ qcom,msm-auxpcm-dev-id = <4109>;
+ compatible = "qcom,msm-auxpcm-dev";
+ };
};
qcom,msm-pcm-hostless {
@@ -433,13 +443,20 @@
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source.
- qcom,cdc-mclk-gpios : GPIO on which mclk signal is comming.
-- taiko-mclk-clk : phandle to PMIC8941 clkdiv1 node.
- qcom,taiko-mclk-clk-freq : Taiko mclk Freq in Hz. currently only 9600000Hz
is supported.
-- prim-auxpcm-gpio-clk : GPIO on which AUXPCM clk signal is coming.
-- prim-auxpcm-gpio-sync : GPIO on which AUXPCM SYNC signal is coming.
-- prim-auxpcm-gpio-din : GPIO on which AUXPCM DIN signal is coming.
-- prim-auxpcm-gpio-dout : GPIO on which AUXPCM DOUT signal is coming.
+- qcom,prim-auxpcm-gpio-clk : GPIO on which Primary AUXPCM clk signal is coming.
+- qcom,prim-auxpcm-gpio-sync : GPIO on which Primary AUXPCM SYNC signal is coming.
+- qcom,prim-auxpcm-gpio-din : GPIO on which Primary AUXPCM DIN signal is coming.
+- qcom,prim-auxpcm-gpio-dout : GPIO on which Primary AUXPCM DOUT signal is coming.
+- qcom,prim-auxpcm-gpio-set : set of GPIO lines used for Primary AUXPCM port
+ Possible Values:
+ prim-gpio-prim : Primary AUXPCM shares GPIOs with Primary MI2S
+ prim-gpio-tert : Primary AUXPCM shares GPIOs with Tertiary MI2S
+- qcom,sec-auxpcm-gpio-clk : GPIO on which Secondary AUXPCM clk signal is coming.
+- qcom,sec-auxpcm-gpio-sync : GPIO on which Secondary AUXPCM SYNC signal is coming.
+- qcom,sec-auxpcm-gpio-din : GPIO on which Secondary AUXPCM DIN signal is coming.
+- qcom,sec-auxpcm-gpio-dout : GPIO on which Secondary AUXPCM DOUT signal is coming.
- qcom,us-euro-gpios : GPIO on which gnd/mic swap signal is coming.
Optional properties:
- qcom,hdmi-audio-rx: specifies if HDMI audio support is enabled or not.
@@ -481,16 +498,20 @@
"MIC BIAS4 External", "Digital Mic6";
qcom,cdc-mclk-gpios = <&pm8941_gpios 15 0>;
- taiko-mclk-clk = <&pm8941_clkdiv1>;
qcom,taiko-mclk-clk-freq = <9600000>;
qcom,us-euro-gpios = <&pm8941_gpios 20 0>;
qcom,hdmi-audio-rx;
- prim-auxpcm-gpio-clk = <&msmgpio 65 0>;
- prim-auxpcm-gpio-sync = <&msmgpio 66 0>;
- prim-auxpcm-gpio-din = <&msmgpio 67 0>;
- prim-auxpcm-gpio-dout = <&msmgpio 68 0>;
+ qcom,prim-auxpcm-gpio-clk = <&msmgpio 65 0>;
+ qcom,prim-auxpcm-gpio-sync = <&msmgpio 66 0>;
+ qcom,prim-auxpcm-gpio-din = <&msmgpio 67 0>;
+ qcom,prim-auxpcm-gpio-dout = <&msmgpio 68 0>;
+ qcom,prim-auxpcm-gpio-set = "prim-gpio-prim";
+ qcom,sec-auxpcm-gpio-clk = <&msmgpio 79 0>;
+ qcom,sec-auxpcm-gpio-sync = <&msmgpio 80 0>;
+ qcom,sec-auxpcm-gpio-din = <&msmgpio 81 0>;
+ qcom,sec-auxpcm-gpio-dout = <&msmgpio 82 0>;
};
* msm-dai-mi2s
@@ -608,7 +629,7 @@
* msm-adsp-loader
Required properties:
- - compatible : "msm-adsp-loader"
+ - compatible : "qcom,adsp-loader"
- qcom,adsp-state:
It is possible that some MSM use PIL to load the ADSP image. While
other MSM may use SBL to load the ADSP image at boot. Audio APR needs
@@ -623,3 +644,22 @@
compatible = "qcom,adsp-loader";
qcom,adsp-state = <2>;
};
+
+* msm-audio-ion
+
+Required properties:
+ - compatible : "qcom,msm-audio-ion"
+
+Optional properties:
+ - qcom,smmu-enabled:
+ Some MSM targets have an SMMU in the ADSP while others do not. The audio
+ library introduces a wrapper around the ION APIs, and the wrapper needs to
+ know whether the ADSP has an SMMU in order to handle the ION APIs
+ differently. Presence of this property means the ADSP has an SMMU.
+
+Example:
+
+qcom,msm-audio-ion {
+ compatible = "qcom,msm-audio-ion;
+ qcom,smmu-enabled;
+};
diff --git a/Documentation/devicetree/bindings/sound/taiko_codec.txt b/Documentation/devicetree/bindings/sound/taiko_codec.txt
index ffea58f..777933a 100644
--- a/Documentation/devicetree/bindings/sound/taiko_codec.txt
+++ b/Documentation/devicetree/bindings/sound/taiko_codec.txt
@@ -35,6 +35,10 @@
- qcom,cdc-vddcx-2-voltage: cx-2 supply's voltage level min and max in mV.
- qcom,cdc-vddcx-2-current: cx-2 supply's max current in mA.
+ - qcom,cdc-static-supplies: List of supplies to be enabled prior to codec
+ hardware probe. Supplies in this list will
+ stay enabled.
+
- qcom,cdc-micbias-ldoh-v - LDOH output in volts (should be 1.95 V and 3.00 V).
- qcom,cdc-micbias-cfilt1-mv - cfilt1 output voltage in milli volts.
@@ -67,6 +71,11 @@
values for 9.6MHZ mclk can be 2400000 Hz, 3200000 Hz
and 4800000 Hz. The values for 12.288MHz mclk can be
3072200 Hz, 4096000 Hz and 6144000 Hz.
+
+ - qcom,cdc-on-demand-supplies: List of supplies which can be enabled
+ dynamically.
+ Supplies in this list are off by default.
+
Example:
taiko_codec {
@@ -103,6 +112,16 @@
qcom,cdc-vddcx-2-voltage = <1225000 1225000>;
qcom,cdc-vddcx-2-current = <5000>;
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1",
+ "cdc-vdd-a-1p2v",
+ "cdc-vddcx-1",
+ "cdc-vddcx-2";
+
+ qcom,cdc-on-demand-supplies = "cdc-vdd-spkdrv";
+
qcom,cdc-micbias-ldoh-v = <0x3>;
qcom,cdc-micbias-cfilt1-mv = <1800>;
qcom,cdc-micbias-cfilt2-mv = <2700>;
@@ -155,6 +174,10 @@
- qcom,cdc-vddcx-2-voltage: cx-2 supply's voltage level min and max in mV.
- qcom,cdc-vddcx-2-current: cx-2 supply's max current in mA.
+ - qcom,cdc-static-supplies: List of supplies to be enabled prior to codec
+ hardware probe. Supplies in this list will
+ stay enabled.
+
- qcom,cdc-micbias-ldoh-v - LDOH output in volts (should be 1.95 V and 3.00 V).
- qcom,cdc-micbias-cfilt1-mv - cfilt1 output voltage in milli volts.
@@ -179,6 +202,20 @@
- qcom,cdc-mclk-clk-rate - Specifies the master clock rate in Hz required for
codec.
+Optional properties:
+
+ - cdc-vdd-spkdrv-supply: phandle of spkdrv supply's regulator device tree node.
+ - qcom,cdc-vdd-spkdrv-voltage: spkdrv supply voltage level min and max in mV.
+ - qcom,cdc-vdd-spkdrv-current: spkdrv supply max current in mA.
+
+ - qcom,cdc-on-demand-supplies: List of supplies which can be enabled
+ dynamically.
+ Supplies in this list are off by default.
+
Example:
i2c@f9925000 {
cell-index = <3>;
@@ -228,6 +265,16 @@
qcom,cdc-vddcx-2-voltage = <1200000 1200000>;
qcom,cdc-vddcx-2-current = <10000>;
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1",
+ "cdc-vdd-a-1p2v",
+ "cdc-vddcx-1",
+ "cdc-vddcx-2";
+
+ qcom,cdc-on-demand-supplies = "cdc-vdd-spkdrv";
+
qcom,cdc-micbias-ldoh-v = <0x3>;
qcom,cdc-micbias-cfilt1-mv = <1800>;
qcom,cdc-micbias-cfilt2-mv = <2700>;
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
index 1388b7d..9b0f97b 100644
--- a/Documentation/devicetree/bindings/thermal/tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -43,6 +43,9 @@
no need to re-initialize them. The control registers are also
under a secure domain which can prevent them from being initialized
locally.
+- qcom,sensor-id : If this property is present, map the TSENS sensors based on
+ the remote sensors that are enabled in hardware. The mapping must not
+ exceed the number of supported sensors.
Example:
tsens@fc4a8000 {
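+		/* Illustrative only, not part of this change: map logical TSENS
+		 * sensors to the remote sensors enabled in hardware (the values
+		 * below are placeholders).
+		 */
+		qcom,sensor-id = <0 1 2 4 5>;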
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
index 5861eea..9754c2e 100644
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
+++ b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
@@ -77,7 +77,6 @@
qcom,msm-bus,name = "serial_uart0";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<84 512 0 0>,
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial_hs.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial_hs.txt
index c597536..96c9486 100644
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial_hs.txt
+++ b/Documentation/devicetree/bindings/tty/serial/msm_serial_hs.txt
@@ -93,7 +93,6 @@
qcom,msm-bus,name = "uart7";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<84 512 0 0>,
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index fd5b93e..6d54f7e 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -12,6 +12,7 @@
Optional properties:
- tx-fifo-resize: determines if the FIFO *has* to be reallocated.
+ - host-only-mode: if present, then dwc3 should be run in host-only mode.
This is usually a subnode to DWC3 glue to which it is connected.
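+
+Illustrative fragment (not part of this change) showing the new flag alongside
+the existing optional property; the node name and values are placeholders:
+
+	dwc3@f9200000 {
+		compatible = "synopsys,dwc3";
+		reg = <0xf9200000 0xcc00>;
+		interrupts = <0 131 0>;
+		tx-fifo-resize;
+		host-only-mode;
+	};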
diff --git a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
index 1fb2ba9..8ce31d9 100644
--- a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
@@ -38,6 +38,9 @@
STROBE GPIO PAD.
- hsic,data-pad-offset : Offset of TLMM register for configuring HSIC
DATA GPIO PAD.
+- qcom,phy-sof-workaround : If present, the HSIC PHY has hardware bugs related
+ to SOFs and the corresponding software workarounds are required.
+
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
below optional properties:
- qcom,msm_bus,name
@@ -71,7 +74,6 @@
qcom,msm-bus,name = "hsic";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<85 512 0 0>,
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index d0886b0..1613856 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -7,7 +7,8 @@
- regs : offset and length of the register set in the memory map
- interrupts: IRQ line
- interrupt-names: OTG interrupt name(s) referenced in interrupts above
- HSUSB OTG expects "core_irq" and optionally "async_irq".
+ HSUSB OTG expects "core_irq", which is the IRQ line from the core;
+ the optional interrupts are described in the next section.
- qcom,hsusb-otg-phy-type: PHY type can be one of
1 - Chipidea 45nm PHY
2 - Synopsis 28nm PHY
@@ -22,8 +23,15 @@
1 - PHY control
2 - PMIC control
3 - User control (via debugfs)
+- <supply-name>-supply: handle to the regulator device tree node
+ Required "supply-name" is "HSUSB_VDDCX" (when voting for VDDCX) or
+ "hsusb_vdd_dig" (when voting for VDDCX Corner voltage),
+ "HSUSB_1p8-supply" and "HSUSB_3p3-supply".
Optional properties :
+- interrupt-names : Optional interrupt resource entries are:
+ "async_irq" : Interrupt from HSPHY for asynchronous wakeup events in LPM.
+ "pmic_id_irq" : Interrupt from PMIC for external ID pin notification.
- qcom,hsusb-otg-disable-reset: If present then core is RESET only during
init, otherwise core is RESET for every cable disconnect as well
- qcom,hsusb-otg-pnoc-errata-fix: If present then workaround for PNOC
@@ -39,7 +47,6 @@
- qcom,hsusb-otg-power-budget: VBUS power budget in mA
0 will be treated as 500mA
- qcom,hsusb-otg-pclk-src-name: The source of pclk
-- qcom,hsusb-otg-pmic-id-irq: ID, routed to PMIC IRQ number
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
below optional properties:
- qcom,msm_bus,name
@@ -54,16 +61,16 @@
Used for allowing USB to respond for remote wakup.
- qcom,hsusb-otg-delay-lpm: If present then USB core will wait one second
after disconnect before entering low power mode.
-- <supply-name>-supply: handle to the regulator device tree node
- Required "supply-name" is "HSUSB_VDDCX" (when voting for VDDCX) or
- "hsusb_vdd_dig" (when voting for VDDCX Corner voltage),
- "HSUSB_1p8-supply" and "HSUSB_3p3-supply".
+- <supply-name>-supply: handle to the regulator device tree node.
+ Optional "supply-name" is "vbus_otg" to supply vbus in host mode.
- qcom,vdd-voltage-level: This property must be a list of three integer
values (no, min, max) where each value represents either a voltage
in microvolts or a value corresponding to voltage corner.
- qcom,dp-manual-pullup: If present, vbus is not routed to USB controller/phy
and controller driver therefore enables pull-up explicitly before
starting controller using usbcmd run/stop bit.
+- qcom,usb2-enable-hsphy2: If present then USB2 controller is connected to 2nd
+ HSPHY.
Example HSUSB OTG controller device node :
usb@f9690000 {
@@ -81,7 +88,6 @@
qcom,hsusb-otg-phy-init-seq = <0x01 0x90 0xffffffff>;
qcom,hsusb-otg-power-budget = <500>;
qcom,hsusb-otg-pclk-src-name = "dfab_usb_clk";
- qcom,hsusb-otg-pmic-id-irq = <47>
qcom,hsusb-otg-lpm-on-dev-suspend;
qcom,hsusb-otg-clk-always-on-workaround;
hsusb_vdd_dig-supply = <&pm8226_s1_corner>;
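+		/* Illustrative only, not part of this change: optional VBUS
+		 * supply used in host mode (the phandle is a placeholder).
+		 */
+		vbus_otg-supply = <&usb_otg_vbus>;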
@@ -115,6 +121,8 @@
- qcom,pool-64-bit-align: If present then the pool's memory will be aligned
to 64 bits
- qcom,enable_hbm: if present host bus manager is enabled.
+- qcom,disable-park-mode: if present, park mode is disabled. Park mode allows executing
+ up to 3 USB packets from each QH.
Example MSM HSUSB EHCI controller device node :
ehci: qcom,ehci-host@f9a55000 {
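+		/* Illustrative only, not part of this change: park mode can be
+		 * disabled with the boolean property below.
+		 */
+		qcom,disable-park-mode;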
diff --git a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
index e394b56..c130b26 100644
--- a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
+++ b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
@@ -7,8 +7,8 @@
Required properties:
- compatible: "wcnss_wlan"
-- reg: offset and length of the register set for the device. The pair
- corresponds to PRONTO.
+- reg: physical address and length of the register set for the device.
+- reg-names: "wcnss_mmio", "wcnss_fiq"
- interupts: Pronto to Apps interrupts for tx done and rx pending.
- qcom,pronto-vddmx-supply: regulator to supply pronto pll.
- qcom,pronto-vddcx-supply: regulator to supply WLAN/BT/FM digital module.
@@ -25,8 +25,9 @@
qcom,wcnss-wlan@fb000000 {
compatible = "qcom,wcnss_wlan";
- reg = <0xfb000000 0x280000>;
- reg-names = "wcnss_mmio";
+ reg = <0xfb000000 0x280000>,
+ <0xf9011008 0x04>;
+ reg-names = "wcnss_mmio", "wcnss_fiq";
interrupts = <0 145 0 0 146 0>;
interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 17a44b3..8226e43 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -213,6 +213,9 @@
config ARCH_MTD_XIP
bool
+config ARCH_WANT_KMAP_ATOMIC_FLUSH
+ bool
+
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
diff --git a/arch/arm/boot/dts/apq8074-v2-liquid.dts b/arch/arm/boot/dts/apq8074-v2-liquid.dts
new file mode 100644
index 0000000..4ec1cdd
--- /dev/null
+++ b/arch/arm/boot/dts/apq8074-v2-liquid.dts
@@ -0,0 +1,34 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "apq8074-v2.dtsi"
+/include/ "msm8974-liquid.dtsi"
+
+/ {
+ model = "Qualcomm APQ 8074v2 LIQUID";
+ compatible = "qcom,apq8074-liquid", "qcom,apq8074", "qcom,liquid";
+ qcom,msm-id = <184 9 0x20000>;
+};
+
+&usb3 {
+ interrupt-parent = <&usb3>;
+ interrupts = <0 1>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0xffffffff>;
+ interrupt-map = <0x0 0 &intc 0 133 0
+ 0x0 1 &spmi_bus 0x0 0x0 0x9 0x0>;
+ interrupt-names = "hs_phy_irq", "pmic_id_irq";
+
+ qcom,misc-ref = <&pm8941_misc>;
+};
diff --git a/arch/arm/boot/dts/apq8074-v2.dtsi b/arch/arm/boot/dts/apq8074-v2.dtsi
new file mode 100644
index 0000000..3b65236
--- /dev/null
+++ b/arch/arm/boot/dts/apq8074-v2.dtsi
@@ -0,0 +1,48 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. However, device definitions should be placed inside the
+ * msm8974.dtsi file.
+ */
+
+/include/ "msm8974-v2.dtsi"
+
+/ {
+ qcom,qseecom@a700000 {
+ compatible = "qcom,qseecom";
+ reg = <0x0a700000 0x500000>;
+ reg-names = "secapp-region";
+ qcom,disk-encrypt-pipe-pair = <2>;
+ qcom,hlos-ce-hw-instance = <1>;
+ qcom,qsee-ce-hw-instance = <0>;
+ qcom,msm-bus,name = "qseecom-noc";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <55 512 0 0>,
+ <55 512 3936000 393600>,
+ <55 512 3936000 393600>,
+ <55 512 3936000 393600>;
+ };
+};
+
+&memory_hole {
+ qcom,memblock-remove = <0x0a700000 0x5800000>; /* Address and size of the hole */
+};
+
+&qseecom {
+ status = "disabled";
+};
+
diff --git a/arch/arm/boot/dts/msmzinc-ion.dtsi b/arch/arm/boot/dts/apq8084-ion.dtsi
similarity index 91%
rename from arch/arm/boot/dts/msmzinc-ion.dtsi
rename to arch/arm/boot/dts/apq8084-ion.dtsi
index 4bf078a..aac4230 100644
--- a/arch/arm/boot/dts/msmzinc-ion.dtsi
+++ b/arch/arm/boot/dts/apq8084-ion.dtsi
@@ -20,6 +20,10 @@
reg = <30>;
};
+ qcom,ion-heap@21 { /* SYSTEM CONTIG HEAP */
+ reg = <21>;
+ };
+
qcom,ion-heap@25 { /* IOMMU HEAP */
reg = <25>;
};
diff --git a/arch/arm/boot/dts/apq8084-sim.dts b/arch/arm/boot/dts/apq8084-sim.dts
new file mode 100644
index 0000000..45d625c
--- /dev/null
+++ b/arch/arm/boot/dts/apq8084-sim.dts
@@ -0,0 +1,73 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "apq8084.dtsi"
+
+/ {
+ model = "Qualcomm APQ 8084 Simulator";
+ compatible = "qcom,apq8084-sim", "qcom,apq8084", "qcom,sim";
+ qcom,msm-id = <178 0 0>;
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ uart0: serial@f991f000 {
+ status = "ok";
+ };
+};
+
+&sdcc1 {
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <800 500000>;
+
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <250 154000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 200000000>;
+ qcom,sup-voltages = <2950 2950>;
+ qcom,nonremovable;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+ status = "ok";
+};
+
+&sdcc2 {
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
+ qcom,vdd-io-lpm-sup;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 200000000>;
+ qcom,sup-voltages = <2950 2950>;
+ qcom,xpc;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+ qcom,current-limit = <800>;
+
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/msmzinc.dtsi b/arch/arm/boot/dts/apq8084.dtsi
similarity index 77%
rename from arch/arm/boot/dts/msmzinc.dtsi
rename to arch/arm/boot/dts/apq8084.dtsi
index 642597d..c3c3759 100644
--- a/arch/arm/boot/dts/msmzinc.dtsi
+++ b/arch/arm/boot/dts/apq8084.dtsi
@@ -11,11 +11,11 @@
*/
/include/ "skeleton.dtsi"
-/include/ "msmzinc-ion.dtsi"
+/include/ "apq8084-ion.dtsi"
/ {
- model = "Qualcomm MSM ZINC";
- compatible = "qcom,msmzinc";
+ model = "Qualcomm APQ 8084";
+ compatible = "qcom,apq8084";
interrupt-parent = <&intc>;
intc: interrupt-controller@f9000000 {
@@ -83,4 +83,28 @@
qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
};
+ sdcc1: qcom,sdcc@f9824000 {
+ cell-index = <1>; /* SDC1 eMMC slot */
+ compatible = "qcom,msm-sdcc";
+ reg = <0xf9824000 0x800>;
+ reg-names = "core_mem";
+ interrupts = <0 123 0>;
+ interrupt-names = "core_irq";
+
+ qcom,bus-width = <8>;
+ status = "disabled";
+ };
+
+ sdcc2: qcom,sdcc@f98a4000 {
+ cell-index = <2>; /* SDC2 SD card slot */
+ compatible = "qcom,msm-sdcc";
+ reg = <0xf98a4000 0x800>;
+ reg-names = "core_mem";
+ interrupts = <0 125 0>;
+ interrupt-names = "core_irq";
+
+ qcom,bus-width = <4>;
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
index 2825288..c0c9107 100644
--- a/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
@@ -23,7 +23,7 @@
qcom,mdss-pan-porch-values = <164 8 140 1 1 6>;
qcom,mdss-pan-underflow-clr = <0xff>;
qcom,mdss-pan-bl-ctrl = "bl_ctrl_wled";
- qcom,mdss-pan-bl-levels = <1 255>;
+ qcom,mdss-pan-bl-levels = <1 4095>;
qcom,mdss-pan-dsi-mode = <0>;
qcom,mdss-pan-dsi-h-pulse-mode = <1>;
qcom,mdss-pan-dsi-h-power-stop = <0 0 0>;
@@ -38,7 +38,7 @@
qcom,mdss-pan-dsi-stream = <0>;
qcom,mdss-pan-dsi-mdp-tr = <0x0>;
qcom,mdss-pan-dsi-dma-tr = <0x04>;
- qcom,mdss-pan-frame-rate = <60>;
+ qcom,mdss-pan-dsi-frame-rate = <60>;
qcom,panel-phy-regulatorSettings = [07 09 03 00 /* Regualotor settings */
20 00 01];
qcom,panel-phy-timingSettings = [7d 25 1d 00 37 33
diff --git a/arch/arm/boot/dts/dsi-panel-orise-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-orise-720p-video.dtsi
index 7bd95e7..448d357 100644
--- a/arch/arm/boot/dts/dsi-panel-orise-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-orise-720p-video.dtsi
@@ -36,7 +36,7 @@
qcom,mdss-pan-dsi-stream = <0>;
qcom,mdss-pan-dsi-mdp-tr = <0x0>;
qcom,mdss-pan-dsi-dma-tr = <0x04>;
- qcom,mdss-pan-frame-rate = <60>;
+ qcom,mdss-pan-dsi-frame-rate = <60>;
qcom,panel-phy-regulatorSettings = [03 01 01 00 /* Regualotor settings */
20 00 01];
qcom,panel-phy-timingSettings = [69 29 1f 00 55 55
diff --git a/arch/arm/boot/dts/dsi-panel-sharp-qhd-video.dtsi b/arch/arm/boot/dts/dsi-panel-sharp-qhd-video.dtsi
new file mode 100644
index 0000000..f853285
--- /dev/null
+++ b/arch/arm/boot/dts/dsi-panel-sharp-qhd-video.dtsi
@@ -0,0 +1,67 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ qcom,mdss_dsi_sharp_qhd_video {
+ compatible = "qcom,mdss-dsi-panel";
+ label = "sharp QHD LS043T1LE01 video mode dsi panel";
+ status = "disable";
+ qcom,dsi-ctrl-phandle = <&mdss_dsi0>;
+ qcom,enable-gpio = <&msmgpio 58 0>;
+ qcom,rst-gpio = <&pm8941_gpios 19 0>;
+ qcom,mdss-pan-res = <540 960>;
+ qcom,mdss-pan-bpp = <24>;
+ qcom,mdss-pan-dest = "display_1";
+ qcom,mdss-pan-porch-values = <80 32 48 15 10 3>; /* HBP, HPW, HFP, VBP, VPW, VFP */
+ qcom,mdss-pan-underflow-clr = <0xff>;
+ qcom,mdss-pan-bl-ctrl = "bl_ctrl_wled";
+ qcom,mdss-pan-bl-levels = <1 4095>;
+ qcom,mdss-pan-dsi-mode = <0>;
+ qcom,mdss-pan-dsi-h-pulse-mode = <1>;
+ qcom,mdss-pan-dsi-h-power-stop = <0 0 0>;
+ qcom,mdss-pan-dsi-bllp-power-stop = <1 1>;
+ qcom,mdss-pan-dsi-traffic-mode = <0>;
+ qcom,mdss-pan-dsi-dst-format = <3>;
+ qcom,mdss-pan-dsi-vc = <0>;
+ qcom,mdss-pan-dsi-rgb-swap = <2>;
+ qcom,mdss-pan-dsi-data-lanes = <1 1 0 0>;
+ qcom,mdss-pan-dsi-dlane-swap = <0>;
+ qcom,mdss-pan-dsi-t-clk = <0x1c 0x04>;
+ qcom,mdss-pan-dsi-stream = <0>;
+ qcom,mdss-pan-dsi-mdp-tr = <0x04>;
+ qcom,mdss-pan-dsi-dma-tr = <0x04>;
+ qcom,mdss-pan-frame-rate = <60>;
+ qcom,panel-phy-regulatorSettings = [07 09 03 00 /* Regulator settings */
+ 20 00 01];
+ qcom,panel-phy-timingSettings = [46 1d 20 00 39 3a
+ 21 21 32 03 04 00];
+ qcom,panel-phy-strengthCtrl = [ff 06];
+ qcom,panel-phy-bistCtrl = [00 00 b1 ff /* BIST Ctrl settings */
+ 00 00];
+ qcom,panel-phy-laneConfig = [00 00 00 00 00 00 00 01 97 /* lane0 config */
+ 00 00 00 00 05 00 00 01 97 /* lane1 config */
+ 00 00 00 00 0a 00 00 01 97 /* lane2 config */
+ 00 00 00 00 0f 00 00 01 97 /* lane3 config */
+ 00 c0 00 00 00 00 00 01 bb]; /* Clk ln config */
+ qcom,panel-on-cmds = [05 01 00 00 32 02 01 00 /* sw reset */
+ 05 01 00 00 0a 02 11 00 /* exit sleep */
+ 15 01 00 00 0a 02 53 2c /* backlight on */
+ 15 01 00 00 0a 02 51 ff /* brightness max */
+ 05 01 00 00 0a 02 29 00 /* display on */
+ 15 01 00 00 0a 02 ae 03 /* set num of lanes */
+ 15 01 00 00 0a 02 3a 77 /* rgb_888 */];
+ qcom,on-cmds-dsi-state = "DSI_LP_MODE";
+ qcom,panel-off-cmds = [05 01 00 00 0a 02 28 00 /* display off */
+ 05 01 00 00 78 02 10 00 /* enter sleep */];
+ qcom,off-cmds-dsi-state = "DSI_HS_MODE";
+ };
+};
diff --git a/arch/arm/boot/dts/dsi-panel-sim-video.dtsi b/arch/arm/boot/dts/dsi-panel-sim-video.dtsi
index 98074c8..9a734a0 100644
--- a/arch/arm/boot/dts/dsi-panel-sim-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-sim-video.dtsi
@@ -37,7 +37,7 @@
qcom,mdss-pan-dsi-stream = <0>;
qcom,mdss-pan-dsi-mdp-tr = <0x04>;
qcom,mdss-pan-dsi-dma-tr = <0x04>;
- qcom,mdss-pan-frame-rate = <60>;
+ qcom,mdss-pan-dsi-frame-rate = <60>;
qcom,panel-on-cmds = [32 01 00 00 00 02 00 00];
qcom,on-cmds-dsi-state = "DSI_LP_MODE";
qcom,panel-off-cmds = [22 01 00 00 00 02 00 00];
diff --git a/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi
index 42f6033..2937cde 100644
--- a/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi
@@ -40,7 +40,7 @@
qcom,mdss-pan-dsi-stream = <0>;
qcom,mdss-pan-dsi-mdp-tr = <0x0>;
qcom,mdss-pan-dsi-dma-tr = <0x04>;
- qcom,mdss-pan-frame-rate = <60>;
+ qcom,mdss-pan-dsi-frame-rate = <60>;
qcom,panel-phy-regulatorSettings = [07 09 03 00 /* Regualotor settings */
20 00 01];
qcom,panel-phy-timingSettings = [b0 23 1b 00 94 93
diff --git a/arch/arm/boot/dts/mpq8092.dtsi b/arch/arm/boot/dts/mpq8092.dtsi
index 75f168d..5c904b4 100644
--- a/arch/arm/boot/dts/mpq8092.dtsi
+++ b/arch/arm/boot/dts/mpq8092.dtsi
@@ -85,7 +85,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -105,7 +105,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
diff --git a/arch/arm/boot/dts/msm-pm8110-rpm-regulator.dtsi b/arch/arm/boot/dts/msm-pm8110-rpm-regulator.dtsi
new file mode 100644
index 0000000..0de72b0
--- /dev/null
+++ b/arch/arm/boot/dts/msm-pm8110-rpm-regulator.dtsi
@@ -0,0 +1,381 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&rpm_bus {
+ rpm-regulator-smpa1 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ status = "disabled";
+
+ regulator-s1 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_s1";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-smpa3 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <3>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ status = "disabled";
+
+ regulator-s3 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_s3";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-smpa4 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <4>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ status = "disabled";
+
+ regulator-s4 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_s4";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa1 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l1 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l1";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa2 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <2>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l2 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l2";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa3 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <3>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l3 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l3";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa4 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <4>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l4 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l4";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa5 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <5>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l5 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l5";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa6 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <6>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l6 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l6";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa7 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <7>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l7 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l7";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa8 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <8>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <5000>;
+ status = "disabled";
+
+ regulator-l8 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l8";
+ qcom,set = <1>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa9 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <9>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l9 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l9";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa10 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <10>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l10 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l10";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa12 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <12>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l12 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l12";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa14 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <14>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l14 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l14";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa15 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <15>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l15 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l15";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa16 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <16>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l16 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l16";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa17 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <17>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l17 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l17";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa18 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <18>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l18 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l18";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa19 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <19>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l19 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l19";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa20 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <20>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <5000>;
+ status = "disabled";
+
+ regulator-l20 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l20";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa21 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <21>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l21 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l21";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa22 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <22>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l22 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l22";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm-pm8110.dtsi b/arch/arm/boot/dts/msm-pm8110.dtsi
index ec42cfc..b88b991 100644
--- a/arch/arm/boot/dts/msm-pm8110.dtsi
+++ b/arch/arm/boot/dts/msm-pm8110.dtsi
@@ -22,6 +22,100 @@
#address-cells = <1>;
#size-cells = <1>;
+ pm8110_chg: qcom,charger {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-charger";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ qcom,vddmax-mv = <4200>;
+ qcom,vddsafe-mv = <4200>;
+ qcom,vinmin-mv = <4200>;
+ qcom,vbatdet-mv = <4100>;
+ qcom,ibatmax-ma = <1500>;
+ qcom,ibatterm-ma = <200>;
+ qcom,ibatsafe-ma = <1500>;
+ qcom,thermal-mitigation = <1500 700 600 325>;
+ qcom,vbatdet-delta-mv = <350>;
+ qcom,tchg-mins = <150>;
+
+ qcom,chgr@1000 {
+ status = "disabled";
+ reg = <0x1000 0x100>;
+ interrupts = <0x0 0x10 0x0>,
+ <0x0 0x10 0x1>,
+ <0x0 0x10 0x2>,
+ <0x0 0x10 0x3>,
+ <0x0 0x10 0x4>,
+ <0x0 0x10 0x5>,
+ <0x0 0x10 0x6>,
+ <0x0 0x10 0x7>;
+
+ interrupt-names = "vbat-det-lo",
+ "vbat-det-hi",
+ "chgwdog",
+ "state-change",
+ "trkl-chg-on",
+ "fast-chg-on",
+ "chg-failed",
+ "chg-done";
+ };
+
+ qcom,buck@1100 {
+ status = "disabled";
+ reg = <0x1100 0x100>;
+ interrupts = <0x0 0x11 0x0>,
+ <0x0 0x11 0x1>,
+ <0x0 0x11 0x2>,
+ <0x0 0x11 0x3>,
+ <0x0 0x11 0x4>,
+ <0x0 0x11 0x5>,
+ <0x0 0x11 0x6>;
+
+ interrupt-names = "vbat-ov",
+ "vreg-ov",
+ "overtemp",
+ "vchg-loop",
+ "ichg-loop",
+ "ibat-loop",
+ "vdd-loop";
+ };
+
+ qcom,bat-if@1200 {
+ status = "disabled";
+ reg = <0x1200 0x100>;
+ interrupts = <0x0 0x12 0x0>,
+ <0x0 0x12 0x1>,
+ <0x0 0x12 0x2>,
+ <0x0 0x12 0x3>,
+ <0x0 0x12 0x4>;
+
+ interrupt-names = "batt-pres",
+ "bat-temp-ok",
+ "bat-fet-on",
+ "vcp-on",
+ "psi";
+ };
+
+ qcom,usb-chgpth@1300 {
+ status = "disabled";
+ reg = <0x1300 0x100>;
+ interrupts = <0 0x13 0x0>,
+ <0 0x13 0x1>,
+ <0x0 0x13 0x2>;
+
+ interrupt-names = "coarse-det-usb",
+ "usbin-valid",
+ "chg-gone";
+ };
+
+ qcom,chg-misc@1600 {
+ status = "disabled";
+ reg = <0x1600 0x100>;
+ };
+ };
+
pm8110_vadc: vadc@3100 {
compatible = "qcom,qpnp-vadc";
reg = <0x3100 0x100>;
@@ -75,7 +169,6 @@
interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <16>;
qcom,adc-vdd-reference = <1800>;
- qcom,rsense = <1500>;
chan@0 {
label = "internal_rsense";
@@ -88,6 +181,30 @@
qcom,fast-avg-setup = <0>;
};
};
+
+ qcom,pm8110_rtc {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-rtc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ qcom,qpnp-rtc-write = <0>;
+ qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+ qcom,pm8110_rtc_rw@6000 {
+ reg = <0x6000 0x100>;
+ };
+
+ qcom,pm8110_rtc_alarm@6100 {
+ reg = <0x6100 0x100>;
+ interrupts = <0x0 0x61 0x1>;
+ };
+ };
+
+ qcom,leds@a200 {
+ compatible = "qcom,leds-qpnp";
+ reg = <0xa200 0x100>;
+ label = "mpp";
+ };
};
qcom,pm8110@1 {
diff --git a/arch/arm/boot/dts/msm-pm8226.dtsi b/arch/arm/boot/dts/msm-pm8226.dtsi
index 72de900..41920d5 100644
--- a/arch/arm/boot/dts/msm-pm8226.dtsi
+++ b/arch/arm/boot/dts/msm-pm8226.dtsi
@@ -56,16 +56,17 @@
#size-cells = <1>;
status = "disabled";
- qcom,chg-vddmax-mv = <4200>;
- qcom,chg-vddsafe-mv = <4200>;
- qcom,chg-vinmin-mv = <4200>;
- qcom,chg-vbatdet-mv = <4100>;
- qcom,chg-ibatmax-ma = <1500>;
- qcom,chg-ibatterm-ma = <200>;
- qcom,chg-ibatsafe-ma = <1500>;
- qcom,chg-thermal-mitigation = <1500 700 600 325>;
+ qcom,vddmax-mv = <4200>;
+ qcom,vddsafe-mv = <4200>;
+ qcom,vinmin-mv = <4200>;
+ qcom,vbatdet-delta-mv = <150>;
+ qcom,ibatmax-ma = <1500>;
+ qcom,ibatterm-ma = <200>;
+ qcom,ibatsafe-ma = <1500>;
+ qcom,thermal-mitigation = <1500 700 600 325>;
+ qcom,tchg-mins = <150>;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "disabled";
reg = <0x1000 0x100>;
interrupts = <0x0 0x10 0x0>,
@@ -87,7 +88,7 @@
"chg-done";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "disabled";
reg = <0x1100 0x100>;
interrupts = <0x0 0x11 0x0>,
@@ -107,7 +108,7 @@
"vdd-loop";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
status = "disabled";
reg = <0x1200 0x100>;
interrupts = <0x0 0x12 0x0>,
@@ -124,7 +125,7 @@
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "disabled";
reg = <0x1300 0x100>;
interrupts = <0 0x13 0x0>,
@@ -136,7 +137,7 @@
"chg-gone";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "disabled";
reg = <0x1500 0x100>;
interrupts = <0x0 0x15 0x0>,
@@ -162,10 +163,9 @@
qcom,r-sense-uohm = <10000>;
qcom,v-cutoff-uv = <3400000>;
qcom,max-voltage-uv = <4200000>;
- qcom,r-conn-mohm = <18>;
+ qcom,r-conn-mohm = <0>;
qcom,shutdown-soc-valid-limit = <20>;
- qcom,adjust-soc-low-threshold = <25>;
- qcom,adjust-soc-high-threshold = <45>;
+ qcom,adjust-soc-low-threshold = <15>;
qcom,ocv-voltage-high-threshold-uv = <3750000>;
qcom,ocv-voltage-low-threshold-uv = <3650000>;
qcom,low-soc-calculate-soc-threshold = <15>;
@@ -173,6 +173,10 @@
qcom,calculate-soc-ms = <20000>;
qcom,chg-term-ua = <100000>;
qcom,batt-type = <0>;
+ qcom,low-ocv-correction-limit-uv = <100>;
+ qcom,high-ocv-correction-limit-uv = <50>;
+ qcom,hold-soc-est = <3>;
+ qcom,low-voltage-threshold = <3420000>;
qcom,bms-iadc@3800 {
reg = <0x3800 0x100>;
@@ -364,7 +368,6 @@
interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <16>;
qcom,adc-vdd-reference = <1800>;
- qcom,rsense = <1500>;
chan@0 {
label = "internal_rsense";
@@ -683,6 +686,54 @@
label = "wled";
};
+ pwm@b100 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb100 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <0>;
+ };
+
+ pwm@b200 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb200 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <1>;
+ };
+
+ pwm@b300 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb300 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <2>;
+ };
+
+ pwm@b400 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb400 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <3>;
+ };
+
+ pwm@b500 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb500 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <4>;
+ };
+
+ pwm@b600 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb600 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <5>;
+ };
+
regulator@8000 {
regulator-name = "8226_lvs1";
reg = <0x8000 0x100>;
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 6042f23..43b7d03 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -107,8 +107,7 @@
qcom,max-voltage-uv = <4200000>;
qcom,r-conn-mohm = <0>;
qcom,shutdown-soc-valid-limit = <20>;
- qcom,adjust-soc-low-threshold = <25>;
- qcom,adjust-soc-high-threshold = <45>;
+ qcom,adjust-soc-low-threshold = <15>;
qcom,ocv-voltage-high-threshold-uv = <3750000>;
qcom,ocv-voltage-low-threshold-uv = <3650000>;
qcom,low-soc-calculate-soc-threshold = <15>;
@@ -117,6 +116,9 @@
qcom,chg-term-ua = <100000>;
qcom,batt-type = <0>;
qcom,low-voltage-threshold = <3420000>;
+ qcom,low-ocv-correction-limit-uv = <100>;
+ qcom,high-ocv-correction-limit-uv = <50>;
+ qcom,hold-soc-est = <3>;
qcom,bms-iadc@3800 {
reg = <0x3800 0x100>;
@@ -169,21 +171,22 @@
#size-cells = <1>;
status = "disabled";
- qcom,chg-vddmax-mv = <4200>;
- qcom,chg-vddsafe-mv = <4200>;
- qcom,chg-vinmin-mv = <4200>;
- qcom,chg-ibatmax-ma = <1500>;
- qcom,chg-ibatsafe-ma = <1500>;
- qcom,chg-thermal-mitigation = <1500 700 600 325>;
- qcom,chg-cool-bat-decidegc = <100>;
- qcom,chg-cool-bat-mv = <4100>;
- qcom,chg-ibatmax-warm-ma = <350>;
- qcom,chg-warm-bat-decidegc = <450>;
- qcom,chg-warm-bat-mv = <4100>;
- qcom,chg-ibatmax-cool-ma = <350>;
- qcom,chg-vbatdet-delta-mv = <350>;
+ qcom,vddmax-mv = <4200>;
+ qcom,vddsafe-mv = <4200>;
+ qcom,vinmin-mv = <4200>;
+ qcom,ibatmax-ma = <1500>;
+ qcom,ibatsafe-ma = <1500>;
+ qcom,thermal-mitigation = <1500 700 600 325>;
+ qcom,cool-bat-decidegc = <100>;
+ qcom,cool-bat-mv = <4100>;
+ qcom,ibatmax-warm-ma = <350>;
+ qcom,warm-bat-decidegc = <450>;
+ qcom,warm-bat-mv = <4100>;
+ qcom,ibatmax-cool-ma = <350>;
+ qcom,vbatdet-delta-mv = <350>;
+ qcom,tchg-mins = <150>;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "disabled";
reg = <0x1000 0x100>;
interrupts = <0x0 0x10 0x0>,
@@ -205,7 +208,7 @@
"chg-done";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "disabled";
reg = <0x1100 0x100>;
interrupts = <0x0 0x11 0x0>,
@@ -225,7 +228,7 @@
"vdd-loop";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
status = "disabled";
reg = <0x1200 0x100>;
interrupts = <0x0 0x12 0x0>,
@@ -242,7 +245,7 @@
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "disabled";
reg = <0x1300 0x100>;
interrupts = <0 0x13 0x0>,
@@ -254,7 +257,7 @@
"chg-gone";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
status = "disabled";
reg = <0x1400 0x100>;
interrupts = <0x0 0x14 0x0>,
@@ -264,7 +267,7 @@
"dcin-valid";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "disabled";
reg = <0x1500 0x100>;
interrupts = <0x0 0x15 0x0>,
@@ -766,6 +769,17 @@
qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
+
+ chan@39 {
+ label = "usb_id_nopull";
+ reg = <0x39>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
};
iadc@3600 {
@@ -777,7 +791,6 @@
interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <16>;
qcom,adc-vdd-reference = <1800>;
- qcom,rsense = <1500>;
chan@0 {
label = "internal_rsense";
diff --git a/arch/arm/boot/dts/msm8226-camera-sensor-cdp-qrd.dtsi b/arch/arm/boot/dts/msm8226-camera-sensor-cdp.dtsi
similarity index 97%
copy from arch/arm/boot/dts/msm8226-camera-sensor-cdp-qrd.dtsi
copy to arch/arm/boot/dts/msm8226-camera-sensor-cdp.dtsi
index b7f837f..c47d48d 100644
--- a/arch/arm/boot/dts/msm8226-camera-sensor-cdp-qrd.dtsi
+++ b/arch/arm/boot/dts/msm8226-camera-sensor-cdp.dtsi
@@ -25,7 +25,7 @@
actuator0: qcom,actuator@6e {
cell-index = <3>;
- reg = <0x6c 0x0>;
+ reg = <0x6c>;
compatible = "qcom,actuator";
qcom,cci-master = <0>;
};
@@ -38,7 +38,7 @@
qcom,csid-sd-index = <0>;
qcom,actuator-src = <&actuator0>;
qcom,led-flash-src = <&led_flash0>;
- qcom,mount-angle = <90>;
+ qcom,mount-angle = <0>;
qcom,sensor-name = "ov8825";
cam_vdig-supply = <&pm8226_l5>;
cam_vana-supply = <&pm8226_l19>;
@@ -74,7 +74,7 @@
qcom,slave-id = <0x20 0x0 0x9724>;
qcom,csiphy-sd-index = <1>;
qcom,csid-sd-index = <0>;
- qcom,mount-angle = <90>;
+ qcom,mount-angle = <0>;
qcom,sensor-name = "ov9724";
cam_vdig-supply = <&pm8226_l5>;
cam_vana-supply = <&pm8226_l19>;
diff --git a/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi
index 02089be..1f7ba89 100644
--- a/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi
@@ -25,7 +25,7 @@
actuator0: qcom,actuator@6e {
cell-index = <3>;
- reg = <0x6c 0x0>;
+ reg = <0x6c>;
compatible = "qcom,actuator";
qcom,cci-master = <0>;
};
@@ -38,7 +38,7 @@
qcom,csid-sd-index = <0>;
qcom,actuator-src = <&actuator0>;
qcom,led-flash-src = <&led_flash0>;
- qcom,mount-angle = <90>;
+ qcom,mount-angle = <0>;
qcom,sensor-name = "ov8825";
cam_vdig-supply = <&pm8226_l5>;
cam_vana-supply = <&pm8226_l19>;
diff --git a/arch/arm/boot/dts/msm8226-camera-sensor-cdp-qrd.dtsi b/arch/arm/boot/dts/msm8226-camera-sensor-qrd.dtsi
similarity index 98%
rename from arch/arm/boot/dts/msm8226-camera-sensor-cdp-qrd.dtsi
rename to arch/arm/boot/dts/msm8226-camera-sensor-qrd.dtsi
index b7f837f..5ea02b4 100644
--- a/arch/arm/boot/dts/msm8226-camera-sensor-cdp-qrd.dtsi
+++ b/arch/arm/boot/dts/msm8226-camera-sensor-qrd.dtsi
@@ -25,7 +25,7 @@
actuator0: qcom,actuator@6e {
cell-index = <3>;
- reg = <0x6c 0x0>;
+ reg = <0x6c>;
compatible = "qcom,actuator";
qcom,cci-master = <0>;
};
@@ -38,7 +38,7 @@
qcom,csid-sd-index = <0>;
qcom,actuator-src = <&actuator0>;
qcom,led-flash-src = <&led_flash0>;
- qcom,mount-angle = <90>;
+ qcom,mount-angle = <270>;
qcom,sensor-name = "ov8825";
cam_vdig-supply = <&pm8226_l5>;
cam_vana-supply = <&pm8226_l19>;
diff --git a/arch/arm/boot/dts/msm8226-cdp.dts b/arch/arm/boot/dts/msm8226-cdp.dts
index 52c591f..b203540 100644
--- a/arch/arm/boot/dts/msm8226-cdp.dts
+++ b/arch/arm/boot/dts/msm8226-cdp.dts
@@ -13,11 +13,11 @@
/dts-v1/;
/include/ "msm8226.dtsi"
/include/ "dsi-panel-nt35590-720p-video.dtsi"
-/include/ "msm8226-camera-sensor-cdp-qrd.dtsi"
+/include/ "msm8226-camera-sensor-cdp.dtsi"
/ {
model = "Qualcomm MSM 8226 CDP";
- compatible = "qcom,msm8226-cdp", "qcom,msm8226";
+ compatible = "qcom,msm8226-cdp", "qcom,msm8226", "qcom,cdp";
qcom,msm-id = <145 1 0>;
serial@f991f000 {
@@ -33,11 +33,11 @@
compatible = "synaptics,rmi4";
reg = <0x20>;
interrupt-parent = <&msmgpio>;
- interrupts = <17 0x2>;
+ interrupts = <17 0x2008>;
vdd-supply = <&pm8226_l19>;
vcc_i2c-supply = <&pm8226_lvs1>;
synaptics,reset-gpio = <&msmgpio 16 0x00>;
- synaptics,irq-gpio = <&msmgpio 17 0x00>;
+ synaptics,irq-gpio = <&msmgpio 17 0x2008>;
synaptics,button-map = <139 102 158>;
synaptics,i2c-pull-up;
synaptics,reg-en;
@@ -99,9 +99,9 @@
"MIC BIAS1 Internal1", "Handset Mic",
"AMIC2", "MIC BIAS2 External",
"MIC BIAS2 External", "Headset Mic",
- "AMIC3", "MIC BIAS2 External",
- "MIC BIAS2 External", "ANCRight Headset Mic",
"AMIC4", "MIC BIAS2 External",
+ "MIC BIAS2 External", "ANCRight Headset Mic",
+ "AMIC5", "MIC BIAS2 External",
"MIC BIAS2 External", "ANCLeft Headset Mic",
"DMIC1", "MIC BIAS1 External",
"MIC BIAS1 External", "Digital Mic1",
@@ -131,7 +131,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -140,6 +140,30 @@
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
qcom,nonremovable;
+ status = "disabled";
+};
+
+&sdhc_1 {
+ vdd-supply = <&pm8226_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <800 500000>;
+
+ vdd-io-supply = <&pm8226_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <250 154000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
status = "ok";
};
@@ -156,7 +180,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -177,6 +201,38 @@
interrupt-names = "core_irq", "bam_irq", "status_irq";
cd-gpios = <&msmgpio 38 0x1>;
+ status = "disabled";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8226_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
+
+ vdd-io-supply = <&pm8226_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msmgpio 38 0x3>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
status = "ok";
};
@@ -210,7 +266,7 @@
qcom,mode = <1>; /* Digital output */
qcom,output-type = <0>; /* CMOS logic */
qcom,pull = <5>; /* QPNP_PIN_PULL_NO*/
- qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,vin-sel = <3>; /* QPNP_PIN_VIN3 */
qcom,out-strength = <3>;/* QPNP_PIN_OUT_STRENGTH_HIGH */
qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
qcom,master-en = <1>; /* Enable GPIO */
@@ -220,7 +276,7 @@
qcom,mode = <1>;
qcom,output-type = <0>;
qcom,pull = <5>;
- qcom,vin-sel = <2>;
+ qcom,vin-sel = <3>;
qcom,out-strength = <3>;
qcom,src-sel = <2>;
qcom,master-en = <1>;
@@ -272,6 +328,6 @@
};
&pm8226_chg {
- qcom,chg-charging-disabled;
- qcom,chg-use-default-batt-values;
+ qcom,charging-disabled;
+ qcom,use-default-batt-values;
};
diff --git a/arch/arm/boot/dts/msm8226-fluid.dts b/arch/arm/boot/dts/msm8226-fluid.dts
index af86922..d70ef6e 100644
--- a/arch/arm/boot/dts/msm8226-fluid.dts
+++ b/arch/arm/boot/dts/msm8226-fluid.dts
@@ -15,10 +15,14 @@
/ {
model = "Qualcomm MSM 8226 FLUID";
- compatible = "qcom,msm8226-fluid", "qcom,msm8226";
+ compatible = "qcom,msm8226-fluid", "qcom,msm8226", "qcom,fluid";
qcom,msm-id = <145 3 0>;
serial@f991f000 {
status = "disabled";
};
-};
\ No newline at end of file
+};
+
+&pm8226_bms {
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/msm8226-gpu.dtsi b/arch/arm/boot/dts/msm8226-gpu.dtsi
index 6a8ba3a..bb2f0d4 100644
--- a/arch/arm/boot/dts/msm8226-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8226-gpu.dtsi
@@ -33,7 +33,6 @@
/* Bus Scale Settings */
qcom,msm-bus,name = "grp3d";
qcom,msm-bus,num-cases = <4>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>, <89 604 0 0>,
diff --git a/arch/arm/boot/dts/msm8226-ion.dtsi b/arch/arm/boot/dts/msm8226-ion.dtsi
index 9a35507..f433a49 100644
--- a/arch/arm/boot/dts/msm8226-ion.dtsi
+++ b/arch/arm/boot/dts/msm8226-ion.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,10 @@
reg = <30>;
};
+ qcom,ion-heap@21 { /* SYSTEM CONTIG HEAP */
+ reg = <21>;
+ };
+
qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;
@@ -46,5 +50,19 @@
qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
qcom,memory-reservation-size = <0x314000>;
};
+ qcom,ion-heap@23 { /* OTHER PIL HEAP */
+ compatible = "qcom,msm-ion-reserve";
+ reg = <23>;
+ qcom,heap-align = <0x1000>;
+ qcom,memory-fixed = <0x06400000 0x2000000>;
+ };
+ qcom,ion-heap@26 { /* MODEM PIL HEAP */
+ compatible = "qcom,msm-ion-reserve";
+ reg = <26>;
+ qcom,heap-align = <0x1000>;
+ qcom,memory-fixed = <0x08400000 0x4E00000>;
+
+ };
+
};
};
diff --git a/arch/arm/boot/dts/msm8226-mdss.dtsi b/arch/arm/boot/dts/msm8226-mdss.dtsi
index 7ab76f1..5aa39d3 100644
--- a/arch/arm/boot/dts/msm8226-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8226-mdss.dtsi
@@ -65,7 +65,6 @@
vddio-supply = <&pm8226_l8>;
vdda-supply = <&pm8226_l4>;
qcom,supply-names = "vdd", "vddio", "vdda";
- qcom,supply-type = "regulator", "regulator", "regulator";
qcom,supply-min-voltage-level = <2800000 1800000 1200000>;
qcom,supply-max-voltage-level = <2800000 1800000 1200000>;
qcom,supply-peak-current = <150000 100000 100000>;
diff --git a/arch/arm/boot/dts/msm8226-mtp.dts b/arch/arm/boot/dts/msm8226-mtp.dts
index 68fa8ba..1f8a773 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dts
+++ b/arch/arm/boot/dts/msm8226-mtp.dts
@@ -17,7 +17,7 @@
/ {
model = "Qualcomm MSM 8226 MTP";
- compatible = "qcom,msm8226-mtp", "qcom,msm8226";
+ compatible = "qcom,msm8226-mtp", "qcom,msm8226", "qcom,mtp";
qcom,msm-id = <145 8 0>;
serial@f991f000 {
@@ -33,11 +33,11 @@
compatible = "synaptics,rmi4";
reg = <0x20>;
interrupt-parent = <&msmgpio>;
- interrupts = <17 0x2>;
+ interrupts = <17 0x2008>;
vdd-supply = <&pm8226_l19>;
vcc_i2c-supply = <&pm8226_lvs1>;
synaptics,reset-gpio = <&msmgpio 16 0x00>;
- synaptics,irq-gpio = <&msmgpio 17 0x00>;
+ synaptics,irq-gpio = <&msmgpio 17 0x2008>;
synaptics,button-map = <139 102 158>;
synaptics,i2c-pull-up;
synaptics,reg-en;
@@ -123,7 +123,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -132,6 +132,30 @@
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
qcom,nonremovable;
+ status = "disabled";
+};
+
+&sdhc_1 {
+ vdd-supply = <&pm8226_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <800 500000>;
+
+ vdd-io-supply = <&pm8226_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <250 154000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
status = "ok";
};
@@ -148,7 +172,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -166,6 +190,38 @@
interrupt-names = "core_irq", "bam_irq", "status_irq";
cd-gpios = <&msmgpio 38 0x1>;
+ status = "disabled";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8226_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
+
+ vdd-io-supply = <&pm8226_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msmgpio 38 0x3>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
status = "ok";
};
@@ -203,7 +259,7 @@
qcom,mode = <1>; /* Digital output */
qcom,output-type = <0>; /* CMOS logic */
qcom,pull = <5>; /* QPNP_PIN_PULL_NO*/
- qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,vin-sel = <3>; /* QPNP_PIN_VIN3 */
qcom,out-strength = <3>;/* QPNP_PIN_OUT_STRENGTH_HIGH */
qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
qcom,master-en = <1>; /* Enable GPIO */
@@ -213,7 +269,7 @@
qcom,mode = <1>;
qcom,output-type = <0>;
qcom,pull = <5>;
- qcom,vin-sel = <2>;
+ qcom,vin-sel = <3>;
qcom,out-strength = <3>;
qcom,src-sel = <2>;
qcom,master-en = <1>;
@@ -299,3 +355,11 @@
qcom,fast-avg-setup = <0>;
};
};
+
+&pm8226_bms {
+ status = "ok";
+};
+
+&pm8226_chg {
+ qcom,charging-disabled;
+};
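
The &sdhc_2 overrides in these board files also turn the SD-card controller
into a small interrupt nexus: with #address-cells = <0> and
#interrupt-cells = <1>, each child specifier is a single index, and the
interrupt-map routes those indices onto the real parents. Read one entry at a
time as <child-index &parent parent-specifier>:

	interrupt-map = <0 &intc    0 125 0	/* index 0 -> GIC SPI 125, "hc_irq"  */
			 1 &intc    0 221 0	/* index 1 -> GIC SPI 221, "pwr_irq" */
			 2 &msmgpio 38 0x3>;	/* index 2 -> GPIO 38, "status_irq"  */

The GIC parent takes the usual three cells (type, number, flags, with type 0
selecting a shared peripheral interrupt), while the GPIO parent takes two; in
the usual trigger encoding 0x3 requests both edges, so card insertion and
removal are both reported. The same GPIO 38 is also listed as cd-gpios so the
driver can read the card-detect level directly.
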
diff --git a/arch/arm/boot/dts/msm8226-pm.dtsi b/arch/arm/boot/dts/msm8226-pm.dtsi
index 99f0631..97b22aa 100644
--- a/arch/arm/boot/dts/msm8226-pm.dtsi
+++ b/arch/arm/boot/dts/msm8226-pm.dtsi
@@ -157,6 +157,8 @@
qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
+ qcom,irqs-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <1>;
qcom,ss-power = <784>;
qcom,energy-overhead = <190000>;
@@ -165,22 +167,6 @@
qcom,lpm-level@1 {
reg = <0x1>;
- qcom,mode = "retention";
- qcom,xo = "xo_on";
- qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <5>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
- qcom,latency-us = <75>;
- qcom,ss-power = <735>;
- qcom,energy-overhead = <77341>;
- qcom,time-overhead = <105>;
- };
-
-
- qcom,lpm-level@2 {
- reg = <0x2>;
qcom,mode = "standalone_pc";
qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
@@ -188,29 +174,33 @@
qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
- qcom,latency-us = <95>;
+ qcom,irqs-detectable;
+ qcom,gpio-detectable;
+ qcom,latency-us = <3000>;
qcom,ss-power = <725>;
qcom,energy-overhead = <99500>;
- qcom,time-overhead = <130>;
+ qcom,time-overhead = <3130>;
};
- qcom,lpm-level@3 {
- reg = <0x3>;
+ qcom,lpm-level@2 {
+ reg = <0x2>;
qcom,mode = "pc";
qcom,xo = "xo_on";
- qcom,l2 = "l2_cache_gdhs";
+ qcom,l2 = "l2_cache_retention";
qcom,vdd-mem-upper-bound = <5>; /* SUPER TURBO */
qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
- qcom,latency-us = <2000>;
+ qcom,irqs-detectable;
+ qcom,gpio-detectable;
+ qcom,latency-us = <8000>;
qcom,ss-power = <138>;
qcom,energy-overhead = <1208400>;
- qcom,time-overhead = <3200>;
+ qcom,time-overhead = <9200>;
};
- qcom,lpm-level@4 {
- reg = <0x4>;
+ qcom,lpm-level@3 {
+ reg = <0x3>;
qcom,mode = "pc";
qcom,xo = "xo_on";
qcom,l2 = "l2_cache_pc";
@@ -218,25 +208,42 @@
qcom,vdd-mem-lower-bound = <2>; /* SVS SOC */
qcom,vdd-dig-upper-bound = <3>; /* NORMAL */
qcom,vdd-dig-lower-bound = <2>; /* SVS SOC */
- qcom,latency-us = <3000>;
+ qcom,irqs-detectable;
+ qcom,gpio-detectable;
+ qcom,latency-us = <9000>;
qcom,ss-power = <110>;
qcom,energy-overhead = <1250300>;
- qcom,time-overhead = <3500>;
+ qcom,time-overhead = <9500>;
+ };
+
+ qcom,lpm-level@4 {
+ reg = <0x4>;
+ qcom,mode = "pc";
+ qcom,xo = "xo_off";
+ qcom,l2 = "l2_cache_pc";
+ qcom,vdd-mem-upper-bound = <5>; /* SUPER TURBO */
+ qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
+ qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
+ qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
+ qcom,latency-us = <16300>;
+ qcom,ss-power = <63>;
+ qcom,energy-overhead = <2128000>;
+ qcom,time-overhead = <24200>;
};
qcom,lpm-level@5 {
reg = <0x5>;
qcom,mode = "pc";
qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_gdhs";
- qcom,vdd-mem-upper-bound = <5>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
- qcom,latency-us = <3000>;
- qcom,ss-power = <68>;
- qcom,energy-overhead = <1350200>;
- qcom,time-overhead = <4000>;
+ qcom,l2 = "l2_cache_pc";
+ qcom,vdd-mem-upper-bound = <3>; /* NORMAL */
+ qcom,vdd-mem-lower-bound = <2>; /* SVS SOC */
+ qcom,vdd-dig-upper-bound = <3>; /* NORMAL */
+ qcom,vdd-dig-lower-bound = <2>; /* SVS SOC */
+ qcom,latency-us = <24000>;
+ qcom,ss-power = <10>;
+ qcom,energy-overhead = <3202600>;
+ qcom,time-overhead = <33000>;
};
qcom,lpm-level@6 {
@@ -244,44 +251,14 @@
qcom,mode = "pc";
qcom,xo = "xo_off";
qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <5>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
- qcom,latency-us = <10300>;
- qcom,ss-power = <63>;
- qcom,energy-overhead = <2128000>;
- qcom,time-overhead = <18200>;
- };
-
- qcom,lpm-level@7 {
- reg = <0x7>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <3>; /* NORMAL */
- qcom,vdd-mem-lower-bound = <2>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <3>; /* NORMAL */
- qcom,vdd-dig-lower-bound = <2>; /* SVS SOC */
- qcom,latency-us = <18000>;
- qcom,ss-power = <10>;
- qcom,energy-overhead = <3202600>;
- qcom,time-overhead = <27000>;
- };
-
- qcom,lpm-level@8 {
- reg = <0x8>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
qcom,vdd-mem-upper-bound = <2>; /* SVS SOC */
qcom,vdd-mem-lower-bound = <0>; /* RETENTION */
qcom,vdd-dig-upper-bound = <2>; /* SVS SOC */
qcom,vdd-dig-lower-bound = <0>; /* RETENTION */
- qcom,latency-us = <20000>;
+ qcom,latency-us = <26000>;
qcom,ss-power = <2>;
qcom,energy-overhead = <4252000>;
- qcom,time-overhead = <32000>;
+ qcom,time-overhead = <38000>;
};
};
@@ -389,6 +366,18 @@
reg = <0xfe805664 0x40>;
qcom,pc-mode = "tz_l2_int";
qcom,use-sync-timer;
+ qcom,pc-resets-timer;
+ };
+
+ qcom,rpm-log@fc19dc00 {
+ compatible = "qcom,rpm-log";
+ reg = <0xfc19dc00 0x4000>;
+ qcom,rpm-addr-phys = <0xfc000000>;
+ qcom,offset-version = <4>;
+ qcom,offset-page-buffer-addr = <36>;
+ qcom,offset-log-len = <40>;
+ qcom,offset-log-len-mask = <44>;
+ qcom,offset-page-indices = <56>;
};
qcom,rpm-stats@0xfc19dbd0{
diff --git a/arch/arm/boot/dts/msm8226-qrd.dts b/arch/arm/boot/dts/msm8226-qrd.dts
index 618412a..660fb3e 100644
--- a/arch/arm/boot/dts/msm8226-qrd.dts
+++ b/arch/arm/boot/dts/msm8226-qrd.dts
@@ -13,11 +13,11 @@
/dts-v1/;
/include/ "msm8226.dtsi"
/include/ "dsi-panel-nt35590-720p-video.dtsi"
-/include/ "msm8226-camera-sensor-cdp-qrd.dtsi"
+/include/ "msm8226-camera-sensor-qrd.dtsi"
/ {
model = "Qualcomm MSM 8226 QRD";
- compatible = "qcom,msm8226-qrd", "qcom,msm8226";
+ compatible = "qcom,msm8226-qrd", "qcom,msm8226", "qcom,qrd";
qcom,msm-id = <145 11 0>;
serial@f991f000 {
@@ -33,11 +33,11 @@
compatible = "synaptics,rmi4";
reg = <0x20>;
interrupt-parent = <&msmgpio>;
- interrupts = <17 0x2>;
+ interrupts = <17 0x2008>;
vdd-supply = <&pm8226_l19>;
vcc_i2c-supply = <&pm8226_lvs1>;
synaptics,reset-gpio = <&msmgpio 16 0x00>;
- synaptics,irq-gpio = <&msmgpio 17 0x00>;
+ synaptics,irq-gpio = <&msmgpio 17 0x2008>;
synaptics,button-map = <139 102 158>;
synaptics,i2c-pull-up;
synaptics,reg-en;
@@ -123,7 +123,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -132,6 +132,30 @@
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
qcom,nonremovable;
+ status = "disabled";
+};
+
+&sdhc_1 {
+ vdd-supply = <&pm8226_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <800 500000>;
+
+ vdd-io-supply = <&pm8226_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <250 154000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
status = "ok";
};
@@ -148,7 +172,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -169,6 +193,38 @@
interrupt-names = "core_irq", "bam_irq", "status_irq";
cd-gpios = <&msmgpio 38 0x1>;
+ status = "disabled";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8226_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
+
+ vdd-io-supply = <&pm8226_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msmgpio 38 0x3>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
status = "ok";
};
@@ -213,7 +269,7 @@
qcom,mode = <1>; /* Digital output */
qcom,output-type = <0>; /* CMOS logic */
qcom,pull = <5>; /* QPNP_PIN_PULL_NO*/
- qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,vin-sel = <3>; /* QPNP_PIN_VIN3 */
qcom,out-strength = <3>;/* QPNP_PIN_OUT_STRENGTH_HIGH */
qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
qcom,master-en = <1>; /* Enable GPIO */
@@ -223,7 +279,7 @@
qcom,mode = <1>;
qcom,output-type = <0>;
qcom,pull = <5>;
- qcom,vin-sel = <2>;
+ qcom,vin-sel = <3>;
qcom,out-strength = <3>;
qcom,src-sel = <2>;
qcom,master-en = <1>;
diff --git a/arch/arm/boot/dts/msm8226-regulator.dtsi b/arch/arm/boot/dts/msm8226-regulator.dtsi
index f24cea1..70731d2 100644
--- a/arch/arm/boot/dts/msm8226-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8226-regulator.dtsi
@@ -44,6 +44,9 @@
qcom,pvs-corner-ceiling-nom = <975000 1075000 1200000 1200000>;
qcom,pvs-corner-ceiling-fast = <900000 1000000 1140000 1140000>;
vdd-apc-supply = <&pm8226_s2>;
+ vdd-mx-supply = <&pm8226_l3_ao>;
+ qcom,vdd-mx-vmax = <1350000>;
+ qcom,vdd-mx-vmin-method = <1>;
};
};
diff --git a/arch/arm/boot/dts/msm8226-sim.dts b/arch/arm/boot/dts/msm8226-sim.dts
index b6590b3..00c0e2e 100644
--- a/arch/arm/boot/dts/msm8226-sim.dts
+++ b/arch/arm/boot/dts/msm8226-sim.dts
@@ -16,7 +16,7 @@
/ {
model = "Qualcomm MSM 8226 Simulator";
- compatible = "qcom,msm8226-sim", "qcom,msm8226";
+ compatible = "qcom,msm8226-sim", "qcom,msm8226", "qcom,sim";
qcom,msm-id = <145 16 0>;
serial@f991f000 {
@@ -36,7 +36,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
vdd-supply = <&pm8226_l17>;
@@ -62,7 +62,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
diff --git a/arch/arm/boot/dts/msm8226-smp2p.dtsi b/arch/arm/boot/dts/msm8226-smp2p.dtsi
index 91029e2..079e4ca 100644
--- a/arch/arm/boot/dts/msm8226-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm8226-smp2p.dtsi
@@ -148,6 +148,29 @@
gpios = <&smp2pgpio_smp2p_2_out 0 0>;
};
+ /* SMP2P SSR Driver for inbound entry from lpass. */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* SMP2P SSR Driver for outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
smp2pgpio_smp2p_4_in: qcom,smp2pgpio-smp2p-4-in {
compatible = "qcom,smp2pgpio";
qcom,entry-name = "smp2p";
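
The new slave-kernel/master-kernel entries expose SMP2P bits shared with the
LPASS (remote processor id 2) as virtual GPIO and interrupt controllers, so
the subsystem-restart code can use ordinary GPIO specifiers for its
error-fatal, proxy-unvote and force-stop signalling. The consumer side of
exactly these entries is wired up further down in msm8226.dtsi:

	/* adsp PIL node: each SMP2P bit is addressed as <&controller bit flags> */
	qcom,gpio-err-fatal    = <&smp2pgpio_ssr_smp2p_2_in  0 0>;	/* inbound bit 0  */
	qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in  2 0>;	/* inbound bit 2  */
	qcom,gpio-force-stop   = <&smp2pgpio_ssr_smp2p_2_out 0 0>;	/* outbound bit 0 */
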
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 75cf6e5..aa03951 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -49,6 +49,8 @@
aliases {
spi0 = &spi_0;
+ sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
+ sdhc2 = &sdhc_2; /* SDC2 SD card slot */
};
memory {
@@ -65,6 +67,65 @@
clock-frequency = <19200000>;
};
+ timer@f9020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xf9020000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@f9021000 {
+ frame-number = <0>;
+ interrupts = <0 8 0x4>,
+ <0 7 0x4>;
+ reg = <0xf9021000 0x1000>,
+ <0xf9022000 0x1000>;
+ };
+
+ frame@f9023000 {
+ frame-number = <1>;
+ interrupts = <0 9 0x4>;
+ reg = <0xf9023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9024000 {
+ frame-number = <2>;
+ interrupts = <0 10 0x4>;
+ reg = <0xf9024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9025000 {
+ frame-number = <3>;
+ interrupts = <0 11 0x4>;
+ reg = <0xf9025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9026000 {
+ frame-number = <4>;
+ interrupts = <0 12 0x4>;
+ reg = <0xf9026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9027000 {
+ frame-number = <5>;
+ interrupts = <0 13 0x4>;
+ reg = <0xf9027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9028000 {
+ frame-number = <6>;
+ interrupts = <0 14 0x4>;
+ reg = <0xf9028000 0x1000>;
+ status = "disabled";
+ };
+ };
+
qcom,vidc@fdc00000 {
compatible = "qcom,msm-vidc";
reg = <0xfdc00000 0xff000>;
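
The timer@f9020000 node added above describes the memory-mapped ARM generic
timer block rather than the per-CPU timers: the parent reg is the control
frame, and each frame@ child is one of up to eight timer frames selected by
frame-number. Assuming the standard arm,armv7-timer-mem layout, frame 0 reads
as follows, with interrupts listed in the binding's physical-then-virtual
order and the two register pages giving the full and user-level views:

	frame@f9021000 {
		frame-number = <0>;
		interrupts = <0 8 0x4>,		/* physical timer, GIC SPI 8 */
			     <0 7 0x4>;		/* virtual timer,  GIC SPI 7 */
		reg = <0xf9021000 0x1000>,	/* CNTBase, full register view */
		      <0xf9022000 0x1000>;	/* CNTEL0Base, user-level view */
	};

Only frame 0 is left enabled; frames 1 through 6 are declared for completeness
but carry status = "disabled".
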
@@ -168,6 +229,8 @@
HSUSB_3p3-supply = <&pm8226_l20>;
qcom,vdd-voltage-level = <1 5 7>;
+ qcom,hsusb-otg-phy-init-seq =
+ <0x44 0x80 0x68 0x81 0x24 0x82 0x13 0x83 0xffffffff>;
qcom,hsusb-otg-phy-type = <2>;
qcom,hsusb-otg-mode = <1>;
qcom,hsusb-otg-otg-control = <2>;
@@ -176,7 +239,6 @@
qcom,msm-bus,name = "usb2";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<87 512 0 0>,
@@ -235,6 +297,12 @@
qcom,cdc-vdd-cx-voltage = <1200000 1200000>;
qcom,cdc-vdd-cx-current = <10000>;
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-vdd-h",
+ "cdc-vdd-px",
+ "cdc-vdd-a-1p2v",
+ "cdc-vdd-cx";
+
qcom,cdc-micbias-ldoh-v = <0x3>;
qcom,cdc-micbias-cfilt1-mv = <1800>;
qcom,cdc-micbias-cfilt2-mv = <1800>;
@@ -413,8 +481,9 @@
qcom,wcnss-wlan@fb000000 {
compatible = "qcom,wcnss_wlan";
- reg = <0xfb000000 0x280000>;
- reg-names = "wcnss_mmio";
+ reg = <0xfb000000 0x280000>,
+ <0xf9011008 0x04>;
+ reg-names = "wcnss_mmio", "wcnss_fiq";
interrupts = <0 145 0 0 146 0>;
interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
@@ -536,6 +605,18 @@
status = "disabled";
};
+ sdhc_1: sdhci@f9824900 {
+ compatible = "qcom,sdhci-msm";
+ reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 123 0>, <0 138 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <8>;
+ status = "disabled";
+ };
+
sdcc2: qcom,sdcc@f98a4000 {
cell-index = <2>; /* SDC2 SD card slot */
compatible = "qcom,msm-sdcc";
@@ -551,6 +632,18 @@
status = "disabled";
};
+ sdhc_2: sdhci@f98a4900 {
+ compatible = "qcom,sdhci-msm";
+ reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 0>, <0 221 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <4>;
+ status = "disabled";
+ };
+
spmi_bus: qcom,spmi@fc4c0000 {
cell-index = <0>;
compatible = "qcom,spmi-pmic-arb";
@@ -642,8 +735,9 @@
qcom,firmware-name = "wcnss";
- /* GPIO input from wcnss */
+ /* GPIO inputs from wcnss */
qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_4_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_4_in 2 0>;
/* GPIO output to wcnss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_4_out 0 0>;
@@ -663,6 +757,13 @@
interrupts = <0 162 1>;
qcom,firmware-name = "adsp";
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
};
qcom,mss@fc880000 {
@@ -686,8 +787,9 @@
qcom,firmware-name = "mba";
qcom,pil-self-auth;
- /* GPIO input from mss */
+ /* GPIO inputs from mss */
qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
/* GPIO output to mss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
@@ -695,7 +797,7 @@
qcom,msm-mem-hole {
compatible = "qcom,msm-mem-hole";
- qcom,memblock-remove = <0x8400000 0x7b00000>; /* Address and Size of Hole */
+ qcom,memblock-remove = <0x6400000 0x9b00000>; /* Address and Size of Hole */
};
tsens: tsens@fc4a8000 {
@@ -797,6 +899,60 @@
reg = <0xfd484000 0x400>;
qcom,num-locks = <8>;
};
+
+ qcom,qseecom@d980000 {
+ compatible = "qcom,qseecom";
+ reg = <0xd980000 0x256000>;
+ reg-names = "secapp-region";
+ qcom,disk-encrypt-pipe-pair = <2>;
+ qcom,hlos-ce-hw-instance = <0>;
+ qcom,qsee-ce-hw-instance = <0>;
+ qcom,msm-bus,name = "qseecom-noc";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <55 512 0 0>,
+ <55 512 3936000 393600>,
+ <55 512 3936000 393600>,
+ <55 512 3936000 393600>;
+ };
+
+ qcom,qcrypto@fd404000 {
+ compatible = "qcom,qcrypto";
+ reg = <0xfd400000 0x20000>,
+ <0xfd404000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 207 0>;
+ qcom,bam-pipe-pair = <2>;
+ qcom,ce-hw-instance = <0>;
+ qcom,ce-hw-shared;
+ qcom,msm-bus,name = "qcrypto-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <56 512 0 0>,
+ <56 512 3936000 393600>;
+ };
+
+ qcom,qcedev@fd400000 {
+ compatible = "qcom,qcedev";
+ reg = <0xfd400000 0x20000>,
+ <0xfd404000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 207 0>;
+ qcom,bam-pipe-pair = <1>;
+ qcom,ce-hw-instance = <0>;
+ qcom,ce-hw-shared;
+ qcom,msm-bus,name = "qcedev-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <56 512 0 0>,
+ <56 512 3936000 393600>;
+ };
};
&gdsc_venus {
@@ -916,28 +1072,38 @@
qcom,fast-avg-setup = <0>;
};
+ chan@39 {
+ label = "usb_id_nopull";
+ reg = <0x39>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
};
&pm8226_chg {
status = "ok";
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "ok";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "ok";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
status = "ok";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "ok";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "ok";
};
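
The qseecom, qcrypto and qcedev nodes introduced above are msm-bus clients,
and their bandwidth tables all have the same shape: vectors-KBps is a flat
list of num-cases x num-paths entries, each entry being
<master-id slave-id ab ib>, where ab and ib are the arbitrated and
instantaneous bandwidth votes in KBps. With one path and two cases, qcrypto's
table is simply an idle vote and an active vote:

	qcom,msm-bus,num-cases = <2>;
	qcom,msm-bus,num-paths = <1>;
	qcom,msm-bus,vectors-KBps =
		<56 512       0      0>,	/* case 0: idle, no bandwidth requested */
		<56 512 3936000 393600>;	/* case 1: active crypto traffic        */

The first two cells name the master and slave ports on the bus fabric; 512 is
the EBI/DDR slave port, the usual destination for these bandwidth votes.
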
diff --git a/arch/arm/boot/dts/msm8610-bus.dtsi b/arch/arm/boot/dts/msm8610-bus.dtsi
new file mode 100644
index 0000000..50066f3
--- /dev/null
+++ b/arch/arm/boot/dts/msm8610-bus.dtsi
@@ -0,0 +1,978 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ msm-mmss-noc@fc478000 {
+ compatible = "msm-bus-fabric";
+ reg = <0xfc478000 0x00004000>;
+ cell-id = <2048>;
+ label = "msm_mmss_noc";
+ qcom,fabclk-dual = "bus_clk";
+ qcom,fabclk-active = "bus_a_clk";
+ qcom,ntieredslaves = <0>;
+ qcom,qos-freq = <4800>;
+ qcom,hw-sel = "NoC";
+ qcom,rpm-en;
+
+ mas-mdp-port0 {
+ cell-id = <22>;
+ label = "mas-mdp-port0";
+ qcom,masterp = <2>;
+ qcom,tier = <2>;
+ qcom,hw-sel = "NoC";
+ qcom,perm-mode = "Bypass";
+ qcom,mode = "Bypass";
+ qcom,qport = <0>;
+ qcom,ws = <10000>;
+ qcom,mas-hw-id = <8>;
+ };
+
+ mas-vfe {
+ cell-id = <29>;
+ label = "mas-vfe";
+ qcom,masterp = <3>;
+ qcom,tier = <2>;
+ qcom,hw-sel = "NoC";
+ qcom,perm-mode = "Bypass";
+ qcom,mode = "Bypass";
+ qcom,ws = <10000>;
+ qcom,qport = <2>;
+ qcom,mas-hw-id = <11>;
+ };
+
+ mas-mdpe {
+ cell-id = <92>;
+ label = "mas-mdpe";
+ qcom,masterp = <4>;
+ qcom,tier = <2>;
+ qcom,hw-sel = "NoC";
+ qcom,perm-mode = "Bypass";
+ qcom,mode = "Bypass";
+ qcom,ws = <10000>;
+ qcom,qport = <7>;
+ qcom,mas-hw-id = <11>;
+ };
+
+ fab-bimc {
+ cell-id = <0>;
+ label = "fab-bimc";
+ qcom,gateway;
+ qcom,slavep = <16>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <16>;
+ };
+
+ slv-camera-cfg {
+ cell-id = <589>;
+ label = "slv-camera-cfg";
+ qcom,slavep = <0>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <3>;
+ };
+
+ slv-display-cfg {
+ cell-id = <590>;
+ label = "slv-display-cfg";
+ qcom,slavep = <1>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <4>;
+ };
+
+ slv-cpr-cfg {
+ cell-id = <592>;
+ label = "slv-cpr-cfg";
+ qcom,slavep = <3>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <6>;
+ };
+
+ slv-cpr-xpu-cfg {
+ cell-id = <593>;
+ label = "slv-cpr-xpu-cfg";
+ qcom,slavep = <4>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <7>;
+ };
+
+ slv-misc-cfg {
+ cell-id = <594>;
+ label = "slv-misc-cfg";
+ qcom,slavep = <6>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <8>;
+ };
+
+ slv-misc-xpu-cfg {
+ cell-id = <595>;
+ label = "slv-misc-xpu-cfg";
+ qcom,slavep = <7>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <9>;
+ };
+
+ slv-gfx3d-cfg {
+ cell-id = <598>;
+ label = "slv-gfx3d-cfg";
+ qcom,slavep = <9>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <11>;
+ };
+
+ slv-mmss-clk-cfg {
+ cell-id = <599>;
+ label = "slv-mmss-clk-cfg";
+ qcom,slavep = <11>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <12>;
+ };
+
+ slv-mmss-clk-xpu-cfg {
+ cell-id = <600>;
+ label = "slv-mmss-clk-xpu-cfg";
+ qcom,slavep = <12>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <13>;
+ };
+
+ slv-mnoc-mpu-cfg {
+ cell-id = <601>;
+ label = "slv-mnoc-mpu-cfg";
+ qcom,slavep = <13>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <14>;
+ };
+
+ slv-onoc-mpu-cfg {
+ cell-id = <602>;
+ label = "slv-onoc-mpu-cfg";
+ qcom,slavep = <14>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <15>;
+ };
+
+ slv-service-mnoc {
+ cell-id = <603>;
+ label = "slv-service-mnoc";
+ qcom,slavep = <18>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <17>;
+ };
+
+ slv-dsi-cfg {
+ cell-id = <649>;
+ label = "slv-dsi-cfg";
+ qcom,slavep = <19>;
+ qcom,tier = <2>;
+ qcom,buswidth = <16>;
+ qcom,hw-sel = "NoC";
+ qcom,slv-hw-id = <19>;
+ };
+ };
+
+ msm-sys-noc@fc460000 {
+ compatible = "msm-bus-fabric";
+ reg = <0xfc460000 0x00004000>;
+ cell-id = <1024>;
+ label = "msm_sys_noc";
+ qcom,fabclk-dual = "bus_clk";
+ qcom,fabclk-active = "bus_a_clk";
+ qcom,ntieredslaves = <0>;
+ qcom,qos-freq = <4800>;
+ qcom,hw-sel = "NoC";
+ qcom,rpm-en;
+
+ mas-lpass-ahb {
+ cell-id = <52>;
+ label = "mas-lpass-ahb";
+ qcom,masterp = <0>;
+ qcom,tier = <2>;
+ qcom,mas-hw-id = <18>;
+ };
+
+ mas-qdss-bam {
+ cell-id = <53>;
+ label = "mas-qdss-bam";
+ qcom,masterp = <1>;
+ qcom,tier = <2>;
+ qcom,mas-hw-id = <19>;
+ };
+
+ mas-snoc-cfg {
+ cell-id = <54>;
+ label = "mas-snoc-cfg";
+ qcom,masterp = <2>;
+ qcom,tier = <2>;
+ qcom,mas-hw-id = <20>;
+ };
+
+ fab-bimc {
+ cell-id = <0>;
+ label= "fab-bimc";
+ qcom,gateway;
+ qcom,slavep = <7>;
+ qcom,masterp = <3>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <21>;
+ qcom,slv-hw-id = <24>;
+ };
+
+ fab-cnoc {
+ cell-id = <5120>;
+ label = "fab-cnoc";
+ qcom,gateway;
+ qcom,slavep = <8>;
+ qcom,masterp = <4>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <22>;
+ qcom,slv-hw-id = <25>;
+ };
+
+ fab-pnoc {
+ cell-id = <4096>;
+ label = "fab-pnoc";
+ qcom,gateway;
+ qcom,slavep = <10>;
+ qcom,masterp = <10>;
+ qcom,buswidth = <8>;
+ qcom,qport = <8>;
+ qcom,mas-hw-id = <29>;
+ qcom,slv-hw-id = <28>;
+ qcom,mode = "Fixed";
+ qcom,prio-rd = <2>;
+ qcom,prio-wr = <2>;
+ };
+
+ fab-ovnoc {
+ cell-id = <6144>;
+ label = "fab-ovnoc";
+ qcom,gateway;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <53>;
+ qcom,slv-hw-id = <77>;
+ };
+
+ mas-crypto-core0 {
+ cell-id = <55>;
+ label = "mas-crypto-core0";
+ qcom,masterp = <5>;
+ qcom,tier = <2>;
+ qcom,mas-hw-id = <23>;
+ };
+
+ mas-mss {
+ cell-id = <38>;
+ label = "mas-mss";
+ qcom,masterp = <7>;
+ qcom,tier = <2>;
+ qcom,mas-hw-id = <26>;
+ };
+
+ mas-mss-nav {
+ cell-id = <57>;
+ label = "mas-mss-nav";
+ qcom,masterp = <8>;
+ qcom,tier = <2>;
+ qcom,mas-hw-id = <27>;
+ };
+
+ mas-ocmem-dma {
+ cell-id = <58>;
+ label = "mas-ocmem-dma";
+ qcom,masterp = <9>;
+ qcom,tier = <2>;
+ qcom,mode = "Fixed";
+ qcom,qport = <7>;
+ qcom,mas-hw-id = <28>;
+ };
+
+ mas-wcss {
+ cell-id = <59>;
+ label = "mas-wcss";
+ qcom,masterp = <11>;
+ qcom,tier = <2>;
+ qcom,mas-hw-id = <30>;
+ };
+
+ mas-qdss-etr {
+ cell-id = <60>;
+ label = "mas-qdss-etr";
+ qcom,masterp = <12>;
+ qcom,tier = <2>;
+ qcom,qport = <10>;
+ qcom,mode = "Fixed";
+ qcom,mas-hw-id = <31>;
+ };
+
+ slv-ocmem {
+ cell-id = <604>;
+ label = "slv-gmem";
+ qcom,slavep = <15>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <18>;
+ };
+
+ slv-ampss {
+ cell-id = <520>;
+ label = "slv-ampss";
+ qcom,slavep = <1>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <20>;
+ };
+
+ slv-lpass {
+ cell-id = <522>;
+ label = "slv-lpass";
+ qcom,slavep = <2>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <21>;
+ };
+
+ slv-wcss {
+ cell-id = <584>;
+ label = "slv-wcss";
+ qcom,slavep = <6>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <23>;
+ };
+
+ slv-ocimem {
+ cell-id = <585>;
+ label = "slv-ocimem";
+ qcom,slavep = <10>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <26>;
+ };
+
+ slv-service-snoc {
+ cell-id = <587>;
+ label = "slv-service-snoc";
+ qcom,slavep = <11>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <29>;
+ };
+
+ slv-qdss-stm {
+ cell-id = <588>;
+ label = "slv-qdss-stm";
+ qcom,slavep = <12>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <30>;
+ };
+
+ };
+
+ msm-periph-noc@fc468000 {
+ compatible = "msm-bus-fabric";
+ reg = <0xfc468000 0x00004000>;
+ cell-id = <4096>;
+ label = "msm_periph_noc";
+ qcom,fabclk-dual = "bus_clk";
+ qcom,fabclk-active = "bus_a_clk";
+ qcom,ntieredslaves = <0>;
+ qcom,hw-sel = "NoC";
+ qcom,rpm-en;
+
+ mas-pnoc-cfg {
+ cell-id = <88>;
+ label = "mas-pnoc-cfg";
+ qcom,masterp = <7>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <43>;
+ };
+
+ mas-sdcc-1 {
+ cell-id = <78>;
+ label = "mas-sdcc-1";
+ qcom,masterp = <0>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <33>;
+ };
+
+ mas-sdcc-2 {
+ cell-id = <81>;
+ label = "mas-sdcc-2";
+ qcom,masterp = <2>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <35>;
+ };
+
+ mas-blsp-1 {
+ cell-id = <86>;
+ label = "mas-blsp-1";
+ qcom,masterp = <5>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <41>;
+ };
+
+ mas-usb-hs {
+ cell-id = <87>;
+ label = "mas-usb-hs";
+ qcom,masterp = <6>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <42>;
+ };
+
+ fab-snoc {
+ cell-id = <1024>;
+ label = "fab-snoc";
+ qcom,gateway;
+ qcom,slavep = <12>;
+ qcom,masterp = <8>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <45>;
+ qcom,mas-hw-id = <44>;
+ };
+
+ slv-sdcc-1 {
+ cell-id = <606>;
+ label = "slv-sdcc-1";
+ qcom,slavep = <0>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <31>;
+ };
+
+ slv-sdcc-2 {
+ cell-id = <608>;
+ label = "slv-sdcc-2";
+ qcom,slavep = <2>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <33>;
+ };
+
+ slv-blsp-1 {
+ cell-id = <613>;
+ label = "slv-blsp-1";
+ qcom,slavep = <5>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <39>;
+ };
+
+ slv-usb-hs {
+ cell-id = <614>;
+ label = "slv-usb-hs";
+ qcom,slavep = <6>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <40>;
+ };
+
+ slv-pdm {
+ cell-id = <615>;
+ label = "slv-pdm";
+ qcom,slavep = <7>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <41>;
+ };
+
+ slv-periph-apu-cfg {
+ cell-id = <616>;
+ label = "slv-periph-apu-cfg";
+ qcom,slavep = <8>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <42>;
+ };
+
+ slv-pnoc-mpu-cfg {
+ cell-id = <617>;
+ label = "slv-pnoc-mpu-cfg";
+ qcom,slavep = <9>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <43>;
+ };
+
+ slv-prng {
+ cell-id = <618>;
+ label = "slv-prng";
+ qcom,slavep = <10>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <44>;
+ };
+
+ slv-service-pnoc {
+ cell-id = <619>;
+ label = "slv-service-pnoc";
+ qcom,slavep = <12>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <46>;
+ };
+
+ };
+
+ msm-config-noc@fc480000 {
+ compatible = "msm-bus-fabric";
+ reg = <0xfc480000 0x00004000>;
+ cell-id = <5120>;
+ label = "msm_config_noc";
+ qcom,fabclk-dual = "bus_clk";
+ qcom,fabclk-active = "bus_a_clk";
+ qcom,ntieredslaves = <0>;
+ qcom,hw-sel = "NoC";
+ qcom,rpm-en;
+
+ mas-rpm-inst {
+ cell-id = <72>;
+ label = "mas-rpm-inst";
+ qcom,masterp = <0>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <45>;
+ };
+
+ mas-rpm-data {
+ cell-id = <73>;
+ label = "mas-rpm-data";
+ qcom,masterp = <1>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <46>;
+ };
+
+ mas-rpm-sys {
+ cell-id = <74>;
+ label = "mas-rpm-sys";
+ qcom,masterp = <2>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <47>;
+ };
+
+ mas-dehr {
+ cell-id = <75>;
+ label = "mas-dehr";
+ qcom,masterp = <3>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <48>;
+ };
+
+ mas-qdss-dsp {
+ cell-id = <76>;
+ label = "mas-qdss-dap";
+ qcom,masterp = <4>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <49>;
+ };
+
+ mas-spdm {
+ cell-id = <36>;
+ label = "mas-spdm";
+ qcom,masterp = <5>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <50>;
+ };
+
+ mas-tic {
+ cell-id = <77>;
+ label = "mas-tic";
+ qcom,masterp = <6>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <51>;
+ };
+
+ slv-clk-ctl {
+ cell-id = <620>;
+ label = "slv-clk-ctl";
+ qcom,slavep = <1>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <47>;
+ };
+
+ slv-cnoc-mss {
+ cell-id = <621>;
+ label = "slv-cnoc-mss";
+ qcom,slavep = <2>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <48>;
+ };
+
+ slv-security {
+ cell-id = <622>;
+ label = "slv-security";
+ qcom,slavep = <3>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <49>;
+ };
+
+ slv-tcsr {
+ cell-id = <623>;
+ label = "slv-tcsr";
+ qcom,slavep = <4>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <50>;
+ };
+
+ slv-tlmm {
+ cell-id = <624>;
+ label = "slv-tlmm";
+ qcom,slavep = <5>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <51>;
+ };
+
+ slv-crypto-0-cfg {
+ cell-id = <625>;
+ label = "slv-crypto-0-cfg";
+ qcom,slavep = <6>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <52>;
+ };
+
+ slv-imem-cfg {
+ cell-id = <627>;
+ label = "slv-imem-cfg";
+ qcom,slavep = <7>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <54>;
+ };
+
+ slv-message-ram {
+ cell-id = <628>;
+ label = "slv-message-ram";
+ qcom,slavep = <8>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <55>;
+ };
+
+ slv-bimc-cfg {
+ cell-id = <629>;
+ label = "slv-bimc-cfg";
+ qcom,slavep = <9>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <56>;
+ };
+
+ slv-boot-rom {
+ cell-id = <630>;
+ label = "slv-boot-rom";
+ qcom,slavep = <10>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <57>;
+ };
+
+ slv-pmic-arb {
+ cell-id = <632>;
+ label = "slv-pmic-arb";
+ qcom,slavep = <12>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <59>;
+ };
+
+ slv-spdm-wrapper {
+ cell-id = <633>;
+ label = "slv-spdm-wrapper";
+ qcom,slavep = <13>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <60>;
+ };
+
+ slv-dehr-cfg {
+ cell-id = <634>;
+ label = "slv-dehr-cfg";
+ qcom,slavep = <14>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <61>;
+ };
+
+ slv-mpm {
+ cell-id = <536>;
+ label = "slv-mpm";
+ qcom,slavep = <15>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <62>;
+ };
+
+ slv-qdss-cfg {
+ cell-id = <635>;
+ label = "slv-qdss-cfg";
+ qcom,slavep = <16>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <63>;
+ };
+
+ slv-rbcpr-cfg {
+ cell-id = <636>;
+ label = "slv-rbcpr-cfg";
+ qcom,slavep = <17>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <64>;
+ };
+
+ slv-rbcpr-qdss-apu-cfg {
+ cell-id = <637>;
+ label = "slv-rbcpr-qdss-apu-cfg";
+ qcom,slavep = <18>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <65>;
+ };
+
+ fab-snoc {
+ cell-id = <1024>;
+ label = "fab-snoc";
+ qcom,gateway;
+ qcom,slavep = <26>;
+ qcom,masterp = <7>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,mas-hw-id = <52>;
+ qcom,slv-hw-id = <75>;
+ };
+
+ slv-cnoc-mnoc-mmss-cfg {
+ cell-id = <631>;
+ label = "slv-cnoc-mnoc-mmss-cfg";
+ qcom,slavep = <11>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <58>;
+ };
+
+ slv-cnoc-mnoc-cfg {
+ cell-id = <640>;
+ label = "slv-cnoc-mnoc-cfg";
+ qcom,slavep = <19>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <66>;
+ };
+
+ slv-pnoc-cfg {
+ cell-id = <641>;
+ label = "slv-pnoc-cfg";
+ qcom,slavep = <21>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <69>;
+ };
+
+ slv-snoc-mpu-cfg {
+ cell-id = <638>;
+ label = "slv-snoc-mpu-cfg";
+ qcom,slavep = <20>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <67>;
+ };
+
+ slv-snoc-cfg {
+ cell-id = <642>;
+ label = "slv-snoc-cfg";
+ qcom,slavep = <22>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <70>;
+ };
+
+ slv-phy-apu-cfg {
+ cell-id = <644>;
+ label = "slv-phy-apu-cfg";
+ qcom,slavep = <23>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <72>;
+ };
+
+ slv-ebi1-phy-cfg {
+ cell-id = <645>;
+ label = "slv-ebi1-phy-cfg";
+ qcom,slavep = <24>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <73>;
+ };
+
+ slv-rpm {
+ cell-id = <534>;
+ label = "slv-rpm";
+ qcom,slavep = <25>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <74>;
+ };
+
+ slv-service-cnoc {
+ cell-id = <646>;
+ label = "slv-service-cnoc";
+ qcom,slavep = <27>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <76>;
+ };
+
+ };
+
+ msm-bimc@0xfc380000 {
+ compatible = "msm-bus-fabric";
+ reg = <0xfc380000 0x0006A000>;
+ cell-id = <0>;
+ label = "msm_bimc";
+ qcom,fabclk-dual = "mem_clk";
+ qcom,fabclk-active = "mem_a_clk";
+ qcom,ntieredslaves = <0>;
+ qcom,qos-freq = <4800>;
+ qcom,hw-sel = "BIMC";
+ qcom,rpm-en;
+
+ mas-ampss-m0 {
+ cell-id = <1>;
+ label = "mas-ampss-m0";
+ qcom,masterp = <0>;
+ qcom,tier = <2>;
+ qcom,hw-sel = "BIMC";
+ qcom,mode = "Fixed";
+ qcom,qport = <0>;
+ qcom,ws = <10000>;
+ qcom,mas-hw-id = <0>;
+ qcom,prio-rd = <1>;
+ qcom,prio-wr = <1>;
+ };
+
+ mas-mss-proc {
+ cell-id = <65>;
+ label = "mas-mss-proc";
+ qcom,masterp = <3>;
+ qcom,tier = <2>;
+ qcom,hw-sel = "RPM";
+ qcom,mas-hw-id = <1>;
+ };
+
+ fab-mmss-noc {
+ cell-id = <2048>;
+ label = "fab_mmss_noc";
+ qcom,masterp = <1>;
+ qcom,qport = <1>;
+ qcom,buswidth = <8>;
+ qcom,ws = <10000>;
+ qcom,mas-hw-id = <2>;
+ qcom,hw-sel = "BIMC";
+ qcom,mode = "Bypass";
+ };
+
+ fab-snoc {
+ cell-id = <1024>;
+ label = "fab-snoc";
+ qcom,gateway;
+ qcom,slavep = <2>;
+ qcom,masterp = <2>;
+ qcom,qport = <2>;
+ qcom,buswidth = <8>;
+ qcom,ws = <10000>;
+ qcom,mas-hw-id = <3>;
+ qcom,slv-hw-id = <2>;
+ };
+
+ mas-lpass-proc {
+ cell-id = <11>;
+ label = "mas-lpass-proc";
+ qcom,masterp = <4>;
+ qcom,tier = <2>;
+ qcom,qport = <4>;
+ qcom,mas-hw-id = <25>;
+ qcom,mode = "Fixed";
+ qcom,prio-rd = <1>;
+ qcom,prio-wr = <1>;
+ };
+
+ mas-gfx3d {
+ cell-id = <26>;
+ label = "mas-gfx3d";
+ qcom,masterp = <5>;
+ qcom,tier = <2>;
+ qcom,hw-sel = "NoC";
+ qcom,perm-mode = "Bypass";
+ qcom,mode = "Bypass";
+ qcom,ws = <10000>;
+ qcom,qport = <5>;
+ qcom,prio-rd = <1>;
+ qcom,prio-wr = <1>;
+ qcom,mas-hw-id = <6>;
+ };
+
+
+ slv-ebi-ch0 {
+ cell-id = <512>;
+ label = "slv-ebi-ch0";
+ qcom,slavep = <0>;
+ qcom,tier = <2>;
+ qcom,buswidth = <8>;
+ qcom,slv-hw-id = <0>;
+ };
+ };
+
+};
+
+
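
The new msm8610-bus.dtsi provides the fabric side of the msm-bus topology:
each fabric node (BIMC, system NoC, peripheral NoC, config NoC, MMSS NoC)
enumerates its masters (mas-*), slaves (slv-*) and gateways to neighbouring
fabrics (fab-*). Read per the msm-bus fabric binding these nodes target,
cell-id is the identifier that client vectors-KBps tables quote,
qcom,masterp/qcom,slavep are the physical port indices on that fabric, and
the remaining fields describe the QoS programming for the port. A trimmed
master node, annotated:

	mas-ampss-m0 {
		cell-id = <1>;		/* ID quoted by clients' bandwidth votes */
		qcom,masterp = <0>;	/* physical master port on this fabric   */
		qcom,qport = <0>;	/* QoS port to program                   */
		qcom,hw-sel = "BIMC";	/* QoS registers live in the BIMC        */
		qcom,mode = "Fixed";	/* fixed-priority arbitration            */
		qcom,prio-rd = <1>;	/* read priority                         */
		qcom,prio-wr = <1>;	/* write priority                        */
		qcom,ws = <10000>;	/* bandwidth accounting window size      */
	};
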
diff --git a/arch/arm/boot/dts/msm8610-cdp.dts b/arch/arm/boot/dts/msm8610-cdp.dts
index 390c02a..9b114cc 100644
--- a/arch/arm/boot/dts/msm8610-cdp.dts
+++ b/arch/arm/boot/dts/msm8610-cdp.dts
@@ -16,11 +16,107 @@
/ {
model = "Qualcomm MSM 8610 CDP";
- compatible = "qcom,msm8610-cdp", "qcom,msm8610";
- qcom,msm-id = <147 1 0>;
+ compatible = "qcom,msm8610-cdp", "qcom,msm8610", "qcom,cdp";
+ qcom,msm-id = <147 1 0>, <165 1 0>, <161 1 0>, <162 1 0>,
+ <163 1 0>, <164 1 0>, <166 1 0>;
- serial@f991f000 {
+ serial@f991e000 {
status = "ok";
};
};
+&spmi_bus {
+ qcom,pm8110@0 {
+ qcom,leds@a200 {
+ status = "okay";
+ qcom,led_mpp_3 {
+ label = "mpp";
+ linux,name = "wled-backlight";
+ linux-default-trigger = "none";
+ qcom,default-state = "on";
+ qcom,max-current = <40>;
+ qcom,id = <6>;
+ qcom,source-sel = <1>;
+ qcom,mode-ctrl = <0x10>;
+ };
+ };
+ };
+};
+
+&sdhc_1 {
+ vdd-supply = <&pm8110_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2900000 2900000>;
+ qcom,vdd-current-level = <200 400000>;
+
+ vdd-io-supply = <&pm8110_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <200 60000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
+ status = "ok";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8110_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <15000 400000>;
+
+ vdd-io-supply = <&pm8110_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <200 50000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msmgpio 42 0x3>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msmgpio 42 0x1>;
+
+ status = "ok";
+};
+
+&pm8110_chg {
+ status = "ok";
+ qcom,charging-disabled;
+ qcom,use-default-batt-values;
+
+ qcom,chgr@1000 {
+ status = "ok";
+ };
+
+ qcom,buck@1100 {
+ status = "ok";
+ };
+
+ qcom,usb-chgpth@1300 {
+ status = "ok";
+ };
+
+ qcom,chg-misc@1600 {
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/msm8610-coresight.dtsi b/arch/arm/boot/dts/msm8610-coresight.dtsi
index 89a00f1..a0a2c14 100644
--- a/arch/arm/boot/dts/msm8610-coresight.dtsi
+++ b/arch/arm/boot/dts/msm8610-coresight.dtsi
@@ -113,7 +113,7 @@
coresight-nr-inports = <4>;
coresight-outports = <0>;
coresight-child-list = <&funnel_in1>;
- coresight-child-ports = <5>;
+ coresight-child-ports = <6>;
};
stm: stm@fc302000 {
@@ -126,7 +126,7 @@
coresight-name = "coresight-stm";
coresight-nr-inports = <0>;
coresight-outports = <0>;
- coresight-child-list = <&funnel_in1>;
+ coresight-child-list = <&funnel_in0>;
coresight-child-ports = <7>;
};
diff --git a/arch/arm/boot/dts/msm8610-gpu.dtsi b/arch/arm/boot/dts/msm8610-gpu.dtsi
index f3a8259..5580f73 100644
--- a/arch/arm/boot/dts/msm8610-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8610-gpu.dtsi
@@ -27,13 +27,13 @@
qcom,idle-timeout = <8>; /* <HZ/12> */
qcom,nap-allowed = <1>;
qcom,strtstp-sleepwake;
- qcom,clk-map = <0x000001E>; /* KGSL_CLK_CORE |
- KGSL_CLK_IFACE | KGSL_CLK_MEM | KGSL_CLK_MEM_IFACE */
+ qcom,clk-map = <0x000005E>; /* KGSL_CLK_CORE |
+ KGSL_CLK_IFACE | KGSL_CLK_MEM | KGSL_CLK_MEM_IFACE |
+ KGSL_CLK_ALT_MEM_IFACE */
/* Bus Scale Settings */
qcom,msm-bus,name = "grp3d";
qcom,msm-bus,num-cases = <4>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>,
diff --git a/arch/arm/boot/dts/msm8610-ion.dtsi b/arch/arm/boot/dts/msm8610-ion.dtsi
index 107961d..41b58da 100644
--- a/arch/arm/boot/dts/msm8610-ion.dtsi
+++ b/arch/arm/boot/dts/msm8610-ion.dtsi
@@ -20,6 +20,10 @@
reg = <30>;
};
+ qcom,ion-heap@21 { /* SYSTEM CONTIG HEAP */
+ reg = <21>;
+ };
+
qcom,ion-heap@25 { /* IOMMU HEAP */
reg = <25>;
};
@@ -31,14 +35,6 @@
qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
qcom,memory-reservation-size = <0x100000>;
};
-
- qcom,ion-heap@28 { /* AUDIO HEAP */
- compatible = "qcom,msm-ion-reserve";
- reg = <28>;
- qcom,heap-align = <0x1000>;
- qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
- qcom,memory-reservation-size = <0x314000>;
- };
};
};
diff --git a/arch/arm/boot/dts/msm8610-mtp.dts b/arch/arm/boot/dts/msm8610-mtp.dts
index 70ac0e8..3a26376 100644
--- a/arch/arm/boot/dts/msm8610-mtp.dts
+++ b/arch/arm/boot/dts/msm8610-mtp.dts
@@ -16,11 +16,110 @@
/ {
model = "Qualcomm MSM 8610 MTP";
- compatible = "qcom,msm8610-mtp", "qcom,msm8610";
- qcom,msm-id = <147 8 0>;
+ compatible = "qcom,msm8610-mtp", "qcom,msm8610", "qcom,mtp";
+ qcom,msm-id = <147 8 0>, <165 8 0>, <161 8 0>, <162 8 0>,
+ <163 8 0>, <164 8 0>, <166 8 0>;
- serial@f991f000 {
+ serial@f991e000 {
status = "ok";
};
};
+&spmi_bus {
+ qcom,pm8110@0 {
+ qcom,leds@a200 {
+ status = "okay";
+ qcom,led_mpp_3 {
+ label = "mpp";
+ linux,name = "wled-backlight";
+ linux-default-trigger = "none";
+ qcom,default-state = "on";
+ qcom,max-current = <40>;
+ qcom,id = <6>;
+ qcom,source-sel = <1>;
+ qcom,mode-ctrl = <0x10>;
+ };
+ };
+ };
+};
+
+&sdhc_1 {
+ vdd-supply = <&pm8110_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2900000 2900000>;
+ qcom,vdd-current-level = <200 400000>;
+
+ vdd-io-supply = <&pm8110_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <200 60000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
+ status = "ok";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8110_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <15000 400000>;
+
+ vdd-io-supply = <&pm8110_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <200 50000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msmgpio 42 0x3>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msmgpio 42 0x1>;
+
+ status = "ok";
+};
+
+&pm8110_chg {
+ status = "ok";
+ qcom,charging-disabled;
+
+ qcom,chgr@1000 {
+ status = "ok";
+ };
+
+ qcom,buck@1100 {
+ status = "ok";
+ };
+
+ qcom,bat-if@1200 {
+ status = "ok";
+ };
+
+ qcom,usb-chgpth@1300 {
+ status = "ok";
+ };
+
+ qcom,chg-misc@1600 {
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/msm8610-pm.dtsi b/arch/arm/boot/dts/msm8610-pm.dtsi
index 27af9a9..e8849f6 100644
--- a/arch/arm/boot/dts/msm8610-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-pm.dtsi
@@ -21,13 +21,13 @@
qcom,core-id = <0>;
qcom,saw2-ver-reg = <0xfd0>;
qcom,saw2-cfg = <0x01>;
- qcom,saw2-spm-dly= <0x20000400>;
- qcom,saw2-spm-ctl = <0x1>;
- qcom,saw2-spm-cmd-wfi = [60 03 60 76 76 0b 0f];
- qcom,saw2-spm-cmd-spc = [00 20 10 80 90 5b 60 03 60 3b 76 76 94
- 5b 80 10 2b 30 06 26 30 0f];
- qcom,saw2-spm-cmd-pc = [00 20 10 80 90 5b 60 07 3b 76 76 0b 94
- 5b 80 10 2b 30 06 26 30 0f];
+ qcom,saw2-spm-dly= <0x3c102800>;
+ qcom,saw2-spm-ctl = <0x0>;
+ qcom,saw2-spm-cmd-wfi = [60 03 60 0b 0f];
+ qcom,saw2-spm-cmd-spc = [00 20 10 80 30 90 5b 60 03 60 3b 76 76
+ 0b 94 5b 80 10 06 26 30 0f];
+ qcom,saw2-spm-cmd-pc = [00 20 10 80 30 90 5b 60 07 60 3b 76 76
+ 0b 94 5b 80 10 06 26 30 0f];
};
qcom,spm@f9099000 {
@@ -38,13 +38,13 @@
qcom,core-id = <1>;
qcom,saw2-ver-reg = <0xfd0>;
qcom,saw2-cfg = <0x01>;
- qcom,saw2-spm-dly= <0x20000400>;
- qcom,saw2-spm-ctl = <0x1>;
- qcom,saw2-spm-cmd-wfi = [60 03 60 76 76 0b 0f];
- qcom,saw2-spm-cmd-spc = [00 20 10 80 90 5b 60 03 60 3b 76 76 94
- 5b 80 10 2b 30 06 26 30 0f];
- qcom,saw2-spm-cmd-pc = [00 20 10 80 90 5b 60 07 3b 76 76 0b 94
- 5b 80 10 2b 30 06 26 30 0f];
+ qcom,saw2-spm-dly= <0x3c102800>;
+ qcom,saw2-spm-ctl = <0x0>;
+ qcom,saw2-spm-cmd-wfi = [60 03 60 0b 0f];
+ qcom,saw2-spm-cmd-spc = [00 20 10 80 30 90 5b 60 03 60 3b 76 76
+ 0b 94 5b 80 10 06 26 30 0f];
+ qcom,saw2-spm-cmd-pc = [00 20 10 80 30 90 5b 60 07 60 3b 76 76
+ 0b 94 5b 80 10 06 26 30 0f];
};
qcom,spm@f90a9000 {
@@ -55,13 +55,13 @@
qcom,core-id = <2>;
qcom,saw2-ver-reg = <0xfd0>;
qcom,saw2-cfg = <0x01>;
- qcom,saw2-spm-dly= <0x20000400>;
- qcom,saw2-spm-ctl = <0x1>;
- qcom,saw2-spm-cmd-wfi = [60 03 60 76 76 0b 0f];
- qcom,saw2-spm-cmd-spc = [00 20 10 80 90 5b 60 03 60 3b 76 76 94
- 5b 80 10 2b 30 06 26 30 0f];
- qcom,saw2-spm-cmd-pc = [00 20 10 80 90 5b 60 07 3b 76 76 0b 94
- 5b 80 10 2b 30 06 26 30 0f];
+ qcom,saw2-spm-dly= <0x3c102800>;
+ qcom,saw2-spm-ctl = <0x0>;
+ qcom,saw2-spm-cmd-wfi = [60 03 60 0b 0f];
+ qcom,saw2-spm-cmd-spc = [00 20 10 80 30 90 5b 60 03 60 3b 76 76
+ 0b 94 5b 80 10 06 26 30 0f];
+ qcom,saw2-spm-cmd-pc = [00 20 10 80 30 90 5b 60 07 60 3b 76 76
+ 0b 94 5b 80 10 06 26 30 0f];
};
qcom,spm@f90b9000 {
@@ -72,13 +72,13 @@
qcom,core-id = <3>;
qcom,saw2-ver-reg = <0xfd0>;
qcom,saw2-cfg = <0x01>;
- qcom,saw2-spm-dly= <0x20000400>;
- qcom,saw2-spm-ctl = <0x1>;
- qcom,saw2-spm-cmd-wfi = [60 03 60 76 76 0b 0f];
- qcom,saw2-spm-cmd-spc = [00 20 10 80 90 5b 60 03 60 3b 76 76 94
- 5b 80 10 2b 30 06 26 30 0f];
- qcom,saw2-spm-cmd-pc = [00 20 10 80 90 5b 60 07 3b 76 76 0b 94
- 5b 80 10 2b 30 06 26 30 0f];
+ qcom,saw2-spm-dly= <0x3c102800>;
+ qcom,saw2-spm-ctl = <0x0>;
+ qcom,saw2-spm-cmd-wfi = [60 03 60 0b 0f];
+ qcom,saw2-spm-cmd-spc = [00 20 10 80 30 90 5b 60 03 60 3b 76 76
+ 0b 94 5b 80 10 06 26 30 0f];
+ qcom,saw2-spm-cmd-pc = [00 20 10 80 30 90 5b 60 07 60 3b 76 76
+ 0b 94 5b 80 10 06 26 30 0f];
};
qcom,spm@f9012000 {
@@ -89,21 +89,18 @@
qcom,core-id = <0xffff>; /* L2/APCS SAW */
qcom,saw2-ver-reg = <0xfd0>;
qcom,saw2-cfg = <0x14>;
- qcom,saw2-spm-dly= <0x20000400>;
- qcom,saw2-spm-ctl = <0x1>;
+ qcom,saw2-spm-dly= <0x3c102800>;
+ qcom,saw2-spm-ctl = <0x0>;
qcom,saw2-pmic-data0 = <0x02030080>;
qcom,saw2-pmic-data1 = <0x00030000>;
qcom,vctl-timeout-us = <50>;
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
qcom,pfm-port = <0x2>;
- qcom,saw2-spm-cmd-ret = [0b 00 03 00 7b 0f];
- qcom,saw2-spm-cmd-gdhs = [00 20 32 60 70 80 0b 6b c0 e0 d0 42 07
- 78 1f 80 4e d0 e0 c0 22 6b 50 4b 60 02 32 50 7b
- 0f];
- qcom,saw2-spm-cmd-pc = [00 32 60 70 80 b0 0b 10 e0 d0 6b c0
- 42 f0 11 07 01 b0 78 1f 80 4e c0 d0 12 e0 6b 50 4b
- 60 02 32 50 f0 7b 0f]; /*APCS_PMIC_OFF_L2RAM_OFF*/
+ qcom,saw2-spm-cmd-ret = [00 03 00 7b 0f];
+ qcom,saw2-spm-cmd-pc = [00 32 b0 10 e0 d0 6b c0 42 f0
+ 11 07 01 b0 4e c0 d0 12 e0 6b 50 02 32
+ 50 f0 7b 0f]; /*APCS_PMIC_OFF_L2RAM_OFF*/
};
qcom,lpm-resources {
@@ -114,8 +111,8 @@
qcom,lpm-resources@0 {
reg = <0x0>;
qcom,name = "vdd-dig";
- qcom,type = <0x62706d73>; /* "smpb" */
- qcom,id = <0x02>;
+ qcom,type = <0x61706d73>; /* "smpa" */
+ qcom,id = <0x01>;
qcom,key = <0x6e726f63>; /* "corn" */
qcom,init-value = <5>; /* Super Turbo */
};
@@ -123,10 +120,10 @@
qcom,lpm-resources@1 {
reg = <0x1>;
qcom,name = "vdd-mem";
- qcom,type = <0x62706d73>; /* "smpb" */
- qcom,id = <0x01>;
- qcom,key = <0x7675>; /* "uv" */
- qcom,init-value = <1050000>; /* Super Turbo */
+ qcom,type = <0x616F646C>; /* "ldoa" */
+ qcom,id = <0x03>;
+ qcom,key = <0x6e726f63>; /* "corn" */
+ qcom,init-value = <3>; /* Active */
};
qcom,lpm-resources@2 {
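
The qcom,type and qcom,key values in these lpm-resources entries are
four-character RPM resource strings packed into a little-endian 32-bit word,
which is what the hex-to-string comments are spelling out. Working the new
values through byte by byte:

	/* "smpa":  's'   'm'   'p'   'a'
	 *          0x73  0x6d  0x70  0x61  ->  0x61706d73 (LSB first)
	 * "corn":  'c' 'o' 'r' 'n'         ->  0x6e726f63
	 * "ldoa":  'l' 'd' 'o' 'a'         ->  0x616f646c  */
	qcom,type = <0x61706d73>;	/* "smpa" -- SMPS regulator on PMIC "A" */
	qcom,key  = <0x6e726f63>;	/* "corn" -- vote by voltage corner     */

So the hunk retargets vdd-dig from SMPS B2 to SMPS A1, and switches vdd-mem
from a microvolt ("uv") vote on SMPS B1 to a corner vote on LDO A3.
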
@@ -156,10 +153,12 @@
qcom,mode = "wfi";
qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
- qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <5>; /* MAX */
- qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
+ qcom,vdd-mem-upper-bound = <5>; /* SUPER TURBO */
+ qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
+ qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
+ qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
+ qcom,irqs-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <1>;
qcom,ss-power = <784>;
qcom,energy-overhead = <190000>;
@@ -168,78 +167,83 @@
qcom,lpm-level@1 {
reg = <0x1>;
- qcom,mode = "retention";
- qcom,xo = "xo_on";
- qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
- qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <5>; /* MAX */
- qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
- qcom,latency-us = <75>;
- qcom,ss-power = <735>;
- qcom,energy-overhead = <77341>;
- qcom,time-overhead = <105>;
- };
-
-
- qcom,lpm-level@2 {
- reg = <0x2>;
qcom,mode = "standalone_pc";
qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
- qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <5>; /* MAX */
- qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
- qcom,latency-us = <95>;
+ qcom,vdd-mem-upper-bound = <5>; /* SUPER TURBO */
+ qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
+ qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
+ qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
+ qcom,irqs-detectable;
+ qcom,gpio-detectable;
+ qcom,latency-us = <3000>;
qcom,ss-power = <725>;
qcom,energy-overhead = <99500>;
- qcom,time-overhead = <130>;
+ qcom,time-overhead = <3130>;
+ };
+
+ qcom,lpm-level@2 {
+ reg = <0x2>;
+ qcom,mode = "pc";
+ qcom,xo = "xo_on";
+ qcom,l2 = "l2_cache_retention";
+ qcom,vdd-mem-upper-bound = <5>; /* SUPER TURBO */
+ qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
+ qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
+ qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
+ qcom,irqs-detectable;
+ qcom,gpio-detectable;
+ qcom,latency-us = <8000>;
+ qcom,ss-power = <138>;
+ qcom,energy-overhead = <1208400>;
+ qcom,time-overhead = <9200>;
};
qcom,lpm-level@3 {
reg = <0x3>;
qcom,mode = "pc";
qcom,xo = "xo_on";
- qcom,l2 = "l2_cache_gdhs";
- qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
- qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <5>; /* MAX */
- qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
- qcom,latency-us = <2000>;
- qcom,ss-power = <138>;
- qcom,energy-overhead = <1208400>;
- qcom,time-overhead = <3200>;
+ qcom,l2 = "l2_cache_pc";
+ qcom,vdd-mem-upper-bound = <3>; /* NORMAL */
+ qcom,vdd-mem-lower-bound = <2>; /* SVS SOC */
+ qcom,vdd-dig-upper-bound = <3>; /* NORMAL */
+ qcom,vdd-dig-lower-bound = <2>; /* SVS SOC */
+ qcom,irqs-detectable;
+ qcom,gpio-detectable;
+ qcom,latency-us = <9000>;
+ qcom,ss-power = <110>;
+ qcom,energy-overhead = <1250300>;
+ qcom,time-overhead = <9500>;
};
qcom,lpm-level@4 {
reg = <0x4>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
+ qcom,xo = "xo_off";
qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <1050000>; /* ACTIVE */
- qcom,vdd-mem-lower-bound = <750000>; /* RETENTION HIGH */
- qcom,vdd-dig-upper-bound = <3>; /* ACTIVE */
- qcom,vdd-dig-lower-bound = <2>; /* RETENTION HIGH */
- qcom,latency-us = <3000>;
- qcom,ss-power = <110>;
- qcom,energy-overhead = <1250300>;
- qcom,time-overhead = <3500>;
+ qcom,vdd-mem-upper-bound = <5>; /* SUPER TURBO */
+ qcom,vdd-mem-lower-bound = <3>; /* NORMAL */
+ qcom,vdd-dig-upper-bound = <5>; /* SUPER TURBO */
+ qcom,vdd-dig-lower-bound = <3>; /* NORMAL */
+ qcom,latency-us = <16300>;
+ qcom,ss-power = <63>;
+ qcom,energy-overhead = <2128000>;
+ qcom,time-overhead = <24200>;
};
qcom,lpm-level@5 {
reg = <0x5>;
qcom,mode = "pc";
qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_gdhs";
- qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
- qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <5>; /* MAX */
- qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
- qcom,latency-us = <3000>;
- qcom,ss-power = <68>;
- qcom,energy-overhead = <1350200>;
- qcom,time-overhead = <4000>;
+ qcom,l2 = "l2_cache_pc";
+ qcom,vdd-mem-upper-bound = <3>; /* NORMAL */
+ qcom,vdd-mem-lower-bound = <2>; /* SVS SOC */
+ qcom,vdd-dig-upper-bound = <3>; /* NORMAL */
+ qcom,vdd-dig-lower-bound = <2>; /* SVS SOC */
+ qcom,latency-us = <24000>;
+ qcom,ss-power = <10>;
+ qcom,energy-overhead = <3202600>;
+ qcom,time-overhead = <33000>;
};
qcom,lpm-level@6 {
@@ -247,44 +251,14 @@
qcom,mode = "pc";
qcom,xo = "xo_off";
qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
- qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <5>; /* MAX */
- qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
- qcom,latency-us = <10300>;
- qcom,ss-power = <63>;
- qcom,energy-overhead = <2128000>;
- qcom,time-overhead = <18200>;
- };
-
- qcom,lpm-level@7 {
- reg = <0x7>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <1050000>; /* ACTIVE */
- qcom,vdd-mem-lower-bound = <750000>; /* RETENTION HIGH */
- qcom,vdd-dig-upper-bound = <3>; /* ACTIVE */
- qcom,vdd-dig-lower-bound = <2>; /* RETENTION HIGH */
- qcom,latency-us = <18000>;
- qcom,ss-power = <10>;
- qcom,energy-overhead = <3202600>;
- qcom,time-overhead = <27000>;
- };
-
- qcom,lpm-level@8 {
- reg = <0x8>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <750000>; /* RETENTION HIGH */
- qcom,vdd-mem-lower-bound = <750000>; /* RETENTION LOW */
- qcom,vdd-dig-upper-bound = <2>; /* RETENTION HIGH */
- qcom,vdd-dig-lower-bound = <0>; /* RETENTION LOW */
- qcom,latency-us = <20000>;
+ qcom,vdd-mem-upper-bound = <2>; /* SVS SOC */
+ qcom,vdd-mem-lower-bound = <0>; /* RETENTION */
+ qcom,vdd-dig-upper-bound = <2>; /* SVS SOC */
+ qcom,vdd-dig-lower-bound = <0>; /* RETENTION */
+ qcom,latency-us = <26000>;
qcom,ss-power = <2>;
qcom,energy-overhead = <4252000>;
- qcom,time-overhead = <32000>;
+ qcom,time-overhead = <38000>;
};
};
@@ -394,6 +368,18 @@
reg = <0xfe805664 0x40>;
qcom,pc-mode = "tz_l2_int";
qcom,use-sync-timer;
+ qcom,pc-resets-timer;
+ };
+
+ qcom,rpm-log@fc19dc00 {
+ compatible = "qcom,rpm-log";
+ reg = <0xfc19dc00 0x4000>;
+ qcom,rpm-addr-phys = <0xfc000000>;
+ qcom,offset-version = <4>;
+ qcom,offset-page-buffer-addr = <36>;
+ qcom,offset-log-len = <40>;
+ qcom,offset-log-len-mask = <44>;
+ qcom,offset-page-indices = <56>;
};
qcom,rpm-stats@0xfc19dbd0{
diff --git a/arch/arm/boot/dts/msm8610-regulator.dtsi b/arch/arm/boot/dts/msm8610-regulator.dtsi
index d50902c..67eee5c 100644
--- a/arch/arm/boot/dts/msm8610-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8610-regulator.dtsi
@@ -10,19 +10,6 @@
* GNU General Public License for more details.
*/
- /* Stub Regulators */
-
-/ {
- pm8110_s1_corner: regulator-s1-corner {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_s1_corner";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <1>;
- regulator-max-microvolt = <7>;
- qcom,consumer-supplies = "vdd_dig", "", "vdd_sr2_dig", "";
- };
-};
-
/* SPM controlled regulators */
&spmi_bus {
@@ -60,195 +47,274 @@
};
};
-/* QPNP controlled regulators: */
+/* RPM controlled regulators: */
-&spmi_bus {
+&rpm_bus {
- qcom,pm8110@1 {
-
- pm8110_s1: regulator@1400 {
+ rpm-regulator-smpa1 {
+ status = "okay";
+ pm8110_s1: regulator-s1 {
status = "okay";
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- qcom,enable-time = <500>;
- qcom,system-load = <100000>;
- regulator-always-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1275000>;
};
- pm8110_s3: regulator@1a00 {
- status = "okay";
- regulator-min-microvolt = <1350000>;
+ pm8110_s1_corner: regulator-s1-corner {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_s1_corner";
+ qcom,set = <3>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-corner;
+ qcom,consumer-supplies = "vdd_dig", "", "vdd_sr2_dig", "";
+ };
+
+ pm8110_s1_corner_ao: regulator-s1-corner-ao {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_s1_corner_ao";
+ qcom,set = <1>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-corner;
+ };
+ };
+
+ rpm-regulator-smpa3 {
+ status = "okay";
+ pm8110_s3: regulator-s3 {
+ regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1350000>;
- qcom,enable-time = <500>;
- qcom,system-load = <100000>;
- regulator-always-on;
- };
-
- pm8110_s4: regulator@1d00 {
+ qcom,init-voltage = <1200000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-smpa4 {
+ status = "okay";
+ pm8110_s4: regulator-s4 {
regulator-min-microvolt = <2150000>;
regulator-max-microvolt = <2150000>;
- qcom,enable-time = <500>;
- qcom,system-load = <100000>;
- regulator-always-on;
- };
-
- pm8110_l1: regulator@4000 {
+ qcom,init-voltage = <2150000>;
status = "okay";
- parent-supply = <&pm8110_s3>;
+ };
+ };
+
+ rpm-regulator-ldoa1 {
+ status = "okay";
+ pm8110_l1: regulator-l1 {
regulator-min-microvolt = <1225000>;
regulator-max-microvolt = <1225000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l2: regulator@4100 {
+ qcom,init-voltage = <1225000>;
status = "okay";
- parent-supply = <&pm8110_s3>;
+ };
+ };
+
+ rpm-regulator-ldoa2 {
+ status = "okay";
+ pm8110_l2: regulator-l2 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
- qcom,enable-time = <200>;
- qcom,system-load = <10000>;
- regulator-always-on;
+ qcom,init-voltage = <1200000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa3 {
+ status = "okay";
+ pm8110_l3: regulator-l3 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1275000>;
+ status = "okay";
};
- pm8110_l3: regulator@4200 {
+ pm8110_l3_ao: regulator-l3-ao {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l3_ao";
+ qcom,set = <1>;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1275000>;
status = "okay";
- parent-supply = <&pm8110_s3>;
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- qcom,enable-time = <200>;
- qcom,system-load = <10000>;
- regulator-always-on;
};
- pm8110_l4: regulator@4300 {
+ pm8110_l3_so: regulator-l3-so {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l3_so";
+ qcom,set = <2>;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1275000>;
+ qcom,init-voltage = <750000>;
status = "okay";
- parent-supply = <&pm8110_s3>;
+ };
+ };
+
+ rpm-regulator-ldoa4 {
+ status = "okay";
+ pm8110_l4: regulator-l4 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l5: regulator@4400 {
+ qcom,init-voltage = <1200000>;
status = "okay";
- parent-supply = <&pm8110_s3>;
+ };
+ };
+
+ rpm-regulator-ldoa5 {
+ status = "okay";
+ pm8110_l5: regulator-l5 {
regulator-min-microvolt = <1300000>;
regulator-max-microvolt = <1300000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l6: regulator@4500 {
+ qcom,init-voltage = <1300000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa6 {
+ status = "okay";
+ pm8110_l6: regulator-l6 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- qcom,enable-time = <200>;
- qcom,system-load = <10000>;
- regulator-always-on;
- };
-
- pm8110_l7: regulator@4600 {
+ qcom,init-voltage = <1800000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa7 {
+ status = "okay";
+ pm8110_l7: regulator-l7 {
regulator-min-microvolt = <2050000>;
regulator-max-microvolt = <2050000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l8: regulator@4700 {
+ qcom,init-voltage = <2050000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa8 {
+ status = "okay";
+ pm8110_l8: regulator-l8 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l9: regulator@4800 {
+ qcom,init-voltage = <1800000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa9 {
+ status = "okay";
+ pm8110_l9: regulator-l9 {
regulator-min-microvolt = <2050000>;
regulator-max-microvolt = <2050000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l10: regulator@4900 {
+ qcom,init-voltage = <2050000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa10 {
+ status = "okay";
+ pm8110_l10: regulator-l10 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- qcom,enable-time = <200>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
qcom,consumer-supplies = "vdd_sr2_pll", "";
};
+ };
- pm8110_l12: regulator@4b00 {
- status = "okay";
+ rpm-regulator-ldoa12 {
+ status = "okay";
+ pm8110_l12: regulator-l12 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l14: regulator@4d00 {
+ qcom,init-voltage = <3300000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa14 {
+ status = "okay";
+ pm8110_l14: regulator-l14 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l15: regulator@4e00 {
+ qcom,init-voltage = <1800000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa15 {
+ status = "okay";
+ pm8110_l15: regulator-l15 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l16: regulator@4f00 {
+ qcom,init-voltage = <3300000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa16 {
+ status = "okay";
+ pm8110_l16: regulator-l16 {
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l17: regulator@5000 {
+ qcom,init-voltage = <3000000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa17 {
+ status = "okay";
+ pm8110_l17: regulator-l17 {
regulator-min-microvolt = <2900000>;
regulator-max-microvolt = <2900000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l18: regulator@5100 {
+ qcom,init-voltage = <2900000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa18 {
+ status = "okay";
+ pm8110_l18: regulator-l18 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2950000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l19: regulator@5200 {
+ qcom,init-voltage = <2950000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa19 {
+ status = "okay";
+ pm8110_l19: regulator-l19 {
regulator-min-microvolt = <2850000>;
regulator-max-microvolt = <2850000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l20: regulator@5300 {
+ qcom,init-voltage = <2850000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa20 {
+ status = "okay";
+ pm8110_l20: regulator-l20 {
regulator-min-microvolt = <3075000>;
regulator-max-microvolt = <3075000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l21: regulator@5400 {
+ qcom,init-voltage = <3075000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa21 {
+ status = "okay";
+ pm8110_l21: regulator-l21 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2950000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l22: regulator@5500 {
+ qcom,init-voltage = <2950000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa22 {
+ status = "okay";
+ pm8110_l22: regulator-l22 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- qcom,enable-time = <200>;
+ qcom,init-voltage = <3300000>;
+ status = "okay";
};
};
};
diff --git a/arch/arm/boot/dts/msm8610-rumi.dts b/arch/arm/boot/dts/msm8610-rumi.dts
index a4507e3..cab7560 100644
--- a/arch/arm/boot/dts/msm8610-rumi.dts
+++ b/arch/arm/boot/dts/msm8610-rumi.dts
@@ -16,7 +16,7 @@
/ {
model = "Qualcomm MSM 8610 Rumi";
- compatible = "qcom,msm8610-rumi", "qcom,msm8610";
+ compatible = "qcom,msm8610-rumi", "qcom,msm8610", "qcom,rumi";
qcom,msm-id = <147 15 0>;
serial@f991f000 {
diff --git a/arch/arm/boot/dts/msm8610-sim.dts b/arch/arm/boot/dts/msm8610-sim.dts
index 2268daf..1838b94 100644
--- a/arch/arm/boot/dts/msm8610-sim.dts
+++ b/arch/arm/boot/dts/msm8610-sim.dts
@@ -16,7 +16,7 @@
/ {
model = "Qualcomm MSM 8610 Simulator";
- compatible = "qcom,msm8610-sim", "qcom,msm8610";
+ compatible = "qcom,msm8610-sim", "qcom,msm8610", "qcom,sim";
qcom,msm-id = <147 16 0>;
serial@f991f000 {
diff --git a/arch/arm/boot/dts/msm8610-smp2p.dtsi b/arch/arm/boot/dts/msm8610-smp2p.dtsi
index 4a5273b..079e4ca 100644
--- a/arch/arm/boot/dts/msm8610-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm8610-smp2p.dtsi
@@ -12,8 +12,7 @@
/ {
qcom,smp2p-modem {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <1>;
qcom,irq-bitmask = <0x4000>;
interrupts = <0 27 1>;
@@ -21,8 +20,7 @@
qcom,smp2p-adsp {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <2>;
qcom,irq-bitmask = <0x400>;
interrupts = <0 158 1>;
@@ -30,8 +28,7 @@
qcom,smp2p-wcnss {
compatible = "qcom,smp2p";
- reg = <0xfa006000 0x1000>, <0x8 0x0>;
- reg-names = "irq-reg-base", "irq-reg-offset";
+ reg = <0xf9011008 0x4>;
qcom,remote-pid = <4>;
qcom,irq-bitmask = <0x40000>;
interrupts = <0 143 1>;
@@ -151,6 +148,29 @@
gpios = <&smp2pgpio_smp2p_2_out 0 0>;
};
+ /* SMP2P SSR Driver for inbound entry from lpass. */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* SMP2P SSR Driver for outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
smp2pgpio_smp2p_4_in: qcom,smp2pgpio-smp2p-4-in {
compatible = "qcom,smp2pgpio";
qcom,entry-name = "smp2p";
@@ -177,6 +197,27 @@
#interrupt-cells = <2>;
};
+ smp2pgpio_ssr_smp2p_4_in: qcom,smp2pgpio-ssr-smp2p-4-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <4>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ smp2pgpio_ssr_smp2p_4_out: qcom,smp2pgpio-ssr-smp2p-4-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
qcom,smp2pgpio_test_smp2p_4_out {
compatible = "qcom,smp2pgpio_test_smp2p_4_out";
gpios = <&smp2pgpio_smp2p_4_out 0 0>;
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index 91dfdbe..e406ba8 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -18,6 +18,7 @@
/include/ "msm8610-coresight.dtsi"
/include/ "msm8610-pm.dtsi"
/include/ "msm8610-smp2p.dtsi"
+/include/ "msm8610-bus.dtsi"
/ {
model = "Qualcomm MSM 8610";
@@ -46,6 +47,8 @@
aliases {
spi0 = &spi_0;
+ sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
+ sdhc2 = &sdhc_2; /* SDC2 SD card slot */
};
timer {
@@ -54,6 +57,65 @@
clock-frequency = <19200000>;
};
+ timer@f9020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xf9020000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@f9021000 {
+ frame-number = <0>;
+ interrupts = <0 8 0x4>,
+ <0 7 0x4>;
+ reg = <0xf9021000 0x1000>,
+ <0xf9022000 0x1000>;
+ };
+
+ frame@f9023000 {
+ frame-number = <1>;
+ interrupts = <0 9 0x4>;
+ reg = <0xf9023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9024000 {
+ frame-number = <2>;
+ interrupts = <0 10 0x4>;
+ reg = <0xf9024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9025000 {
+ frame-number = <3>;
+ interrupts = <0 11 0x4>;
+ reg = <0xf9025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9026000 {
+ frame-number = <4>;
+ interrupts = <0 12 0x4>;
+ reg = <0xf9026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9027000 {
+ frame-number = <5>;
+ interrupts = <0 13 0x4>;
+ reg = <0xf9027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9028000 {
+ frame-number = <6>;
+ interrupts = <0 14 0x4>;
+ reg = <0xf9028000 0x1000>;
+ status = "disabled";
+ };
+ };
+
qcom,msm-adsp-loader {
compatible = "qcom,adsp-loader";
qcom,adsp-state = <0>;
@@ -71,6 +133,13 @@
status = "disabled";
};
+ serial@f991e000 {
+ compatible = "qcom,msm-lsuart-v14";
+ reg = <0xf991e000 0x1000>;
+ interrupts = <0 108 0>;
+ status = "disabled";
+ };
+
qcom,vidc@fdc00000 {
compatible = "qcom,msm-vidc";
qcom,vidc-ns-map = <0x40000000 0x40000000>;
@@ -82,6 +151,35 @@
qcom,max-hw-load = <97200>; /* FWVGA @ 30 * 2 */
};
+ qcom,usbbam@f9a44000 {
+ compatible = "qcom,usb-bam-msm";
+ reg = <0xf9a44000 0x11000>;
+ reg-names = "hsusb";
+ interrupts = <0 135 0>;
+ interrupt-names = "hsusb";
+ qcom,usb-bam-num-pipes = <16>;
+ qcom,usb-bam-fifo-baseaddr = <0xfe803000>;
+ qcom,ignore-core-reset-ack;
+ qcom,disable-clk-gating;
+
+ qcom,pipe0 {
+ label = "hsusb-qdss-in-0";
+ qcom,usb-bam-mem-type = <3>;
+ qcom,bam-type = <1>;
+ qcom,dir = <1>;
+ qcom,pipe-num = <0>;
+ qcom,peer-bam = <1>;
+ qcom,src-bam-physical-address = <0xfc37c000>;
+ qcom,src-bam-pipe-index = <0>;
+ qcom,dst-bam-physical-address = <0xf9a44000>;
+ qcom,dst-bam-pipe-index = <2>;
+ qcom,data-fifo-offset = <0x0>;
+ qcom,data-fifo-size = <0x600>;
+ qcom,descriptor-fifo-offset = <0x600>;
+ qcom,descriptor-fifo-size = <0x200>;
+ };
+ };
+
usb@f9a55000 {
compatible = "qcom,hsusb-otg";
reg = <0xf9a55000 0x400>;
@@ -93,8 +191,17 @@
qcom,hsusb-otg-phy-type = <2>;
qcom,hsusb-otg-mode = <1>;
- qcom,hsusb-otg-otg-control = <1>;
+ qcom,hsusb-otg-otg-control = <2>;
qcom,hsusb-otg-disable-reset;
+ qcom,dp-manual-pullup;
+
+ qcom,msm-bus,name = "usb2";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <87 512 0 0>,
+ <87 512 60000 960000>;
};
android_usb@fe8050c8 {
@@ -126,7 +233,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -134,6 +241,8 @@
qcom,bus-width = <8>;
qcom,nonremovable;
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+ status = "disabled";
};
sdcc2: qcom,sdcc@f98a4000 {
@@ -156,7 +265,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -165,6 +274,32 @@
qcom,xpc;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
qcom,current-limit = <800>;
+
+ status = "disabled";
+ };
+
+ sdhc_1: sdhci@f9824900 {
+ compatible = "qcom,sdhci-msm";
+ reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 123 0>, <0 138 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <8>;
+ status = "disabled";
+ };
+
+ sdhc_2: sdhci@f98a4900 {
+ compatible = "qcom,sdhci-msm";
+ reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 0>, <0 221 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <4>;
+ status = "disabled";
};
qcom,sps {
@@ -172,9 +307,9 @@
qcom,device-type = <3>;
};
- qcom,smem@d600000 {
+ qcom,smem@d900000 {
compatible = "qcom,smem";
- reg = <0xd600000 0x200000>,
+ reg = <0xd900000 0x200000>,
<0xf9011000 0x1000>,
<0xfc428000 0x4000>;
reg-names = "smem", "irq-reg-base", "aux-mem1";
@@ -249,7 +384,7 @@
qcom,msm-mem-hole {
compatible = "qcom,msm-mem-hole";
- qcom,memblock-remove = <0x07C00000 0x6000000>; /* Address and Size of Hole */
+ qcom,memblock-remove = <0x07B00000 0x6400000>; /* Address and Size of Hole */
};
qcom,wdt@f9017000 {
@@ -266,7 +401,7 @@
reg = <0xf9011050 0x8>;
reg-names = "rcg_base";
a7_cpu-supply = <&apc_vreg_corner>;
- a7_mem-supply = <&pm8110_l3>;
+ a7_mem-supply = <&pm8110_l3_ao>;
};
spmi_bus: qcom,spmi@fc4c0000 {
@@ -279,7 +414,6 @@
/* 190,ee0_krait_hlos_spmi_periph_irq */
/* 187,channel_0_krait_hlos_trans_done_irq */
interrupts = <0 190 0>, <0 187 0>;
- qcom,not-wakeup;
qcom,pmic-arb-ee = <0>;
qcom,pmic-arb-channel = <0>;
};
@@ -330,6 +464,17 @@
vdd_pronto_pll-supply = <&pm8110_l10>;
qcom,firmware-name = "wcnss";
+
+ /* GPIO inputs from wcnss */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_4_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_4_in 2 0>;
+
+ /* GPIO output to wcnss */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_4_out 0 0>;
+ };
+
+ qcom,iris-fm {
+ compatible = "qcom,iris_fm";
};
sound {
@@ -437,6 +582,25 @@
compatible = "qcom,msm-pcm-hostless";
};
+ qcom,wcnss-wlan@fb000000 {
+ compatible = "qcom,wcnss_wlan";
+ reg = <0xfb000000 0x280000>;
+ reg-names = "wcnss_mmio";
+ interrupts = <0 145 0>, <0 146 0>;
+ interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
+
+ qcom,pronto-vddmx-supply = <&pm8110_l3>;
+ qcom,pronto-vddcx-supply = <&pm8110_s1>;
+ qcom,pronto-vddpx-supply = <&pm8110_l6>;
+ qcom,iris-vddxo-supply = <&pm8110_l10>;
+ qcom,iris-vddrfa-supply = <&pm8110_l5>;
+ qcom,iris-vddpa-supply = <&pm8110_l16>;
+ qcom,iris-vdddig-supply = <&pm8110_l5>;
+
+ gpios = <&msmgpio 23 0>, <&msmgpio 24 0>, <&msmgpio 25 0>, <&msmgpio 26 0>, <&msmgpio 27 0>;
+ qcom,has_pronto_hw;
+ };
+
qcom,mss@fc880000 {
compatible = "qcom,pil-q6v5-mss";
reg = <0xfc880000 0x100>,
@@ -456,6 +620,13 @@
qcom,is-loadable;
qcom,firmware-name = "mba";
qcom,pil-self-auth;
+
+ /* GPIO inputs from mss */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+
+ /* GPIO output to mss */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
};
qcom,lpass@fe200000 {
@@ -467,6 +638,13 @@
interrupts = <0 162 1>;
vdd_cx-supply = <&pm8110_s1_corner>;
qcom,firmware-name = "adsp";
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
};
tsens: tsens@fc4a8000 {
@@ -480,8 +658,86 @@
qcom,calib-mode = "fuse_map3";
qcom,calibration-less-mode;
qcom,tsens-local-init;
+ qcom,sensor-id = <0 5>;
};
+ qcom,msm-thermal {
+ compatible = "qcom,msm-thermal";
+ qcom,sensor-id = <0>;
+ qcom,poll-ms = <250>;
+ qcom,limit-temp = <60>;
+ qcom,temp-hysteresis = <10>;
+ qcom,freq-step = <2>;
+ };
+
+ qcom,ipc-spinlock@fd484000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0xfd484000 0x400>;
+ qcom,num-locks = <8>;
+ };
+
+ qcom,bam_dmux@fc834000 {
+ compatible = "qcom,bam_dmux";
+ reg = <0xfc834000 0x7000>;
+ interrupts = <0 29 1>;
+ };
+
+ qcom,qseecom@7B00000 {
+ compatible = "qcom,qseecom";
+ reg = <0x7B00000 0x500000>;
+ reg-names = "secapp-region";
+ qcom,disk-encrypt-pipe-pair = <2>;
+ qcom,hlos-ce-hw-instance = <0>;
+ qcom,qsee-ce-hw-instance = <0>;
+ qcom,msm-bus,name = "qseecom-noc";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <55 512 0 0>,
+ <55 512 3936000 393600>,
+ <55 512 3936000 393600>,
+ <55 512 3936000 393600>;
+ };
+
+ qcom,msm-rng@f9bff000 {
+ compatible = "qcom,msm-rng";
+ reg = <0xf9bff000 0x200>;
+ qcom,msm-rng-iface-clk;
+ };
+
+ jtag_mm0: jtagmm@fc34c000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc34c000 0x1000>,
+ <0xfc340000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ jtag_mm1: jtagmm@fc34d000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc34d000 0x1000>,
+ <0xfc342000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ jtag_mm2: jtagmm@fc34e000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc34e000 0x1000>,
+ <0xfc344000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ jtag_mm3: jtagmm@fc34f000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc34f000 0x1000>,
+ <0xfc346000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ qcom,tz-log@fe805720 {
+ compatible = "qcom,tz-log";
+ reg = <0x0fe805720 0x1000>;
+ };
};
&gdsc_vfe {
@@ -518,6 +774,7 @@
/include/ "msm8610-iommu-domains.dtsi"
+/include/ "msm-pm8110-rpm-regulator.dtsi"
/include/ "msm-pm8110.dtsi"
/include/ "msm8610-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts
index 45bc4bb..4518fc4 100644
--- a/arch/arm/boot/dts/msm8660-surf.dts
+++ b/arch/arm/boot/dts/msm8660-surf.dts
@@ -4,7 +4,7 @@
/ {
model = "Qualcomm MSM8660 SURF";
- compatible = "qcom,msm8660-surf", "qcom,msm8660";
+ compatible = "qcom,msm8660-surf", "qcom,msm8660", "qcom,surf";
interrupt-parent = <&intc>;
intc: interrupt-controller@02080000 {
diff --git a/arch/arm/boot/dts/msm8974-bus.dtsi b/arch/arm/boot/dts/msm8974-bus.dtsi
index 828e7ae..3e0ef04 100644
--- a/arch/arm/boot/dts/msm8974-bus.dtsi
+++ b/arch/arm/boot/dts/msm8974-bus.dtsi
@@ -284,8 +284,8 @@
qcom,qport = <0>;
qcom,mas-hw-id = <18>;
qcom,mode = "Fixed";
- qcom,prio-rd = <2>;
- qcom,prio-wr = <2>;
+ qcom,prio1 = <2>;
+ qcom,prio0 = <2>;
};
mas-qdss-bam {
@@ -296,6 +296,9 @@
qcom,mode = "Fixed";
qcom,qport = <1>;
qcom,mas-hw-id = <19>;
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
+ qcom,hw-sel = "NoC";
};
mas-snoc-cfg {
@@ -339,8 +342,8 @@
qcom,mas-hw-id = <29>;
qcom,slv-hw-id = <28>;
qcom,mode = "Fixed";
- qcom,prio-rd = <2>;
- qcom,prio-wr = <2>;
+ qcom,prio1 = <2>;
+ qcom,prio0 = <2>;
};
fab-ovnoc {
@@ -361,8 +364,8 @@
qcom,qport = <2>;
qcom,mas-hw-id = <23>;
qcom,hw-sel = "NoC";
- qcom,prio-rd = <1>;
- qcom,prio-wr = <1>;
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
};
mas-crypto-core1 {
@@ -374,8 +377,8 @@
qcom,qport = <3>;
qcom,mas-hw-id = <24>;
qcom,hw-sel = "NoC";
- qcom,prio-rd = <1>;
- qcom,prio-wr = <1>;
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
};
mas-lpass-proc {
@@ -386,8 +389,8 @@
qcom,qport = <4>;
qcom,mas-hw-id = <25>;
qcom,mode = "Fixed";
- qcom,prio-rd = <2>;
- qcom,prio-wr = <2>;
+ qcom,prio1 = <2>;
+ qcom,prio0 = <2>;
};
mas-mss {
@@ -432,6 +435,9 @@
qcom,qport = <10>;
qcom,mode = "Fixed";
qcom,mas-hw-id = <31>;
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
+ qcom,hw-sel = "NoC";
};
mas-usb3 {
@@ -442,8 +448,8 @@
qcom,mode = "Fixed";
qcom,qport = <11>;
qcom,mas-hw-id = <32>;
- qcom,prio-rd = <2>;
- qcom,prio-wr = <2>;
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
qcom,hw-sel = "NoC";
qcom,iface-clk-node = "msm_usb3";
};
@@ -1343,30 +1349,18 @@
qcom,hw-sel = "NoC";
};
- mas-video-p0-ocmem {
+ mas-video-ocmem {
cell-id = <68>;
- label = "mas-video-p0-ocmem";
- qcom,masterp = <3>;
+ label = "mas-video-ocmem";
+ qcom,masterp = <3 4>;
qcom,tier = <2>;
qcom,perm-mode = "Fixed";
qcom,mode = "Fixed";
- qcom,qport = <2>;
+ qcom,qport = <2 3>;
qcom,mas-hw-id = <15>;
qcom,hw-sel = "NoC";
};
- mas-video-p1-ocmem {
- cell-id = <69>;
- label = "mas-video-p1-ocmem";
- qcom,masterp = <4>;
- qcom,tier = <2>;
- qcom,perm-mode = "Fixed";
- qcom,mode = "Fixed";
- qcom,qport = <3>;
- qcom,mas-hw-id = <16>;
- qcom,hw-sel = "NoC";
- };
-
mas-vfe-ocmem {
cell-id = <70>;
label = "mas-vfe-ocmem";
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-cdp.dtsi
index a6a115c..b574a31 100644
--- a/arch/arm/boot/dts/msm8974-camera-sensor-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-cdp.dtsi
@@ -20,6 +20,13 @@
qcom,cci-master = <0>;
};
+ actuator1: qcom,actuator@36 {
+ cell-index = <1>;
+ reg = <0x36>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ };
+
qcom,camera@6e {
compatible = "qcom,s5k3l1yx";
reg = <0x6e 0x0>;
@@ -61,12 +68,50 @@
status = "ok";
};
+ qcom,camera@20 {
+ compatible = "qcom,imx135";
+ reg = <0x20>;
+ qcom,slave-id = <0x20 0x0016 0x0135>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <90>;
+ qcom,sensor-name = "imx135";
+ qcom,actuator-src = <&actuator1>;
+ cam_vdig-supply = <&pm8941_l3>;
+ cam_vana-supply = <&pm8941_l17>;
+ cam_vio-supply = <&pm8941_lvs3>;
+ cam_vaf-supply = <&pm8941_l23>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_vaf";
+ qcom,cam-vreg-type = <0 1 0 0>;
+ qcom,cam-vreg-min-voltage = <1225000 0 2850000 3000000>;
+ qcom,cam-vreg-max-voltage = <1225000 0 2850000 3000000>;
+ qcom,cam-vreg-op-mode = <105000 0 80000 100000>;
+ qcom,gpio-no-mux = <0>;
+ gpios = <&msmgpio 15 0>,
+ <&msmgpio 90 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK", "CAM_RESET1";
+ qcom,gpio-set-tbl-num = <1 1>;
+ qcom,gpio-set-tbl-flags = <0 2>;
+ qcom,gpio-set-tbl-delay = <1000 30000>;
+ qcom,csi-lane-assign = <0x4320>;
+ qcom,csi-lane-mask = <0x1F>;
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,sensor-type = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ };
+
qcom,camera@6c {
compatible = "qcom,ov2720";
reg = <0x6c 0x0>;
qcom,slave-id = <0x6c 0x300A 0x2720>;
qcom,csiphy-sd-index = <2>;
- qcom,csid-sd-index = <0>;
+ qcom,csid-sd-index = <2>;
qcom,mount-angle = <90>;
qcom,sensor-name = "ov2720";
cam_vdig-supply = <&pm8941_l3>;
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-fluid.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-fluid.dtsi
index c9d1abc..748d5f7 100644
--- a/arch/arm/boot/dts/msm8974-camera-sensor-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-fluid.dtsi
@@ -20,6 +20,13 @@
qcom,cci-master = <0>;
};
+ actuator1: qcom,actuator@36 {
+ cell-index = <1>;
+ reg = <0x36>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ };
+
qcom,camera@6e {
compatible = "qcom,s5k3l1yx";
reg = <0x6e>;
@@ -62,12 +69,50 @@
status = "ok";
};
+ qcom,camera@20 {
+ compatible = "qcom,imx135";
+ reg = <0x20>;
+ qcom,slave-id = <0x20 0x0016 0x0135>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <270>;
+ qcom,sensor-name = "imx135";
+ qcom,actuator-src = <&actuator1>;
+ cam_vdig-supply = <&pm8941_l3>;
+ cam_vana-supply = <&pm8941_l17>;
+ cam_vio-supply = <&pm8941_lvs3>;
+ cam_vaf-supply = <&pm8941_l23>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_vaf";
+ qcom,cam-vreg-type = <0 1 0 0>;
+ qcom,cam-vreg-min-voltage = <1225000 0 2850000 3000000>;
+ qcom,cam-vreg-max-voltage = <1225000 0 2850000 3000000>;
+ qcom,cam-vreg-op-mode = <105000 0 80000 100000>;
+ qcom,gpio-no-mux = <0>;
+ gpios = <&msmgpio 15 0>,
+ <&msmgpio 90 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK", "CAM_RESET1";
+ qcom,gpio-set-tbl-num = <1 1>;
+ qcom,gpio-set-tbl-flags = <0 2>;
+ qcom,gpio-set-tbl-delay = <1000 30000>;
+ qcom,csi-lane-assign = <0x4320>;
+ qcom,csi-lane-mask = <0x1F>;
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,sensor-type = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ };
+
qcom,camera@6c {
compatible = "qcom,ov2720";
reg = <0x6c>;
qcom,slave-id = <0x6c 0x300A 0x2720>;
qcom,csiphy-sd-index = <2>;
- qcom,csid-sd-index = <0>;
+ qcom,csid-sd-index = <2>;
qcom,mount-angle = <90>;
qcom,sensor-name = "ov2720";
cam_vdig-supply = <&pm8941_l3>;
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-liquid.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-liquid.dtsi
index f9b89e1..5a97a11 100644
--- a/arch/arm/boot/dts/msm8974-camera-sensor-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-liquid.dtsi
@@ -21,6 +21,13 @@
qcom,cci-master = <0>;
};
+ actuator1: qcom,actuator@36 {
+ cell-index = <1>;
+ reg = <0x36>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ };
+
qcom,camera@6e {
compatible = "qcom,s5k3l1yx";
reg = <0x6e>;
@@ -31,7 +38,7 @@
qcom,sensor-name = "s5k3l1yx";
cam_vdig-supply = <&pm8941_l3>;
cam_vana-supply = <&pm8941_l17>;
- cam_vio-supply = <&pm8941_lvs3>;
+ cam_vio-supply = <&pm8941_lvs2>;
cam_vaf-supply = <&pm8941_l23>;
qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
"cam_vaf";
@@ -58,6 +65,44 @@
status = "ok";
};
+ qcom,camera@20 {
+ compatible = "qcom,imx135";
+ reg = <0x20>;
+ qcom,slave-id = <0x20 0x0016 0x0135>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <0>;
+ qcom,sensor-name = "imx135";
+ qcom,actuator-src = <&actuator1>;
+ cam_vdig-supply = <&pm8941_l3>;
+ cam_vana-supply = <&pm8941_l17>;
+ cam_vio-supply = <&pm8941_lvs2>;
+ cam_vaf-supply = <&pm8941_l23>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_vaf";
+ qcom,cam-vreg-type = <0 1 0 0>;
+ qcom,cam-vreg-min-voltage = <1225000 0 2850000 3000000>;
+ qcom,cam-vreg-max-voltage = <1225000 0 2850000 3000000>;
+ qcom,cam-vreg-op-mode = <105000 0 80000 100000>;
+ qcom,gpio-no-mux = <0>;
+ gpios = <&msmgpio 15 0>,
+ <&msmgpio 90 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK", "CAM_RESET1";
+ qcom,gpio-set-tbl-num = <1 1>;
+ qcom,gpio-set-tbl-flags = <0 2>;
+ qcom,gpio-set-tbl-delay = <1000 30000>;
+ qcom,csi-lane-assign = <0x4320>;
+ qcom,csi-lane-mask = <0x1F>;
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,sensor-type = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ };
+
qcom,camera@6c {
compatible = "qcom,ov2720";
reg = <0x6c>;
@@ -68,7 +113,7 @@
qcom,sensor-name = "ov2720";
cam_vdig-supply = <&pm8941_l3>;
cam_vana-supply = <&pm8941_l17>;
- cam_vio-supply = <&pm8941_lvs3>;
+ cam_vio-supply = <&pm8941_lvs2>;
qcom,cam-vreg-name = "cam_vdig", "cam_vana", "cam_vio";
qcom,cam-vreg-type = <0 0 1>;
qcom,cam-vreg-min-voltage = <1225000 2850000 0>;
@@ -103,7 +148,7 @@
qcom,sensor-name = "mt9m114";
cam_vdig-supply = <&pm8941_l3>;
cam_vana-supply = <&pm8941_l17>;
- cam_vio-supply = <&pm8941_lvs3>;
+ cam_vio-supply = <&pm8941_lvs2>;
qcom,cam-vreg-name = "cam_vdig", "cam_vana", "cam_vio";
qcom,cam-vreg-type = <0 0 1>;
qcom,cam-vreg-min-voltage = <1225000 2850000 0>;
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-mtp.dtsi
index 3fb5b20..53f6e9e 100644
--- a/arch/arm/boot/dts/msm8974-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-mtp.dtsi
@@ -20,6 +20,13 @@
qcom,cci-master = <0>;
};
+ actuator1: qcom,actuator@36 {
+ cell-index = <1>;
+ reg = <0x36>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ };
+
qcom,camera@6e {
compatible = "qcom,s5k3l1yx";
reg = <0x6e>;
@@ -65,11 +72,12 @@
qcom,camera@20 {
compatible = "qcom,imx135";
reg = <0x20>;
- qcom,slave-id = <0x20 0x0 0x1210>;
+ qcom,slave-id = <0x20 0x0016 0x0135>;
qcom,csiphy-sd-index = <0>;
qcom,csid-sd-index = <0>;
qcom,mount-angle = <90>;
qcom,sensor-name = "imx135";
+ qcom,actuator-src = <&actuator1>;
cam_vdig-supply = <&pm8941_l3>;
cam_vana-supply = <&pm8941_l17>;
cam_vio-supply = <&pm8941_lvs3>;
@@ -105,7 +113,7 @@
reg = <0x6c>;
qcom,slave-id = <0x6c 0x300A 0x2720>;
qcom,csiphy-sd-index = <2>;
- qcom,csid-sd-index = <0>;
+ qcom,csid-sd-index = <2>;
qcom,mount-angle = <90>;
qcom,sensor-name = "ov2720";
cam_vdig-supply = <&pm8941_l3>;
diff --git a/arch/arm/boot/dts/msm8974-camera.dtsi b/arch/arm/boot/dts/msm8974-camera.dtsi
index 0bd303f..94a28f7 100644
--- a/arch/arm/boot/dts/msm8974-camera.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera.dtsi
@@ -23,8 +23,9 @@
qcom,csiphy@fda0ac00 {
cell-index = <0>;
compatible = "qcom,csiphy";
- reg = <0xfda0ac00 0x200>;
- reg-names = "csiphy";
+ reg = <0xfda0ac00 0x200>,
+ <0xfda00030 0x4>;
+ reg-names = "csiphy", "csiphy_clk_mux";
interrupts = <0 78 0>;
interrupt-names = "csiphy";
};
@@ -32,8 +33,9 @@
qcom,csiphy@fda0b000 {
cell-index = <1>;
compatible = "qcom,csiphy";
- reg = <0xfda0b000 0x200>;
- reg-names = "csiphy";
+ reg = <0xfda0b000 0x200>,
+ <0xfda00038 0x4>;
+ reg-names = "csiphy", "csiphy_clk_mux";
interrupts = <0 79 0>;
interrupt-names = "csiphy";
};
@@ -41,8 +43,9 @@
qcom,csiphy@fda0b400 {
cell-index = <2>;
compatible = "qcom,csiphy";
- reg = <0xfda0b400 0x200>;
- reg-names = "csiphy";
+ reg = <0xfda0b400 0x200>,
+ <0xfda00040 0x4>;
+ reg-names = "csiphy", "csiphy_clk_mux";
interrupts = <0 80 0>;
interrupt-names = "csiphy";
};
@@ -94,8 +97,9 @@
qcom,ispif@fda0A000 {
cell-index = <0>;
compatible = "qcom,ispif";
- reg = <0xfda0A000 0x500>;
- reg-names = "ispif";
+ reg = <0xfda0A000 0x500>,
+ <0xfda00020 0x10>;
+ reg-names = "ispif", "csi_clk_mux";
interrupts = <0 55 0>;
interrupt-names = "ispif";
};
@@ -104,7 +108,7 @@
cell-index = <0>;
compatible = "qcom,vfe40";
reg = <0xfda10000 0x1000>,
- <0xfda40000 0x200>;
+ <0xfda40000 0x200>;
reg-names = "vfe", "vfe_vbif";
interrupts = <0 57 0>;
interrupt-names = "vfe";
@@ -115,7 +119,7 @@
cell-index = <1>;
compatible = "qcom,vfe40";
reg = <0xfda14000 0x1000>,
- <0xfda40000 0x200>;
+ <0xfda40000 0x200>;
reg-names = "vfe", "vfe_vbif";
interrupts = <0 58 0>;
interrupt-names = "vfe";
@@ -129,7 +133,7 @@
reg-names = "jpeg";
interrupts = <0 59 0>;
interrupt-names = "jpeg";
- vdd-supply = <&gdsc_jpeg>;
+ vdd-supply = <&gdsc_jpeg>;
};
qcom,jpeg@fda20000 {
@@ -163,8 +167,8 @@
cell-index = <0>;
compatible = "qcom,cpp";
reg = <0xfda04000 0x100>,
- <0xfda40000 0x200>,
- <0xfda18000 0x008>;
+ <0xfda40000 0x200>,
+ <0xfda18000 0x008>;
reg-names = "cpp", "cpp_vbif", "cpp_hw";
interrupts = <0 49 0>;
interrupt-names = "cpp";
@@ -182,7 +186,7 @@
cell-index = <0>;
compatible = "qcom,cci";
reg = <0xfda0C000 0x1000>;
- #address-cells = <1>;
+ #address-cells = <1>;
#size-cells = <0>;
reg-names = "cci";
interrupts = <0 50 0>;
@@ -194,9 +198,9 @@
qcom,gpio-tbl-num = <0 1 2 3>;
qcom,gpio-tbl-flags = <1 1 1 1>;
qcom,gpio-tbl-label = "CCI_I2C_DATA0",
- "CCI_I2C_CLK0",
- "CCI_I2C_DATA1",
- "CCI_I2C_CLK1";
+ "CCI_I2C_CLK0",
+ "CCI_I2C_DATA1",
+ "CCI_I2C_CLK1";
qcom,hw-thigh = <78>;
qcom,hw-tlow = <114>;
qcom,hw-tsu-sto = <28>;
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index 41e3783..3c1711c 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -229,13 +229,25 @@
qcom,msm-bus,name = "hsic";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<85 512 0 0>,
<85 512 40000 160000>;
};
+ wlan0: qca,wlan {
+ compatible = "qca,ar6004-hsic";
+ qcom,msm-bus,name = "wlan";
+ qcom,msm-bus,num-cases = <5>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <85 512 0 0>,
+ <85 512 40000 160000>,
+ <85 512 40000 320000>,
+ <85 512 40000 480000>,
+ <85 512 40000 640000>;
+ };
};
&spmi_bus {
@@ -332,7 +344,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,nonremovable;
@@ -362,11 +374,31 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
status = "ok";
};
+/* The hardware team's drive strength recommendation for the clock line is 10 mA.
+ * But since the driver has been using the values below from the start
+ * without any problems, continue to use those.
+ */
+&sdcc1 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdcc2 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdhc_1 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdhc_2 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
&uart7 {
status = "ok";
qcom,tx-gpio = <&msmgpio 41 0x00>;
@@ -382,23 +414,23 @@
&pm8941_chg {
status = "ok";
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "ok";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "ok";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "ok";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
status = "ok";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index 046939e..eaf326e 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -276,7 +276,6 @@
};
&sdcc1 {
- qcom,bus-width = <4>;
status = "disabled";
};
@@ -309,7 +308,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,nonremovable;
@@ -339,11 +338,31 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
status = "ok";
};
+/* The hardware team's drive strength recommendation for the clock line is 10 mA.
+ * But since the driver has been using the values below from the start
+ * without any problems, continue to use those.
+ */
+&sdcc1 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdcc2 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdhc_1 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdhc_2 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
&usb3 {
qcom,otg-capability;
};
@@ -354,28 +373,29 @@
&pm8941_chg {
status = "ok";
+ qcom,charging-disabled;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "ok";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "ok";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
status = "ok";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "ok";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
status = "ok";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8974-gpu.dtsi b/arch/arm/boot/dts/msm8974-gpu.dtsi
index a7544ab..3779dbd 100644
--- a/arch/arm/boot/dts/msm8974-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8974-gpu.dtsi
@@ -33,7 +33,6 @@
/* Bus Scale Settings */
qcom,msm-bus,name = "grp3d";
qcom,msm-bus,num-cases = <6>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>, <89 604 0 0>,
@@ -67,14 +66,14 @@
qcom,gpu-pwrlevel@1 {
reg = <1>;
- qcom,gpu-freq = <300000000>;
+ qcom,gpu-freq = <320000000>;
qcom,bus-freq = <4>;
qcom,io-fraction = <66>;
};
qcom,gpu-pwrlevel@2 {
reg = <2>;
- qcom,gpu-freq = <300000000>;
+ qcom,gpu-freq = <320000000>;
qcom,bus-freq = <3>;
qcom,io-fraction = <66>;
};
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index 0f38e44..d9acb81 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -347,6 +347,12 @@
qcom,ext-spk-amp-gpio = <&pm8841_mpps 1 0>;
qcom,dock-plug-det-irq = <&pm8841_mpps 2 0>;
qcom,hdmi-audio-rx;
+
+ qcom,prim-auxpcm-gpio-clk = <&msmgpio 74 0>;
+ qcom,prim-auxpcm-gpio-sync = <&msmgpio 75 0>;
+ qcom,prim-auxpcm-gpio-din = <&msmgpio 76 0>;
+ qcom,prim-auxpcm-gpio-dout = <&msmgpio 77 0>;
+ qcom,prim-auxpcm-gpio-set = "prim-gpio-tert";
};
hsic_hub {
@@ -382,7 +388,6 @@
qcom,msm-bus,name = "hsic";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<85 512 0 0>,
@@ -737,25 +742,25 @@
&pm8941_chg {
status = "ok";
- qcom,chg-charging-disabled;
+ qcom,charging-disabled;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "ok";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "ok";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "ok";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
status = "ok";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "ok";
};
@@ -787,7 +792,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,nonremovable;
@@ -806,7 +811,27 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
status = "ok";
};
+
+/* The hardware team's drive strength recommendation for the clock line is 10 mA.
+ * But since the driver has been using the values below from the start
+ * without any problems, continue to use those.
+ */
+&sdcc1 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdcc2 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdhc_1 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdhc_2 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
diff --git a/arch/arm/boot/dts/msm8974-mdss.dtsi b/arch/arm/boot/dts/msm8974-mdss.dtsi
index 5c42b2c..86f8141 100644
--- a/arch/arm/boot/dts/msm8974-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8974-mdss.dtsi
@@ -49,7 +49,7 @@
<0x017C 0x0FFF0FFF>,
<0x0160 0x22222222>,
<0x0164 0x00002222>;
- qcom,mdp-settings = <0x02E0 0x000000AA>,
+ qcom,mdp-settings = <0x02E0 0x000000E9>,
<0x02E4 0x00000055>,
<0x03AC 0xC0000CCC>,
<0x03B4 0xC0000CCC>,
@@ -111,10 +111,9 @@
core-vdda-supply = <&pm8941_l12>;
core-vcc-supply = <&pm8941_s3>;
qcom,hdmi-tx-supply-names = "hpd-gdsc", "hpd-5v", "core-vdda", "core-vcc";
- qcom,hdmi-tx-supply-type = <1 1 0 0>;
qcom,hdmi-tx-min-voltage-level = <0 0 1800000 1800000>;
qcom,hdmi-tx-max-voltage-level = <0 0 1800000 1800000>;
- qcom,hdmi-tx-op-mode = <0 0 1800000 0>;
+ qcom,hdmi-tx-peak-current = <0 0 1800000 0>;
qcom,hdmi-tx-cec = <&msmgpio 31 0>;
qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index 0090dfd..ca5f663 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -283,7 +283,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,nonremovable;
@@ -313,11 +313,31 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
status = "ok";
};
+/* The hardware team's drive strength recommendation for the clock line is 10 mA.
+ * But since the driver has been using the values below from the start
+ * without any problems, continue to use those.
+ */
+&sdcc1 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdcc2 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdhc_1 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
+&sdhc_2 {
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+};
+
&usb_otg {
qcom,hsusb-otg-otg-control = <2>;
};
@@ -336,28 +356,29 @@
&pm8941_chg {
status = "ok";
+ qcom,charging-disabled;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "ok";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "ok";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
status = "ok";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "ok";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
status = "ok";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "ok";
};
@@ -368,57 +389,99 @@
&pm8941_gpios {
gpio@c000 { /* GPIO 1 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <0>; /* QPNP_PIN_PULL_UP_30 */
+ qcom,master-en = <1>;
};
gpio@c100 { /* GPIO 2 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <0>; /* QPNP_PIN_PULL_UP_30 */
+ qcom,master-en = <1>;
};
gpio@c200 { /* GPIO 3 */
- qcom,mode = <0>;
- qcom,pull = <0>;
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <0>; /* QPNP_PIN_PULL_UP_30 */
qcom,vin-sel = <2>;
- qcom,src-sel = <0>;
+ qcom,src-sel = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,master-en = <1>;
};
gpio@c300 { /* GPIO 4 */
- qcom,mode = <0>;
- qcom,pull = <0>;
- qcom,vin-sel = <2>;
- qcom,src-sel = <0>;
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <0>; /* QPNP_PIN_PULL_UP_30 */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,src-sel = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,master-en = <1>;
};
gpio@c400 { /* GPIO 5 */
- qcom,mode = <0>;
- qcom,pull = <0>;
- qcom,vin-sel = <2>;
- qcom,src-sel = <0>;
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <0>; /* QPNP_PIN_PULL_UP_30 */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,src-sel = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,master-en = <1>;
};
gpio@c500 { /* GPIO 6 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <0>; /* QPNP_PIN_PULL_UP_30 */
+ qcom,master-en = <1>;
};
gpio@c600 { /* GPIO 7 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <0>; /* QPNP_PIN_PULL_UP_30 */
+ qcom,master-en = <1>;
};
gpio@c700 { /* GPIO 8 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <0>; /* QPNP_PIN_PULL_UP_30 */
+ qcom,master-en = <1>;
};
gpio@c800 { /* GPIO 9 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@c900 { /* GPIO 10 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@ca00 { /* GPIO 11 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@cb00 { /* GPIO 12 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@cc00 { /* GPIO 13 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@cd00 { /* GPIO 14 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@ce00 { /* GPIO 15 */
@@ -437,7 +500,7 @@
qcom,pull = <5>;
qcom,vin-sel = <2>;
qcom,out-strength = <3>;
- qcom,src-sel = <2>;
+ qcom,src-sel = <3>; /* QPNP_PIN_SEL_FUNC_2 */
qcom,master-en = <1>;
};
@@ -452,60 +515,102 @@
qcom,output-type = <0>; /* QPNP_PIN_OUT_BUF_CMOS */
qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
- qcom,out-strength = <2>; /* QPNP_PIN_OUT_STRENGTH_MED */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
qcom,src-sel = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
qcom,master-en = <1>;
};
gpio@d300 { /* GPIO 20 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@d400 { /* GPIO 21 */
};
gpio@d500 { /* GPIO 22 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <4>; /* QPNP_PIN_PULL_DN */
+ qcom,master-en = <1>;
};
gpio@d600 { /* GPIO 23 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@d700 { /* GPIO 24 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@d800 { /* GPIO 25 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@d900 { /* GPIO 26 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@da00 { /* GPIO 27 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <4>; /* QPNP_PIN_PULL_DN */
+ qcom,master-en = <1>;
};
gpio@db00 { /* GPIO 28 */
};
gpio@dc00 { /* GPIO 29 */
- qcom,pull = <0>; /* set to default pull */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
qcom,master-en = <1>;
- qcom,vin-sel = <2>; /* select 1.8 V source */
};
gpio@dd00 { /* GPIO 30 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <4>; /* QPNP_PIN_PULL_DN */
+ qcom,master-en = <1>;
};
gpio@de00 { /* GPIO 31 */
};
gpio@df00 { /* GPIO 32 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <4>; /* QPNP_PIN_PULL_DN */
+ qcom,master-en = <1>;
};
gpio@e000 { /* GPIO 33 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <4>; /* QPNP_PIN_PULL_DN */
+ qcom,master-en = <1>;
};
gpio@e100 { /* GPIO 34 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <4>; /* QPNP_PIN_PULL_DN */
+ qcom,master-en = <1>;
};
gpio@e200 { /* GPIO 35 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <4>; /* QPNP_PIN_PULL_DN */
+ qcom,master-en = <1>;
};
gpio@e300 { /* GPIO 36 */
@@ -519,6 +624,9 @@
};
mpp@a100 { /* MPP 2 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,master-en = <1>;
};
mpp@a200 { /* MPP 3 */
@@ -533,6 +641,7 @@
qcom,output-type = <0>; /* CMOS */
qcom,vin-sel = <2>; /* PM8941_S3 1.8V > 1.6V */
qcom,src-sel = <0>; /* CONSTANT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
qcom,master-en = <1>; /* ENABLE MPP */
};
@@ -542,13 +651,20 @@
qcom,output-type = <0>; /* CMOS */
qcom,vin-sel = <2>; /* PM8941_S3 1.8V > 1.6V */
qcom,src-sel = <0>; /* CONSTANT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
qcom,master-en = <1>; /* ENABLE MPP */
};
mpp@a600 { /* MPP 7 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,master-en = <1>;
};
mpp@a700 { /* MPP 8 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,out-strength = <1>; /* QPNP_PIN_OUT_STRENGTH_LOW */
+ qcom,master-en = <1>;
};
};
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 05451671..d1b3334 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -26,15 +26,33 @@
pm8941_mvs1: regulator@8300 {
parent-supply = <&pm8941_boost>;
- qcom,enable-time = <200>;
+ qcom,enable-time = <1000>;
qcom,pull-down-enable = <1>;
+ interrupts = <0x1 0x83 0x2>;
+ interrupt-names = "ocp";
+ qcom,ocp-enable = <1>;
+ qcom,ocp-max-retries = <10>;
+ qcom,ocp-retry-delay = <30>;
+ qcom,soft-start-enable = <1>;
+ qcom,vs-soft-start-strength = <0>;
+ qcom,hpm-enable = <1>;
+ qcom,auto-mode-enable = <0>;
status = "okay";
};
pm8941_mvs2: regulator@8400 {
parent-supply = <&pm8941_boost>;
- qcom,enable-time = <200>;
+ qcom,enable-time = <1000>;
qcom,pull-down-enable = <1>;
+ interrupts = <0x1 0x84 0x2>;
+ interrupt-names = "ocp";
+ qcom,ocp-enable = <1>;
+ qcom,ocp-max-retries = <10>;
+ qcom,ocp-retry-delay = <30>;
+ qcom,soft-start-enable = <1>;
+ qcom,vs-soft-start-strength = <0>;
+ qcom,hpm-enable = <1>;
+ qcom,auto-mode-enable = <0>;
status = "okay";
};
};
@@ -448,6 +466,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges;
+ qcom,pfm-threshold = <73>;
krait0_vreg: regulator@f9088000 {
compatible = "qcom,krait-regulator";
diff --git a/arch/arm/boot/dts/msm8974-smp2p.dtsi b/arch/arm/boot/dts/msm8974-smp2p.dtsi
index 91029e2..079e4ca 100644
--- a/arch/arm/boot/dts/msm8974-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm8974-smp2p.dtsi
@@ -148,6 +148,29 @@
gpios = <&smp2pgpio_smp2p_2_out 0 0>;
};
+ /* SMP2P SSR Driver for inbound entry from lpass. */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* SMP2P SSR Driver for outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
smp2pgpio_smp2p_4_in: qcom,smp2pgpio-smp2p-4-in {
compatible = "qcom,smp2pgpio";
qcom,entry-name = "smp2p";
diff --git a/arch/arm/boot/dts/msm8974-v1-cdp.dts b/arch/arm/boot/dts/msm8974-v1-cdp.dts
index 33bd1fb..cb58026 100644
--- a/arch/arm/boot/dts/msm8974-v1-cdp.dts
+++ b/arch/arm/boot/dts/msm8974-v1-cdp.dts
@@ -17,7 +17,7 @@
/ {
model = "Qualcomm MSM 8974 CDP";
- compatible = "qcom,msm8974-cdp", "qcom,msm8974";
+ compatible = "qcom,msm8974-cdp", "qcom,msm8974", "qcom,cdp";
qcom,msm-id = <126 1 0>;
};
@@ -25,3 +25,7 @@
status = "ok";
vbus-supply = <&usb2_otg_sw>;
};
+
+&hsic_host {
+ qcom,phy-sof-workaround;
+};
diff --git a/arch/arm/boot/dts/msm8974-v1-fluid.dts b/arch/arm/boot/dts/msm8974-v1-fluid.dts
index 9fb287c..8ab24df 100644
--- a/arch/arm/boot/dts/msm8974-v1-fluid.dts
+++ b/arch/arm/boot/dts/msm8974-v1-fluid.dts
@@ -17,11 +17,15 @@
/ {
model = "Qualcomm MSM 8974 FLUID";
- compatible = "qcom,msm8974-fluid", "qcom,msm8974";
+ compatible = "qcom,msm8974-fluid", "qcom,msm8974", "qcom,fluid";
qcom,msm-id = <126 3 0>;
};
&pm8941_chg {
- qcom,chg-charging-disabled;
+ qcom,charging-disabled;
+};
+
+&sdcc1 {
+ qcom,bus-width = <4>;
};
diff --git a/arch/arm/boot/dts/msm8974-v1-liquid.dts b/arch/arm/boot/dts/msm8974-v1-liquid.dts
index 5c12569..ccbd82f 100644
--- a/arch/arm/boot/dts/msm8974-v1-liquid.dts
+++ b/arch/arm/boot/dts/msm8974-v1-liquid.dts
@@ -17,6 +17,6 @@
/ {
model = "Qualcomm MSM 8974 LIQUID";
- compatible = "qcom,msm8974-liquid", "qcom,msm8974";
+ compatible = "qcom,msm8974-liquid", "qcom,msm8974", "qcom,liquid";
qcom,msm-id = <126 9 0>;
};
diff --git a/arch/arm/boot/dts/msm8974-v1-mtp.dts b/arch/arm/boot/dts/msm8974-v1-mtp.dts
index 205ee24..09ea84b 100644
--- a/arch/arm/boot/dts/msm8974-v1-mtp.dts
+++ b/arch/arm/boot/dts/msm8974-v1-mtp.dts
@@ -17,10 +17,10 @@
/ {
model = "Qualcomm MSM 8974 MTP";
- compatible = "qcom,msm8974-mtp", "qcom,msm8974";
+ compatible = "qcom,msm8974-mtp", "qcom,msm8974", "qcom,mtp";
qcom,msm-id = <126 8 0>;
};
&pm8941_chg {
- qcom,chg-charging-disabled;
+ qcom,charging-disabled;
};
diff --git a/arch/arm/boot/dts/msm8974-v1-pm.dtsi b/arch/arm/boot/dts/msm8974-v1-pm.dtsi
index a0b9be6..f9c0920 100644
--- a/arch/arm/boot/dts/msm8974-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1-pm.dtsi
@@ -142,7 +142,7 @@
qcom,type = <0x62706d73>; /* "smpb" */
qcom,id = <0x02>;
qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value = <5>; /* Super Turbo */
+ qcom,init-value = <6>; /* Super Turbo */
};
qcom,lpm-resources@1 {
@@ -188,7 +188,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <1>;
qcom,ss-power = <784>;
qcom,energy-overhead = <190000>;
@@ -205,7 +205,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <75>;
qcom,ss-power = <735>;
qcom,energy-overhead = <77341>;
@@ -223,7 +223,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <95>;
qcom,ss-power = <725>;
qcom,energy-overhead = <99500>;
@@ -240,7 +240,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <2000>;
qcom,ss-power = <138>;
qcom,energy-overhead = <1208400>;
@@ -257,7 +257,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <3000>;
qcom,ss-power = <110>;
qcom,energy-overhead = <1250300>;
@@ -446,9 +446,9 @@
qcom,offset-page-indices = <56>;
};
- qcom,rpm-stats@0xfc19dbd0{
+ qcom,rpm-stats@fc19dba0 {
compatible = "qcom,rpm-stats";
- reg = <0xfc19dbd0 0x1000>;
+ reg = <0xfc19dba0 0x1000>;
reg-names = "phys_addr_base";
qcom,sleep-stats-version = <2>;
};
diff --git a/arch/arm/boot/dts/msm8974-v1-rumi.dts b/arch/arm/boot/dts/msm8974-v1-rumi.dts
index ebb37b7..caf89ee 100644
--- a/arch/arm/boot/dts/msm8974-v1-rumi.dts
+++ b/arch/arm/boot/dts/msm8974-v1-rumi.dts
@@ -17,6 +17,6 @@
/ {
model = "Qualcomm MSM 8974 RUMI";
- compatible = "qcom,msm8974-rumi", "qcom,msm8974";
+ compatible = "qcom,msm8974-rumi", "qcom,msm8974", "qcom,rumi";
qcom,msm-id = <126 15 0>;
};
diff --git a/arch/arm/boot/dts/msm8974-v1-sim.dts b/arch/arm/boot/dts/msm8974-v1-sim.dts
index 29add5d..c4b29c2 100644
--- a/arch/arm/boot/dts/msm8974-v1-sim.dts
+++ b/arch/arm/boot/dts/msm8974-v1-sim.dts
@@ -17,6 +17,6 @@
/ {
model = "Qualcomm MSM 8974 Simulator";
- compatible = "qcom,msm8974-sim", "qcom,msm8974";
+ compatible = "qcom,msm8974-sim", "qcom,msm8974", "qcom,sim";
qcom,msm-id = <126 16 0>;
};
diff --git a/arch/arm/boot/dts/msm8974-v1.dtsi b/arch/arm/boot/dts/msm8974-v1.dtsi
index ae8cf83..62837a1 100644
--- a/arch/arm/boot/dts/msm8974-v1.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1.dtsi
@@ -128,3 +128,7 @@
&ldrex_spinlock {
status = "ok";
};
+
+&usb_otg {
+ qcom,hsusb-otg-pnoc-errata-fix;
+};
diff --git a/arch/arm/boot/dts/msm8974-v2-cdp.dts b/arch/arm/boot/dts/msm8974-v2-cdp.dts
index d90abb5..4fa1f2a 100644
--- a/arch/arm/boot/dts/msm8974-v2-cdp.dts
+++ b/arch/arm/boot/dts/msm8974-v2-cdp.dts
@@ -17,7 +17,7 @@
/ {
model = "Qualcomm MSM 8974v2 CDP";
- compatible = "qcom,msm8974-cdp", "qcom,msm8974";
+ compatible = "qcom,msm8974-cdp", "qcom,msm8974", "qcom,cdp";
qcom,msm-id = <126 1 0x20000>;
};
diff --git a/arch/arm/boot/dts/msm8974-v2-fluid.dts b/arch/arm/boot/dts/msm8974-v2-fluid.dts
index 0a09db1..c5779b1 100644
--- a/arch/arm/boot/dts/msm8974-v2-fluid.dts
+++ b/arch/arm/boot/dts/msm8974-v2-fluid.dts
@@ -17,7 +17,7 @@
/ {
model = "Qualcomm MSM 8974v2 FLUID";
- compatible = "qcom,msm8974-fluid", "qcom,msm8974";
+ compatible = "qcom,msm8974-fluid", "qcom,msm8974", "qcom,fluid";
qcom,msm-id = <126 3 0x20000>;
};
diff --git a/arch/arm/boot/dts/msm8974-v2-liquid.dts b/arch/arm/boot/dts/msm8974-v2-liquid.dts
index bbd5071..7132f43 100644
--- a/arch/arm/boot/dts/msm8974-v2-liquid.dts
+++ b/arch/arm/boot/dts/msm8974-v2-liquid.dts
@@ -17,7 +17,7 @@
/ {
model = "Qualcomm MSM 8974v2 LIQUID";
- compatible = "qcom,msm8974-liquid", "qcom,msm8974";
+ compatible = "qcom,msm8974-liquid", "qcom,msm8974", "qcom,liquid";
qcom,msm-id = <126 9 0x20000>;
};
diff --git a/arch/arm/boot/dts/msm8974-v2-mtp.dts b/arch/arm/boot/dts/msm8974-v2-mtp.dts
index e74651e..d38e663 100644
--- a/arch/arm/boot/dts/msm8974-v2-mtp.dts
+++ b/arch/arm/boot/dts/msm8974-v2-mtp.dts
@@ -17,7 +17,7 @@
/ {
model = "Qualcomm MSM 8974v2 MTP";
- compatible = "qcom,msm8974-mtp", "qcom,msm8974";
+ compatible = "qcom,msm8974-mtp", "qcom,msm8974", "qcom,mtp";
qcom,msm-id = <126 8 0x20000>;
};
diff --git a/arch/arm/boot/dts/msm8974-v2-pm.dtsi b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
index 24b68b5..5a1c047 100644
--- a/arch/arm/boot/dts/msm8974-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
@@ -142,7 +142,7 @@
qcom,type = <0x62706d73>; /* "smpb" */
qcom,id = <0x02>;
qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value = <5>; /* Super Turbo */
+ qcom,init-value = <6>; /* Super Turbo */
};
qcom,lpm-resources@1 {
@@ -188,7 +188,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <1>;
qcom,ss-power = <784>;
qcom,energy-overhead = <190000>;
@@ -205,7 +205,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <75>;
qcom,ss-power = <735>;
qcom,energy-overhead = <77341>;
@@ -223,7 +223,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <95>;
qcom,ss-power = <725>;
qcom,energy-overhead = <99500>;
@@ -240,7 +240,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <2000>;
qcom,ss-power = <138>;
qcom,energy-overhead = <1208400>;
@@ -257,7 +257,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <3000>;
qcom,ss-power = <110>;
qcom,energy-overhead = <1250300>;
@@ -446,9 +446,9 @@
qcom,offset-page-indices = <56>;
};
- qcom,rpm-stats@0xfc19dbd0{
+ qcom,rpm-stats@fc19dba0 {
compatible = "qcom,rpm-stats";
- reg = <0xfc19dbd0 0x1000>;
+ reg = <0xfc19dba0 0x1000>;
reg-names = "phys_addr_base";
qcom,sleep-stats-version = <2>;
};
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index 61f2c4f..494b12c 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -48,23 +48,30 @@
/* Nominal / SVS */
<26 512 0 4656000>, <89 604 0 3000000>,
/* Nominal */
- <26 512 0 4656000>, <89 604 0 5334880>,
+ <26 512 0 4656000>, <89 604 0 5120000>,
/* Turbo / Nominal */
- <26 512 0 7464000>, <89 604 0 5334880>,
+ <26 512 0 7464000>, <89 604 0 5120000>,
/* Turbo */
<26 512 0 7464000>, <89 604 0 6400000>;
};
&mdss_mdp {
qcom,vbif-settings = <0x0004 0x00000001>;
- qcom,mdp-settings = <0x02E0 0x000000A9>,
- <0x02E4 0x00000055>;
qcom,mdss-wb-off = <0x00011100 0x00011500
0x00011900 0x00011D00 0x00012100>;
qcom,mdss-intf-off = <0x00012500 0x00012700
0x00012900 0x00012b00>;
qcom,mdss-pingpong-off = <0x00012D00 0x00012E00 0x00012F00>;
+ qcom,mdss-has-bwc;
+ qcom,mdss-has-decimation;
+};
+
+&mdss_hdmi_tx {
+ reg = <0xfd922100 0x370>,
+ <0xfd922500 0x7C>,
+ <0xfc4b8000 0x60F0>;
+ reg-names = "core_physical", "phy_physical", "qfprom_physical";
};
&msm_vidc {
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 22c5d05..f787cf5 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -38,7 +38,7 @@
secure_mem: secure_region {
linux,contiguous-region;
- reg = <0 0x7800000>;
+ reg = <0 0xFC00000>;
label = "secure_mem";
};
@@ -84,6 +84,66 @@
clock-frequency = <19200000>;
};
+ timer@f9020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xf9020000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@f9021000 {
+ frame-number = <0>;
+ interrupts = <0 8 0x4>,
+ <0 7 0x4>;
+ reg = <0xf9021000 0x1000>,
+ <0xf9022000 0x1000>;
+ };
+
+ frame@f9023000 {
+ frame-number = <1>;
+ interrupts = <0 9 0x4>;
+ reg = <0xf9023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9024000 {
+ frame-number = <2>;
+ interrupts = <0 10 0x4>;
+ reg = <0xf9024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9025000 {
+ frame-number = <3>;
+ interrupts = <0 11 0x4>;
+ reg = <0xf9025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9026000 {
+ frame-number = <4>;
+ interrupts = <0 12 0x4>;
+ reg = <0xf9026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9027000 {
+ frame-number = <5>;
+ interrupts = <0 13 0x4>;
+ reg = <0xf9027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9028000 {
+ frame-number = <6>;
+ interrupts = <0 14 0x4>;
+ reg = <0xf9028000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+
qcom,mpm2-sleep-counter@fc4a3000 {
compatible = "qcom,mpm2-sleep-counter";
reg = <0xfc4a3000 0x1000>;
@@ -102,6 +162,7 @@
qcom,vidc {
compatible = "qcom,msm-vidc";
qcom,hfi = "q6";
+ qcom,max-hw-load = <108000>; /* 720p @ 30 */
};
qcom,wfd {
@@ -130,7 +191,6 @@
qcom,msm-bus,name = "serial_uart2";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<84 512 0 0>,
@@ -144,20 +204,19 @@
reg = <0xf9a55000 0x400>;
interrupts = <0 134 0 0 140 0>;
interrupt-names = "core_irq", "async_irq";
- HSUSB_VDDCX-supply = <&pm8841_s2>;
+ HSUSB_VDDCX-supply = <&pm8841_s2_corner>;
HSUSB_1p8-supply = <&pm8941_l6>;
HSUSB_3p3-supply = <&pm8941_l24>;
+ qcom,vdd-voltage-level = <1 5 7>;
qcom,hsusb-otg-phy-type = <2>;
qcom,hsusb-otg-phy-init-seq = <0x63 0x81 0xffffffff>;
qcom,hsusb-otg-mode = <1>;
qcom,hsusb-otg-otg-control = <1>;
qcom,hsusb-otg-disable-reset;
- qcom,hsusb-otg-pnoc-errata-fix;
qcom,msm-bus,name = "usb2";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<87 512 0 0>,
@@ -187,7 +246,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 200000000>;
@@ -198,7 +257,6 @@
qcom,msm-bus,name = "sdcc1";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
<78 512 1600 3200>, /* 400 KB/s*/
@@ -233,7 +291,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 200000000>;
@@ -245,7 +303,6 @@
qcom,msm-bus,name = "sdcc2";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
<81 512 1600 3200>, /* 400 KB/s*/
@@ -292,7 +349,6 @@
qcom,msm-bus,name = "sdcc3";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <79 512 0 0>, /* No vote */
<79 512 1600 3200>, /* 400 KB/s*/
@@ -338,7 +394,6 @@
qcom,msm-bus,name = "sdcc4";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <80 512 0 0>, /* No vote */
<80 512 1600 3200>, /* 400 KB/s*/
@@ -365,7 +420,6 @@
qcom,msm-bus,name = "sdhc1";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
<78 512 1600 3200>, /* 400 KB/s*/
@@ -392,7 +446,6 @@
qcom,msm-bus,name = "sdhc2";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
<81 512 1600 3200>, /* 400 KB/s*/
@@ -426,7 +479,6 @@
qcom,msm-bus,name = "sdhc3";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <79 512 0 0>, /* No vote */
<79 512 1600 3200>, /* 400 KB/s*/
@@ -460,7 +512,6 @@
qcom,msm-bus,name = "sdhc4";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <80 512 0 0>, /* No vote */
<80 512 1600 3200>, /* 400 KB/s*/
@@ -582,6 +633,14 @@
qcom,cdc-vddcx-2-voltage = <1225000 1225000>;
qcom,cdc-vddcx-2-current = <10000>;
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1",
+ "cdc-vdd-a-1p2v",
+ "cdc-vddcx-1",
+ "cdc-vddcx-2";
+
qcom,cdc-micbias-ldoh-v = <0x3>;
qcom,cdc-micbias-cfilt1-mv = <1800>;
qcom,cdc-micbias-cfilt2-mv = <2700>;
@@ -627,12 +686,16 @@
"MIC BIAS4 External", "Digital Mic6";
qcom,cdc-mclk-gpios = <&pm8941_gpios 15 0>;
- taiko-mclk-clk = <&pm8941_clkdiv1>;
qcom,taiko-mclk-clk-freq = <9600000>;
- prim-auxpcm-gpio-clk = <&msmgpio 65 0>;
- prim-auxpcm-gpio-sync = <&msmgpio 66 0>;
- prim-auxpcm-gpio-din = <&msmgpio 67 0>;
- prim-auxpcm-gpio-dout = <&msmgpio 68 0>;
+ qcom,prim-auxpcm-gpio-clk = <&msmgpio 65 0>;
+ qcom,prim-auxpcm-gpio-sync = <&msmgpio 66 0>;
+ qcom,prim-auxpcm-gpio-din = <&msmgpio 67 0>;
+ qcom,prim-auxpcm-gpio-dout = <&msmgpio 68 0>;
+ qcom,prim-auxpcm-gpio-set = "prim-gpio-prim";
+ qcom,sec-auxpcm-gpio-clk = <&msmgpio 79 0>;
+ qcom,sec-auxpcm-gpio-sync = <&msmgpio 80 0>;
+ qcom,sec-auxpcm-gpio-din = <&msmgpio 81 0>;
+ qcom,sec-auxpcm-gpio-dout = <&msmgpio 82 0>;
};
spmi_bus: qcom,spmi@fc4c0000 {
@@ -730,7 +793,6 @@
qcom,msm-bus,name = "usb3";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<61 512 0 0>,
@@ -772,6 +834,13 @@
interrupts = <0 162 1>;
qcom,firmware-name = "adsp";
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
};
qcom,msm-adsp-loader {
@@ -779,6 +848,10 @@
qcom,adsp-state = <0>;
};
+ qcom,msm-audio-ion {
+ compatible = "qcom,msm-audio-ion";
+ };
+
qcom,msm-pcm {
compatible = "qcom,msm-pcm-dsp";
qcom,msm-pcm-dsp-id = <0>;
@@ -955,15 +1028,25 @@
qcom,msm-cpudai-auxpcm-data = <0>, <0>;
qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
- qcom,msm-auxpcm-rx {
+ qcom,msm-prim-auxpcm-rx {
qcom,msm-auxpcm-dev-id = <4106>;
compatible = "qcom,msm-auxpcm-dev";
};
- qcom,msm-auxpcm-tx {
+ qcom,msm-prim-auxpcm-tx {
qcom,msm-auxpcm-dev-id = <4107>;
compatible = "qcom,msm-auxpcm-dev";
};
+
+ qcom,msm-sec-auxpcm-rx {
+ qcom,msm-auxpcm-dev-id = <4108>;
+ compatible = "qcom,msm-auxpcm-dev";
+ };
+
+ qcom,msm-sec-auxpcm-tx {
+ qcom,msm-auxpcm-dev-id = <4109>;
+ compatible = "qcom,msm-auxpcm-dev";
+ };
};
qcom,msm-dai-mi2s {
@@ -984,7 +1067,6 @@
compatible = "qcom,msm-ocmem-audio";
qcom,msm-bus,name = "audio-ocmem";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<11 604 0 0>,
@@ -1018,8 +1100,9 @@
qcom,firmware-name = "mba";
qcom,pil-self-auth;
- /* GPIO input from mss */
+ /* GPIO inputs from mss */
qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
/* GPIO output to mss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
@@ -1036,8 +1119,9 @@
qcom,firmware-name = "wcnss";
- /* GPIO input from wcnss */
+ /* GPIO inputs from wcnss */
qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_4_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_4_in 2 0>;
/* GPIO output to wcnss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_4_out 0 0>;
@@ -1049,8 +1133,9 @@
qcom,wcnss-wlan@fb000000 {
compatible = "qcom,wcnss_wlan";
- reg = <0xfb000000 0x280000>;
- reg-names = "wcnss_mmio";
+ reg = <0xfb000000 0x280000>,
+ <0xf9011008 0x04>;
+ reg-names = "wcnss_mmio", "wcnss_fiq";
interrupts = <0 145 0 0 146 0>;
interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
@@ -1114,7 +1199,7 @@
reg = <0xf9bff000 0x200>;
};
- qcom,qseecom@fe806000 {
+ qseecom: qcom,qseecom@7f00000 {
compatible = "qcom,qseecom";
reg = <0x7f00000 0x500000>;
reg-names = "secapp-region";
@@ -1123,7 +1208,6 @@
qcom,qsee-ce-hw-instance = <0>;
qcom,msm-bus,name = "qseecom-noc";
qcom,msm-bus,num-cases = <4>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<55 512 0 0>,
@@ -1135,7 +1219,7 @@
qcom,wdt@f9017000 {
compatible = "qcom,msm-watchdog";
reg = <0xf9017000 0x1000>;
- interrupts = <0 3 0 0 4 0>;
+ interrupts = <0 3 0>, <0 4 0>;
qcom,bark-time = <11000>;
qcom,pet-time = <10000>;
qcom,ipi-ping;
@@ -1156,7 +1240,27 @@
qcom,firmware-name = "venus";
};
- qcom,cache_erp {
+ qcom,cache_erp@f9012000 {
+ reg = <0xf9012000 0x80>,
+ <0xf9089000 0x80>,
+ <0xf9099000 0x80>,
+ <0xf90a9000 0x80>,
+ <0xf90b9000 0x80>,
+ <0xf9088000 0x40>,
+ <0xf9098000 0x40>,
+ <0xf90a8000 0x40>,
+ <0xf90b8000 0x40>;
+
+ reg-names = "l2_saw",
+ "krait0_saw",
+ "krait1_saw",
+ "krait2_saw",
+ "krait3_saw",
+ "krait0_acs",
+ "krait1_acs",
+ "krait2_acs",
+ "krait3_acs";
+
compatible = "qcom,cache_erp";
interrupts = <1 9 0>, <0 2 0>;
interrupt-names = "l1_irq", "l2_irq";
@@ -1204,7 +1308,6 @@
qcom,ce-hw-instance = <1>;
qcom,msm-bus,name = "qcedev-noc";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<56 512 0 0>,
@@ -1221,7 +1324,6 @@
qcom,ce-hw-instance = <1>;
qcom,msm-bus,name = "qcrypto-noc";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<56 512 0 0>,
@@ -1284,6 +1386,29 @@
qcom,limit-temp = <60>;
qcom,temp-hysteresis = <10>;
qcom,freq-step = <2>;
+ qcom,core-limit-temp = <80>;
+ qcom,core-temp-hysteresis = <10>;
+ qcom,core-control-mask = <0xe>;
+ qcom,vdd-restriction-temp = <5>;
+ qcom,vdd-restriction-temp-hysteresis = <10>;
+ qcom,pmic-sw-mode-temp = <85>;
+ qcom,pmic-sw-mode-temp-hysteresis = <75>;
+ qcom,pmic-sw-mode-regs = "vdd_dig";
+ vdd_dig-supply = <&pm8841_s2_floor_corner>;
+ vdd_gfx-supply = <&pm8841_s4_floor_corner>;
+
+ qcom,vdd-dig-rstr{
+ qcom,vdd-rstr-reg = "vdd_dig";
+ qcom,levels = <5 7 7>; /* Nominal, Super Turbo, Super Turbo */
+ qcom,min-level = <1>; /* No Request */
+ };
+
+ qcom,vdd-gfx-rstr{
+ qcom,vdd-rstr-reg = "vdd_gfx";
+ qcom,levels = <5 7 7>; /* Nominal, Super Turbo, Super Turbo */
+ qcom,min-level = <1>; /* No Request */
+ };
+
};
qcom,bam_dmux@fc834000 {
@@ -1293,7 +1418,7 @@
qcom,rx-ring-size = <64>;
};
- qcom,msm-mem-hole {
+ memory_hole: qcom,msm-mem-hole {
compatible = "qcom,msm-mem-hole";
qcom,memblock-remove = <0x7f00000 0x8000000>; /* Address and Size of Hole */
};
@@ -1311,7 +1436,6 @@
qcom,bam-rx-ep-pipe-index = <1>;
qcom,msm-bus,name = "uart7";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<84 512 0 0>,
@@ -1436,6 +1560,7 @@
};
&gdsc_oxili_gx {
+ qcom,retain-mems;
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm9625-v2-1-mtp.dts b/arch/arm/boot/dts/msm9625-cdp.dtsi
similarity index 80%
rename from arch/arm/boot/dts/msm9625-v2-1-mtp.dts
rename to arch/arm/boot/dts/msm9625-cdp.dtsi
index 2dc040c..1f9cbb0 100644
--- a/arch/arm/boot/dts/msm9625-v2-1-mtp.dts
+++ b/arch/arm/boot/dts/msm9625-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,17 +10,10 @@
* GNU General Public License for more details.
*/
-/dts-v1/;
-
-/include/ "msm9625-v2-1.dtsi"
+/include/ "msm9625-display.dtsi"
+/include/ "qpic-panel-ili-qvga.dtsi"
/ {
- model = "Qualcomm MSM 9625V2.1 MTP";
- compatible = "qcom,msm9625-mtp", "qcom,msm9625";
- qcom,msm-id = <134 7 0x20001>, <152 7 0x20001>, <149 7 0x20001>,
- <150 7 0x20001>, <151 7 0x20001>, <148 7 0x20001>,
- <173 7 0x20001>, <174 7 0x20001>, <175 7 0x20001>;
-
i2c@f9925000 {
charger@57 {
compatible = "summit,smb137c";
@@ -42,10 +35,18 @@
wlan0: qca,wlan {
cell-index = <0>;
- compatible = "qca,ar6004-sdio";
+ compatible = "qca,ar6004-hsic";
qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,ar6004-vdd-io-supply = <&pm8019_l11>;
+ qca,vdd-io-supply = <&pm8019_l11>;
+ };
+
+ qca,wlan_ar6003 {
+ cell-index = <0>;
+ compatible = "qca,ar6003-sdio";
+ qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
+ qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
+ qca,vdd-io-supply = <&pm8019_l11>;
};
};
diff --git a/arch/arm/boot/dts/msm9625-v2-1-mtp.dts b/arch/arm/boot/dts/msm9625-mtp.dtsi
similarity index 73%
copy from arch/arm/boot/dts/msm9625-v2-1-mtp.dts
copy to arch/arm/boot/dts/msm9625-mtp.dtsi
index 2dc040c..cc0bf5e 100644
--- a/arch/arm/boot/dts/msm9625-v2-1-mtp.dts
+++ b/arch/arm/boot/dts/msm9625-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,17 +10,7 @@
* GNU General Public License for more details.
*/
-/dts-v1/;
-
-/include/ "msm9625-v2-1.dtsi"
-
/ {
- model = "Qualcomm MSM 9625V2.1 MTP";
- compatible = "qcom,msm9625-mtp", "qcom,msm9625";
- qcom,msm-id = <134 7 0x20001>, <152 7 0x20001>, <149 7 0x20001>,
- <150 7 0x20001>, <151 7 0x20001>, <148 7 0x20001>,
- <173 7 0x20001>, <174 7 0x20001>, <175 7 0x20001>;
-
i2c@f9925000 {
charger@57 {
compatible = "summit,smb137c";
@@ -42,10 +32,18 @@
wlan0: qca,wlan {
cell-index = <0>;
- compatible = "qca,ar6004-sdio";
+ compatible = "qca,ar6004-hsic";
qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,ar6004-vdd-io-supply = <&pm8019_l11>;
+ qca,vdd-io-supply = <&pm8019_l11>;
+ };
+
+ qca,wlan_ar6003 {
+ cell-index = <0>;
+ compatible = "qca,ar6003-sdio";
+ qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
+ qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
+ qca,vdd-io-supply = <&pm8019_l11>;
};
};
@@ -89,11 +87,23 @@
};
mpp@a300 { /* MPP 4 */
+ /* VADC channel 19 */
+ qcom,mode = <4>;
+ qcom,ain-route = <3>; /* AMUX 8 */
+ qcom,master-en = <1>;
+ qcom,src-sel = <0>; /* Function constant */
+ qcom,invert = <1>;
};
mpp@a400 { /* MPP 5 */
};
mpp@a500 { /* MPP 6 */
+ /* channel 21 */
+ qcom,mode = <4>;
+ qcom,ain-route = <1>; /* AMUX 6 */
+ qcom,master-en = <1>;
+ qcom,src-sel = <0>; /* Function constant */
+ qcom,invert = <1>;
};
};
diff --git a/arch/arm/boot/dts/msm9625-pm.dtsi b/arch/arm/boot/dts/msm9625-pm.dtsi
index 51a3faa..3e421a8 100644
--- a/arch/arm/boot/dts/msm9625-pm.dtsi
+++ b/arch/arm/boot/dts/msm9625-pm.dtsi
@@ -80,7 +80,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <100>;
qcom,ss-power = <8000>;
qcom,energy-overhead = <100000>;
@@ -97,7 +97,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <2000>;
qcom,ss-power = <5000>;
qcom,energy-overhead = <60100000>;
@@ -114,7 +114,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <3500>;
qcom,ss-power = <5000>;
qcom,energy-overhead = <60350000>;
@@ -131,7 +131,7 @@
qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <4500>;
qcom,ss-power = <5000>;
qcom,energy-overhead = <60350000>;
@@ -289,9 +289,9 @@
qcom,offset-page-indices = <56>;
};
- qcom,rpm-stats@fc19dbd0 {
+ qcom,rpm-stats@fc19dba0 {
compatible = "qcom,rpm-stats";
- reg = <0xfc19dbd0 0x1000>;
+ reg = <0xfc19dba0 0x1000>;
reg-names = "phys_addr_base";
qcom,sleep-stats-version = <2>;
};
diff --git a/arch/arm/boot/dts/msm9625-v1-cdp.dts b/arch/arm/boot/dts/msm9625-v1-cdp.dts
index cc7a758..d7537eb 100644
--- a/arch/arm/boot/dts/msm9625-v1-cdp.dts
+++ b/arch/arm/boot/dts/msm9625-v1-cdp.dts
@@ -13,95 +13,12 @@
/dts-v1/;
/include/ "msm9625-v1.dtsi"
+/include/ "msm9625-cdp.dtsi"
/ {
model = "Qualcomm MSM 9625V1 CDP";
- compatible = "qcom,msm9625-cdp", "qcom,msm9625";
+ compatible = "qcom,msm9625-cdp", "qcom,msm9625", "qcom,cdp";
qcom,msm-id = <134 1 0>, <152 1 0>, <149 1 0>, <150 1 0>,
<151 1 0>, <148 1 0>, <173 1 0>, <174 1 0>,
<175 1 0>;
-
- i2c@f9925000 {
- charger@57 {
- compatible = "summit,smb137c";
- reg = <0x57>;
- summit,chg-current-ma = <1500>;
- summit,term-current-ma = <50>;
- summit,pre-chg-current-ma = <100>;
- summit,float-voltage-mv = <4200>;
- summit,thresh-voltage-mv = <3000>;
- summit,recharge-thresh-mv = <75>;
- summit,system-voltage-mv = <4250>;
- summit,charging-timeout = <382>;
- summit,pre-charge-timeout = <48>;
- summit,therm-current-ua = <10>;
- summit,temperature-min = <4>; /* 0 C */
- summit,temperature-max = <3>; /* 45 C */
- };
- };
-
- wlan0: qca,wlan {
- cell-index = <0>;
- compatible = "qca,ar6004-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-
- qca,wlan_ar6003 {
- cell-index = <0>;
- compatible = "qca,ar6003-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-};
-
-/* PM8019 GPIO and MPP configuration */
-&pm8019_gpios {
- gpio@c000 { /* GPIO 1 */
- };
-
- gpio@c100 { /* GPIO 2 */
- };
-
- gpio@c200 { /* GPIO 3 */
- };
-
- gpio@c300 { /* GPIO 4 */
- /* ext_2p95v regulator enable config */
- qcom,mode = <1>; /* Digital output */
- qcom,output-type = <0>; /* CMOS */
- qcom,invert = <0>; /* Output low */
- qcom,out-strength = <1>; /* Low */
- qcom,vin-sel = <2>; /* PM8019 L11 - 1.8V */
- qcom,src-sel = <0>; /* Constant */
- qcom,master-en = <1>; /* Enable GPIO */
- };
-
- gpio@c400 { /* GPIO 5 */
- };
-
- gpio@c500 { /* GPIO 6 */
- };
-};
-
-&pm8019_mpps {
- mpp@a000 { /* MPP 1 */
- };
-
- mpp@a100 { /* MPP 2 */
- };
-
- mpp@a200 { /* MPP 3 */
- };
-
- mpp@a300 { /* MPP 4 */
- };
-
- mpp@a400 { /* MPP 5 */
- };
-
- mpp@a500 { /* MPP 6 */
- };
};
diff --git a/arch/arm/boot/dts/msm9625-v1-mtp.dts b/arch/arm/boot/dts/msm9625-v1-mtp.dts
index d78bb77..a70ec1a 100644
--- a/arch/arm/boot/dts/msm9625-v1-mtp.dts
+++ b/arch/arm/boot/dts/msm9625-v1-mtp.dts
@@ -13,95 +13,12 @@
/dts-v1/;
/include/ "msm9625-v1.dtsi"
+/include/ "msm9625-mtp.dtsi"
/ {
model = "Qualcomm MSM 9625V1 MTP";
- compatible = "qcom,msm9625-mtp", "qcom,msm9625";
+ compatible = "qcom,msm9625-mtp", "qcom,msm9625", "qcom,mtp";
qcom,msm-id = <134 7 0>, <152 7 0>, <149 7 0>, <150 7 0>,
<151 7 0>, <148 7 0>, <173 7 0>, <174 7 0>,
<175 7 0>;
-
- i2c@f9925000 {
- charger@57 {
- compatible = "summit,smb137c";
- reg = <0x57>;
- summit,chg-current-ma = <1500>;
- summit,term-current-ma = <50>;
- summit,pre-chg-current-ma = <100>;
- summit,float-voltage-mv = <4200>;
- summit,thresh-voltage-mv = <3000>;
- summit,recharge-thresh-mv = <75>;
- summit,system-voltage-mv = <4250>;
- summit,charging-timeout = <382>;
- summit,pre-charge-timeout = <48>;
- summit,therm-current-ua = <10>;
- summit,temperature-min = <4>; /* 0 C */
- summit,temperature-max = <3>; /* 45 C */
- };
- };
-
- wlan0: qca,wlan {
- cell-index = <0>;
- compatible = "qca,ar6004-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-
- qca,wlan_ar6003 {
- cell-index = <0>;
- compatible = "qca,ar6003-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-};
-
-/* PM8019 GPIO and MPP configuration */
-&pm8019_gpios {
- gpio@c000 { /* GPIO 1 */
- };
-
- gpio@c100 { /* GPIO 2 */
- };
-
- gpio@c200 { /* GPIO 3 */
- };
-
- gpio@c300 { /* GPIO 4 */
- /* ext_2p95v regulator enable config */
- qcom,mode = <1>; /* Digital output */
- qcom,output-type = <0>; /* CMOS */
- qcom,invert = <0>; /* Output low */
- qcom,out-strength = <1>; /* Low */
- qcom,vin-sel = <2>; /* PM8019 L11 - 1.8V */
- qcom,src-sel = <0>; /* Constant */
- qcom,master-en = <1>; /* Enable GPIO */
- };
-
- gpio@c400 { /* GPIO 5 */
- };
-
- gpio@c500 { /* GPIO 6 */
- };
-};
-
-&pm8019_mpps {
- mpp@a000 { /* MPP 1 */
- };
-
- mpp@a100 { /* MPP 2 */
- };
-
- mpp@a200 { /* MPP 3 */
- };
-
- mpp@a300 { /* MPP 4 */
- };
-
- mpp@a400 { /* MPP 5 */
- };
-
- mpp@a500 { /* MPP 6 */
- };
};
diff --git a/arch/arm/boot/dts/msm9625-v1-rumi.dts b/arch/arm/boot/dts/msm9625-v1-rumi.dts
index a854947..ef00681 100644
--- a/arch/arm/boot/dts/msm9625-v1-rumi.dts
+++ b/arch/arm/boot/dts/msm9625-v1-rumi.dts
@@ -16,7 +16,7 @@
/ {
model = "Qualcomm MSM 9625V1 RUMI";
- compatible = "qcom,msm9625-rumi", "qcom,msm9625";
+ compatible = "qcom,msm9625-rumi", "qcom,msm9625", "qcom,rumi";
qcom,msm-id = <134 15 0>;
chosen{
diff --git a/arch/arm/boot/dts/msm9625-v1.dtsi b/arch/arm/boot/dts/msm9625-v1.dtsi
index 54aa02a..de88ff1 100644
--- a/arch/arm/boot/dts/msm9625-v1.dtsi
+++ b/arch/arm/boot/dts/msm9625-v1.dtsi
@@ -37,6 +37,10 @@
};
};
+&hsic_host {
+ qcom,disable-park-mode;
+};
+
&ipa_hw {
qcom,ipa-hw-ver = <1>; /* IPA h-w revision */
};
@@ -57,3 +61,7 @@
&ldrex_spinlock {
status = "ok";
};
+
+&hsic_host {
+ qcom,phy-sof-workaround;
+};
diff --git a/arch/arm/boot/dts/msm9625-v2-1-cdp.dts b/arch/arm/boot/dts/msm9625-v2-1-cdp.dts
deleted file mode 100644
index 8702184..0000000
--- a/arch/arm/boot/dts/msm9625-v2-1-cdp.dts
+++ /dev/null
@@ -1,99 +0,0 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-
-/include/ "msm9625-v2-1.dtsi"
-
-/ {
- model = "Qualcomm MSM 9625V2.1 CDP";
- compatible = "qcom,msm9625-cdp", "qcom,msm9625";
- qcom,msm-id = <134 1 0x20001>, <152 1 0x20001>, <149 1 0x20001>,
- <150 1 0x20001>, <151 1 0x20001>, <148 1 0x20001>,
- <173 1 0x20001>, <174 1 0x20001>, <175 1 0x20001>;
-
- i2c@f9925000 {
- charger@57 {
- compatible = "summit,smb137c";
- reg = <0x57>;
- summit,chg-current-ma = <1500>;
- summit,term-current-ma = <50>;
- summit,pre-chg-current-ma = <100>;
- summit,float-voltage-mv = <4200>;
- summit,thresh-voltage-mv = <3000>;
- summit,recharge-thresh-mv = <75>;
- summit,system-voltage-mv = <4250>;
- summit,charging-timeout = <382>;
- summit,pre-charge-timeout = <48>;
- summit,therm-current-ua = <10>;
- summit,temperature-min = <4>; /* 0 C */
- summit,temperature-max = <3>; /* 45 C */
- };
- };
-
- wlan0: qca,wlan {
- cell-index = <0>;
- compatible = "qca,ar6004-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,ar6004-vdd-io-supply = <&pm8019_l11>;
- };
-};
-
-/* PM8019 GPIO and MPP configuration */
-&pm8019_gpios {
- gpio@c000 { /* GPIO 1 */
- };
-
- gpio@c100 { /* GPIO 2 */
- };
-
- gpio@c200 { /* GPIO 3 */
- };
-
- gpio@c300 { /* GPIO 4 */
- /* ext_2p95v regulator enable config */
- qcom,mode = <1>; /* Digital output */
- qcom,output-type = <0>; /* CMOS */
- qcom,invert = <0>; /* Output low */
- qcom,out-strength = <1>; /* Low */
- qcom,vin-sel = <2>; /* PM8019 L11 - 1.8V */
- qcom,src-sel = <0>; /* Constant */
- qcom,master-en = <1>; /* Enable GPIO */
- };
-
- gpio@c400 { /* GPIO 5 */
- };
-
- gpio@c500 { /* GPIO 6 */
- };
-};
-
-&pm8019_mpps {
- mpp@a000 { /* MPP 1 */
- };
-
- mpp@a100 { /* MPP 2 */
- };
-
- mpp@a200 { /* MPP 3 */
- };
-
- mpp@a300 { /* MPP 4 */
- };
-
- mpp@a400 { /* MPP 5 */
- };
-
- mpp@a500 { /* MPP 6 */
- };
-};
diff --git a/arch/arm/boot/dts/msm9625-v2-cdp.dts b/arch/arm/boot/dts/msm9625-v2-cdp.dts
index 94fe019..9fbe5ec 100644
--- a/arch/arm/boot/dts/msm9625-v2-cdp.dts
+++ b/arch/arm/boot/dts/msm9625-v2-cdp.dts
@@ -13,97 +13,12 @@
/dts-v1/;
/include/ "msm9625-v2.dtsi"
-/include/ "msm9625-display.dtsi"
-/include/ "qpic-panel-ili-qvga.dtsi"
+/include/ "msm9625-cdp.dtsi"
/ {
model = "Qualcomm MSM 9625V2 CDP";
- compatible = "qcom,msm9625-cdp", "qcom,msm9625";
+ compatible = "qcom,msm9625-cdp", "qcom,msm9625", "qcom,cdp";
qcom,msm-id = <134 1 0x20000>, <152 1 0x20000>, <149 1 0x20000>,
<150 1 0x20000>, <151 1 0x20000>, <148 1 0x20000>,
<173 1 0x20000>, <174 1 0x20000>, <175 1 0x20000>;
-
- i2c@f9925000 {
- charger@57 {
- compatible = "summit,smb137c";
- reg = <0x57>;
- summit,chg-current-ma = <1500>;
- summit,term-current-ma = <50>;
- summit,pre-chg-current-ma = <100>;
- summit,float-voltage-mv = <4200>;
- summit,thresh-voltage-mv = <3000>;
- summit,recharge-thresh-mv = <75>;
- summit,system-voltage-mv = <4250>;
- summit,charging-timeout = <382>;
- summit,pre-charge-timeout = <48>;
- summit,therm-current-ua = <10>;
- summit,temperature-min = <4>; /* 0 C */
- summit,temperature-max = <3>; /* 45 C */
- };
- };
-
- wlan0: qca,wlan {
- cell-index = <0>;
- compatible = "qca,ar6004-hsic";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-
- qca,wlan_ar6003 {
- cell-index = <0>;
- compatible = "qca,ar6003-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-};
-
-/* PM8019 GPIO and MPP configuration */
-&pm8019_gpios {
- gpio@c000 { /* GPIO 1 */
- };
-
- gpio@c100 { /* GPIO 2 */
- };
-
- gpio@c200 { /* GPIO 3 */
- };
-
- gpio@c300 { /* GPIO 4 */
- /* ext_2p95v regulator enable config */
- qcom,mode = <1>; /* Digital output */
- qcom,output-type = <0>; /* CMOS */
- qcom,invert = <0>; /* Output low */
- qcom,out-strength = <1>; /* Low */
- qcom,vin-sel = <2>; /* PM8019 L11 - 1.8V */
- qcom,src-sel = <0>; /* Constant */
- qcom,master-en = <1>; /* Enable GPIO */
- };
-
- gpio@c400 { /* GPIO 5 */
- };
-
- gpio@c500 { /* GPIO 6 */
- };
-};
-
-&pm8019_mpps {
- mpp@a000 { /* MPP 1 */
- };
-
- mpp@a100 { /* MPP 2 */
- };
-
- mpp@a200 { /* MPP 3 */
- };
-
- mpp@a300 { /* MPP 4 */
- };
-
- mpp@a400 { /* MPP 5 */
- };
-
- mpp@a500 { /* MPP 6 */
- };
};
diff --git a/arch/arm/boot/dts/msm9625-v2-mtp.dts b/arch/arm/boot/dts/msm9625-v2-mtp.dts
index 2840024..5324e2c 100644
--- a/arch/arm/boot/dts/msm9625-v2-mtp.dts
+++ b/arch/arm/boot/dts/msm9625-v2-mtp.dts
@@ -13,10 +13,11 @@
/dts-v1/;
/include/ "msm9625-v2.dtsi"
+/include/ "msm9625-mtp.dtsi"
/ {
model = "Qualcomm MSM 9625V2 MTP";
- compatible = "qcom,msm9625-mtp", "qcom,msm9625";
+ compatible = "qcom,msm9625-mtp", "qcom,msm9625", "qcom,mtp";
qcom,msm-id = <134 7 0x20000>, <152 7 0x20000>, <149 7 0x20000>,
<150 7 0x20000>, <151 7 0x20000>, <148 7 0x20000>,
<173 7 0x20000>, <174 7 0x20000>, <175 7 0x20000>;
@@ -97,11 +98,23 @@
};
mpp@a300 { /* MPP 4 */
+ /* VADC channel 19 */
+ qcom,mode = <4>;
+ qcom,ain-route = <3>; /* AMUX 8 */
+ qcom,master-en = <1>;
+ qcom,src-sel = <0>; /* Function constant */
+ qcom,invert = <1>;
};
mpp@a400 { /* MPP 5 */
};
mpp@a500 { /* MPP 6 */
+ /* channel 21 */
+ qcom,mode = <4>;
+ qcom,ain-route = <1>; /* AMUX 6 */
+ qcom,master-en = <1>;
+ qcom,src-sel = <0>; /* Function constant */
+ qcom,invert = <1>;
};
};
diff --git a/arch/arm/boot/dts/msmzinc-sim.dts b/arch/arm/boot/dts/msm9625-v2.1-cdp.dts
similarity index 61%
copy from arch/arm/boot/dts/msmzinc-sim.dts
copy to arch/arm/boot/dts/msm9625-v2.1-cdp.dts
index 48d7ef1..b643593 100644
--- a/arch/arm/boot/dts/msmzinc-sim.dts
+++ b/arch/arm/boot/dts/msm9625-v2.1-cdp.dts
@@ -12,18 +12,13 @@
/dts-v1/;
-/include/ "msmzinc.dtsi"
+/include/ "msm9625-v2.1.dtsi"
+/include/ "msm9625-cdp.dtsi"
/ {
- model = "Qualcomm MSM ZINC Simulator";
- compatible = "qcom,msmzinc-sim", "qcom,msmzinc";
- qcom,msm-id = <178 0 0>;
-
- aliases {
- serial0 = &uart0;
- };
-
- uart0: serial@f991f000 {
- status = "ok";
- };
+ model = "Qualcomm MSM 9625V2.1 CDP";
+ compatible = "qcom,msm9625-cdp", "qcom,msm9625", "qcom,cdp";
+ qcom,msm-id = <134 1 0x20001>, <152 1 0x20001>, <149 1 0x20001>,
+ <150 1 0x20001>, <151 1 0x20001>, <148 1 0x20001>,
+ <173 1 0x20001>, <174 1 0x20001>, <175 1 0x20001>;
};
diff --git a/arch/arm/boot/dts/msmzinc-sim.dts b/arch/arm/boot/dts/msm9625-v2.1-mtp.dts
similarity index 61%
copy from arch/arm/boot/dts/msmzinc-sim.dts
copy to arch/arm/boot/dts/msm9625-v2.1-mtp.dts
index 48d7ef1..8bbcc0d 100644
--- a/arch/arm/boot/dts/msmzinc-sim.dts
+++ b/arch/arm/boot/dts/msm9625-v2.1-mtp.dts
@@ -12,18 +12,13 @@
/dts-v1/;
-/include/ "msmzinc.dtsi"
+/include/ "msm9625-v2.1.dtsi"
+/include/ "msm9625-mtp.dtsi"
/ {
- model = "Qualcomm MSM ZINC Simulator";
- compatible = "qcom,msmzinc-sim", "qcom,msmzinc";
- qcom,msm-id = <178 0 0>;
-
- aliases {
- serial0 = &uart0;
- };
-
- uart0: serial@f991f000 {
- status = "ok";
- };
+ model = "Qualcomm MSM 9625V2.1 MTP";
+ compatible = "qcom,msm9625-mtp", "qcom,msm9625", "qcom,mtp";
+ qcom,msm-id = <134 7 0x20001>, <152 7 0x20001>, <149 7 0x20001>,
+ <150 7 0x20001>, <151 7 0x20001>, <148 7 0x20001>,
+ <173 7 0x20001>, <174 7 0x20001>, <175 7 0x20001>;
};
diff --git a/arch/arm/boot/dts/msm9625-v2-1.dtsi b/arch/arm/boot/dts/msm9625-v2.1.dtsi
similarity index 100%
rename from arch/arm/boot/dts/msm9625-v2-1.dtsi
rename to arch/arm/boot/dts/msm9625-v2.1.dtsi
diff --git a/arch/arm/boot/dts/msm9625-v2.dtsi b/arch/arm/boot/dts/msm9625-v2.dtsi
index 3ce6844..81d8e00 100644
--- a/arch/arm/boot/dts/msm9625-v2.dtsi
+++ b/arch/arm/boot/dts/msm9625-v2.dtsi
@@ -35,6 +35,10 @@
qcom,ipa-hw-ver = <2>; /* IPA h-w revision */
};
+&hsic_host {
+ qcom,disable-park-mode;
+};
+
&sfpb_spinlock {
status = "disable";
};
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 3dbc95d..a79f403 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -58,12 +58,70 @@
clock-frequency = <32768>;
};
- timer: msm-qtimer@f9021000 {
- compatible = "arm,armv7-timer";
- reg = <0xF9021000 0x1000>;
- interrupts = <0 7 0>;
- irq-is-not-percpu;
+ timer@f9020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xf9020000 0x1000>;
clock-frequency = <19200000>;
+
+ frame@f9021000 {
+ frame-number = <0>;
+ interrupts = <0 7 0x4>,
+ <0 6 0x4>;
+ reg = <0xf9021000 0x1000>,
+ <0xf9022000 0x1000>;
+ };
+
+ frame@f9023000 {
+ frame-number = <1>;
+ interrupts = <0 8 0x4>;
+ reg = <0xf9023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9024000 {
+ frame-number = <2>;
+ interrupts = <0 9 0x4>;
+ reg = <0xf9024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9025000 {
+ frame-number = <3>;
+ interrupts = <0 10 0x4>;
+ reg = <0xf9025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9026000 {
+ frame-number = <4>;
+ interrupts = <0 11 0x4>;
+ reg = <0xf9026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9027000 {
+ frame-number = <5>;
+ interrupts = <0 12 0x4>;
+ reg = <0xf9027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9028000 {
+ frame-number = <6>;
+ interrupts = <0 13 0x4>;
+ reg = <0xf9028000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9029000 {
+ frame-number = <7>;
+ interrupts = <0 14 0x4>;
+ reg = <0xf9029000 0x1000>;
+ status = "disabled";
+ };
};
qcom,sps@f9980000 {
@@ -101,14 +159,13 @@
qcom,msm-bus,name = "usb2";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<87 512 0 0>,
<87 512 40000 640000>;
};
- hsic@f9a15000 {
+ hsic_host: hsic@f9a15000 {
compatible = "qcom,hsic-host";
reg = <0xf9a15000 0x400>;
interrupts = <0 136 0>, <0 148 0>;
@@ -118,7 +175,6 @@
qcom,msm-bus,name = "hsic";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<85 512 0 0>,
@@ -336,7 +392,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>;
qcom,pad-pull-off = <0x0 0x3 0x3>;
- qcom,pad-drv-on = <0x7 0x4 0x4>;
+ qcom,pad-drv-on = <0x4 0x4 0x4>;
qcom,pad-drv-off = <0x0 0x0 0x0>;
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -507,6 +563,14 @@
qcom,cdc-vddcx-2-voltage = <1200000 1200000>;
qcom,cdc-vddcx-2-current = <10000>;
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1",
+ "cdc-vdd-a-1p2v",
+ "cdc-vddcx-1",
+ "cdc-vddcx-2";
+
qcom,cdc-micbias-ldoh-v = <0x3>;
qcom,cdc-micbias-cfilt1-mv = <1800>;
qcom,cdc-micbias-cfilt2-mv = <2700>;
@@ -689,16 +753,17 @@
compatible = "qcom,pil-q6v5-mss";
interrupts = <0 24 1>;
- /* GPIO input from mss */
+ /* GPIO inputs from mss */
qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
/* GPIO output to mss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
};
- qcom,smem@fa00000 {
+ qcom,smem@0 {
compatible = "qcom,smem";
- reg = <0xfa00000 0x200000>,
+ reg = <0x0 0x100000>,
<0xf9011000 0x1000>,
<0xfc428000 0x4000>;
reg-names = "smem", "irq-reg-base", "aux-mem1";
@@ -825,7 +890,7 @@
};
chan@33 {
- label = "pa_therm1";
+ label = "pa_therm0";
reg = <0x33>;
qcom,decimation = <0>;
qcom,pre-div-channel-scaling = <0>;
@@ -836,7 +901,7 @@
};
chan@34 {
- label = "pa_therm2";
+ label = "pa_therm1";
reg = <0x34>;
qcom,decimation = <0>;
qcom,pre-div-channel-scaling = <0>;
@@ -867,4 +932,26 @@
qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
+
+ chan@13 {
+ label = "case_therm";
+ reg = <0x13>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@15 {
+ label = "ambient_therm";
+ reg = <0x15>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
};
diff --git a/arch/arm/boot/dts/msmzinc-sim.dts b/arch/arm/boot/dts/msmkrypton-sim.dts
similarity index 71%
rename from arch/arm/boot/dts/msmzinc-sim.dts
rename to arch/arm/boot/dts/msmkrypton-sim.dts
index 48d7ef1..1872a36 100644
--- a/arch/arm/boot/dts/msmzinc-sim.dts
+++ b/arch/arm/boot/dts/msmkrypton-sim.dts
@@ -12,18 +12,14 @@
/dts-v1/;
-/include/ "msmzinc.dtsi"
+/include/ "msmkrypton.dtsi"
/ {
- model = "Qualcomm MSM ZINC Simulator";
- compatible = "qcom,msmzinc-sim", "qcom,msmzinc";
- qcom,msm-id = <178 0 0>;
+ model = "Qualcomm MSM KRYPTON SIM";
+ compatible = "qcom,msmkrypton-sim", "qcom,msmkrypton", "qcom,sim";
+ qcom,msm-id = <187 16 0>;
+};
- aliases {
- serial0 = &uart0;
- };
-
- uart0: serial@f991f000 {
- status = "ok";
- };
+&uartdm3{
+ status = "ok";
};
diff --git a/arch/arm/boot/dts/msmkrypton.dtsi b/arch/arm/boot/dts/msmkrypton.dtsi
new file mode 100644
index 0000000..3f51659
--- /dev/null
+++ b/arch/arm/boot/dts/msmkrypton.dtsi
@@ -0,0 +1,112 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ model = "Qualcomm MSM KRYPTON";
+ compatible = "qcom,msmkrypton";
+ interrupt-parent = <&intc>;
+
+ intc: interrupt-controller@f9000000 {
+ compatible = "qcom,msm-qgic2";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0xf9000000 0x1000>,
+ <0xf9002000 0x1000>;
+ };
+
+ msmgpio: gpio@fd510000 {
+ compatible = "qcom,msm-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0xfd510000 0x4000>;
+ ngpio = <89>;
+ interrupts = <0 208 0>;
+ qcom,direct-connect-irqs = <8>;
+ };
+
+ timer@f9020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xf9020000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@f9021000 {
+ frame-number = <0>;
+ interrupts = <0 7 0x4>,
+ <0 6 0x4>;
+ reg = <0xf9021000 0x1000>,
+ <0xf9022000 0x1000>;
+ };
+
+ frame@f9023000 {
+ frame-number = <1>;
+ interrupts = <0 8 0x4>;
+ reg = <0xf9023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9024000 {
+ frame-number = <2>;
+ interrupts = <0 9 0x4>;
+ reg = <0xf9024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9025000 {
+ frame-number = <3>;
+ interrupts = <0 10 0x4>;
+ reg = <0xf9025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9026000 {
+ frame-number = <4>;
+ interrupts = <0 11 0x4>;
+ reg = <0xf9026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9027000 {
+ frame-number = <5>;
+ interrupts = <0 12 0x4>;
+ reg = <0xf9027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9028000 {
+ frame-number = <6>;
+ interrupts = <0 13 0x4>;
+ reg = <0xf9028000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9029000 {
+ frame-number = <7>;
+ interrupts = <0 14 0x4>;
+ reg = <0xf9029000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ uartdm3: serial@f991f000 {
+ compatible = "qcom,msm-lsuart-v14";
+ reg = <0xf991f000 0x1000>;
+ interrupts = <0 109 0>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/configs/msmzinc_defconfig b/arch/arm/configs/apq8084_defconfig
similarity index 96%
rename from arch/arm/configs/msmzinc_defconfig
rename to arch/arm/configs/apq8084_defconfig
index 678b086..f595188 100644
--- a/arch/arm/configs/msmzinc_defconfig
+++ b/arch/arm/configs/apq8084_defconfig
@@ -34,7 +34,7 @@
CONFIG_EFI_PARTITION=y
CONFIG_IOSCHED_TEST=y
CONFIG_ARCH_MSM=y
-CONFIG_ARCH_MSMZINC=y
+CONFIG_ARCH_APQ8084=y
CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER=y
CONFIG_MSM_RPM_SMD=y
# CONFIG_MSM_STACKED_MEMORY is not set
@@ -313,6 +313,19 @@
CONFIG_USB_STORAGE_KARMA=y
CONFIG_USB_STORAGE_CYPRESS_ATACB=y
CONFIG_USB_STORAGE_ENE_UB6250=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_MINORS=32
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+CONFIG_MMC_TEST=m
+CONFIG_MMC_BLOCK_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_MSM=y
+CONFIG_MMC_SDHCI_MSM=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
@@ -335,6 +348,7 @@
CONFIG_QPNP_POWER_ON=y
CONFIG_QPNP_CLKDIV=y
CONFIG_MSM_IOMMU=y
+CONFIG_IOMMU_PGTABLES_L2=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index a429b3d..3291919 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -216,6 +216,7 @@
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI4_DEV=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_GPIO=m
@@ -231,6 +232,7 @@
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=m
CONFIG_SPMI=y
+CONFIG_MSM_BUS_SCALING=y
CONFIG_SPMI_MSM_PMIC_ARB=y
CONFIG_MSM_QPNP_INT=y
CONFIG_SLIMBUS_MSM_NGD=y
@@ -239,6 +241,7 @@
CONFIG_GPIO_QPNP_PIN=y
CONFIG_POWER_SUPPLY=y
CONFIG_QPNP_CHARGER=y
+CONFIG_QPNP_BMS=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_SENSORS_QPNP_ADC_CURRENT=y
CONFIG_THERMAL=y
@@ -264,6 +267,8 @@
CONFIG_MSMB_CAMERA=y
CONFIG_OV9724=y
CONFIG_MSMB_JPEG=y
+CONFIG_SWITCH=y
+CONFIG_MSM_WFD=y
CONFIG_MSM_VIDC_V4L2=y
CONFIG_VIDEOBUF2_MSM_MEM=y
CONFIG_V4L_PLATFORM_DRIVERS=y
@@ -300,7 +305,10 @@
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_TEST=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_MSM=y
+CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_MSM_SPS_SUPPORT=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_TRIGGERS=y
@@ -321,7 +329,6 @@
CONFIG_QPNP_PWM=y
CONFIG_QPNP_POWER_ON=y
CONFIG_MSM_IOMMU=y
-CONFIG_MSM_IOMMU_PMON=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_TMC=y
CONFIG_CORESIGHT_TPIU=y
@@ -369,3 +376,4 @@
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
CONFIG_QPNP_VIBRATOR=y
+CONFIG_QSEECOM=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 4e6cb05..f90e5f3 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -483,6 +483,7 @@
CONFIG_SPS_SUPPORT_BAMDMA=y
CONFIG_MSM_AVTIMER=y
CONFIG_MSM_IOMMU=y
+CONFIG_IOMMU_PGTABLES_L2=y
CONFIG_MOBICORE_SUPPORT=m
CONFIG_MOBICORE_API=m
CONFIG_CORESIGHT=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index c4fffb9..f699dee 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -485,6 +485,7 @@
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_BAMDMA=y
CONFIG_MSM_IOMMU=y
+CONFIG_IOMMU_PGTABLES_L2=y
CONFIG_MOBICORE_SUPPORT=m
CONFIG_MOBICORE_API=m
CONFIG_CORESIGHT=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index efca45a..59cafd1 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -98,6 +98,7 @@
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
@@ -238,6 +239,7 @@
CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_TSPP=m
CONFIG_HAPTIC_ISA1200=y
CONFIG_QSEECOM=y
CONFIG_QPNP_MISC=y
@@ -320,6 +322,7 @@
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_DEV=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_DVB_CORE=m
# CONFIG_MSM_CAMERA is not set
CONFIG_MT9M114=y
CONFIG_OV2720=y
@@ -337,6 +340,8 @@
CONFIG_MSMB_JPEG=y
CONFIG_MSM_VIDC_V4L2=y
CONFIG_MSM_WFD=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
CONFIG_VIDEOBUF2_MSM_MEM=y
CONFIG_USB_VIDEO_CLASS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
@@ -345,6 +350,7 @@
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_MSM_KGSL=y
+CONFIG_KGSL_PER_PROCESS_PAGE_TABLE=y
CONFIG_FB=y
CONFIG_FB_MSM=y
# CONFIG_FB_MSM_BACKLIGHT is not set
@@ -386,8 +392,11 @@
CONFIG_USB_STORAGE_CYPRESS_ATACB=y
CONFIG_USB_STORAGE_ENE_UB6250=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_QCOM_DIAG_BRIDGE=y
+CONFIG_USB_QCOM_KS_BRIDGE=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_CI13XXX_MSM=y
CONFIG_USB_DWC3_MSM=y
CONFIG_USB_G_ANDROID=y
CONFIG_MMC=y
@@ -430,6 +439,7 @@
CONFIG_QPNP_REVID=y
CONFIG_QPNP_COINCELL=y
CONFIG_MSM_IOMMU=y
+CONFIG_IOMMU_PGTABLES_L2=y
CONFIG_MOBICORE_SUPPORT=m
CONFIG_MOBICORE_API=m
CONFIG_CORESIGHT=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index a226fe4..fd8a639 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -85,6 +85,7 @@
CONFIG_MSM_ENABLE_WDOG_DEBUG_CONTROL=y
CONFIG_MSM_UARTDM_Core_v14=y
CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_XPU_ERR_FATAL=y
CONFIG_STRICT_MEMORY_RWX=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -102,6 +103,7 @@
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
@@ -242,6 +244,7 @@
CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_UID_STAT=y
CONFIG_TSPP=m
CONFIG_HAPTIC_ISA1200=y
CONFIG_QSEECOM=y
@@ -353,6 +356,7 @@
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_MSM_KGSL=y
+CONFIG_KGSL_PER_PROCESS_PAGE_TABLE=y
CONFIG_FB=y
CONFIG_FB_MSM=y
# CONFIG_FB_MSM_BACKLIGHT is not set
@@ -394,8 +398,11 @@
CONFIG_USB_STORAGE_CYPRESS_ATACB=y
CONFIG_USB_STORAGE_ENE_UB6250=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_QCOM_DIAG_BRIDGE=y
+CONFIG_USB_QCOM_KS_BRIDGE=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_CI13XXX_MSM=y
CONFIG_USB_DWC3_MSM=y
CONFIG_USB_G_ANDROID=y
CONFIG_MMC=y
@@ -438,6 +445,7 @@
CONFIG_QPNP_REVID=y
CONFIG_QPNP_COINCELL=y
CONFIG_MSM_IOMMU=y
+CONFIG_IOMMU_PGTABLES_L2=y
CONFIG_MSM_IOMMU_PMON=y
CONFIG_MOBICORE_SUPPORT=m
CONFIG_MOBICORE_API=m
diff --git a/arch/arm/configs/msm9625-perf_defconfig b/arch/arm/configs/msm9625-perf_defconfig
index 42acd99..ae73bad 100644
--- a/arch/arm/configs/msm9625-perf_defconfig
+++ b/arch/arm/configs/msm9625-perf_defconfig
@@ -230,9 +230,6 @@
CONFIG_REGULATOR_QPNP=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_QPIC_PANEL_DETECT=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 041e89a..f7c3bff 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -231,9 +231,6 @@
CONFIG_REGULATOR_QPNP=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_QPIC_PANEL_DETECT=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
diff --git a/arch/arm/configs/msmkrypton_defconfig b/arch/arm/configs/msmkrypton_defconfig
new file mode 100644
index 0000000..69bc36e
--- /dev/null
+++ b/arch/arm/configs/msmkrypton_defconfig
@@ -0,0 +1,116 @@
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_MSM=y
+CONFIG_ARCH_MSMKRYPTON=y
+# CONFIG_MSM_STACKED_MEMORY is not set
+CONFIG_CPU_HAS_L2_PMU=y
+# CONFIG_MSM_FIQ_SUPPORT is not set
+# CONFIG_MSM_PROC_COMM is not set
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_PKG4=y
+CONFIG_MSM_IPC_LOGGING=y
+CONFIG_MSM_WATCHDOG_V2=y
+CONFIG_MSM_UARTDM_Core_v14=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_USE_OF=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_RUNTIME=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_MTD_MSM_NAND is not set
+CONFIG_MTD_MSM_QPIC_NAND=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_ANDROID_PMEM is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_HSL=y
+CONFIG_SERIAL_MSM_HSL_CONSOLE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=m
+CONFIG_MMC_MSM=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_DRV_MSM is not set
+CONFIG_EXT3_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_YAFFS_FS=y
+CONFIG_YAFFS_DISABLE_TAGS_ECC=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_USER=y
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 366debb..ac4c7a3 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/export.h>
+#include <linux/slab.h>
#include <asm/cputype.h>
#include <asm/delay.h>
@@ -33,46 +34,13 @@
#include <asm/system_info.h>
static unsigned long arch_timer_rate;
+static int arch_timer_spi;
static int arch_timer_ppi;
static int arch_timer_ppi2;
-static int is_irq_percpu;
static struct clock_event_device __percpu **arch_timer_evt;
static void __iomem *timer_base;
-static u32 timer_reg_read_cp15(int reg);
-static void timer_reg_write_cp15(int reg, u32 val);
-static inline cycle_t counter_get_cntpct_cp15(void);
-static inline cycle_t counter_get_cntvct_cp15(void);
-
-static u32 timer_reg_read_mem(int reg);
-static void timer_reg_write_mem(int reg, u32 val);
-static inline cycle_t counter_get_cntpct_mem(void);
-static inline cycle_t counter_get_cntvct_mem(void);
-
-struct arch_timer_operations {
- void (*reg_write)(int, u32);
- u32 (*reg_read)(int);
- cycle_t (*get_cntpct)(void);
- cycle_t (*get_cntvct)(void);
-};
-
-static struct arch_timer_operations arch_timer_ops_cp15 = {
- .reg_read = &timer_reg_read_cp15,
- .reg_write = &timer_reg_write_cp15,
- .get_cntpct = &counter_get_cntpct_cp15,
- .get_cntvct = &counter_get_cntvct_cp15,
-};
-
-static struct arch_timer_operations arch_timer_ops_mem = {
- .reg_read = &timer_reg_read_mem,
- .reg_write = &timer_reg_write_mem,
- .get_cntpct = &counter_get_cntpct_mem,
- .get_cntvct = &counter_get_cntvct_mem,
-};
-
-static struct arch_timer_operations *arch_specific_timer = &arch_timer_ops_cp15;
-
static struct delay_timer arch_delay_timer;
/*
@@ -97,7 +65,7 @@
#define QTIMER_CNTP_TVAL_REG 0x028
#define QTIMER_CNTV_TVAL_REG 0x038
-static void timer_reg_write_mem(int reg, u32 val)
+static inline void timer_reg_write_mem(int reg, u32 val)
{
switch (reg) {
case ARCH_TIMER_REG_CTRL:
@@ -109,7 +77,7 @@
}
}
-static void timer_reg_write_cp15(int reg, u32 val)
+static inline void timer_reg_write_cp15(int reg, u32 val)
{
switch (reg) {
case ARCH_TIMER_REG_CTRL:
@@ -123,7 +91,15 @@
isb();
}
-static u32 timer_reg_read_mem(int reg)
+static inline void arch_timer_reg_write(int cp15, int reg, u32 val)
+{
+ if (cp15)
+ timer_reg_write_cp15(reg, val);
+ else
+ timer_reg_write_mem(reg, val);
+}
+
+static inline u32 timer_reg_read_mem(int reg)
{
u32 val;
@@ -144,7 +120,7 @@
return val;
}
-static u32 timer_reg_read_cp15(int reg)
+static inline u32 timer_reg_read_cp15(int reg)
{
u32 val;
@@ -165,17 +141,23 @@
return val;
}
-static irqreturn_t arch_timer_handler(int irq, void *dev_id)
+static inline u32 arch_timer_reg_read(int cp15, int reg)
{
- struct clock_event_device *evt;
+ if (cp15)
+ return timer_reg_read_cp15(reg);
+ else
+ return timer_reg_read_mem(reg);
+}
+
+static inline irqreturn_t arch_timer_handler(int cp15,
+ struct clock_event_device *evt)
+{
unsigned long ctrl;
- ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(cp15, ARCH_TIMER_REG_CTRL);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
- arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL,
- ctrl);
- evt = *__this_cpu_ptr(arch_timer_evt);
+ arch_timer_reg_write(cp15, ARCH_TIMER_REG_CTRL, ctrl);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -183,16 +165,18 @@
return IRQ_NONE;
}
-static void arch_timer_disable(void)
+static irqreturn_t arch_timer_handler_cp15(int irq, void *dev_id)
{
- unsigned long ctrl;
-
- ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+ struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+ return arch_timer_handler(1, evt);
}
-static void arch_timer_set_mode(enum clock_event_mode mode,
+static irqreturn_t arch_timer_handler_mem(int irq, void *dev_id)
+{
+ return arch_timer_handler(0, dev_id);
+}
+
+static inline void arch_timer_set_mode(int cp15, enum clock_event_mode mode,
struct clock_event_device *clk)
{
unsigned long ctrl;
@@ -200,46 +184,72 @@
switch (mode) {
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
- arch_timer_disable();
+ ctrl = arch_timer_reg_read(cp15, ARCH_TIMER_REG_CTRL);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_reg_write(cp15, ARCH_TIMER_REG_CTRL, ctrl);
break;
case CLOCK_EVT_MODE_ONESHOT:
- ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(cp15, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
- arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+ arch_timer_reg_write(cp15, ARCH_TIMER_REG_CTRL, ctrl);
default:
break;
}
}
-static int arch_timer_set_next_event(unsigned long evt,
+static void arch_timer_set_mode_cp15(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ arch_timer_set_mode(1, mode, clk);
+}
+
+static void arch_timer_set_mode_mem(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ arch_timer_set_mode(0, mode, clk);
+}
+
+static int arch_timer_set_next_event(int cp15, unsigned long evt,
struct clock_event_device *unused)
{
unsigned long ctrl;
- ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(cp15, ARCH_TIMER_REG_CTRL);
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
- arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);
- arch_specific_timer->reg_write(ARCH_TIMER_REG_TVAL, evt);
+ arch_timer_reg_write(cp15, ARCH_TIMER_REG_CTRL, ctrl);
+ arch_timer_reg_write(cp15, ARCH_TIMER_REG_TVAL, evt);
return 0;
}
+static int arch_timer_set_next_event_cp15(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ return arch_timer_set_next_event(1, evt, unused);
+}
+
+static int arch_timer_set_next_event_mem(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ return arch_timer_set_next_event(0, evt, unused);
+}
+
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
/* setup clock event only once for CPU 0 */
if (!smp_processor_id() && clk->irq == arch_timer_ppi)
return 0;
- /* Be safe... */
- arch_timer_disable();
-
- clk->features = CLOCK_EVT_FEAT_ONESHOT;
+ clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
- clk->set_mode = arch_timer_set_mode;
- clk->set_next_event = arch_timer_set_next_event;
+ clk->set_mode = arch_timer_set_mode_cp15;
+ clk->set_next_event = arch_timer_set_next_event_cp15;
clk->irq = arch_timer_ppi;
+ /* Be safe... */
+ clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);
+
clockevents_config_and_register(clk, arch_timer_rate,
0xf, 0x7fffffff);
@@ -264,8 +274,8 @@
unsigned long freq;
if (arch_timer_rate == 0) {
- arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, 0);
- freq = arch_specific_timer->reg_read(ARCH_TIMER_REG_FREQ);
+ arch_timer_reg_write(1, ARCH_TIMER_REG_CTRL, 0);
+ freq = arch_timer_reg_read(1, ARCH_TIMER_REG_FREQ);
/* Check the timer frequency. */
if (freq == 0) {
@@ -323,9 +333,12 @@
return ((cycle_t) cvalh << 32) | cvall;
}
+static cycle_t (*get_cntpct_func)(void) = counter_get_cntpct_cp15;
+static cycle_t (*get_cntvct_func)(void) = counter_get_cntvct_cp15;
+
cycle_t arch_counter_get_cntpct(void)
{
- return arch_specific_timer->get_cntpct();
+ return get_cntpct_func();
}
EXPORT_SYMBOL(arch_counter_get_cntpct);
@@ -351,7 +364,7 @@
{
cycle_t cntvct;
- cntvct = arch_specific_timer->get_cntvct();
+ cntvct = get_cntvct_func();
/*
* The sched_clock infrastructure only knows about counters
@@ -373,7 +386,7 @@
disable_percpu_irq(clk->irq);
if (arch_timer_ppi2)
disable_percpu_irq(arch_timer_ppi2);
- arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+ clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}
static struct local_timer_ops arch_timer_ops __cpuinitdata = {
@@ -383,13 +396,23 @@
static struct clock_event_device arch_timer_global_evt;
+static void __init arch_timer_counter_init(void)
+{
+ clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+
+ setup_sched_clock(arch_timer_update_sched_clock, 32, arch_timer_rate);
+
+ /* Use the architected timer for the delay loop. */
+ arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
+ arch_delay_timer.freq = arch_timer_rate;
+ register_current_timer_delay(&arch_delay_timer);
+}
+
static int __init arch_timer_common_register(void)
{
int err;
- if (timer_base)
- arch_specific_timer = &arch_timer_ops_mem;
- else if (!local_timer_is_architected())
+ if (!local_timer_is_architected())
return -ENXIO;
err = arch_timer_available();
@@ -400,16 +423,8 @@
if (!arch_timer_evt)
return -ENOMEM;
- clocksource_register_hz(&clocksource_counter, arch_timer_rate);
-
- setup_sched_clock(arch_timer_update_sched_clock, 32, arch_timer_rate);
-
- if (is_irq_percpu)
- err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
- "arch_timer", arch_timer_evt);
- else
- err = request_irq(arch_timer_ppi, arch_timer_handler, 0,
- "arch_timer", arch_timer_evt);
+ err = request_percpu_irq(arch_timer_ppi, arch_timer_handler_cp15,
+ "arch_timer", arch_timer_evt);
if (err) {
pr_err("arch_timer: can't register interrupt %d (%d)\n",
arch_timer_ppi, err);
@@ -417,13 +432,9 @@
}
if (arch_timer_ppi2) {
- if (is_irq_percpu)
- err = request_percpu_irq(arch_timer_ppi2,
- arch_timer_handler, "arch_timer",
- arch_timer_evt);
- else
- err = request_irq(arch_timer_ppi2, arch_timer_handler,
- 0, "arch_timer", arch_timer_evt);
+ err = request_percpu_irq(arch_timer_ppi2,
+ arch_timer_handler_cp15,
+ "arch_timer", arch_timer_evt);
if (err) {
pr_err("arch_timer: can't register interrupt %d (%d)\n",
arch_timer_ppi2, err);
@@ -447,10 +458,6 @@
if (err)
goto out_free_irq;
- /* Use the architected timer for the delay loop. */
- arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
- arch_delay_timer.freq = arch_timer_rate;
- register_current_timer_delay(&arch_delay_timer);
return 0;
out_free_irq:
@@ -464,6 +471,34 @@
return err;
}
+static int __init arch_timer_mem_register(void)
+{
+ int err;
+ struct clock_event_device *clk;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+
+ clk->features = CLOCK_EVT_FEAT_ONESHOT;
+ clk->name = "arch_mem_timer";
+ clk->rating = 400;
+ clk->set_mode = arch_timer_set_mode_mem;
+ clk->set_next_event = arch_timer_set_next_event_mem;
+ clk->irq = arch_timer_spi;
+ clk->cpumask = cpu_all_mask;
+
+ clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);
+
+ clockevents_config_and_register(clk, arch_timer_rate,
+ 0xf, 0x7fffffff);
+
+ err = request_irq(arch_timer_spi, arch_timer_handler_mem, 0,
+ "arch_timer", clk);
+
+ return err;
+}
+
int __init arch_timer_register(struct arch_timer *at)
{
if (at->res[0].start <= 0 || !(at->res[0].flags & IORESOURCE_IRQ))
@@ -493,48 +528,86 @@
{},
};
+static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
+ { .compatible = "arm,armv7-timer-mem", },
+ {},
+};
+
int __init arch_timer_of_register(void)
{
- struct device_node *np;
+ struct device_node *np, *frame;
u32 freq;
int ret;
+ int has_cp15 = false, has_mem = false;
np = of_find_matching_node(NULL, arch_timer_of_match);
- if (!np) {
- pr_err("arch_timer: can't find DT node\n");
- return -ENODEV;
+ if (np) {
+ has_cp15 = true;
+ /*
+ * Try to determine the frequency from the device tree
+ */
+ if (!of_property_read_u32(np, "clock-frequency", &freq))
+ arch_timer_rate = freq;
+
+ ret = irq_of_parse_and_map(np, 0);
+ if (ret <= 0) {
+ pr_err("arch_timer: interrupt not specified in timer node\n");
+ return -ENODEV;
+ }
+ arch_timer_ppi = ret;
+ ret = irq_of_parse_and_map(np, 1);
+ if (ret > 0)
+ arch_timer_ppi2 = ret;
+
+ ret = arch_timer_common_register();
+ if (ret)
+ return ret;
}
- /* Try to determine the frequency from the device tree or CNTFRQ */
- if (!of_property_read_u32(np, "clock-frequency", &freq))
- arch_timer_rate = freq;
+ np = of_find_matching_node(NULL, arch_timer_mem_of_match);
+ if (np) {
+ has_mem = true;
- ret = irq_of_parse_and_map(np, 0);
- if (ret <= 0) {
- pr_err("arch_timer: interrupt not specified in timer node\n");
- return -ENODEV;
- }
+ if (!has_cp15) {
+ get_cntpct_func = counter_get_cntpct_mem;
+ get_cntvct_func = counter_get_cntvct_mem;
+ }
+ /*
+ * Try to determine the frequency from the device tree
+ */
+ if (!of_property_read_u32(np, "clock-frequency", &freq))
+ arch_timer_rate = freq;
- if (of_get_address(np, 0, NULL, NULL)) {
- timer_base = of_iomap(np, 0);
+ frame = of_get_next_child(np, NULL);
+ if (!frame) {
+ pr_err("arch_timer: no child frame\n");
+ return -EINVAL;
+ }
+
+ timer_base = of_iomap(frame, 0);
if (!timer_base) {
pr_err("arch_timer: cant map timer base\n");
return -ENOMEM;
}
+
+ arch_timer_spi = irq_of_parse_and_map(frame, 0);
+ if (!arch_timer_spi) {
+ pr_err("arch_timer: no physical timer irq\n");
+ return -EINVAL;
+ }
+
+ ret = arch_timer_mem_register();
+ if (ret)
+ return ret;
}
- if (of_get_property(np, "irq-is-not-percpu", NULL))
- is_irq_percpu = 0;
- else
- is_irq_percpu = 1;
+ if (!has_cp15 && !has_mem) {
+ pr_err("arch_timer: can't find DT node\n");
+ return -ENODEV;
+ }
- arch_timer_ppi = ret;
- ret = irq_of_parse_and_map(np, 1);
- if (ret > 0)
- arch_timer_ppi2 = ret;
- pr_info("arch_timer: found %s irqs %d %d\n",
- np->name, arch_timer_ppi, arch_timer_ppi2);
+ arch_timer_counter_init();
- return arch_timer_common_register();
+ return 0;
}
#endif
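
(Editorial sketch, not part of the patch: the arch_timer.c hunks above drop the
arch_timer_operations function-pointer table in favour of inline accessors that
take a cp15/memory flag and per-backend wrappers. The standalone C program below
illustrates only that dispatch pattern; every name in it is hypothetical and it
does not model any real timer hardware.)

  /*
   * Illustrative sketch of "pick a register backend with a flag and dispatch
   * through thin inline wrappers". All names here are made up.
   */
  #include <stdio.h>
  #include <stdint.h>

  #define REG_CTRL 0
  #define REG_TVAL 1
  #define NR_REGS  2

  /* Simulated "cp15" (CPU system register) bank. */
  static uint32_t cp15_regs[NR_REGS];
  /* Simulated memory-mapped timer frame. */
  static uint32_t mmio_regs[NR_REGS];

  static inline void reg_write_cp15(int reg, uint32_t val) { cp15_regs[reg] = val; }
  static inline uint32_t reg_read_cp15(int reg)            { return cp15_regs[reg]; }
  static inline void reg_write_mem(int reg, uint32_t val)  { mmio_regs[reg] = val; }
  static inline uint32_t reg_read_mem(int reg)             { return mmio_regs[reg]; }

  /* Thin dispatchers: the cp15 flag selects the backend at each call site. */
  static inline void timer_reg_write(int cp15, int reg, uint32_t val)
  {
          if (cp15)
                  reg_write_cp15(reg, val);
          else
                  reg_write_mem(reg, val);
  }

  static inline uint32_t timer_reg_read(int cp15, int reg)
  {
          return cp15 ? reg_read_cp15(reg) : reg_read_mem(reg);
  }

  int main(void)
  {
          timer_reg_write(1, REG_CTRL, 0x1);  /* "CPU" timer */
          timer_reg_write(0, REG_TVAL, 500);  /* "memory-mapped" frame */
          printf("cp15 CTRL=%u, mmio TVAL=%u\n",
                 (unsigned)timer_reg_read(1, REG_CTRL),
                 (unsigned)timer_reg_read(0, REG_TVAL));
          return 0;
  }

The same flag-plus-wrapper idea is what lets the patch register a percpu
CP15-backed clockevent and, separately, a memory-mapped frame, without keeping
a global ops pointer.
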
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index e8853b9..a70e6c6 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -284,9 +284,10 @@
select MSM_ULTRASOUND_B
select MSM_LPM_TEST
select MSM_RPM_LOG
+ select ARCH_WANT_KMAP_ATOMIC_FLUSH
-config ARCH_MSMZINC
- bool "MSMZINC"
+config ARCH_APQ8084
+ bool "APQ8084"
select ARCH_MSM_KRAITMP
select GPIO_MSM_V3
select ARM_GIC
@@ -302,6 +303,7 @@
select REGULATOR
select ARM_HAS_SG_CHAIN
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
+ select ARCH_WANT_KMAP_ATOMIC_FLUSH
config ARCH_MPQ8092
bool "MPQ8092"
@@ -391,6 +393,21 @@
select MEMORY_HOLE_CARVEOUT
select MSM_RPM_LOG
+config ARCH_MSMKRYPTON
+ bool "MSMKRYPTON"
+ select ARM_GIC
+ select CPU_V7
+ select MSM_GPIOMUX
+ select MSM_RPM_SMD
+ select MSM_NATIVE_RESTART
+ select MSM_RESTART_V2
+ select MSM_SPM_V2
+ select MSM_PM8X60 if PM
+ select MULTI_IRQ_HANDLER
+ select GPIO_MSM_V3
+ select MAY_HAVE_SPARSE_IRQ
+ select SPARSE_IRQ
+
config ARCH_MSM8610
bool "MSM8610"
select ARM_GIC
@@ -420,6 +437,7 @@
select CPU_FREQ
select CPU_FREQ_GOV_USERSPACE
select CPU_FREQ_GOV_ONDEMAND
+ select CPU_FREQ_GOV_POWERSAVE
select MSM_PIL
select MSM_RUN_QUEUE_STATS
select ARM_HAS_SG_CHAIN
@@ -428,6 +446,8 @@
select MSM_SPM_REGULATOR
select MSM_JTAG_MM if CORESIGHT_ETM
select MSM_CPR_REGULATOR
+ select MSM_RPM_LOG
+ select MSM_RPM_STATS_LOG
config ARCH_MSM8226
bool "MSM8226"
@@ -466,6 +486,9 @@
select MSM_SPM_REGULATOR
select MSM_JTAG_MM if CORESIGHT_ETM
select MSM_CPR_REGULATOR
+ select MSM_RPM_LOG
+ select MSM_RPM_STATS_LOG
+ select ARCH_WANT_KMAP_ATOMIC_FLUSH
endmenu
choice
@@ -1064,12 +1087,13 @@
default "0x80200000" if ARCH_MSM8960
default "0x80200000" if ARCH_MSM8930
default "0x00000000" if ARCH_MSM8974
- default "0x00000000" if ARCH_MSMZINC
+ default "0x00000000" if ARCH_APQ8084
default "0x00000000" if ARCH_MPQ8092
default "0x00000000" if ARCH_MSM8226
default "0x00000000" if ARCH_MSM8610
default "0x10000000" if ARCH_FSM9XXX
default "0x00200000" if ARCH_MSM9625
+ default "0x00000000" if ARCH_MSMKRYPTON
default "0x00200000" if !MSM_STACKED_MEMORY
default "0x00000000" if ARCH_QSD8X50 && MSM_SOC_REV_A
default "0x20000000" if ARCH_QSD8X50
@@ -1216,13 +1240,21 @@
Say Y here if you want the debug print routines to direct
their output to the serial port on MPQ8092 devices.
- config DEBUG_MSMZINC_UART
- bool "Kernel low-level debugging messages via MSMZINC UART"
- depends on ARCH_MSMZINC
+ config DEBUG_APQ8084_UART
+ bool "Kernel low-level debugging messages via APQ8084 UART"
+ depends on ARCH_APQ8084
select MSM_HAS_DEBUG_UART_HS_V14
help
Say Y here if you want the debug print routines to direct
- their output to the serial port on MSMZINC devices.
+ their output to the serial port on APQ8084 devices.
+
+ config DEBUG_MSM9625_UART
+ bool "Kernel low-level debugging messages via MSM9625 UART"
+ depends on ARCH_MSM9625
+ select MSM_HAS_DEBUG_UART_HS_V14
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the serial port on MSM9625 devices.
endchoice
choice
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 1c14ac6..a45f5ec 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -120,7 +120,8 @@
ifndef CONFIG_ARCH_MSM9625
ifndef CONFIG_ARCH_MPQ8092
ifndef CONFIG_ARCH_MSM8610
-ifndef CONFIG_ARCH_MSMZINC
+ifndef CONFIG_ARCH_APQ8084
+ifndef CONFIG_ARCH_MSMKRYPTON
obj-y += nand_partitions.o
endif
endif
@@ -131,6 +132,7 @@
endif
endif
endif
+endif
obj-$(CONFIG_MSM_SDIO_TTY) += sdio_tty.o
obj-$(CONFIG_MSM_SMD_TTY) += smd_tty.o
obj-$(CONFIG_MSM_SMD_QMI) += smd_qmi.o
@@ -295,7 +297,7 @@
obj-$(CONFIG_MACH_MPQ8064_DTV) += board-8064-all.o board-8064-regulator.o
obj-$(CONFIG_ARCH_MSM9615) += board-9615.o devices-9615.o board-9615-regulator.o board-9615-gpiomux.o board-9615-storage.o board-9615-display.o
obj-$(CONFIG_ARCH_MSM9615) += clock-local.o clock-9615.o acpuclock-9615.o clock-rpm.o clock-pll.o
-obj-$(CONFIG_ARCH_MSMZINC) += board-zinc.o board-zinc-gpiomux.o
+obj-$(CONFIG_ARCH_APQ8084) += board-8084.o board-8084-gpiomux.o
obj-$(CONFIG_ARCH_MSM8974) += board-8974.o board-8974-gpiomux.o
obj-$(CONFIG_ARCH_MSM8974) += acpuclock-8974.o
obj-$(CONFIG_ARCH_MSM8974) += clock-local2.o clock-pll.o clock-8974.o clock-rpm.o clock-voter.o clock-mdss-8974.o
@@ -304,6 +306,7 @@
obj-$(CONFIG_ARCH_MSM8226) += gdsc.o
obj-$(CONFIG_ARCH_MSM8610) += gdsc.o
obj-$(CONFIG_ARCH_MSM8974) += krait-regulator.o
+obj-$(CONFIG_ARCH_MSMKRYPTON) += board-krypton.o board-krypton-gpiomux.o
obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
obj-$(CONFIG_ARCH_MSM9625) += clock-local2.o clock-pll.o clock-9625.o clock-rpm.o clock-voter.o acpuclock-9625.o acpuclock-cortex.o
obj-$(CONFIG_ARCH_MSM8930) += acpuclock-8930.o acpuclock-8627.o acpuclock-8930aa.o acpuclock-8930ab.o
@@ -368,10 +371,11 @@
obj-$(CONFIG_ARCH_MSM9615) += gpiomux-v2.o gpiomux.o
obj-$(CONFIG_ARCH_MSM8974) += gpiomux-v2.o gpiomux.o
obj-$(CONFIG_ARCH_MSM9625) += gpiomux-v2.o gpiomux.o
+obj-$(CONFIG_ARCH_MSMKRYPTON) += gpiomux-v2.o gpiomux.o
obj-$(CONFIG_ARCH_MPQ8092) += gpiomux-v2.o gpiomux.o
obj-$(CONFIG_ARCH_MSM8226) += gpiomux-v2.o gpiomux.o
obj-$(CONFIG_ARCH_MSM8610) += gpiomux-v2.o gpiomux.o
-obj-$(CONFIG_ARCH_MSMZINC) += gpiomux-v2.o gpiomux.o
+obj-$(CONFIG_ARCH_APQ8084) += gpiomux-v2.o gpiomux.o
obj-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += idle_stats_device.o
obj-$(CONFIG_MSM_DCVS) += msm_dcvs_scm.o msm_dcvs.o msm_mpdecision.o
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index e3b8d73..d57709d 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -57,11 +57,15 @@
dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-fluid.dtb
dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-liquid.dtb
dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-mtp.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2-liquid.dtb
-# MSMZINC
- zreladdr-$(CONFIG_ARCH_MSMZINC) := 0x00008000
- dtb-$(CONFIG_ARCH_MSMZINC) += msmzinc-sim.dtb
+# APQ8084
+ zreladdr-$(CONFIG_ARCH_APQ8084) := 0x00008000
+ dtb-$(CONFIG_ARCH_APQ8084) += apq8084-sim.dtb
+# MSMKRYPTON
+ zreladdr-$(CONFIG_ARCH_MSMKRYPTON) := 0x00008000
+ dtb-$(CONFIG_ARCH_MSMKRYPTON) += msmkrypton-sim.dtb
# MSM9615
zreladdr-$(CONFIG_ARCH_MSM9615) := 0x40808000
@@ -73,8 +77,8 @@
dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v1-rumi.dtb
dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2-cdp.dtb
dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2-mtp.dtb
- dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2-1-mtp.dtb
- dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2-1-cdp.dtb
+ dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2.1-mtp.dtb
+ dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2.1-cdp.dtb
# MSM8226
zreladdr-$(CONFIG_ARCH_MSM8226) := 0x00008000
diff --git a/arch/arm/mach-msm/acpuclock-8226.c b/arch/arm/mach-msm/acpuclock-8226.c
index 6e93c57..25bebd1 100644
--- a/arch/arm/mach-msm/acpuclock-8226.c
+++ b/arch/arm/mach-msm/acpuclock-8226.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/cpr-regulator.h>
@@ -25,12 +26,13 @@
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <mach/rpm-regulator-smd.h>
+#include <mach/socinfo.h>
#include "acpuclock-cortex.h"
#define RCG_CONFIG_UPDATE_BIT BIT(0)
-static struct msm_bus_paths bw_level_tbl[] = {
+static struct msm_bus_paths bw_level_tbl_8226[] = {
[0] = BW_MBPS(152), /* At least 19 MHz on bus. */
[1] = BW_MBPS(300), /* At least 37.5 MHz on bus. */
[2] = BW_MBPS(400), /* At least 50 MHz on bus. */
@@ -41,9 +43,18 @@
[7] = BW_MBPS(4264), /* At least 533 MHz on bus. */
};
+static struct msm_bus_paths bw_level_tbl_8610[] = {
+ [0] = BW_MBPS(152), /* At least 19 MHz on bus. */
+ [1] = BW_MBPS(300), /* At least 37.5 MHz on bus. */
+ [2] = BW_MBPS(400), /* At least 50 MHz on bus. */
+ [3] = BW_MBPS(800), /* At least 100 MHz on bus. */
+ [4] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+ [5] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+};
+
static struct msm_bus_scale_pdata bus_client_pdata = {
- .usecase = bw_level_tbl,
- .num_usecases = ARRAY_SIZE(bw_level_tbl),
+ .usecase = bw_level_tbl_8226,
+ .num_usecases = ARRAY_SIZE(bw_level_tbl_8226),
.active_only = 1,
.name = "acpuclock",
};
@@ -53,7 +64,7 @@
* 2) Update bus bandwidth
* 3) Depending on Frodo version, may need minimum of LVL_NOM
*/
-static struct clkctl_acpu_speed acpu_freq_tbl[] = {
+static struct clkctl_acpu_speed acpu_freq_tbl_8226[] = {
{ 0, 19200, CXO, 0, 0, CPR_CORNER_SVS, 1150000, 0 },
{ 1, 300000, PLL0, 4, 2, CPR_CORNER_SVS, 1150000, 4 },
{ 1, 384000, ACPUPLL, 5, 0, CPR_CORNER_SVS, 1150000, 4 },
@@ -64,8 +75,19 @@
{ 0 }
};
+static struct clkctl_acpu_speed acpu_freq_tbl_8610[] = {
+ { 0, 19200, CXO, 0, 0, CPR_CORNER_SVS, 1150000, 0 },
+ { 1, 300000, PLL0, 4, 2, CPR_CORNER_SVS, 1150000, 3 },
+ { 1, 384000, ACPUPLL, 5, 0, CPR_CORNER_SVS, 1150000, 3 },
+ { 1, 600000, PLL0, 4, 0, CPR_CORNER_NORMAL, 1150000, 4 },
+ { 1, 787200, ACPUPLL, 5, 0, CPR_CORNER_NORMAL, 1150000, 4 },
+ { 0, 998400, ACPUPLL, 5, 0, CPR_CORNER_TURBO, 1275000, 5 },
+ { 0, 1190400, ACPUPLL, 5, 0, CPR_CORNER_TURBO, 1275000, 5 },
+ { 0 }
+};
+
static struct acpuclk_drv_data drv_data = {
- .freq_tbl = acpu_freq_tbl,
+ .freq_tbl = acpu_freq_tbl_8226,
.current_speed = &(struct clkctl_acpu_speed){ 0 },
.bus_scale = &bus_client_pdata,
.vdd_max_cpu = CPR_CORNER_TURBO,
@@ -82,17 +104,21 @@
.update_mask = RCG_CONFIG_UPDATE_BIT,
.poll_mask = RCG_CONFIG_UPDATE_BIT,
},
+ .power_collapse_khz = 300000,
+ .wait_for_irq_khz = 300000,
};
static int __init acpuclk_a7_probe(struct platform_device *pdev)
{
struct resource *res;
+ u32 i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rcg_base");
if (!res)
return -EINVAL;
- drv_data.apcs_rcg_cmd = ioremap(res->start, resource_size(res));
+ drv_data.apcs_rcg_cmd = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (!drv_data.apcs_rcg_cmd)
return -ENOMEM;
@@ -110,6 +136,21 @@
return PTR_ERR(drv_data.vdd_mem);
}
+ for (i = 0; i < NUM_SRC; i++) {
+ if (!drv_data.src_clocks[i].name)
+ continue;
+ drv_data.src_clocks[i].clk =
+ devm_clk_get(&pdev->dev, drv_data.src_clocks[i].name);
+ if (IS_ERR(drv_data.src_clocks[i].clk)) {
+ dev_err(&pdev->dev, "Unable to get clock %s\n",
+ drv_data.src_clocks[i].name);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ /* Enable the always on source */
+ clk_prepare_enable(drv_data.src_clocks[PLL0].clk);
+
return acpuclk_cortex_init(pdev, &drv_data);
}
@@ -126,8 +167,18 @@
},
};
+void msm8610_acpu_init(void)
+{
+ drv_data.bus_scale->usecase = bw_level_tbl_8610;
+ drv_data.bus_scale->num_usecases = ARRAY_SIZE(bw_level_tbl_8610);
+ drv_data.freq_tbl = acpu_freq_tbl_8610;
+}
+
static int __init acpuclk_a7_init(void)
{
+ if (cpu_is_msm8610())
+ msm8610_acpu_init();
+
return platform_driver_probe(&acpuclk_a7_driver, acpuclk_a7_probe);
}
device_initcall(acpuclk_a7_init);
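
(Editorial sketch, not part of the patch: the acpuclock-8226.c hunks above keep
one driver but swap in MSM8610-specific frequency and bandwidth tables before
probing, keyed on cpu_is_msm8610(). The short program below sketches that
"override the default table at init" pattern; the table contents and helper
names are illustrative only.)

  /* Hypothetical sketch of selecting a SoC-specific table before registration. */
  #include <stdio.h>

  struct freq_level { int enabled; unsigned long khz; };

  static struct freq_level tbl_8226[] = { {1, 300000}, {1, 787200}, {1, 1190400}, {0, 0} };
  static struct freq_level tbl_8610[] = { {1, 300000}, {1, 787200}, {0, 998400},  {0, 0} };

  struct drv_data { struct freq_level *freq_tbl; };

  static struct drv_data drv = { .freq_tbl = tbl_8226 };  /* default target */

  static int cpu_is_8610(void) { return 1; /* pretend SoC detection said 8610 */ }

  static void acpu_init(void)
  {
          if (cpu_is_8610())
                  drv.freq_tbl = tbl_8610;  /* override before "probe" runs */
  }

  int main(void)
  {
          acpu_init();
          for (struct freq_level *l = drv.freq_tbl; l->khz; l++)
                  printf("%s %lu kHz\n", l->enabled ? "use " : "skip", l->khz);
          return 0;
  }
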
diff --git a/arch/arm/mach-msm/acpuclock-8960ab.c b/arch/arm/mach-msm/acpuclock-8960ab.c
index 38658a2..0fa2cde 100644
--- a/arch/arm/mach-msm/acpuclock-8960ab.c
+++ b/arch/arm/mach-msm/acpuclock-8960ab.c
@@ -109,12 +109,12 @@
static struct acpu_level freq_tbl_PVS0[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000, AVS(0x70001F) },
- { 1, { 486000, HFPLL, 2, 0x24 }, L2(3), 950000, AVS(0x0) },
- { 1, { 594000, HFPLL, 1, 0x16 }, L2(3), 975000, AVS(0x0) },
- { 1, { 702000, HFPLL, 1, 0x1A }, L2(3), 1000000, AVS(0x0) },
- { 1, { 810000, HFPLL, 1, 0x1E }, L2(3), 1025000, AVS(0x0) },
- { 1, { 918000, HFPLL, 1, 0x22 }, L2(3), 1050000, AVS(0x0) },
- { 1, { 1026000, HFPLL, 1, 0x26 }, L2(3), 1075000, AVS(0x0) },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(4), 950000, AVS(0x0) },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(4), 975000, AVS(0x0) },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(4), 1000000, AVS(0x0) },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(4), 1025000, AVS(0x0) },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(4), 1050000, AVS(0x0) },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(4), 1075000, AVS(0x0) },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(9), 1100000, AVS(0x70000D) },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(9), 1125000, AVS(0x0) },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(9), 1150000, AVS(0x0) },
@@ -127,12 +127,12 @@
static struct acpu_level freq_tbl_PVS1[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 925000, AVS(0x70001F) },
- { 1, { 486000, HFPLL, 2, 0x24 }, L2(3), 925000, AVS(0x0) },
- { 1, { 594000, HFPLL, 1, 0x16 }, L2(3), 950000, AVS(0x0) },
- { 1, { 702000, HFPLL, 1, 0x1A }, L2(3), 975000, AVS(0x0) },
- { 1, { 810000, HFPLL, 1, 0x1E }, L2(3), 1000000, AVS(0x0) },
- { 1, { 918000, HFPLL, 1, 0x22 }, L2(3), 1025000, AVS(0x0) },
- { 1, { 1026000, HFPLL, 1, 0x26 }, L2(3), 1050000, AVS(0x0) },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(4), 925000, AVS(0x0) },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(4), 950000, AVS(0x0) },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(4), 975000, AVS(0x0) },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(4), 1000000, AVS(0x0) },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(4), 1025000, AVS(0x0) },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(4), 1050000, AVS(0x0) },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(9), 1075000, AVS(0x70000D) },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(9), 1100000, AVS(0x0) },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(9), 1125000, AVS(0x0) },
@@ -145,12 +145,12 @@
static struct acpu_level freq_tbl_PVS2[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 900000, AVS(0x70001F) },
- { 1, { 486000, HFPLL, 2, 0x24 }, L2(3), 900000, AVS(0x0) },
- { 1, { 594000, HFPLL, 1, 0x16 }, L2(3), 925000, AVS(0x0) },
- { 1, { 702000, HFPLL, 1, 0x1A }, L2(3), 950000, AVS(0x0) },
- { 1, { 810000, HFPLL, 1, 0x1E }, L2(3), 975000, AVS(0x0) },
- { 1, { 918000, HFPLL, 1, 0x22 }, L2(3), 1000000, AVS(0x0) },
- { 1, { 1026000, HFPLL, 1, 0x26 }, L2(3), 1025000, AVS(0x0) },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(4), 900000, AVS(0x0) },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(4), 925000, AVS(0x0) },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(4), 950000, AVS(0x0) },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(4), 975000, AVS(0x0) },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(4), 1000000, AVS(0x0) },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(4), 1025000, AVS(0x0) },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(9), 1050000, AVS(0x70000D) },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(9), 1075000, AVS(0x0) },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(9), 1100000, AVS(0x0) },
@@ -163,12 +163,12 @@
static struct acpu_level freq_tbl_PVS3[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 900000, AVS(0x70001F) },
- { 1, { 486000, HFPLL, 2, 0x24 }, L2(3), 900000, AVS(0x0) },
- { 1, { 594000, HFPLL, 1, 0x16 }, L2(3), 900000, AVS(0x0) },
- { 1, { 702000, HFPLL, 1, 0x1A }, L2(3), 925000, AVS(0x0) },
- { 1, { 810000, HFPLL, 1, 0x1E }, L2(3), 950000, AVS(0x0) },
- { 1, { 918000, HFPLL, 1, 0x22 }, L2(3), 975000, AVS(0x0) },
- { 1, { 1026000, HFPLL, 1, 0x26 }, L2(3), 1000000, AVS(0x0) },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(4), 900000, AVS(0x0) },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(4), 900000, AVS(0x0) },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(4), 925000, AVS(0x0) },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(4), 950000, AVS(0x0) },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(4), 975000, AVS(0x0) },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(4), 1000000, AVS(0x0) },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(9), 1025000, AVS(0x70000D) },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(9), 1050000, AVS(0x0) },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(9), 1075000, AVS(0x0) },
@@ -181,12 +181,12 @@
static struct acpu_level freq_tbl_PVS4[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000, AVS(0x70001F) },
- { 1, { 486000, HFPLL, 2, 0x24 }, L2(3), 875000, AVS(0x0) },
- { 1, { 594000, HFPLL, 1, 0x16 }, L2(3), 875000, AVS(0x0) },
- { 1, { 702000, HFPLL, 1, 0x1A }, L2(3), 900000, AVS(0x0) },
- { 1, { 810000, HFPLL, 1, 0x1E }, L2(3), 925000, AVS(0x0) },
- { 1, { 918000, HFPLL, 1, 0x22 }, L2(3), 950000, AVS(0x0) },
- { 1, { 1026000, HFPLL, 1, 0x26 }, L2(3), 975000, AVS(0x0) },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(4), 875000, AVS(0x0) },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(4), 875000, AVS(0x0) },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(4), 900000, AVS(0x0) },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(4), 925000, AVS(0x0) },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(4), 950000, AVS(0x0) },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(4), 975000, AVS(0x0) },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(9), 1000000, AVS(0x70000D) },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(9), 1025000, AVS(0x0) },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(9), 1050000, AVS(0x0) },
@@ -199,12 +199,12 @@
static struct acpu_level freq_tbl_PVS5[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000, AVS(0x70001F) },
- { 1, { 486000, HFPLL, 2, 0x24 }, L2(3), 875000, AVS(0x0) },
- { 1, { 594000, HFPLL, 1, 0x16 }, L2(3), 875000, AVS(0x0) },
- { 1, { 702000, HFPLL, 1, 0x1A }, L2(3), 875000, AVS(0x0) },
- { 1, { 810000, HFPLL, 1, 0x1E }, L2(3), 900000, AVS(0x0) },
- { 1, { 918000, HFPLL, 1, 0x22 }, L2(3), 925000, AVS(0x0) },
- { 1, { 1026000, HFPLL, 1, 0x26 }, L2(3), 950000, AVS(0x0) },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(4), 875000, AVS(0x0) },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(4), 875000, AVS(0x0) },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(4), 875000, AVS(0x0) },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(4), 900000, AVS(0x0) },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(4), 925000, AVS(0x0) },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(4), 950000, AVS(0x0) },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(9), 975000, AVS(0x70000D) },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(9), 1000000, AVS(0x0) },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(9), 1025000, AVS(0x0) },
@@ -217,12 +217,12 @@
static struct acpu_level freq_tbl_PVS6[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 850000, AVS(0x70001F) },
- { 1, { 486000, HFPLL, 2, 0x24 }, L2(3), 850000, AVS(0x0) },
- { 1, { 594000, HFPLL, 1, 0x16 }, L2(3), 850000, AVS(0x0) },
- { 1, { 702000, HFPLL, 1, 0x1A }, L2(3), 850000, AVS(0x0) },
- { 1, { 810000, HFPLL, 1, 0x1E }, L2(3), 875000, AVS(0x0) },
- { 1, { 918000, HFPLL, 1, 0x22 }, L2(3), 900000, AVS(0x0) },
- { 1, { 1026000, HFPLL, 1, 0x26 }, L2(3), 925000, AVS(0x0) },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(4), 850000, AVS(0x0) },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(4), 850000, AVS(0x0) },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(4), 850000, AVS(0x0) },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(4), 875000, AVS(0x0) },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(4), 900000, AVS(0x0) },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(4), 925000, AVS(0x0) },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(9), 950000, AVS(0x70000D) },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(9), 975000, AVS(0x0) },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(9), 1000000, AVS(0x0) },
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 370be84..a61f5ca 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -39,6 +39,8 @@
.user_val = 0x8,
.user_vco_mask = BIT(20),
.config_val = 0x04D0405D,
+ .has_lock_status = true,
+ .status_offset = 0x1C,
.low_vco_l_max = 65,
.low_vdd_l_max = 52,
.nom_vdd_l_max = 104,
@@ -126,122 +128,122 @@
};
static struct acpu_level acpu_freq_tbl_v1_pvs0[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 825000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 825000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 825000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 825000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 835000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 845000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 860000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 880000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 905000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 920000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 940000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 960000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 980000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 995000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 1015000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 1030000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1050000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 825000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(3), 825000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(6), 825000, 124 },
+ { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(7), 825000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(10), 835000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(10), 845000, 229 },
+ { 0, { 960000, HFPLL, 1, 50 }, L2(10), 860000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 880000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 905000, 298 },
+ { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 920000, 321 },
+ { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 940000, 346 },
+ { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 960000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 980000, 397 },
+ { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 995000, 423 },
+ { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 1015000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 1030000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1050000, 506 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_v1_pvs1[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 825000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 825000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 825000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 825000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 835000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 845000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 860000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 880000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 905000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 920000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 940000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 960000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 980000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 995000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 1015000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 1030000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1050000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 825000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(3), 825000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(6), 825000, 124 },
+ { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(7), 825000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(10), 835000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(10), 845000, 229 },
+ { 0, { 960000, HFPLL, 1, 50 }, L2(10), 860000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 880000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 905000, 298 },
+ { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 920000, 321 },
+ { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 940000, 346 },
+ { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 960000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 980000, 397 },
+ { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 995000, 423 },
+ { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 1015000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 1030000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1050000, 506 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_v1_pvs2[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 825000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 825000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 825000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 825000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 825000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 825000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 835000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 855000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 875000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 895000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 915000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 930000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 945000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 960000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 975000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 990000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1000000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 825000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(3), 825000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(6), 825000, 124 },
+ { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(7), 825000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(10), 825000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(10), 825000, 229 },
+ { 0, { 960000, HFPLL, 1, 50 }, L2(10), 835000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 855000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 875000, 298 },
+ { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 895000, 321 },
+ { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 915000, 346 },
+ { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 930000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 945000, 397 },
+ { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 960000, 423 },
+ { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 975000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 990000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1000000, 506 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_v1_pvs3[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 825000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 825000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 825000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 825000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 825000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 825000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 835000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 855000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 875000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 895000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 915000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 930000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 945000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 960000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 975000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 990000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1000000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 825000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(3), 825000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(6), 825000, 124 },
+ { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(7), 825000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(10), 825000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(10), 825000, 229 },
+ { 0, { 960000, HFPLL, 1, 50 }, L2(10), 835000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 855000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 875000, 298 },
+ { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 895000, 321 },
+ { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 915000, 346 },
+ { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 930000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 945000, 397 },
+ { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 960000, 423 },
+ { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 975000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 990000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1000000, 506 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_v1_pvs4[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 825000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 825000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 825000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 825000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 825000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 825000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 825000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 825000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 835000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 855000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 870000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 885000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 900000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 910000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 925000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 940000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 950000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 825000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(3), 825000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(6), 825000, 124 },
+ { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(7), 825000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(10), 825000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(10), 825000, 229 },
+ { 0, { 960000, HFPLL, 1, 50 }, L2(10), 825000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 825000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 835000, 298 },
+ { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 855000, 321 },
+ { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 870000, 346 },
+ { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 885000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 900000, 397 },
+ { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 910000, 423 },
+ { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 925000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 940000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 950000, 506 },
{ 0, { 0 } }
};
@@ -260,430 +262,640 @@
static struct l2_level l2_freq_tbl_v2[] __initdata = {
[0] = { { 300000, PLL_0, 0, 0 }, LVL_LOW, 950000, 0 },
[1] = { { 345600, HFPLL, 2, 36 }, LVL_LOW, 950000, 1 },
- [2] = { { 422400, HFPLL, 2, 44 }, LVL_LOW, 950000, 1 },
- [3] = { { 499200, HFPLL, 2, 52 }, LVL_LOW, 950000, 2 },
- [4] = { { 576000, HFPLL, 1, 30 }, LVL_LOW, 950000, 3 },
- [5] = { { 652800, HFPLL, 1, 34 }, LVL_NOM, 950000, 3 },
- [6] = { { 729600, HFPLL, 1, 38 }, LVL_NOM, 950000, 3 },
+ [2] = { { 422400, HFPLL, 2, 44 }, LVL_LOW, 950000, 2 },
+ [3] = { { 499200, HFPLL, 2, 52 }, LVL_LOW, 950000, 3 },
+ [4] = { { 576000, HFPLL, 1, 30 }, LVL_LOW, 950000, 4 },
+ [5] = { { 652800, HFPLL, 1, 34 }, LVL_NOM, 950000, 4 },
+ [6] = { { 729600, HFPLL, 1, 38 }, LVL_NOM, 950000, 4 },
[7] = { { 806400, HFPLL, 1, 42 }, LVL_NOM, 950000, 4 },
- [8] = { { 883200, HFPLL, 1, 46 }, LVL_NOM, 950000, 4 },
- [9] = { { 960000, HFPLL, 1, 50 }, LVL_NOM, 950000, 4 },
- [10] = { { 1036800, HFPLL, 1, 54 }, LVL_NOM, 950000, 5 },
- [11] = { { 1113600, HFPLL, 1, 58 }, LVL_HIGH, 1050000, 5 },
+ [8] = { { 883200, HFPLL, 1, 46 }, LVL_NOM, 950000, 5 },
+ [9] = { { 960000, HFPLL, 1, 50 }, LVL_NOM, 950000, 5 },
+ [10] = { { 1036800, HFPLL, 1, 54 }, LVL_NOM, 950000, 6 },
+ [11] = { { 1113600, HFPLL, 1, 58 }, LVL_HIGH, 1050000, 6 },
[12] = { { 1190400, HFPLL, 1, 62 }, LVL_HIGH, 1050000, 6 },
- [13] = { { 1267200, HFPLL, 1, 66 }, LVL_HIGH, 1050000, 6 },
+ [13] = { { 1267200, HFPLL, 1, 66 }, LVL_HIGH, 1050000, 7 },
[14] = { { 1344000, HFPLL, 1, 70 }, LVL_HIGH, 1050000, 7 },
[15] = { { 1420800, HFPLL, 1, 74 }, LVL_HIGH, 1050000, 7 },
[16] = { { 1497600, HFPLL, 1, 78 }, LVL_HIGH, 1050000, 7 },
- [17] = { { 1574400, HFPLL, 1, 82 }, LVL_HIGH, 1050000, 8 },
- [18] = { { 1651200, HFPLL, 1, 86 }, LVL_HIGH, 1050000, 8 },
+ [17] = { { 1574400, HFPLL, 1, 82 }, LVL_HIGH, 1050000, 7 },
+ [18] = { { 1651200, HFPLL, 1, 86 }, LVL_HIGH, 1050000, 7 },
[19] = { { 1728000, HFPLL, 1, 90 }, LVL_HIGH, 1050000, 8 },
{ }
};
static struct acpu_level acpu_freq_tbl_2g_pvs0[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 815000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 825000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 835000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 845000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 855000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 865000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 875000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 890000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 900000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 915000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 925000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 940000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 950000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 965000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 980000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 995000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 1010000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 1025000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 1040000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1055000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 1070000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 1085000, 3200000 },
- { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1100000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 815000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 825000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 835000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 845000, 124 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 855000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 865000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 875000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 890000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 900000, 229 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 915000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 925000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 940000, 298 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 950000, 321 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 965000, 346 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 980000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 995000, 397 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 1010000, 423 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 1025000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 1040000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 1055000, 506 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 1070000, 536 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 1085000, 567 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1100000, 598 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2g_pvs1[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 800000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 810000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 820000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 830000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 840000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 850000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 860000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 875000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 885000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 895000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 910000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 920000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 930000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 945000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 960000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 975000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 990000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 1005000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 1020000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1030000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 1045000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 1060000, 3200000 },
- { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1075000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 800000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 810000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 820000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 830000, 124 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 840000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 850000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 860000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 875000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 885000, 229 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 895000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 910000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 920000, 298 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 930000, 321 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 945000, 346 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 960000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 975000, 397 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 990000, 423 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 1005000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 1020000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 1030000, 506 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 1045000, 536 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 1060000, 567 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1075000, 598 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2g_pvs2[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 785000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 795000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 805000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 815000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 825000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 835000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 845000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 855000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 865000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 875000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 890000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 900000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 910000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 925000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 940000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 955000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 970000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 980000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 995000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1005000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 1020000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 1035000, 3200000 },
- { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1050000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 785000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 795000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 805000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 815000, 124 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 825000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 835000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 845000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 855000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 865000, 229 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 875000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 890000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 900000, 298 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 910000, 321 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 925000, 346 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 940000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 955000, 397 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 970000, 423 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 980000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 995000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 1005000, 506 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 1020000, 536 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 1035000, 567 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1050000, 598 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2g_pvs3[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 780000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 790000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 800000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 810000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 820000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 830000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 840000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 850000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 860000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 875000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 885000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 895000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 910000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 925000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 935000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 950000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 960000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 970000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 985000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 995000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 1010000, 3200000 },
- { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1025000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 780000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 790000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 800000, 124 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 810000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 820000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 830000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 840000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 850000, 229 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 860000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 875000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 885000, 298 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 895000, 321 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 910000, 346 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 925000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 935000, 397 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 950000, 423 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 960000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 970000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 985000, 506 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 995000, 536 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 1010000, 567 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1025000, 598 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2g_pvs4[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 775000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 780000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 790000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 800000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 810000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 820000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 830000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 840000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 850000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 860000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 870000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 880000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 895000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 910000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 920000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 930000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 940000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 950000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 960000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 975000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 985000, 3200000 },
- { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1000000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 775000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 780000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 790000, 124 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 800000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 810000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 820000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 830000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 840000, 229 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 850000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 860000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 870000, 298 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 880000, 321 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 895000, 346 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 910000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 920000, 397 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 930000, 423 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 940000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 950000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 960000, 506 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 975000, 536 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 985000, 567 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1000000, 598 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2g_pvs5[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 750000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 760000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 770000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 780000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 790000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 800000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 810000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 820000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 830000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 840000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 850000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 860000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 870000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 880000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 890000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 900000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 910000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 920000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 930000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 940000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 955000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 965000, 3200000 },
- { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 975000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 750000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 760000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 770000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 780000, 124 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 790000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 800000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 810000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 820000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 830000, 229 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 840000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 850000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 860000, 298 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 870000, 321 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 880000, 346 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 890000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 900000, 397 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 910000, 423 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 920000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 930000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 940000, 506 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 955000, 536 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 965000, 567 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 975000, 598 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2g_pvs6[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 750000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 750000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 760000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 770000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 780000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 790000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 800000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 810000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 820000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 830000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 840000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 850000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 860000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 870000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 875000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 885000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 895000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 905000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 915000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 920000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 930000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 940000, 3200000 },
- { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 950000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 750000, 73 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 750000, 85 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 760000, 104 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 770000, 124 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 780000, 144 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 790000, 165 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 800000, 186 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 810000, 208 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 820000, 229 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 830000, 252 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 840000, 275 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 850000, 298 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 860000, 321 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 870000, 346 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 875000, 371 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 885000, 397 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 895000, 423 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 905000, 450 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 915000, 477 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 920000, 506 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 930000, 536 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 940000, 567 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 950000, 598 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_2p2g_pvs0[] __initdata = {
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 800000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 800000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 805000, 102 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 815000, 121 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 825000, 141 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 835000, 161 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 845000, 181 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 855000, 202 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 865000, 223 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 875000, 245 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 890000, 267 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 900000, 289 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 915000, 313 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 925000, 336 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 940000, 360 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 950000, 383 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 965000, 409 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 980000, 435 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 995000, 461 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 1010000, 488 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 1025000, 516 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 1040000, 543 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1055000, 573 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 1070000, 604 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 1085000, 636 },
+ { 1, { 2150400, HFPLL, 1, 112 }, L2(19), 1100000, 656 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_2p2g_pvs1[] __initdata = {
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 800000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 800000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 800000, 102 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 800000, 121 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 810000, 141 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 820000, 161 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 830000, 181 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 840000, 202 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 850000, 223 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 860000, 245 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 875000, 267 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 885000, 289 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 895000, 313 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 910000, 336 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 920000, 360 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 930000, 383 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 945000, 409 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 960000, 435 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 975000, 461 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 990000, 488 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 1005000, 516 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 1020000, 543 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1030000, 573 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 1045000, 604 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 1060000, 636 },
+ { 1, { 2150400, HFPLL, 1, 112 }, L2(19), 1075000, 656 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_2p2g_pvs2[] __initdata = {
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 775000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 775000, 102 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 785000, 121 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 795000, 141 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 805000, 161 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 815000, 181 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 825000, 202 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 835000, 223 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 845000, 245 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 855000, 267 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 865000, 289 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 875000, 313 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 890000, 336 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 900000, 360 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 910000, 383 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 925000, 409 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 940000, 435 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 955000, 461 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 970000, 488 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 980000, 516 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 995000, 543 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1005000, 573 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 1020000, 604 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 1035000, 636 },
+ { 1, { 2150400, HFPLL, 1, 112 }, L2(19), 1050000, 656 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_2p2g_pvs3[] __initdata = {
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 775000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 775000, 102 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 775000, 121 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 780000, 141 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 790000, 161 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 800000, 181 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 810000, 202 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 820000, 223 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 830000, 245 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 840000, 267 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 850000, 289 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 860000, 313 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 875000, 336 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 885000, 360 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 895000, 383 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 910000, 409 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 925000, 435 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 935000, 461 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 950000, 488 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 960000, 516 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 970000, 543 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 985000, 573 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 995000, 604 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 1010000, 636 },
+ { 1, { 2150400, HFPLL, 1, 112 }, L2(19), 1025000, 656 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_2p2g_pvs4[] __initdata = {
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 775000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 775000, 102 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 775000, 121 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 775000, 141 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 780000, 161 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 790000, 181 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 800000, 202 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 810000, 223 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 820000, 245 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 830000, 267 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 840000, 289 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 850000, 313 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 860000, 336 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 870000, 360 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 880000, 383 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 895000, 409 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 910000, 435 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 920000, 461 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 930000, 488 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 940000, 516 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 950000, 543 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 960000, 573 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 975000, 604 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 985000, 636 },
+ { 1, { 2150400, HFPLL, 1, 112 }, L2(19), 1000000, 656 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_2p2g_pvs5[] __initdata = {
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 750000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 750000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 750000, 102 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 750000, 121 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 760000, 141 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 770000, 161 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 780000, 181 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 790000, 202 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 800000, 223 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 810000, 245 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 820000, 267 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 830000, 289 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 840000, 313 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 850000, 336 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 860000, 360 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 870000, 383 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 880000, 409 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 890000, 435 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 900000, 461 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 910000, 488 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 920000, 516 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 930000, 543 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 940000, 573 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 955000, 604 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 965000, 636 },
+ { 1, { 2150400, HFPLL, 1, 112 }, L2(19), 975000, 656 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_2p2g_pvs6[] __initdata = {
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 750000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 750000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 750000, 102 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 750000, 121 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 750000, 141 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 760000, 161 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 770000, 181 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 780000, 202 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 790000, 223 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 800000, 245 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 810000, 267 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 820000, 289 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 830000, 313 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 840000, 336 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 850000, 360 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 860000, 383 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 870000, 409 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 875000, 435 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 885000, 461 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 895000, 488 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 905000, 516 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 915000, 543 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 920000, 573 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 930000, 604 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 940000, 636 },
+ { 1, { 2150400, HFPLL, 1, 112 }, L2(19), 950000, 656 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2p3g_pvs0[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 800000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 800000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 800000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 805000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 815000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 825000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 835000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 845000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 855000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 865000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 875000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 890000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 900000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 915000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 925000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 940000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 950000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 965000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 980000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 995000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 1010000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 1025000, 3200000 },
- { 0, { 1958400, HFPLL, 1, 102 }, L2(19), 1040000, 3200000 },
- { 1, { 2035200, HFPLL, 1, 106 }, L2(19), 1055000, 3200000 },
- { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 1070000, 3200000 },
- { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 1085000, 3200000 },
- { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 1100000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 800000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 800000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 800000, 101 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 805000, 120 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 815000, 139 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 825000, 159 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 835000, 180 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 845000, 200 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 855000, 221 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 865000, 242 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 875000, 264 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 890000, 287 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 900000, 308 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 915000, 333 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 925000, 356 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 940000, 380 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 950000, 404 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 965000, 430 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 980000, 456 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 995000, 482 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 1010000, 510 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 1025000, 538 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1040000, 565 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 1055000, 596 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 1070000, 627 },
+ { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 1085000, 659 },
+ { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 1100000, 691 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2p3g_pvs1[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 800000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 800000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 800000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 800000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 800000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 810000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 820000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 830000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 840000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 850000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 860000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 875000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 885000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 895000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 910000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 920000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 930000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 945000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 960000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 975000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 990000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 1005000, 3200000 },
- { 0, { 1958400, HFPLL, 1, 102 }, L2(19), 1020000, 3200000 },
- { 1, { 2035200, HFPLL, 1, 106 }, L2(19), 1030000, 3200000 },
- { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 1045000, 3200000 },
- { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 1060000, 3200000 },
- { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 1075000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 800000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 800000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 800000, 101 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 800000, 120 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 800000, 139 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 810000, 159 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 820000, 180 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 830000, 200 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 840000, 221 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 850000, 242 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 860000, 264 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 875000, 287 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 885000, 308 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 895000, 333 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 910000, 356 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 920000, 380 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 930000, 404 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 945000, 430 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 960000, 456 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 975000, 482 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 990000, 510 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 1005000, 538 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 1020000, 565 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 1030000, 596 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 1045000, 627 },
+ { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 1060000, 659 },
+ { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 1075000, 691 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2p3g_pvs2[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 775000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 775000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 775000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 785000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 795000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 805000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 815000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 825000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 835000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 845000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 855000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 865000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 875000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 890000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 900000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 910000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 925000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 940000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 955000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 970000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 980000, 3200000 },
- { 0, { 1958400, HFPLL, 1, 102 }, L2(19), 995000, 3200000 },
- { 1, { 2035200, HFPLL, 1, 106 }, L2(19), 1005000, 3200000 },
- { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 1020000, 3200000 },
- { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 1035000, 3200000 },
- { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 1050000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 775000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 775000, 101 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 775000, 120 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 785000, 139 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 795000, 159 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 805000, 180 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 815000, 200 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 825000, 221 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 835000, 242 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 845000, 264 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 855000, 287 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 865000, 308 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 875000, 333 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 890000, 356 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 900000, 380 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 910000, 404 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 925000, 430 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 940000, 456 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 955000, 482 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 970000, 510 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 980000, 538 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 995000, 565 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 1005000, 596 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 1020000, 627 },
+ { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 1035000, 659 },
+ { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 1050000, 691 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2p3g_pvs3[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 775000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 775000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 775000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 775000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 780000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 790000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 800000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 810000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 820000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 830000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 840000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 850000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 860000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 875000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 885000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 895000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 910000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 925000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 935000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 950000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 960000, 3200000 },
- { 0, { 1958400, HFPLL, 1, 102 }, L2(19), 970000, 3200000 },
- { 1, { 2035200, HFPLL, 1, 106 }, L2(19), 985000, 3200000 },
- { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 995000, 3200000 },
- { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 1010000, 3200000 },
- { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 1025000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 775000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 775000, 101 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 775000, 120 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 775000, 139 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 780000, 159 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 790000, 180 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 800000, 200 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 810000, 221 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 820000, 242 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 830000, 264 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 840000, 287 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 850000, 308 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 860000, 333 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 875000, 356 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 885000, 380 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 895000, 404 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 910000, 430 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 925000, 456 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 935000, 482 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 950000, 510 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 960000, 538 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 970000, 565 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 985000, 596 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 995000, 627 },
+ { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 1010000, 659 },
+ { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 1025000, 691 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2p3g_pvs4[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 775000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 775000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 775000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 775000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 775000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 780000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 790000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 800000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 810000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 820000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 830000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 840000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 850000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 860000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 870000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 880000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 895000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 910000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 920000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 930000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 940000, 3200000 },
- { 0, { 1958400, HFPLL, 1, 102 }, L2(19), 950000, 3200000 },
- { 1, { 2035200, HFPLL, 1, 106 }, L2(19), 960000, 3200000 },
- { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 975000, 3200000 },
- { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 985000, 3200000 },
- { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 1000000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 775000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 775000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 775000, 101 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 775000, 120 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 775000, 139 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 775000, 159 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 780000, 180 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 790000, 200 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 800000, 221 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 810000, 242 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 820000, 264 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 830000, 287 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 840000, 308 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 850000, 333 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 860000, 356 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 870000, 380 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 880000, 404 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 895000, 430 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 910000, 456 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 920000, 482 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 930000, 510 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 940000, 538 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 950000, 565 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 960000, 596 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 975000, 627 },
+ { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 985000, 659 },
+ { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 1000000, 691 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2p3g_pvs5[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 750000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 750000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 750000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 750000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 750000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 760000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 770000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 780000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 790000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 800000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 810000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 820000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 830000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 840000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 850000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 860000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 870000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 880000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 890000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 900000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 910000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 920000, 3200000 },
- { 0, { 1958400, HFPLL, 1, 102 }, L2(19), 930000, 3200000 },
- { 1, { 2035200, HFPLL, 1, 106 }, L2(19), 940000, 3200000 },
- { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 955000, 3200000 },
- { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 965000, 3200000 },
- { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 975000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 750000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 750000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 750000, 101 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 750000, 120 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 750000, 139 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 760000, 159 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 770000, 180 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 780000, 200 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 790000, 221 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 800000, 242 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 810000, 264 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 820000, 287 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 830000, 308 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 840000, 333 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 850000, 356 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 860000, 380 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 870000, 404 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 880000, 430 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 890000, 456 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 900000, 482 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 910000, 510 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 920000, 538 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 930000, 565 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 940000, 596 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 955000, 627 },
+ { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 965000, 659 },
+ { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 975000, 691 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_2p3g_pvs6[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 750000, 400000 },
- { 0, { 345600, HFPLL, 2, 36 }, L2(3), 750000, 3200000 },
- { 1, { 422400, HFPLL, 2, 44 }, L2(3), 750000, 3200000 },
- { 0, { 499200, HFPLL, 2, 52 }, L2(6), 750000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(6), 750000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(7), 750000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(7), 760000, 3200000 },
- { 0, { 806400, HFPLL, 1, 42 }, L2(10), 770000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(10), 780000, 3200000 },
- { 0, { 960000, HFPLL, 1, 50 }, L2(10), 790000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 800000, 3200000 },
- { 0, { 1113600, HFPLL, 1, 58 }, L2(12), 810000, 3200000 },
- { 0, { 1190400, HFPLL, 1, 62 }, L2(12), 820000, 3200000 },
- { 0, { 1267200, HFPLL, 1, 66 }, L2(12), 830000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(12), 840000, 3200000 },
- { 0, { 1420800, HFPLL, 1, 74 }, L2(16), 850000, 3200000 },
- { 0, { 1497600, HFPLL, 1, 78 }, L2(16), 860000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(16), 870000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(16), 875000, 3200000 },
- { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 885000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(19), 895000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(19), 905000, 3200000 },
- { 0, { 1958400, HFPLL, 1, 102 }, L2(19), 915000, 3200000 },
- { 1, { 2035200, HFPLL, 1, 106 }, L2(19), 920000, 3200000 },
- { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 930000, 3200000 },
- { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 940000, 3200000 },
- { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 950000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 750000, 72 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(1), 750000, 83 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(2), 750000, 101 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(2), 750000, 120 },
+ { 0, { 576000, HFPLL, 1, 30 }, L2(3), 750000, 139 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(3), 750000, 159 },
+ { 1, { 729600, HFPLL, 1, 38 }, L2(4), 760000, 180 },
+ { 0, { 806400, HFPLL, 1, 42 }, L2(4), 770000, 200 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(4), 780000, 221 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(9), 790000, 242 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(10), 800000, 264 },
+ { 0, { 1113600, HFPLL, 1, 58 }, L2(10), 810000, 287 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(10), 820000, 308 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(13), 830000, 333 },
+ { 0, { 1344000, HFPLL, 1, 70 }, L2(14), 840000, 356 },
+ { 0, { 1420800, HFPLL, 1, 74 }, L2(15), 850000, 380 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 860000, 404 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(17), 870000, 430 },
+ { 0, { 1651200, HFPLL, 1, 86 }, L2(17), 875000, 456 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(18), 885000, 482 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(18), 895000, 510 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(18), 905000, 538 },
+ { 1, { 1958400, HFPLL, 1, 102 }, L2(19), 915000, 565 },
+ { 0, { 2035200, HFPLL, 1, 106 }, L2(19), 920000, 596 },
+ { 0, { 2112000, HFPLL, 1, 110 }, L2(19), 930000, 627 },
+ { 0, { 2188800, HFPLL, 1, 114 }, L2(19), 940000, 659 },
+ { 1, { 2265600, HFPLL, 1, 118 }, L2(19), 950000, 691 },
{ 0, { 0 } }
};
@@ -716,6 +928,17 @@
[1][5] = { acpu_freq_tbl_2p3g_pvs5, sizeof(acpu_freq_tbl_2p3g_pvs5) },
[1][6] = { acpu_freq_tbl_2p3g_pvs6, sizeof(acpu_freq_tbl_2p3g_pvs6) },
[1][7] = { acpu_freq_tbl_2p3g_pvs6, sizeof(acpu_freq_tbl_2p3g_pvs6) },
+
+ /* 8974v2 2.0GHz Parts */
+ [2][0] = { acpu_freq_tbl_2p2g_pvs0, sizeof(acpu_freq_tbl_2p2g_pvs0) },
+ [2][1] = { acpu_freq_tbl_2p2g_pvs1, sizeof(acpu_freq_tbl_2p2g_pvs1) },
+ [2][2] = { acpu_freq_tbl_2p2g_pvs2, sizeof(acpu_freq_tbl_2p2g_pvs2) },
+ [2][3] = { acpu_freq_tbl_2p2g_pvs3, sizeof(acpu_freq_tbl_2p2g_pvs3) },
+ [2][4] = { acpu_freq_tbl_2p2g_pvs4, sizeof(acpu_freq_tbl_2p2g_pvs4) },
+ [2][5] = { acpu_freq_tbl_2p2g_pvs5, sizeof(acpu_freq_tbl_2p2g_pvs5) },
+ [2][6] = { acpu_freq_tbl_2p2g_pvs6, sizeof(acpu_freq_tbl_2p2g_pvs6) },
+ [2][7] = { acpu_freq_tbl_2p2g_pvs6, sizeof(acpu_freq_tbl_2p2g_pvs6) },
+
};
static struct msm_bus_scale_pdata bus_scale_data __initdata = {
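The hunk above adds a third speed-bin row ([2][0]..[2][7]) so the 8974v2 parts covered by the new acpu_freq_tbl_2p2g_* tables get their own PVS 0-7 entries, with PVS 7 reusing the PVS 6 table just as the existing rows do. A minimal sketch of how such a [speed-bin][PVS] map is typically consumed at init time; the names pvs_table, select_freq_tbl, NUM_SPEED_BINS and NUM_PVS are assumptions for illustration, not taken from the driver.

#include <linux/types.h>

#define NUM_SPEED_BINS	3
#define NUM_PVS		8

struct acpu_level;			/* one row of the frequency tables above */

struct pvs_table {
	struct acpu_level *table;
	size_t size;
};

/* Pick the table for the fused speed bin and PVS, clamping bad fuse values. */
static struct acpu_level *select_freq_tbl(struct pvs_table (*tbls)[NUM_PVS],
					  unsigned int speed_bin,
					  unsigned int pvs)
{
	if (speed_bin >= NUM_SPEED_BINS)
		speed_bin = 0;
	if (pvs >= NUM_PVS)
		pvs = 0;
	/* Some bins may leave later PVS slots empty; fall back to PVS 0. */
	if (!tbls[speed_bin][pvs].table)
		pvs = 0;
	return tbls[speed_bin][pvs].table;
}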
diff --git a/arch/arm/mach-msm/acpuclock-9625.c b/arch/arm/mach-msm/acpuclock-9625.c
index 34952fb..42659f9 100644
--- a/arch/arm/mach-msm/acpuclock-9625.c
+++ b/arch/arm/mach-msm/acpuclock-9625.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -80,18 +81,21 @@
.update_mask = RCG_CONFIG_PGM_DATA_BIT | RCG_CONFIG_PGM_ENA_BIT,
.poll_mask = RCG_CONFIG_PGM_DATA_BIT,
},
+ .power_collapse_khz = 19200,
+ .wait_for_irq_khz = 19200,
};
static int __init acpuclk_9625_probe(struct platform_device *pdev)
{
struct resource *res;
- u32 regval;
+ u32 regval, i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rcg_base");
if (!res)
return -EINVAL;
- drv_data.apcs_rcg_config = ioremap(res->start, resource_size(res));
+ drv_data.apcs_rcg_config = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (!drv_data.apcs_rcg_config)
return -ENOMEM;
@@ -117,6 +121,18 @@
return PTR_ERR(drv_data.vdd_mem);
}
+ for (i = 0; i < NUM_SRC; i++) {
+ if (!drv_data.src_clocks[i].name)
+ continue;
+ drv_data.src_clocks[i].clk =
+ devm_clk_get(&pdev->dev, drv_data.src_clocks[i].name);
+ if (IS_ERR(drv_data.src_clocks[i].clk)) {
+ dev_err(&pdev->dev, "Unable to get clock %s\n",
+ drv_data.src_clocks[i].name);
+ return -EPROBE_DEFER;
+ }
+ }
+
/* Disable hardware gating of gpll0 to A5SS */
regval = readl_relaxed(drv_data.apcs_cpu_pwr_ctl);
regval |= GPLL0_TO_A5_ALWAYS_ENABLE;
diff --git a/arch/arm/mach-msm/acpuclock-cortex.c b/arch/arm/mach-msm/acpuclock-cortex.c
index 88bf919..47bf27a 100644
--- a/arch/arm/mach-msm/acpuclock-cortex.c
+++ b/arch/arm/mach-msm/acpuclock-cortex.c
@@ -121,7 +121,7 @@
writel_relaxed(regval, apcs_rcg_cmd);
/* Wait for the update to take effect */
- rc = readl_poll_timeout(apcs_rcg_cmd, regval,
+ rc = readl_poll_timeout_noirq(apcs_rcg_cmd, regval,
!(regval & r->poll_mask),
POLL_INTERVAL_US,
APCS_RCG_UPDATE_TIMEOUT_US);
@@ -129,12 +129,26 @@
pr_warn("acpu rcg didn't update its configuration\n");
}
-/*
- * This function can be called in both atomic and nonatomic context.
- * Since regulator APIS can sleep, we cannot always use the clk prepare
- * unprepare API.
- */
-static int set_speed(struct clkctl_acpu_speed *tgt_s, bool atomic)
+static int set_speed_atomic(struct clkctl_acpu_speed *tgt_s)
+{
+ struct clkctl_acpu_speed *strt_s = priv->current_speed;
+ struct clk *strt = priv->src_clocks[strt_s->src].clk;
+ struct clk *tgt = priv->src_clocks[tgt_s->src].clk;
+ int rc = 0;
+
+ WARN(strt_s->src == ACPUPLL && tgt_s->src == ACPUPLL,
+ "can't reprogram ACPUPLL during atomic context\n");
+ rc = clk_enable(tgt);
+ if (rc)
+ return rc;
+
+ select_clk_source_div(priv, tgt_s);
+ clk_disable(strt);
+
+ return rc;
+}
+
+static int set_speed(struct clkctl_acpu_speed *tgt_s)
{
int rc = 0;
unsigned int tgt_freq_hz = tgt_s->khz * 1000;
@@ -148,19 +162,13 @@
select_clk_source_div(priv, cxo_s);
/* Re-program acpu pll */
- if (atomic)
- clk_disable(tgt);
- else
- clk_disable_unprepare(tgt);
+ clk_disable_unprepare(tgt);
rc = clk_set_rate(tgt, tgt_freq_hz);
if (rc)
pr_err("Failed to set ACPU PLL to %u\n", tgt_freq_hz);
- if (atomic)
- BUG_ON(clk_enable(tgt));
- else
- BUG_ON(clk_prepare_enable(tgt));
+ BUG_ON(clk_prepare_enable(tgt));
/* Switch back to acpu pll */
select_clk_source_div(priv, tgt_s);
@@ -172,10 +180,7 @@
return rc;
}
- if (atomic)
- rc = clk_enable(tgt);
- else
- rc = clk_prepare_enable(tgt);
+ rc = clk_prepare_enable(tgt);
if (rc) {
pr_err("ACPU PLL enable failed\n");
@@ -184,16 +189,10 @@
select_clk_source_div(priv, tgt_s);
- if (atomic)
- clk_disable(strt);
- else
- clk_disable_unprepare(strt);
+ clk_disable_unprepare(strt);
} else {
- if (atomic)
- rc = clk_enable(tgt);
- else
- rc = clk_prepare_enable(tgt);
+ rc = clk_prepare_enable(tgt);
if (rc) {
pr_err("%s enable failed\n",
@@ -203,10 +202,7 @@
select_clk_source_div(priv, tgt_s);
- if (atomic)
- clk_disable(strt);
- else
- clk_disable_unprepare(strt);
+ clk_disable_unprepare(strt);
}
@@ -250,9 +246,9 @@
/* Switch CPU speed. Flag indicates atomic context */
if (reason == SETRATE_CPUFREQ || reason == SETRATE_INIT)
- rc = set_speed(tgt_s, false);
+ rc = set_speed(tgt_s);
else
- rc = set_speed(tgt_s, true);
+ rc = set_speed_atomic(tgt_s);
if (rc)
goto out;
@@ -317,8 +313,6 @@
static struct acpuclk_data acpuclk_cortex_data = {
.set_rate = acpuclk_cortex_set_rate,
.get_rate = acpuclk_cortex_get_rate,
- .power_collapse_khz = 19200,
- .wait_for_irq_khz = 19200,
};
int __init acpuclk_cortex_init(struct platform_device *pdev,
@@ -330,20 +324,15 @@
priv = data;
mutex_init(&priv->lock);
+ acpuclk_cortex_data.power_collapse_khz = priv->wait_for_irq_khz;
+ acpuclk_cortex_data.wait_for_irq_khz = priv->wait_for_irq_khz;
+
bus_perf_client = msm_bus_scale_register_client(priv->bus_scale);
if (!bus_perf_client) {
pr_err("Unable to register bus client\n");
BUG();
}
- for (i = 0; i < NUM_SRC; i++) {
- if (!priv->src_clocks[i].name)
- continue;
- priv->src_clocks[i].clk =
- devm_clk_get(&pdev->dev, priv->src_clocks[i].name);
- BUG_ON(IS_ERR(priv->src_clocks[i].clk));
- }
-
/* Improve boot time by ramping up CPU immediately */
for (i = 0; priv->freq_tbl[i].khz != 0; i++)
if (priv->freq_tbl[i].use_for_scaling)
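The set_speed() rework above replaces the old atomic flag with two dedicated paths: set_speed() keeps the sleeping clk_prepare_enable()/clk_disable_unprepare() calls for cpufreq and init requests, while set_speed_atomic() uses only clk_enable()/clk_disable(), which are safe with interrupts off, and warns if an ACPUPLL reprogram is attempted in that context. A minimal sketch of the underlying rule, with a hypothetical mux callback; it assumes both clocks were already clk_prepare()d from process context.

#include <linux/clk.h>

/*
 * Illustrative only: move a CPU clock mux from 'cur' to 'next' without
 * sleeping.  Only the atomic-safe enable/disable calls are used here;
 * clk_prepare() must have been done earlier, in a context allowed to sleep.
 */
static int switch_parent_atomic(struct clk *cur, struct clk *next,
				void (*select_mux)(struct clk *clk))
{
	int rc;

	rc = clk_enable(next);		/* no prepare: may be called atomically */
	if (rc)
		return rc;

	select_mux(next);		/* reprogram the mux/divider */
	clk_disable(cur);		/* drop the old source's enable count */

	return 0;
}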
diff --git a/arch/arm/mach-msm/acpuclock-cortex.h b/arch/arm/mach-msm/acpuclock-cortex.h
index 2db3987..89a0a84 100644
--- a/arch/arm/mach-msm/acpuclock-cortex.h
+++ b/arch/arm/mach-msm/acpuclock-cortex.h
@@ -63,6 +63,8 @@
unsigned long vdd_max_mem;
struct src_clock src_clocks[NUM_SRC];
struct acpuclk_reg_data reg_data;
+ unsigned long power_collapse_khz;
+ unsigned long wait_for_irq_khz;
};
/* Instantaneous bandwidth requests in MB/s. */
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index a6f4423..e3a3f54 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -22,6 +22,7 @@
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/regulator/consumer.h>
+#include <linux/iopoll.h>
#include <asm/mach-types.h>
#include <asm/cpu.h>
@@ -131,8 +132,14 @@
writel_relaxed(0x6, sc->hfpll_base + drv.hfpll_data->mode_offset);
/* Wait for PLL to lock. */
- mb();
- udelay(60);
+ if (drv.hfpll_data->has_lock_status) {
+ u32 regval;
+ readl_tight_poll(sc->hfpll_base + drv.hfpll_data->status_offset,
+ regval, regval & BIT(16));
+ } else {
+ mb();
+ udelay(60);
+ }
/* Enable PLL output. */
writel_relaxed(0x7, sc->hfpll_base + drv.hfpll_data->mode_offset);
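On targets whose HFPLL exposes a lock-detect bit, the hunk above spins on that bit instead of the fixed 60us delay, so the PLL output is re-enabled as soon as it actually locks. A minimal sketch of the same kind of poll; the explicit timeout, register name and constant below are illustrative assumptions rather than details taken from the patch.

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/bitops.h>

#define HFPLL_LOCK_DET	BIT(16)		/* lock-detect bit, per the hunk above */

/* Wait for the PLL lock-detect bit in 'status_reg', giving up after ~1ms. */
static int wait_for_pll_lock(void __iomem *status_reg)
{
	int tries = 100;

	while (!(readl_relaxed(status_reg) & HFPLL_LOCK_DET)) {
		if (--tries == 0)
			return -ETIMEDOUT;
		udelay(10);
	}

	return 0;
}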
diff --git a/arch/arm/mach-msm/acpuclock-krait.h b/arch/arm/mach-msm/acpuclock-krait.h
index 11d58dd..f02af98 100644
--- a/arch/arm/mach-msm/acpuclock-krait.h
+++ b/arch/arm/mach-msm/acpuclock-krait.h
@@ -171,8 +171,10 @@
* @user_val: Value to initialize the @user_offset register to.
* @user_vco_mask: Bit in the @user_offset to enable high-frequency VCO mode.
* @has_droop_ctl: Indicates the presence of a voltage droop controller.
+ * @has_lock_status: Indicates the presence of a lock status bit.
* @droop_offset: Droop controller register offset from base address.
* @droop_val: Value to initialize the @config_offset register to.
+ * @status_offset: PLL status register offset.
* @low_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_LOW.
* @nom_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_NOM.
* @low_vco_l_max: Maximum "L" value supported in low-frequency VCO mode.
@@ -190,8 +192,10 @@
const u32 user_val;
const u32 user_vco_mask;
const bool has_droop_ctl;
+ const bool has_lock_status;
const u32 droop_offset;
const u32 droop_val;
+ const u32 status_offset;
u32 low_vdd_l_max;
u32 nom_vdd_l_max;
const u32 low_vco_l_max;
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index cbb9e37..bed794b 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -434,8 +434,7 @@
list_add_tail(&info->list_node, &bam_rx_pool);
rx_len_cached = ++bam_rx_pool_len;
ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
- BUFFER_SIZE, info,
- SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+ BUFFER_SIZE, info, 0);
if (ret) {
list_del(&info->list_node);
rx_len_cached = --bam_rx_pool_len;
@@ -657,7 +656,7 @@
spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_add_tail(&pkt->list_node, &bam_tx_pool);
rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
- pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+ pkt, SPS_IOVEC_FLAG_EOT);
if (rc) {
DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
__func__, rc);
@@ -830,7 +829,7 @@
spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_add_tail(&pkt->list_node, &bam_tx_pool);
rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
- pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+ pkt, SPS_IOVEC_FLAG_EOT);
if (rc) {
DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
__func__, rc);
@@ -914,8 +913,10 @@
if (!bam_is_connected) {
read_unlock(&ul_wakeup_lock);
ul_wakeup();
- if (unlikely(in_global_reset == 1))
+ if (unlikely(in_global_reset == 1)) {
+ kfree(hdr);
return -EFAULT;
+ }
read_lock(&ul_wakeup_lock);
notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
}
@@ -1385,12 +1386,11 @@
struct list_head *temp;
struct outside_notify_func *func;
+ BAM_DMUX_LOG("%s: event=%d, data=%lu\n", __func__, event, data);
+
for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
- if (bam_ch_is_open(i)) {
+ if (bam_ch_is_open(i))
bam_ch[i].notify(bam_ch[i].priv, event, data);
- BAM_DMUX_LOG("%s: cid=%d, event=%d, data=%lu\n",
- __func__, i, event, data);
- }
}
__list_for_each(temp, &bam_other_notify_funcs) {
@@ -1757,10 +1757,13 @@
/* in_ssr documentation/assumptions found in restart_notifier_cb */
if (!power_management_only_mode) {
if (likely(!in_ssr)) {
+ BAM_DMUX_LOG("%s: disconnect tx\n", __func__);
sps_disconnect(bam_tx_pipe);
+ BAM_DMUX_LOG("%s: disconnect rx\n", __func__);
sps_disconnect(bam_rx_pipe);
__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
+ BAM_DMUX_LOG("%s: device reset\n", __func__);
sps_device_reset(a2_device_handle);
} else {
ssr_skipped_disconnect = 1;
@@ -1980,6 +1983,8 @@
a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
a2_props.num_pipes = A2_NUM_PIPES;
a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
+ a2_props.constrained_logging = true;
+ a2_props.logging_number = 1;
if (cpu_is_msm9615() || satellite_mode)
a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
/* need to free on tear down */
diff --git a/arch/arm/mach-msm/bms-batterydata-desay.c b/arch/arm/mach-msm/bms-batterydata-desay.c
index dd3f346..e2b62be 100644
--- a/arch/arm/mach-msm/bms-batterydata-desay.c
+++ b/arch/arm/mach-msm/bms-batterydata-desay.c
@@ -84,4 +84,5 @@
.pc_sf_lut = &desay_5200_pc_sf,
.default_rbatt_mohm = 156,
.rbatt_capacitive_mohm = 50,
+ .flat_ocv_threshold_uv = 3800000,
};
diff --git a/arch/arm/mach-msm/bms-batterydata-oem.c b/arch/arm/mach-msm/bms-batterydata-oem.c
index 036bf88..e4c42d7 100644
--- a/arch/arm/mach-msm/bms-batterydata-oem.c
+++ b/arch/arm/mach-msm/bms-batterydata-oem.c
@@ -105,4 +105,5 @@
.pc_temp_ocv_lut = &pc_temp_ocv,
.rbatt_sf_lut = &rbatt_sf,
.default_rbatt_mohm = 236,
+ .flat_ocv_threshold_uv = 3800000,
};
diff --git a/arch/arm/mach-msm/bms-batterydata.c b/arch/arm/mach-msm/bms-batterydata.c
index 0c39df6..dc98c57 100644
--- a/arch/arm/mach-msm/bms-batterydata.c
+++ b/arch/arm/mach-msm/bms-batterydata.c
@@ -106,4 +106,5 @@
.rbatt_sf_lut = &rbatt_sf,
.default_rbatt_mohm = 236,
.rbatt_capacitive_mohm = 50,
+ .flat_ocv_threshold_uv = 3800000,
};
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 42bde8f..f969e31 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -186,7 +186,7 @@
#endif
}
-static int apq8064_paddr_to_memtype(unsigned int paddr)
+static int apq8064_paddr_to_memtype(phys_addr_t paddr)
{
return MEMTYPE_EBI1;
}
@@ -437,7 +437,7 @@
if (fixed_position != NOT_FIXED)
fixed_size += heap->size;
- else
+ else if (!use_cma)
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
if (fixed_position == FIXED_LOW) {
@@ -675,6 +675,7 @@
static struct msm_hsic_host_platform_data msm_hsic_pdata = {
.strobe = 88,
.data = 89,
+ .phy_sof_workaround = true,
.bus_scale_table = &hsic_bus_scale_pdata,
};
#else
@@ -2343,6 +2344,8 @@
static struct msm_pcie_platform msm_pcie_platform_data = {
.axi_addr = PCIE_AXI_BAR_PHYS,
.axi_size = PCIE_AXI_BAR_SIZE,
+ .parf_deemph = 0x282828,
+ .parf_swing = 0x7F7F,
};
/* FSM8064_EP PCIe gpios */
@@ -2356,7 +2359,9 @@
.axi_addr = PCIE_AXI_BAR_PHYS,
.axi_size = PCIE_AXI_BAR_SIZE,
.wake_n = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, PCIE_EP_WAKE_N_PMIC_GPIO),
- .vreg_n = 4
+ .vreg_n = 4,
+ .parf_deemph = 0x101010,
+ .parf_swing = 0x6B6B,
};
static int __init mpq8064_pcie_enabled(void)
@@ -3580,6 +3585,18 @@
if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917)
apq8064_pm8917_pdata_fixup();
platform_device_register(&msm_gpio_device);
+ if (cpu_is_apq8064ab())
+ apq8064ab_update_krait_spm();
+ if (cpu_is_krait_v3()) {
+ struct msm_pm_init_data_type *pdata =
+ msm8064_pm_8x60.dev.platform_data;
+ pdata->retention_calls_tz = false;
+ apq8064ab_update_retention_spm();
+ }
+ platform_device_register(&msm8064_pm_8x60);
+
+ msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
+ msm_spm_l2_init(msm_spm_l2_data);
msm_tsens_early_init(&apq_tsens_pdata);
msm_thermal_init(&msm_thermal_pdata);
if (socinfo_init() < 0)
@@ -3694,18 +3711,6 @@
apq8064_init_dsps();
platform_device_register(&msm_8960_riva);
}
- if (cpu_is_apq8064ab())
- apq8064ab_update_krait_spm();
- if (cpu_is_krait_v3()) {
- struct msm_pm_init_data_type *pdata =
- msm8064_pm_8x60.dev.platform_data;
- pdata->retention_calls_tz = false;
- apq8064ab_update_retention_spm();
- }
- platform_device_register(&msm8064_pm_8x60);
-
- msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
- msm_spm_l2_init(msm_spm_l2_data);
BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
apq8064_epm_adc_init();
}
@@ -3804,6 +3809,8 @@
if (meminfo_init(SYS_MEMORY, SZ_256M) < 0)
pr_err("meminfo_init() failed!\n");
+ msm_thermal_pdata.limit_temp_degC = 80;
+
apq8064_common_init();
ethernet_init();
fsm8064_ep_pcie_init();
diff --git a/arch/arm/mach-msm/board-zinc-gpiomux.c b/arch/arm/mach-msm/board-8084-gpiomux.c
similarity index 94%
rename from arch/arm/mach-msm/board-zinc-gpiomux.c
rename to arch/arm/mach-msm/board-8084-gpiomux.c
index ac4daa8..8d5bb49 100644
--- a/arch/arm/mach-msm/board-zinc-gpiomux.c
+++ b/arch/arm/mach-msm/board-8084-gpiomux.c
@@ -17,7 +17,7 @@
#include <mach/board.h>
#include <mach/gpiomux.h>
-void __init msmzinc_init_gpiomux(void)
+void __init apq8084_init_gpiomux(void)
{
int rc;
diff --git a/arch/arm/mach-msm/board-8084.c b/arch/arm/mach-msm/board-8084.c
new file mode 100644
index 0000000..e45266e
--- /dev/null
+++ b/arch/arm/mach-msm/board-8084.c
@@ -0,0 +1,141 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/memory.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+#include <mach/board.h>
+#include <mach/gpiomux.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_memtypes.h>
+#include <mach/msm_smd.h>
+#include <mach/restart.h>
+#include <mach/socinfo.h>
+#include <mach/clk-provider.h>
+#include "board-dt.h"
+#include "clock.h"
+#include "devices.h"
+#include "platsmp.h"
+
+static struct memtype_reserve apq8084_reserve_table[] __initdata = {
+ [MEMTYPE_SMI] = {
+ },
+ [MEMTYPE_EBI0] = {
+ .flags = MEMTYPE_FLAGS_1M_ALIGN,
+ },
+ [MEMTYPE_EBI1] = {
+ .flags = MEMTYPE_FLAGS_1M_ALIGN,
+ },
+};
+
+static int apq8084_paddr_to_memtype(phys_addr_t paddr)
+{
+ return MEMTYPE_EBI1;
+}
+
+static struct reserve_info apq8084_reserve_info __initdata = {
+ .memtype_reserve_table = apq8084_reserve_table,
+ .paddr_to_memtype = apq8084_paddr_to_memtype,
+};
+
+static struct of_dev_auxdata apq8084_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF9824000, \
+ "msm_sdcc.1", NULL),
+ OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \
+ "msm_sdcc.2", NULL),
+ {}
+};
+
+void __init apq8084_reserve(void)
+{
+ reserve_info = &apq8084_reserve_info;
+ of_scan_flat_dt(dt_scan_for_memory_reserve, apq8084_reserve_table);
+ msm_reserve();
+}
+
+static void __init apq8084_early_memory(void)
+{
+ reserve_info = &apq8084_reserve_info;
+ of_scan_flat_dt(dt_scan_for_memory_hole, apq8084_reserve_table);
+}
+
+static struct clk_lookup msm_clocks_dummy[] = {
+ CLK_DUMMY("core_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
+ CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
+ CLK_DUMMY("core_clk", SDC1_CLK, "msm_sdcc.1", OFF),
+ CLK_DUMMY("iface_clk", SDC1_P_CLK, "msm_sdcc.1", OFF),
+ CLK_DUMMY("core_clk", SDC2_CLK, "msm_sdcc.2", OFF),
+ CLK_DUMMY("iface_clk", SDC2_P_CLK, "msm_sdcc.2", OFF),
+};
+
+static struct clock_init_data msm_dummy_clock_init_data __initdata = {
+ .table = msm_clocks_dummy,
+ .size = ARRAY_SIZE(msm_clocks_dummy),
+};
+
+/*
+ * Used to satisfy dependencies for devices that need to be
+ * run early or in a particular order. Most likely your device doesn't fall
+ * into this category, and thus the driver should not be added here;
+ * EPROBE_DEFER can satisfy most dependency problems.
+ */
+void __init apq8084_add_drivers(void)
+{
+ msm_smd_init();
+ msm_clock_init(&msm_dummy_clock_init_data);
+}
+
+static void __init apq8084_map_io(void)
+{
+ msm_map_8084_io();
+}
+
+void __init apq8084_init(void)
+{
+ struct of_dev_auxdata *adata = apq8084_auxdata_lookup;
+
+ if (socinfo_init() < 0)
+ pr_err("%s: socinfo_init() failed\n", __func__);
+
+ apq8084_init_gpiomux();
+ of_platform_populate(NULL, of_default_bus_match_table, adata, NULL);
+ apq8084_add_drivers();
+}
+
+void __init apq8084_init_very_early(void)
+{
+ apq8084_early_memory();
+}
+
+static const char *apq8084_dt_match[] __initconst = {
+ "qcom,apq8084",
+ NULL
+};
+
+DT_MACHINE_START(APQ8084_DT, "Qualcomm APQ 8084 (Flattened Device Tree)")
+ .map_io = apq8084_map_io,
+ .init_irq = msm_dt_init_irq,
+ .init_machine = apq8084_init,
+ .handle_irq = gic_handle_irq,
+ .timer = &msm_dt_timer,
+ .dt_compat = apq8084_dt_match,
+ .reserve = apq8084_reserve,
+ .init_very_early = apq8084_init_very_early,
+ .restart = msm_restart,
+ .smp = &msm8974_smp_ops,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 872fabe..a892e32 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -25,6 +25,7 @@
#include <linux/of_irq.h>
#include <linux/memory.h>
#include <linux/regulator/qpnp-regulator.h>
+#include <linux/msm_tsens.h>
#include <asm/mach/map.h>
#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
@@ -72,6 +73,10 @@
"msm_sdcc.1", NULL),
OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \
"msm_sdcc.2", NULL),
+ OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF9824900, \
+ "msm_sdcc.1", NULL),
+ OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF98A4900, \
+ "msm_sdcc.2", NULL),
{}
};
@@ -108,11 +113,11 @@
msm_spm_device_init();
rpm_regulator_smd_driver_init();
qpnp_regulator_init();
- if (machine_is_msm8226_rumi())
+ if (of_board_is_rumi())
msm_clock_init(&msm8226_rumi_clock_init_data);
else
msm_clock_init(&msm8226_clock_init_data);
-
+ tsens_tm_init_driver();
msm_thermal_device_init();
}
diff --git a/arch/arm/mach-msm/board-8610-gpiomux.c b/arch/arm/mach-msm/board-8610-gpiomux.c
index 15d7679..4b435de 100644
--- a/arch/arm/mach-msm/board-8610-gpiomux.c
+++ b/arch/arm/mach-msm/board-8610-gpiomux.c
@@ -35,6 +35,48 @@
.pull = GPIOMUX_PULL_DOWN,
};
+static struct gpiomux_setting wcnss_5wire_suspend_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting wcnss_5wire_active_cfg = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting lcd_en_act_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+ .dir = GPIOMUX_OUT_HIGH,
+};
+
+static struct gpiomux_setting lcd_en_sus_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct msm_gpiomux_config msm_lcd_configs[] __initdata = {
+ {
+ .gpio = 41,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &lcd_en_act_cfg,
+ [GPIOMUX_SUSPENDED] = &lcd_en_sus_cfg,
+ },
+ },
+ {
+ .gpio = 7,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &lcd_en_act_cfg,
+ [GPIOMUX_SUSPENDED] = &lcd_en_sus_cfg,
+ },
+ },
+};
+
static struct msm_gpiomux_config msm_blsp_configs[] __initdata = {
{
.gpio = 10, /* BLSP1 QUP3 I2C_SDA */
@@ -74,6 +116,44 @@
},
};
+static struct msm_gpiomux_config wcnss_5wire_interface[] = {
+ {
+ .gpio = 23,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 24,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 25,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 26,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 27,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg,
+ },
+ },
+};
+
void __init msm8610_init_gpiomux(void)
{
int rc;
@@ -85,4 +165,7 @@
}
msm_gpiomux_install(msm_blsp_configs, ARRAY_SIZE(msm_blsp_configs));
+ msm_gpiomux_install(wcnss_5wire_interface,
+ ARRAY_SIZE(wcnss_5wire_interface));
+ msm_gpiomux_install(msm_lcd_configs, ARRAY_SIZE(msm_lcd_configs));
}
diff --git a/arch/arm/mach-msm/board-8610.c b/arch/arm/mach-msm/board-8610.c
index 5f5366f..2cd7134 100644
--- a/arch/arm/mach-msm/board-8610.c
+++ b/arch/arm/mach-msm/board-8610.c
@@ -24,6 +24,7 @@
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/memory.h>
+#include <linux/msm_tsens.h>
#include <asm/mach/map.h>
#include <asm/arch_timer.h>
#include <asm/hardware/gic.h>
@@ -43,6 +44,7 @@
#include <mach/clk-provider.h>
#include <mach/msm_smd.h>
#include <mach/rpm-smd.h>
+#include <mach/rpm-regulator-smd.h>
#include <linux/msm_thermal.h>
#include "board-dt.h"
#include "clock.h"
@@ -72,6 +74,10 @@
"msm_sdcc.1", NULL),
OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \
"msm_sdcc.2", NULL),
+ OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF9824900, \
+ "msm_sdcc.1", NULL),
+ OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF98A4900, \
+ "msm_sdcc.2", NULL),
{}
};
@@ -100,10 +106,12 @@
msm_rpm_driver_init();
msm_lpmrs_module_init();
msm_spm_device_init();
+ rpm_regulator_smd_driver_init();
qpnp_regulator_init();
+ tsens_tm_init_driver();
msm_thermal_device_init();
- if (machine_is_msm8610_rumi())
+ if (of_board_is_rumi())
msm_clock_init(&msm8610_rumi_clock_init_data);
else
msm_clock_init(&msm8610_clock_init_data);
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 771e678..6ccaba6 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -226,7 +226,7 @@
#endif
}
-static int msm8930_paddr_to_memtype(unsigned int paddr)
+static int msm8930_paddr_to_memtype(phys_addr_t paddr)
{
return MEMTYPE_EBI1;
}
@@ -477,7 +477,7 @@
if (fixed_position != NOT_FIXED)
fixed_size += heap->size;
- else
+ else if (!use_cma)
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
if (fixed_position == FIXED_LOW) {
@@ -957,7 +957,7 @@
},
{
ARRAY_SIZE(qseecom_enable_dfab_vectors),
- qseecom_enable_sfpb_vectors,
+ qseecom_enable_dfab_vectors,
},
{
ARRAY_SIZE(qseecom_enable_sfpb_vectors),
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 7ef6fed..5d96389 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -236,7 +236,7 @@
#endif
}
-static int msm8960_paddr_to_memtype(unsigned int paddr)
+static int msm8960_paddr_to_memtype(phys_addr_t paddr)
{
return MEMTYPE_EBI1;
}
@@ -532,7 +532,7 @@
if (fixed_position != NOT_FIXED)
fixed_size += heap->size;
- else
+ else if (!use_cma)
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
if (fixed_position == FIXED_LOW) {
@@ -1459,8 +1459,9 @@
#ifdef CONFIG_USB_EHCI_MSM_HSIC
#define HSIC_HUB_RESET_GPIO 91
static struct msm_hsic_host_platform_data msm_hsic_pdata = {
- .strobe = 150,
- .data = 151,
+ .strobe = 150,
+ .data = 151,
+ .phy_sof_workaround = true,
};
static struct smsc_hub_platform_data hsic_hub_pdata = {
diff --git a/arch/arm/mach-msm/board-8974-gpiomux.c b/arch/arm/mach-msm/board-8974-gpiomux.c
index 688c6f7..705275c 100644
--- a/arch/arm/mach-msm/board-8974-gpiomux.c
+++ b/arch/arm/mach-msm/board-8974-gpiomux.c
@@ -100,6 +100,18 @@
.pull = GPIOMUX_PULL_DOWN,
};
+static struct gpiomux_setting ath_gpio_active_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting ath_gpio_suspend_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
static struct gpiomux_setting gpio_i2c_config = {
.func = GPIOMUX_FUNC_3,
/*
@@ -718,46 +730,110 @@
},
};
-static struct gpiomux_setting pri_auxpcm_act_cfg = {
+static struct gpiomux_setting auxpcm_act_cfg = {
.func = GPIOMUX_FUNC_1,
.drv = GPIOMUX_DRV_8MA,
.pull = GPIOMUX_PULL_NONE,
};
-static struct gpiomux_setting pri_auxpcm_sus_cfg = {
+static struct gpiomux_setting auxpcm_sus_cfg = {
.func = GPIOMUX_FUNC_1,
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_DOWN,
};
-static struct msm_gpiomux_config msm8974_pri_auxpcm_configs[] __initdata = {
+/* Primary AUXPCM port sharing GPIO lines with Primary MI2S */
+static struct msm_gpiomux_config msm8974_pri_pri_auxpcm_configs[] __initdata = {
{
.gpio = 65,
.settings = {
- [GPIOMUX_SUSPENDED] = &pri_auxpcm_sus_cfg,
- [GPIOMUX_ACTIVE] = &pri_auxpcm_act_cfg,
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
},
},
{
.gpio = 66,
.settings = {
- [GPIOMUX_SUSPENDED] = &pri_auxpcm_sus_cfg,
- [GPIOMUX_ACTIVE] = &pri_auxpcm_act_cfg,
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
},
},
{
.gpio = 67,
.settings = {
- [GPIOMUX_SUSPENDED] = &pri_auxpcm_sus_cfg,
- [GPIOMUX_ACTIVE] = &pri_auxpcm_act_cfg,
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
},
},
{
.gpio = 68,
.settings = {
- [GPIOMUX_SUSPENDED] = &pri_auxpcm_sus_cfg,
- [GPIOMUX_ACTIVE] = &pri_auxpcm_act_cfg,
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+};
+
+/* Primary AUXPCM port sharing GPIO lines with Tertiary MI2S */
+static struct msm_gpiomux_config msm8974_pri_ter_auxpcm_configs[] __initdata = {
+ {
+ .gpio = 74,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+ {
+ .gpio = 75,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+ {
+ .gpio = 76,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+ {
+ .gpio = 77,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+};
+
+static struct msm_gpiomux_config msm8974_sec_auxpcm_configs[] __initdata = {
+ {
+ .gpio = 79,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+ {
+ .gpio = 80,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+ {
+ .gpio = 81,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+ {
+ .gpio = 82,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
},
},
};
@@ -800,6 +876,24 @@
},
};
+
+static struct msm_gpiomux_config ath_gpio_configs[] = {
+ {
+ .gpio = 51,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &ath_gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &ath_gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 79,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &ath_gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &ath_gpio_suspend_cfg,
+ },
+ },
+};
+
static struct msm_gpiomux_config msm_taiko_config[] __initdata = {
{
.gpio = 63, /* SYS_RST_N */
@@ -1003,7 +1097,9 @@
ARRAY_SIZE(msm_blsp2_uart7_configs));
msm_gpiomux_install(wcnss_5wire_interface,
ARRAY_SIZE(wcnss_5wire_interface));
-
+ if (of_board_is_liquid())
+ msm_gpiomux_install_nowrite(ath_gpio_configs,
+ ARRAY_SIZE(ath_gpio_configs));
msm_gpiomux_install(msm8974_slimbus_config,
ARRAY_SIZE(msm8974_slimbus_config));
@@ -1024,17 +1120,24 @@
ARRAY_SIZE(msm_hsic_hub_configs));
msm_gpiomux_install(msm_hdmi_configs, ARRAY_SIZE(msm_hdmi_configs));
- if (machine_is_msm8974_fluid())
+ if (of_board_is_fluid())
msm_gpiomux_install(msm_mhl_configs,
ARRAY_SIZE(msm_mhl_configs));
- msm_gpiomux_install(msm8974_pri_auxpcm_configs,
- ARRAY_SIZE(msm8974_pri_auxpcm_configs));
+ if (of_board_is_liquid())
+ msm_gpiomux_install(msm8974_pri_ter_auxpcm_configs,
+ ARRAY_SIZE(msm8974_pri_ter_auxpcm_configs));
+ else
+ msm_gpiomux_install(msm8974_pri_pri_auxpcm_configs,
+ ARRAY_SIZE(msm8974_pri_pri_auxpcm_configs));
+
+ msm_gpiomux_install(msm8974_sec_auxpcm_configs,
+ ARRAY_SIZE(msm8974_sec_auxpcm_configs));
msm_gpiomux_install_nowrite(msm_lcd_configs,
ARRAY_SIZE(msm_lcd_configs));
- if (machine_is_msm8974_rumi())
+ if (of_board_is_rumi())
msm_gpiomux_install(msm_rumi_blsp_configs,
ARRAY_SIZE(msm_rumi_blsp_configs));
}
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index e624e3f..3eed219 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -22,6 +22,7 @@
#include <linux/memory.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/krait-regulator.h>
+#include <linux/msm_tsens.h>
#include <linux/msm_thermal.h>
#include <asm/mach/map.h>
#include <asm/hardware/gic.h>
@@ -59,7 +60,7 @@
},
};
-static int msm8974_paddr_to_memtype(unsigned int paddr)
+static int msm8974_paddr_to_memtype(phys_addr_t paddr)
{
return MEMTYPE_EBI1;
}
@@ -97,10 +98,11 @@
rpm_regulator_smd_driver_init();
msm_spm_device_init();
krait_power_init();
- if (machine_is_msm8974_rumi())
+ if (of_board_is_rumi())
msm_clock_init(&msm8974_rumi_clock_init_data);
else
msm_clock_init(&msm8974_clock_init_data);
+ tsens_tm_init_driver();
msm_thermal_device_init();
}
@@ -172,6 +174,7 @@
static const char *msm8974_dt_match[] __initconst = {
"qcom,msm8974",
+ "qcom,apq8074",
NULL
};
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 50f4fd7..b77a3b9 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -841,6 +841,10 @@
.prv_data = &msm_hsic_peripheral_pdata_private,
};
+static struct msm_hsic_host_platform_data msm_hsic_pdata = {
+ .phy_sof_workaround = true,
+};
+
#define PID_MAGIC_ID 0x71432909
#define SERIAL_NUM_MAGIC_ID 0x61945374
#define SERIAL_NUMBER_LENGTH 127
@@ -1073,6 +1077,7 @@
&msm_peripheral_pdata;
msm_device_hsic_peripheral.dev.platform_data =
&msm_hsic_peripheral_pdata;
+ msm_device_hsic_host.dev.platform_data = &msm_hsic_pdata;
msm_device_usb_bam.dev.platform_data = &msm_usb_bam_pdata;
platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
msm9615_pm8xxx_gpio_mpp_init();
diff --git a/arch/arm/mach-msm/board-9625-gpiomux.c b/arch/arm/mach-msm/board-9625-gpiomux.c
index 75aaaec..a6ac986 100644
--- a/arch/arm/mach-msm/board-9625-gpiomux.c
+++ b/arch/arm/mach-msm/board-9625-gpiomux.c
@@ -276,6 +276,7 @@
},
};
+#ifdef CONFIG_FB_MSM_QPIC
static struct gpiomux_setting qpic_lcdc_a_d = {
.func = GPIOMUX_FUNC_1,
.drv = GPIOMUX_DRV_10MA,
@@ -327,6 +328,17 @@
},
};
+static void msm9625_disp_init_gpiomux(void)
+{
+ msm_gpiomux_install(msm9625_qpic_lcdc_configs,
+ ARRAY_SIZE(msm9625_qpic_lcdc_configs));
+}
+#else
+static void msm9625_disp_init_gpiomux(void)
+{
+}
+#endif /* CONFIG_FB_MSM_QPIC */
+
void __init msm9625_init_gpiomux(void)
{
int rc;
@@ -347,7 +359,5 @@
ARRAY_SIZE(mdm9625_cdc_reset_config));
msm_gpiomux_install(sdc2_card_det_config,
ARRAY_SIZE(sdc2_card_det_config));
- msm_gpiomux_install(msm9625_qpic_lcdc_configs,
- ARRAY_SIZE(msm9625_qpic_lcdc_configs));
-
+ msm9625_disp_init_gpiomux();
}
diff --git a/arch/arm/mach-msm/board-9625.c b/arch/arm/mach-msm/board-9625.c
index 923dc2a..3bb00bb 100644
--- a/arch/arm/mach-msm/board-9625.c
+++ b/arch/arm/mach-msm/board-9625.c
@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/memory.h>
+#include <linux/msm_tsens.h>
#include <asm/mach/map.h>
#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
@@ -237,6 +238,7 @@
msm_spm_device_init();
msm_clock_init(&msm9625_clock_init_data);
msm9625_init_buses();
+ tsens_tm_init_driver();
}
void __init msm9625_init(void)
diff --git a/arch/arm/mach-msm/board-krypton-gpiomux.c b/arch/arm/mach-msm/board-krypton-gpiomux.c
new file mode 100644
index 0000000..3d86ba7
--- /dev/null
+++ b/arch/arm/mach-msm/board-krypton-gpiomux.c
@@ -0,0 +1,52 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include <mach/gpiomux.h>
+
+static struct gpiomux_setting gpio_uart_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct msm_gpiomux_config msm_blsp_configs[] __initdata = {
+ {
+ .gpio = 8, /* BLSP1 UART TX */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &gpio_uart_config,
+ },
+ },
+ {
+ .gpio = 9, /* BLSP1 UART RX */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &gpio_uart_config,
+ },
+ },
+};
+
+void __init msmkrypton_init_gpiomux(void)
+{
+ int rc;
+
+ rc = msm_gpiomux_init_dt();
+ if (rc) {
+ pr_err("%s failed %d\n", __func__, rc);
+ return;
+ }
+
+ msm_gpiomux_install(msm_blsp_configs, ARRAY_SIZE(msm_blsp_configs));
+}
diff --git a/arch/arm/mach-msm/board-krypton.c b/arch/arm/mach-msm/board-krypton.c
new file mode 100644
index 0000000..aada3b0
--- /dev/null
+++ b/arch/arm/mach-msm/board-krypton.c
@@ -0,0 +1,84 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/memory.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+#include <mach/board.h>
+#include <mach/gpiomux.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_memtypes.h>
+#include <mach/msm_smd.h>
+#include <mach/restart.h>
+#include <mach/socinfo.h>
+#include <mach/clk-provider.h>
+#include "board-dt.h"
+#include "clock.h"
+#include "devices.h"
+
+static struct clk_lookup msm_clocks_dummy[] = {
+ CLK_DUMMY("core_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
+ CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
+};
+
+static struct clock_init_data msm_dummy_clock_init_data __initdata = {
+ .table = msm_clocks_dummy,
+ .size = ARRAY_SIZE(msm_clocks_dummy),
+};
+
+/*
+ * Used to satisfy dependencies for devices that need to be
+ * run early or in a particular order. Most likely your device doesn't fall
+ * into this category, and thus the driver should not be added here;
+ * EPROBE_DEFER can satisfy most dependency problems.
+ */
+void __init msmkrypton_add_drivers(void)
+{
+ msm_smd_init();
+ msm_clock_init(&msm_dummy_clock_init_data);
+}
+
+static void __init msmkrypton_map_io(void)
+{
+ msm_map_msmkrypton_io();
+}
+
+void __init msmkrypton_init(void)
+{
+ if (socinfo_init() < 0)
+ pr_err("%s: socinfo_init() failed\n", __func__);
+
+ msmkrypton_init_gpiomux();
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ msmkrypton_add_drivers();
+}
+
+static const char *msmkrypton_dt_match[] __initconst = {
+ "qcom,msmkrypton",
+ NULL
+};
+
+DT_MACHINE_START(MSMKRYPTON_DT, "Qualcomm MSM Krypton (Flattened Device Tree)")
+ .map_io = msmkrypton_map_io,
+ .init_irq = msm_dt_init_irq,
+ .init_machine = msmkrypton_init,
+ .handle_irq = gic_handle_irq,
+ .timer = &msm_dt_timer,
+ .dt_compat = msmkrypton_dt_match,
+ .restart = msm_restart,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 6b98393..0e1c03e 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -5558,7 +5558,7 @@
reserve_rtb_memory();
}
-static int msm8x60_paddr_to_memtype(unsigned int paddr)
+static int msm8x60_paddr_to_memtype(phys_addr_t paddr)
{
if (paddr >= 0x40000000 && paddr < 0x60000000)
return MEMTYPE_EBI1;
diff --git a/arch/arm/mach-msm/board-zinc.c b/arch/arm/mach-msm/board-zinc.c
deleted file mode 100644
index fa19e39..0000000
--- a/arch/arm/mach-msm/board-zinc.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/memory.h>
-#include <asm/hardware/gic.h>
-#include <asm/mach/map.h>
-#include <asm/mach/arch.h>
-#include <mach/board.h>
-#include <mach/gpiomux.h>
-#include <mach/msm_iomap.h>
-#include <mach/msm_memtypes.h>
-#include <mach/msm_smd.h>
-#include <mach/restart.h>
-#include <mach/socinfo.h>
-#include <mach/clk-provider.h>
-#include "board-dt.h"
-#include "clock.h"
-#include "devices.h"
-#include "platsmp.h"
-
-static struct memtype_reserve msmzinc_reserve_table[] __initdata = {
- [MEMTYPE_SMI] = {
- },
- [MEMTYPE_EBI0] = {
- .flags = MEMTYPE_FLAGS_1M_ALIGN,
- },
- [MEMTYPE_EBI1] = {
- .flags = MEMTYPE_FLAGS_1M_ALIGN,
- },
-};
-
-static int msmzinc_paddr_to_memtype(unsigned int paddr)
-{
- return MEMTYPE_EBI1;
-}
-
-static struct reserve_info msmzinc_reserve_info __initdata = {
- .memtype_reserve_table = msmzinc_reserve_table,
- .paddr_to_memtype = msmzinc_paddr_to_memtype,
-};
-
-void __init msmzinc_reserve(void)
-{
- reserve_info = &msmzinc_reserve_info;
- of_scan_flat_dt(dt_scan_for_memory_reserve, msmzinc_reserve_table);
- msm_reserve();
-}
-
-static void __init msmzinc_early_memory(void)
-{
- reserve_info = &msmzinc_reserve_info;
- of_scan_flat_dt(dt_scan_for_memory_hole, msmzinc_reserve_table);
-}
-
-static struct clk_lookup msm_clocks_dummy[] = {
- CLK_DUMMY("core_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
- CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
-};
-
-static struct clock_init_data msm_dummy_clock_init_data __initdata = {
- .table = msm_clocks_dummy,
- .size = ARRAY_SIZE(msm_clocks_dummy),
-};
-
-/*
- * Used to satisfy dependencies for devices that need to be
- * run early or in a particular order. Most likely your device doesn't fall
- * into this category, and thus the driver should not be added here. The
- * EPROBE_DEFER can satisfy most dependency problems.
- */
-void __init msmzinc_add_drivers(void)
-{
- msm_smd_init();
- msm_clock_init(&msm_dummy_clock_init_data);
-}
-
-static void __init msmzinc_map_io(void)
-{
- msm_map_zinc_io();
-}
-
-void __init msmzinc_init(void)
-{
- if (socinfo_init() < 0)
- pr_err("%s: socinfo_init() failed\n", __func__);
-
- msmzinc_init_gpiomux();
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- msmzinc_add_drivers();
-}
-
-void __init msmzinc_init_very_early(void)
-{
- msmzinc_early_memory();
-}
-
-static const char *msmzinc_dt_match[] __initconst = {
- "qcom,msmzinc",
- NULL
-};
-
-DT_MACHINE_START(MSMZINC_DT, "Qualcomm MSM ZINC (Flattened Device Tree)")
- .map_io = msmzinc_map_io,
- .init_irq = msm_dt_init_irq,
- .init_machine = msmzinc_init,
- .handle_irq = gic_handle_irq,
- .timer = &msm_dt_timer,
- .dt_compat = msmzinc_dt_match,
- .reserve = msmzinc_reserve,
- .init_very_early = msmzinc_init_very_early,
- .restart = msm_restart,
- .smp = &msm8974_smp_ops,
-MACHINE_END
diff --git a/arch/arm/mach-msm/cache_erp.c b/arch/arm/mach-msm/cache_erp.c
index ddea91c..f52bc28 100644
--- a/arch/arm/mach-msm/cache_erp.c
+++ b/arch/arm/mach-msm/cache_erp.c
@@ -123,11 +123,18 @@
unsigned int mplxrexnok;
};
+struct msm_erp_dump_region {
+ struct resource *res;
+ void __iomem *va;
+};
+
static DEFINE_PER_CPU(struct msm_l1_err_stats, msm_l1_erp_stats);
static struct msm_l2_err_stats msm_l2_erp_stats;
static int l1_erp_irq, l2_erp_irq;
static struct proc_dir_entry *procfs_entry;
+static int num_dump_regions;
+static struct msm_erp_dump_region *dump_regions;
#ifdef CONFIG_MSM_L1_ERR_LOG
static struct proc_dir_entry *procfs_log_entry;
@@ -211,6 +218,22 @@
return len;
}
+static int msm_erp_dump_regions(void)
+{
+ int i = 0;
+ struct msm_erp_dump_region *r;
+
+ for (i = 0; i < num_dump_regions; i++) {
+ r = &dump_regions[i];
+
+ pr_alert("%s %pR:\n", r->res->name, r->res);
+ print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 32, 4, r->va,
+ resource_size(r->res), 0);
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_MSM_L1_ERR_LOG
static int proc_read_log(char *page, char **start, off_t off, int count,
int *eof, void *data)
@@ -267,6 +290,7 @@
pr_alert("\tCESR = 0x%08x\n", cesr);
pr_alert("\tCPU speed = %lu\n", acpuclk_get_rate(cpu));
pr_alert("\tMIDR = 0x%08x\n", read_cpuid_id());
+ msm_erp_dump_regions();
}
if (cesr & CESR_DCTPE) {
@@ -425,6 +449,9 @@
if (port_error && print_alert)
ERP_PORT_ERR("L2 master port error detected");
+ if (soft_error && print_alert)
+ msm_erp_dump_regions();
+
if (soft_error && !unrecoverable)
ERP_1BIT_ERR("L2 single-bit error detected");
@@ -464,6 +491,37 @@
.notifier_call = cache_erp_cpu_callback,
};
+static int msm_erp_read_dump_regions(struct platform_device *pdev)
+{
+ int i;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+
+ num_dump_regions = of_property_count_strings(np, "reg-names");
+
+ if (num_dump_regions <= 0) {
+ num_dump_regions = 0;
+ return 0; /* Not an error - this is an optional property */
+ }
+
+ dump_regions = devm_kzalloc(&pdev->dev,
+ sizeof(*dump_regions) * num_dump_regions,
+ GFP_KERNEL);
+ if (!dump_regions)
+ return -ENOMEM;
+
+ for (i = 0; i < num_dump_regions; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ dump_regions[i].res = res;
+ dump_regions[i].va = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!dump_regions[i].va)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int msm_cache_erp_probe(struct platform_device *pdev)
{
struct resource *r;
@@ -511,6 +569,11 @@
goto fail_l2;
}
+ ret = msm_erp_read_dump_regions(pdev);
+
+ if (ret)
+ goto fail_l2;
+
get_online_cpus();
register_hotcpu_notifier(&cache_erp_cpu_notifier);
for_each_cpu(cpu, cpu_online_mask)
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 4079b5a..af027f0 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -1709,13 +1709,19 @@
F_MMSS( 100000000, gpll0, 6, 0, 0),
F_MMSS( 109090000, gpll0, 5.5, 0, 0),
F_MMSS( 133330000, gpll0, 4.5, 0, 0),
+ F_MMSS( 150000000, gpll0, 4, 0, 0),
F_MMSS( 200000000, gpll0, 3, 0, 0),
F_MMSS( 228570000, mmpll0_pll, 3.5, 0, 0),
F_MMSS( 266670000, mmpll0_pll, 3, 0, 0),
F_MMSS( 320000000, mmpll0_pll, 2.5, 0, 0),
+ F_MMSS( 400000000, mmpll0_pll, 2, 0, 0),
F_END
};
+static unsigned long camss_vfe_vfe0_fmax_v2[VDD_DIG_NUM] = {
+ 150000000, 320000000, 400000000,
+};
+
static struct rcg_clk vfe0_clk_src = {
.cmd_rcgr_reg = VFE0_CMD_RCGR,
.set_rate = set_rate_hid,
@@ -1972,11 +1978,17 @@
static struct clk_freq_tbl ftbl_camss_vfe_cpp_clk[] = {
F_MMSS( 133330000, gpll0, 4.5, 0, 0),
+ F_MMSS( 150000000, gpll0, 4, 0, 0),
F_MMSS( 266670000, mmpll0_pll, 3, 0, 0),
F_MMSS( 320000000, mmpll0_pll, 2.5, 0, 0),
+ F_MMSS( 400000000, mmpll0_pll, 2, 0, 0),
F_END
};
+static unsigned long camss_vfe_cpp_fmax_v2[VDD_DIG_NUM] = {
+ 150000000, 320000000, 400000000,
+};
+
static struct rcg_clk cpp_clk_src = {
.cmd_rcgr_reg = CPP_CMD_RCGR,
.set_rate = set_rate_hid,
@@ -2697,7 +2709,6 @@
.base = &virt_bases[LPASS_BASE],
.c = {
.dbg_name = "q6ss_xo_clk",
- .parent = &xo.c,
.ops = &clk_ops_branch,
CLK_INIT(q6ss_xo_clk.c),
},
@@ -2776,6 +2787,7 @@
},
.base = &virt_bases[APCS_PLL_BASE],
.c = {
+ .parent = &xo_a_clk.c,
.dbg_name = "a7sspll",
.ops = &clk_ops_sr2_pll,
.vdd_class = &vdd_sr2_pll,
@@ -2810,6 +2822,9 @@
static DEFINE_CLK_VOTER(pnoc_sps_clk, &pnoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(qseecom_ce1_clk_src, &ce1_clk_src.c, 100000000);
+static DEFINE_CLK_VOTER(scm_ce1_clk_src, &ce1_clk_src.c, 100000000);
+
static DEFINE_CLK_BRANCH_VOTER(cxo_otg_clk, &xo.c);
static DEFINE_CLK_BRANCH_VOTER(cxo_pil_lpass_clk, &xo.c);
static DEFINE_CLK_BRANCH_VOTER(cxo_pil_mss_clk, &xo.c);
@@ -3056,7 +3071,7 @@
/* WCNSS CLOCKS */
CLK_LOOKUP("xo", cxo_wlan_clk.c, "fb000000.qcom,wcnss-wlan"),
- CLK_LOOKUP("rf_clk", cxo_a2.c, "fb000000.qcom,wcnss-wlan"),
+ CLK_LOOKUP("rf_clk", cxo_a1.c, "fb000000.qcom,wcnss-wlan"),
/* BUS DRIVER */
CLK_LOOKUP("bus_clk", cnoc_msmbus_clk.c, "msm_config_noc"),
@@ -3171,12 +3186,14 @@
CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "qseecom"),
CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "qseecom"),
CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "qseecom"),
- CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "qseecom"),
+ CLK_LOOKUP("core_clk_src", qseecom_ce1_clk_src.c, "qseecom"),
CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "scm"),
CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "scm"),
CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "scm"),
- CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "scm"),
+ CLK_LOOKUP("core_clk_src", scm_ce1_clk_src.c, "scm"),
+
+ CLK_LOOKUP("core_clk_src", ce1_clk_src.c, ""),
/* SDCC */
CLK_LOOKUP("iface_clk", gcc_sdcc1_ahb_clk.c, "f9824000.qcom,sdcc"),
@@ -3322,6 +3339,8 @@
CLK_LOOKUP("csi1_rdi_clk", camss_csi1rdi_clk.c, "fda08400.qcom,csid"),
/* ISPIF clocks */
+ CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
+ "fda0a000.qcom,ispif"),
CLK_LOOKUP("camss_vfe_vfe_clk", camss_vfe_vfe0_clk.c,
"fda0a000.qcom,ispif"),
CLK_LOOKUP("camss_csi_vfe_clk", camss_csi_vfe0_clk.c,
@@ -3409,6 +3428,18 @@
CLK_LOOKUP("osr_clk", div_clk1.c, "msm-dai-q6-dev.16390"),
CLK_LOOKUP("osr_clk", div_clk1.c, "msm-dai-q6-dev.16391"),
+ /* QCEDEV clocks */
+ CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "fd400000.qcom,qcedev"),
+ CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "fd400000.qcom,qcedev"),
+ CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "fd400000.qcom,qcedev"),
+ CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "fd400000.qcom,qcedev"),
+
+ /* QCRYPTO clocks */
+ CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "fd404000.qcom,qcrypto"),
+ CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "fd404000.qcom,qcrypto"),
+ CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "fd404000.qcom,qcrypto"),
+ CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "fd404000.qcom,qcrypto"),
+
};
static struct clk_lookup msm_clocks_8226_rumi[] = {
@@ -3524,17 +3555,6 @@
panic("clock-8226: Unable to get the vdd_sr2_dig regulator!");
/*
- * These regulators are used at boot. Ensure they stay on
- * while the clock framework comes online.
- */
- vote_vdd_level(&vdd_sr2_pll, VDD_SR2_PLL_TUR);
- regulator_enable(vdd_sr2_pll.regulator[0]);
- regulator_enable(vdd_sr2_pll.regulator[1]);
-
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- regulator_enable(vdd_dig.regulator[0]);
-
- /*
* Hold an active set vote at a rate of 40MHz for the MMSS NOC AHB
* source. Sleep set vote is 0.
* RPM will also turn on gcc_mmss_noc_cfg_ahb_clk, which is needed to
@@ -3549,6 +3569,12 @@
reg_init();
+ /* v2 specific changes */
+ if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 2) {
+ cpp_clk_src.c.fmax = camss_vfe_cpp_fmax_v2;
+ vfe0_clk_src.c.fmax = camss_vfe_vfe0_fmax_v2;
+ }
+
/*
* MDSS needs the ahb clock and needs to init before we register the
* lookup table.
@@ -3556,17 +3582,9 @@
mdss_clk_ctrl_pre_init(&mdss_ahb_clk.c);
}
-static int __init msm8226_clock_late_init(void)
-{
- unvote_vdd_level(&vdd_sr2_pll, VDD_SR2_PLL_TUR);
- unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- return 0;
-}
-
struct clock_init_data msm8226_clock_init_data __initdata = {
.table = msm_clocks_8226,
.size = ARRAY_SIZE(msm_clocks_8226),
.pre_init = msm8226_clock_pre_init,
.post_init = msm8226_clock_post_init,
- .late_init = msm8226_clock_late_init,
};
diff --git a/arch/arm/mach-msm/clock-8610.c b/arch/arm/mach-msm/clock-8610.c
index d63d722..3cb3ea4 100644
--- a/arch/arm/mach-msm/clock-8610.c
+++ b/arch/arm/mach-msm/clock-8610.c
@@ -143,6 +143,7 @@
#define CE1_AHB_CBCR 0x104C
#define COPSS_SMMU_AHB_CBCR 0x015C
#define LPSS_SMMU_AHB_CBCR 0x0158
+#define BIMC_SMMU_CBCR 0x1120
#define LPASS_Q6_AXI_CBCR 0x11C0
#define APCS_GPLL_ENA_VOTE 0x1480
#define APCS_CLOCK_BRANCH_ENA_VOTE 0x1484
@@ -461,9 +462,9 @@
#define D0_ID 1
#define D1_ID 2
-#define A0_ID 3
-#define A1_ID 4
-#define A2_ID 5
+#define A0_ID 4
+#define A1_ID 5
+#define A2_ID 6
#define DIFF_CLK_ID 7
#define DIV_CLK_ID 11
@@ -564,6 +565,7 @@
},
.base = &virt_bases[APCS_PLL_BASE],
.c = {
+ .parent = &gcc_xo_a_clk_src.c,
.dbg_name = "a7sspll",
.ops = &clk_ops_sr2_pll,
.vdd_class = &vdd_sr2_pll,
@@ -1499,6 +1501,17 @@
},
};
+static struct branch_clk gcc_bimc_smmu_clk = {
+ .cbcr_reg = BIMC_SMMU_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_bimc_smmu_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_bimc_smmu_clk.c),
+ },
+};
+
static struct clk_freq_tbl ftbl_csi0_1_clk[] = {
F_MM(100000000, gpll0, 6, 0, 0),
F_MM(200000000, mmpll0, 4, 0, 0),
@@ -1794,6 +1807,8 @@
.dbg_name = "bimc_gfx_clk",
.ops = &clk_ops_branch,
CLK_INIT(bimc_gfx_clk.c),
+ /* FIXME: Remove once kgsl votes on gcc_bimc_smmu_clk directly. */
+ .depends = &gcc_bimc_smmu_clk.c,
},
};
@@ -2230,7 +2245,6 @@
.bcr_reg = LPASS_Q6SS_BCR,
.base = &virt_bases[LPASS_BASE],
.c = {
- .parent = &gcc_xo_clk_src.c,
.dbg_name = "q6ss_xo_clk",
.ops = &clk_ops_branch,
CLK_INIT(q6ss_xo_clk.c),
@@ -2289,6 +2303,7 @@
{ &gcc_ce1_ahb_clk.c, GCC_BASE, 0x013a},
{ &gcc_xo_clk_src.c, GCC_BASE, 0x0149},
{ &bimc_clk.c, GCC_BASE, 0x0154},
+ { &gcc_bimc_smmu_clk.c, GCC_BASE, 0x015e},
{ &gcc_lpass_q6_axi_clk.c, GCC_BASE, 0x0160},
{ &mmssnoc_ahb_clk.c, MMSS_BASE, 0x0001},
@@ -2549,6 +2564,8 @@
CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991f000.serial"),
CLK_LOOKUP("core_clk", gcc_blsp1_uart3_apps_clk.c, "f991f000.serial"),
+ CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991e000.serial"),
+ CLK_LOOKUP("core_clk", gcc_blsp1_uart2_apps_clk.c, "f991e000.serial"),
CLK_LOOKUP("dfab_clk", pnoc_sps_clk.c, "msm_sps"),
CLK_LOOKUP("bus_clk", pnoc_qseecom_clk.c, "qseecom"),
@@ -2602,6 +2619,10 @@
CLK_LOOKUP("core_clk", qdss_clk.c, "fc352000.cti"),
CLK_LOOKUP("core_clk", qdss_clk.c, "fc353000.cti"),
CLK_LOOKUP("core_clk", qdss_clk.c, "fc354000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc34c000.jtagmm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc34d000.jtagmm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc34e000.jtagmm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc34f000.jtagmm"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc326000.tmc"),
@@ -2631,6 +2652,10 @@
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc352000.cti"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc353000.cti"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc354000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc34c000.jtagmm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc34d000.jtagmm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc34e000.jtagmm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc34f000.jtagmm"),
@@ -2688,7 +2713,7 @@
CLK_LOOKUP("core_clk", gcc_mss_q6_bimc_axi_clk.c, ""),
CLK_LOOKUP("core_clk", gcc_pdm2_clk.c, ""),
CLK_LOOKUP("iface_clk", gcc_pdm_ahb_clk.c, ""),
- CLK_LOOKUP("iface_clk", gcc_prng_ahb_clk.c, ""),
+ CLK_LOOKUP("iface_clk", gcc_prng_ahb_clk.c, "f9bff000.qcom,msm-rng"),
CLK_LOOKUP("iface_clk", gcc_sdcc1_ahb_clk.c, "msm_sdcc.1"),
CLK_LOOKUP("core_clk", gcc_sdcc1_apps_clk.c, "msm_sdcc.1"),
CLK_LOOKUP("iface_clk", gcc_sdcc2_ahb_clk.c, "msm_sdcc.2"),
@@ -2752,6 +2777,8 @@
CLK_LOOKUP("iface_clk", oxili_ahb_clk.c, "fdc00000.qcom,kgsl-3d0"),
CLK_LOOKUP("mem_iface_clk", bimc_gfx_clk.c, "fdc00000.qcom,kgsl-3d0"),
CLK_LOOKUP("mem_clk", gmem_gfx3d_clk.c, "fdc00000.qcom,kgsl-3d0"),
+ CLK_LOOKUP("alt_mem_iface_clk", gcc_bimc_smmu_clk.c,
+ "fdc00000.qcom,kgsl-3d0"),
CLK_LOOKUP("iface_clk", vfe_ahb_clk.c, "fd890000.qcom,iommu"),
CLK_LOOKUP("core_clk", vfe_axi_clk.c, "fd890000.qcom,iommu"),
@@ -2761,6 +2788,7 @@
CLK_LOOKUP("core_clk", mdp_axi_clk.c, "fd870000.qcom,iommu"),
CLK_LOOKUP("iface_clk", oxili_ahb_clk.c, "fd880000.qcom,iommu"),
CLK_LOOKUP("core_clk", bimc_gfx_clk.c, "fd880000.qcom,iommu"),
+ CLK_LOOKUP("alt_core_clk", gcc_bimc_smmu_clk.c, "fd880000.qcom,iommu"),
CLK_LOOKUP("iface_clk", gcc_lpss_smmu_ahb_clk.c, "fd000000.qcom,iommu"),
CLK_LOOKUP("core_clk", gcc_lpass_q6_axi_clk.c, "fd000000.qcom,iommu"),
CLK_LOOKUP("iface_clk", gcc_copss_smmu_ahb_clk.c,
@@ -2781,6 +2809,31 @@
CLK_LOOKUP("measure_clk", apc2_m_clk, ""),
CLK_LOOKUP("measure_clk", apc3_m_clk, ""),
CLK_LOOKUP("measure_clk", l2_m_clk, ""),
+
+ CLK_LOOKUP("xo", gcc_xo_clk_src.c, "fb000000.qcom,wcnss-wlan"),
+ CLK_LOOKUP("rf_clk", cxo_a1.c, "fb000000.qcom,wcnss-wlan"),
+
+ CLK_LOOKUP("iface_clk", mdp_ahb_clk.c, "fd900000.qcom,mdss_mdp"),
+ CLK_LOOKUP("core_clk", mdp_axi_clk.c, "fd900000.qcom,mdss_mdp"),
+ CLK_LOOKUP("lcdc_clk", mdp_lcdc_clk.c, "fd900000.qcom,mdss_mdp"),
+ CLK_LOOKUP("vsync_clk", mdp_vsync_clk.c, "fd900000.qcom,mdss_mdp"),
+ CLK_LOOKUP("dsi_clk", mdp_dsi_clk.c, "fd900000.qcom,mdss_mdp"),
+ CLK_LOOKUP("iface_clk", dsi_ahb_clk.c, "fdd00000.qcom,mdss_dsi"),
+ CLK_LOOKUP("dsi_clk", dsi_clk.c, "fdd00000.qcom,mdss_dsi"),
+ CLK_LOOKUP("byte_clk", dsi_byte_clk.c, "fdd00000.qcom,mdss_dsi"),
+ CLK_LOOKUP("esc_clk", dsi_esc_clk.c, "fdd00000.qcom,mdss_dsi"),
+ CLK_LOOKUP("pixel_clk", dsi_pclk_clk.c, "fdd00000.qcom,mdss_dsi"),
+
+ /* QSEECOM Clocks */
+ CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "qseecom"),
+ CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "qseecom"),
+ CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "qseecom"),
+ CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "qseecom"),
+
+ CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "scm"),
+ CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "scm"),
+ CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "scm"),
+ CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "scm"),
};
static struct clk_lookup msm_clocks_8610_rumi[] = {
@@ -2956,19 +3009,6 @@
if (IS_ERR(vdd_sr2_pll.regulator[1]))
panic("clock-8610: Unable to get the vdd_sr2_dig regulator!");
- vote_vdd_level(&vdd_sr2_pll, VDD_SR2_PLL_TUR);
- regulator_enable(vdd_sr2_pll.regulator[0]);
- regulator_enable(vdd_sr2_pll.regulator[1]);
-
- /*
- * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
- * until late_init. This may not be necessary with clock handoff;
- * Investigate this code on a real non-simulator target to determine
- * its necessity.
- */
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- regulator_enable(vdd_dig.regulator[0]);
-
enable_rpm_scaling();
/* Enable a clock to allow access to MMSS clock registers */
@@ -2985,17 +3025,9 @@
clk_prepare_enable(&mmss_s0_axi_clk.c);
}
-static int __init msm8610_clock_late_init(void)
-{
- unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- unvote_vdd_level(&vdd_sr2_pll, VDD_SR2_PLL_TUR);
- return 0;
-}
-
struct clock_init_data msm8610_clock_init_data __initdata = {
.table = msm_clocks_8610,
.size = ARRAY_SIZE(msm_clocks_8610),
.pre_init = msm8610_clock_pre_init,
.post_init = msm8610_clock_post_init,
- .late_init = msm8610_clock_late_init,
};
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index aefaa5c..be6d965 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -5419,11 +5419,6 @@
CLK_LOOKUP("csi_phy_clk", csi0_phy_clk.c, "msm_csid.0"),
CLK_LOOKUP("csi_phy_clk", csi1_phy_clk.c, "msm_csid.1"),
CLK_LOOKUP("csi_phy_clk", csi2_phy_clk.c, "msm_csid.2"),
- CLK_LOOKUP("csi_pix_clk", csi_pix_clk.c, "msm_ispif.0"),
- CLK_LOOKUP("csi_pix1_clk", csi_pix1_clk.c, "msm_ispif.0"),
- CLK_LOOKUP("csi_rdi_clk", csi_rdi_clk.c, "msm_ispif.0"),
- CLK_LOOKUP("csi_rdi1_clk", csi_rdi1_clk.c, "msm_ispif.0"),
- CLK_LOOKUP("csi_rdi2_clk", csi_rdi2_clk.c, "msm_ispif.0"),
CLK_LOOKUP("csiphy_timer_src_clk",
csiphy_timer_src_clk.c, "msm_csiphy.0"),
CLK_LOOKUP("csiphy_timer_src_clk",
@@ -6740,8 +6735,6 @@
if ((readl_relaxed(PRNG_CLK_NS_REG) & 0x7F) == 0x2B)
prng_clk.freq_tbl = clk_tbl_prng_64;
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-
clk_ops_local_pll.enable = sr_pll_clk_enable;
}
@@ -6852,7 +6845,7 @@
if (WARN(rc, "cfpb_a_clk not enabled (%d)\n", rc))
return rc;
- return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
+ return 0;
}
struct clock_init_data msm8960_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index e6874b7..707e6b6 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -1405,7 +1405,14 @@
};
static struct clk_freq_tbl ftbl_gcc_gp_clk[] = {
- F(19200000, cxo, 1, 0, 0),
+ F( 4800000, cxo, 4, 0, 0),
+ F( 6000000, gpll0, 10, 1, 10),
+ F( 6750000, gpll0, 1, 1, 89),
+ F( 8000000, gpll0, 15, 1, 5),
+ F( 9600000, cxo, 2, 0, 0),
+ F(16000000, gpll0, 1, 2, 75),
+ F(19200000, cxo, 1, 0, 0),
+ F(24000000, gpll0, 5, 1, 5),
F_END
};
@@ -4750,7 +4757,8 @@
CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991f000.serial"),
CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f9924000.i2c"),
CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991e000.serial"),
- CLK_LOOKUP("core_clk", gcc_blsp1_qup1_i2c_apps_clk.c, ""),
+ CLK_LOOKUP("core_clk", gcc_blsp1_qup1_i2c_apps_clk.c, "f9923000.i2c"),
+ CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f9923000.i2c"),
CLK_LOOKUP("core_clk", gcc_blsp1_qup2_i2c_apps_clk.c, "f9924000.i2c"),
CLK_LOOKUP("core_clk", gcc_blsp1_qup2_spi_apps_clk.c, ""),
CLK_LOOKUP("core_clk", gcc_blsp1_qup1_spi_apps_clk.c, "f9923000.spi"),
@@ -4816,6 +4824,11 @@
CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "qseecom"),
CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "qseecom"),
+ CLK_LOOKUP("ce_drv_core_clk", gcc_ce2_clk.c, "qseecom"),
+ CLK_LOOKUP("ce_drv_iface_clk", gcc_ce2_ahb_clk.c, "qseecom"),
+ CLK_LOOKUP("ce_drv_bus_clk", gcc_ce2_axi_clk.c, "qseecom"),
+ CLK_LOOKUP("ce_drv_core_clk_src", ce2_clk_src.c, "qseecom"),
+
CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "scm"),
CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "scm"),
CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "scm"),
@@ -4934,77 +4947,78 @@
"fda0b400.qcom,csiphy"),
CLK_LOOKUP("csiphy_timer_clk", camss_phy2_csi2phytimer_clk.c,
"fda0b400.qcom,csiphy"),
+
/* CSID clocks */
- CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
- "fda08000.qcom,csid"),
CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
- "fda08000.qcom,csid"),
- CLK_LOOKUP("csi0_ahb_clk", camss_csi0_ahb_clk.c, "fda08000.qcom,csid"),
- CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c, "fda08000.qcom,csid"),
- CLK_LOOKUP("csi0_phy_clk", camss_csi0phy_clk.c, "fda08000.qcom,csid"),
- CLK_LOOKUP("csi0_clk", camss_csi0_clk.c, "fda08000.qcom,csid"),
- CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c, "fda08000.qcom,csid"),
- CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c, "fda08000.qcom,csid"),
+ "fda08000.qcom,csid"),
+ CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
+ "fda08000.qcom,csid"),
+ CLK_LOOKUP("csi_ahb_clk", camss_csi0_ahb_clk.c,
+ "fda08000.qcom,csid"),
+ CLK_LOOKUP("csi_src_clk", csi0_clk_src.c,
+ "fda08000.qcom,csid"),
+ CLK_LOOKUP("csi_phy_clk", camss_csi0phy_clk.c,
+ "fda08000.qcom,csid"),
+ CLK_LOOKUP("csi_clk", camss_csi0_clk.c,
+ "fda08000.qcom,csid"),
+ CLK_LOOKUP("csi_pix_clk", camss_csi0pix_clk.c,
+ "fda08000.qcom,csid"),
+ CLK_LOOKUP("csi_rdi_clk", camss_csi0rdi_clk.c,
+ "fda08000.qcom,csid"),
- CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
- "fda08400.qcom,csid"),
CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
- "fda08400.qcom,csid"),
- CLK_LOOKUP("csi0_ahb_clk", camss_csi0_ahb_clk.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi1_ahb_clk", camss_csi1_ahb_clk.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi1_src_clk", csi1_clk_src.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi0_phy_clk", camss_csi0phy_clk.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi1_phy_clk", camss_csi1phy_clk.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi0_clk", camss_csi0_clk.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi1_clk", camss_csi1_clk.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi1_pix_clk", camss_csi1pix_clk.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c, "fda08400.qcom,csid"),
- CLK_LOOKUP("csi1_rdi_clk", camss_csi1rdi_clk.c, "fda08400.qcom,csid"),
+ "fda08400.qcom,csid"),
+ CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
+ "fda08400.qcom,csid"),
+ CLK_LOOKUP("csi_ahb_clk", camss_csi1_ahb_clk.c,
+ "fda08400.qcom,csid"),
+ CLK_LOOKUP("csi_src_clk", csi1_clk_src.c,
+ "fda08400.qcom,csid"),
+ CLK_LOOKUP("csi_phy_clk", camss_csi1phy_clk.c,
+ "fda08400.qcom,csid"),
+ CLK_LOOKUP("csi_clk", camss_csi1_clk.c,
+ "fda08400.qcom,csid"),
+ CLK_LOOKUP("csi_pix_clk", camss_csi1pix_clk.c,
+ "fda08400.qcom,csid"),
+ CLK_LOOKUP("csi_rdi_clk", camss_csi1rdi_clk.c,
+ "fda08400.qcom,csid"),
- CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
- "fda08800.qcom,csid"),
CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
- "fda08800.qcom,csid"),
- CLK_LOOKUP("csi0_ahb_clk", camss_csi0_ahb_clk.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi2_ahb_clk", camss_csi2_ahb_clk.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi2_src_clk", csi2_clk_src.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi0_phy_clk", camss_csi0phy_clk.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi2_phy_clk", camss_csi2phy_clk.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi0_clk", camss_csi0_clk.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi2_clk", camss_csi2_clk.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi2_pix_clk", camss_csi2pix_clk.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c, "fda08800.qcom,csid"),
- CLK_LOOKUP("csi2_rdi_clk", camss_csi2rdi_clk.c, "fda08800.qcom,csid"),
+ "fda08800.qcom,csid"),
+ CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
+ "fda08800.qcom,csid"),
+ CLK_LOOKUP("csi_ahb_clk", camss_csi2_ahb_clk.c,
+ "fda08800.qcom,csid"),
+ CLK_LOOKUP("csi_src_clk", csi2_clk_src.c,
+ "fda08800.qcom,csid"),
+ CLK_LOOKUP("csi_phy_clk", camss_csi2phy_clk.c,
+ "fda08800.qcom,csid"),
+ CLK_LOOKUP("csi_clk", camss_csi2_clk.c,
+ "fda08800.qcom,csid"),
+ CLK_LOOKUP("csi_pix_clk", camss_csi2pix_clk.c,
+ "fda08800.qcom,csid"),
+ CLK_LOOKUP("csi_rdi_clk", camss_csi2rdi_clk.c,
+ "fda08800.qcom,csid"),
- CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
- "fda08c00.qcom,csid"),
CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
- "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi0_ahb_clk", camss_csi0_ahb_clk.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi3_ahb_clk", camss_csi3_ahb_clk.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi0_src_clk", csi0_clk_src.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi3_src_clk", csi3_clk_src.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi0_phy_clk", camss_csi0phy_clk.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi3_phy_clk", camss_csi3phy_clk.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi0_clk", camss_csi0_clk.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi3_clk", camss_csi3_clk.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi0_pix_clk", camss_csi0pix_clk.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi3_pix_clk", camss_csi3pix_clk.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi0_rdi_clk", camss_csi0rdi_clk.c, "fda08c00.qcom,csid"),
- CLK_LOOKUP("csi3_rdi_clk", camss_csi3rdi_clk.c, "fda08c00.qcom,csid"),
+ "fda08c00.qcom,csid"),
+ CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
+ "fda08c00.qcom,csid"),
+ CLK_LOOKUP("csi_ahb_clk", camss_csi3_ahb_clk.c,
+ "fda08c00.qcom,csid"),
+ CLK_LOOKUP("csi_src_clk", csi3_clk_src.c,
+ "fda08c00.qcom,csid"),
+ CLK_LOOKUP("csi_phy_clk", camss_csi3phy_clk.c,
+ "fda08c00.qcom,csid"),
+ CLK_LOOKUP("csi_clk", camss_csi3_clk.c,
+ "fda08c00.qcom,csid"),
+ CLK_LOOKUP("csi_pix_clk", camss_csi3pix_clk.c,
+ "fda08c00.qcom,csid"),
+ CLK_LOOKUP("csi_rdi_clk", camss_csi3rdi_clk.c,
+ "fda08c00.qcom,csid"),
/* ISPIF clocks */
- CLK_LOOKUP("camss_vfe_vfe_clk", camss_vfe_vfe0_clk.c,
- "fda0a000.qcom,ispif"),
- CLK_LOOKUP("camss_csi_vfe_clk", camss_csi_vfe0_clk.c,
- "fda0a000.qcom,ispif"),
- CLK_LOOKUP("camss_vfe_vfe_clk1", camss_vfe_vfe1_clk.c,
- "fda0a000.qcom,ispif"),
- CLK_LOOKUP("camss_csi_vfe_clk1", camss_csi_vfe1_clk.c,
+ CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
"fda0a000.qcom,ispif"),
/*VFE clocks*/
@@ -5107,6 +5121,7 @@
CLK_LOOKUP("bus_clk", venus0_axi_clk.c, "fdc00000.qcom,vidc"),
CLK_LOOKUP("mem_clk", venus0_ocmemnoc_clk.c, "fdc00000.qcom,vidc"),
+ CLK_LOOKUP("core_clk", oxili_gfx3d_clk.c, "fd8c4024.qcom,gdsc"),
/* LPASS clocks */
CLK_LOOKUP("bus_clk", gcc_mss_q6_bimc_axi_clk.c, "fc880000.qcom,mss"),
@@ -5487,15 +5502,6 @@
if (IS_ERR(vdd_dig.regulator[0]))
panic("clock-8974: Unable to get the vdd_dig regulator!");
- /*
- * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
- * until late_init. This may not be necessary with clock handoff;
- * Investigate this code on a real non-simulator target to determine
- * its necessity.
- */
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- regulator_enable(vdd_dig.regulator[0]);
-
enable_rpm_scaling();
reg_init();
@@ -5532,11 +5538,6 @@
mdss_clk_ctrl_pre_init(&mdss_ahb_clk.c);
}
-static int __init msm8974_clock_late_init(void)
-{
- return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-}
-
static void __init msm8974_rumi_clock_pre_init(void)
{
virt_bases[GCC_BASE] = ioremap(GCC_CC_PHYS, GCC_CC_SIZE);
@@ -5552,15 +5553,6 @@
vdd_dig.regulator[0] = regulator_get(NULL, "vdd_dig");
if (IS_ERR(vdd_dig.regulator[0]))
panic("clock-8974: Unable to get the vdd_dig regulator!");
-
- /*
- * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
- * until late_init. This may not be necessary with clock handoff;
- * Investigate this code on a real non-simulator target to determine
- * its necessity.
- */
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- regulator_enable(vdd_dig.regulator[0]);
}
struct clock_init_data msm8974_clock_init_data __initdata = {
@@ -5568,7 +5560,6 @@
.size = ARRAY_SIZE(msm_clocks_8974),
.pre_init = msm8974_clock_pre_init,
.post_init = msm8974_clock_post_init,
- .late_init = msm8974_clock_late_init,
};
struct clock_init_data msm8974_rumi_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index d0b4a32..5d55966 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -3777,8 +3777,6 @@
static void __init msm8660_clock_pre_init(void)
{
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-
/* Setup MM_PLL2 (PLL3), but turn it off. Rate set by set_rate_tv(). */
rmwreg(0, MM_PLL2_MODE_REG, BIT(0)); /* Disable output */
/* Set ref, bypass, assert reset, disable output, disable test mode */
@@ -3900,7 +3898,7 @@
if (WARN(rc, "mmfpb_a_clk not enabled (%d)\n", rc))
return rc;
- return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
+ return 0;
}
struct clock_init_data msm8x60_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index d6ae4335..6b218a1 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -1754,8 +1754,6 @@
{
u32 regval, is_pll_enabled, pll9_lval;
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-
clk_ops_local_pll.enable = sr_pll_clk_enable;
/* Enable PDM CXO source. */
@@ -1831,15 +1829,9 @@
clk_disable_unprepare(&pdm_clk.c);
}
-static int __init msm9615_clock_late_init(void)
-{
- return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-}
-
struct clock_init_data msm9615_clock_init_data __initdata = {
.table = msm_clocks_9615,
.size = ARRAY_SIZE(msm_clocks_9615),
.pre_init = msm9615_clock_pre_init,
.post_init = msm9615_clock_post_init,
- .late_init = msm9615_clock_late_init,
};
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index 6817c6c..4984255 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -411,6 +411,7 @@
},
.base = &virt_bases[APCS_PLL_BASE],
.c = {
+ .parent = &cxo_a_clk_src.c,
.dbg_name = "apcspll_clk_src",
.ops = &clk_ops_local_pll,
CLK_INIT(apcspll_clk_src.c),
@@ -2017,12 +2018,6 @@
*/
clk_prepare_enable(&cxo_a_clk_src.c);
- /*
- * TODO: This call is to prevent sending 0Hz to rpm to turn off pnoc.
- * Needs to remove this after vote of pnoc from sdcc driver is ready.
- */
- clk_prepare_enable(&pnoc_msmbus_a_clk.c);
-
/* Set rates for single-rate clocks. */
clk_set_rate(&usb_hs_system_clk_src.c,
usb_hs_system_clk_src.freq_tbl[0].freq_hz);
@@ -2089,9 +2084,6 @@
if (IS_ERR(vdd_dig.regulator[0]))
panic("clock-9625: Unable to get the vdd_dig regulator!");
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- regulator_enable(vdd_dig.regulator[0]);
-
enable_rpm_scaling();
reg_init();
@@ -2107,15 +2099,9 @@
measure_mux_common, sizeof(measure_mux_common));
}
-static int __init msm9625_clock_late_init(void)
-{
- return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-}
-
struct clock_init_data msm9625_clock_init_data __initdata = {
.table = msm_clocks_9625,
.size = ARRAY_SIZE(msm_clocks_9625),
.pre_init = msm9625_clock_pre_init,
.post_init = msm9625_clock_post_init,
- .late_init = msm9625_clock_late_init,
};
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index 5da1663..0b8240c 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -756,10 +756,10 @@
spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
reg_val = readl_relaxed(b->retain_reg);
switch (flags) {
- case CLKFLAG_RETAIN:
+ case CLKFLAG_RETAIN_MEM:
reg_val |= b->retain_mask;
break;
- case CLKFLAG_NORETAIN:
+ case CLKFLAG_NORETAIN_MEM:
reg_val &= ~b->retain_mask;
break;
default:
diff --git a/arch/arm/mach-msm/clock-local2.c b/arch/arm/mach-msm/clock-local2.c
index 8bdc496..0d1104e 100644
--- a/arch/arm/mach-msm/clock-local2.c
+++ b/arch/arm/mach-msm/clock-local2.c
@@ -570,6 +570,47 @@
return __branch_clk_reset(BCR_REG(branch), action);
}
+static int branch_clk_set_flags(struct clk *c, unsigned flags)
+{
+ u32 cbcr_val;
+ unsigned long irq_flags;
+ struct branch_clk *branch = to_branch_clk(c);
+ int ret = 0;
+
+ spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
+ cbcr_val = readl_relaxed(CBCR_REG(branch));
+ switch (flags) {
+ case CLKFLAG_RETAIN_PERIPH:
+ cbcr_val |= BIT(13);
+ break;
+ case CLKFLAG_NORETAIN_PERIPH:
+ cbcr_val &= ~BIT(13);
+ break;
+ case CLKFLAG_RETAIN_MEM:
+ cbcr_val |= BIT(14);
+ break;
+ case CLKFLAG_NORETAIN_MEM:
+ cbcr_val &= ~BIT(14);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ writel_relaxed(cbcr_val, CBCR_REG(branch));
+ /*
+ * 8974v2.2 has a requirement that writes to set bits 13 and 14 are
+ * separated by at least 2 bus cycles. Cover one of these cycles by
+ * performing an extra write here. The other cycle is covered by the
+ * read-modify-write design of this function.
+ */
+ writel_relaxed(cbcr_val, CBCR_REG(branch));
+ spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);
+
+ /* Make sure write is issued before returning. */
+ mb();
+
+ return ret;
+}
+
/*
* Voteable clock functions
*/
@@ -824,6 +865,7 @@
.list_rate = branch_clk_list_rate,
.round_rate = branch_clk_round_rate,
.reset = branch_clk_reset,
+ .set_flags = branch_clk_set_flags,
.handoff = branch_clk_handoff,
};
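
Illustrative sketch, not part of this change: a minimal consumer-side use of the
retain flags that branch_clk_set_flags() above now handles. The lookup name
"core_clk" and the example function are assumptions for this sketch; the gdsc.c
hunk later in this series makes the same clk_set_flags() calls.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <mach/clk.h>

	static int example_retain_core_mem(struct platform_device *pdev)
	{
		/* "core_clk" is an assumed con_id for this sketch. */
		struct clk *core_clk = devm_clk_get(&pdev->dev, "core_clk");

		if (IS_ERR(core_clk))
			return PTR_ERR(core_clk);

		/* Keep memory arrays and peripheral logic powered while gated. */
		clk_set_flags(core_clk, CLKFLAG_RETAIN_MEM);
		clk_set_flags(core_clk, CLKFLAG_RETAIN_PERIPH);
		return 0;
	}
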
diff --git a/arch/arm/mach-msm/clock-mdss-8226.c b/arch/arm/mach-msm/clock-mdss-8226.c
index f2c8d58..edfaf90 100644
--- a/arch/arm/mach-msm/clock-mdss-8226.c
+++ b/arch/arm/mach-msm/clock-mdss-8226.c
@@ -71,7 +71,6 @@
{
u32 status;
- clk_prepare_enable(mdss_dsi_ahb_clk);
/* poll for PLL ready status */
if (readl_poll_timeout_noirq((mdss_dsi_base + 0x02c0),
status,
@@ -83,7 +82,6 @@
} else {
pll_initialized = 1;
}
- clk_disable_unprepare(mdss_dsi_ahb_clk);
return pll_initialized;
}
@@ -177,28 +175,141 @@
return ret;
}
-static void mdss_dsi_uniphy_pll_lock_detect_setting(void)
-{
- REG_W(0x04, mdss_dsi_base + 0x0264); /* LKDetect CFG2 */
- udelay(100);
- REG_W(0x05, mdss_dsi_base + 0x0264); /* LKDetect CFG2 */
- udelay(500);
-}
-
static void mdss_dsi_uniphy_pll_sw_reset(void)
{
+ /*
+ * Add hardware recommended delays after toggling the
+ * software reset bit off and back on.
+ */
REG_W(0x01, mdss_dsi_base + 0x0268); /* PLL TEST CFG */
- udelay(1);
+ udelay(300);
REG_W(0x00, mdss_dsi_base + 0x0268); /* PLL TEST CFG */
+ udelay(300);
+}
+
+static void mdss_dsi_pll_enable_casem(void)
+{
+ int i;
+
+ /*
+ * Add hardware recommended delays between register writes for
+ * the updates to take effect. These delays are necessary for the
+ * PLL to successfully lock.
+ */
+ REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1000);
+
+ for (i = 0; (i < 3) && !mdss_dsi_check_pll_lock(); i++) {
+ REG_W(0x07, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1000);
+ }
+
+ if (pll_initialized)
+ pr_debug("%s: PLL Locked after %d attempts\n", __func__, i);
+ else
+ pr_debug("%s: PLL failed to lock\n", __func__);
+}
+
+static void mdss_dsi_pll_enable_casef1(void)
+{
+ /*
+ * Add hardware recommended delays between register writes for
+ * the updates to take effect. These delays are necessary for the
+ * PLL to successfully lock.
+ */
+ REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0d, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1000);
+
+ if (mdss_dsi_check_pll_lock())
+ pr_debug("%s: PLL Locked\n", __func__);
+ else
+ pr_debug("%s: PLL failed to lock\n", __func__);
+}
+
+static void mdss_dsi_pll_enable_cased(void)
+{
+ /*
+ * Add hardware recommended delays between register writes for
+ * the updates to take effect. These delays are necessary for the
+ * PLL to successfully lock.
+ */
+ REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
udelay(1);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+ REG_W(0x07, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+ REG_W(0x07, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+
+ if (mdss_dsi_check_pll_lock())
+ pr_debug("%s: PLL Locked\n", __func__);
+ else
+ pr_debug("%s: PLL failed to lock\n", __func__);
+}
+
+static void mdss_dsi_pll_enable_casec(void)
+{
+ /*
+ * Add hardware recommended delays between register writes for
+ * the updates to take effect. These delays are necessary for the
+ * PLL to successfully lock.
+ */
+ REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1000);
+
+ if (mdss_dsi_check_pll_lock())
+ pr_debug("%s: PLL Locked\n", __func__);
+ else
+ pr_debug("%s: PLL failed to lock\n", __func__);
+}
+
+static void mdss_dsi_pll_enable_casee(void)
+{
+ /*
+ * Add hardware recommended delays between register writes for
+ * the updates to take effect. These delays are necessary for the
+ * PLL to successfully lock.
+ */
+ REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0d, mdss_dsi_base + 0x0220); /* GLB CFG */
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1000);
+
+ if (mdss_dsi_check_pll_lock())
+ pr_debug("%s: PLL Locked\n", __func__);
+ else
+ pr_debug("%s: PLL failed to lock\n", __func__);
}
static int __mdss_dsi_pll_enable(struct clk *c)
{
- u32 status;
- u32 max_reads, timeout_us;
- int i;
-
if (!pll_initialized) {
if (dsi_pll_rate)
__mdss_dsi_pll_byte_set_rate(c, dsi_pll_rate);
@@ -207,56 +318,45 @@
__func__);
}
+ /*
+ * Try all PLL power-up sequences one-by-one until
+ * PLL lock is detected
+ */
mdss_dsi_uniphy_pll_sw_reset();
- /* PLL power up */
- /* Add HW recommended delay between
- register writes for the update to propagate */
- REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(20);
- REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(100);
- REG_W(0x0d, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(20);
- REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(200);
+ mdss_dsi_pll_enable_casem();
+ if (pll_initialized)
+ goto pll_locked;
- for (i = 0; i < 3; i++) {
- mdss_dsi_uniphy_pll_lock_detect_setting();
- /* poll for PLL ready status */
- max_reads = 5;
- timeout_us = 100;
- if (readl_poll_timeout_noirq((mdss_dsi_base + 0x02c0),
- status,
- ((status & 0x01) == 1),
- max_reads, timeout_us)) {
- pr_debug("%s: DSI PLL status=%x failed to Lock\n",
- __func__, status);
- pr_debug("%s:Trying to power UP PLL again\n",
- __func__);
- } else
- break;
+ mdss_dsi_uniphy_pll_sw_reset();
+ mdss_dsi_pll_enable_cased();
+ if (pll_initialized)
+ goto pll_locked;
- mdss_dsi_uniphy_pll_sw_reset();
- udelay(1000);
- /* Add HW recommended delay between
- register writes for the update to propagate */
- REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(20);
- REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(100);
- REG_W(0x0d, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(20);
- REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(200);
- }
+ mdss_dsi_uniphy_pll_sw_reset();
+ mdss_dsi_pll_enable_cased();
+ if (pll_initialized)
+ goto pll_locked;
- if ((status & 0x01) != 1) {
- pr_err("%s: DSI PLL status=%x failed to Lock\n",
- __func__, status);
- return -EINVAL;
- }
+ mdss_dsi_uniphy_pll_sw_reset();
+ mdss_dsi_pll_enable_casef1();
+ if (pll_initialized)
+ goto pll_locked;
- pr_debug("%s: **** PLL Lock success\n", __func__);
+ mdss_dsi_uniphy_pll_sw_reset();
+ mdss_dsi_pll_enable_casec();
+ if (pll_initialized)
+ goto pll_locked;
+
+ mdss_dsi_uniphy_pll_sw_reset();
+ mdss_dsi_pll_enable_casee();
+ if (pll_initialized)
+ goto pll_locked;
+
+ pr_err("%s: DSI PLL failed to Lock\n", __func__);
+ return -EINVAL;
+
+pll_locked:
+ pr_debug("%s: PLL Lock success\n", __func__);
return 0;
}
@@ -264,7 +364,7 @@
static void __mdss_dsi_pll_disable(void)
{
writel_relaxed(0x00, mdss_dsi_base + 0x0220); /* GLB CFG */
- pr_debug("%s: **** disable pll Initialize\n", __func__);
+ pr_debug("%s: PLL disabled\n", __func__);
pll_initialized = 0;
}
@@ -305,13 +405,17 @@
/* todo: Adjust these values appropriately */
static enum handoff mdss_dsi_pll_byte_handoff(struct clk *c)
{
- if (mdss_gdsc_enabled() && mdss_dsi_check_pll_lock()) {
- c->rate = 59000000;
- dsi_pll_rate = 59000000;
- pll_byte_clk_rate = 59000000;
- pll_pclk_rate = 117000000;
- dsipll_refcount++;
- return HANDOFF_ENABLED_CLK;
+ if (mdss_gdsc_enabled()) {
+ clk_prepare_enable(mdss_dsi_ahb_clk);
+ if (mdss_dsi_check_pll_lock()) {
+ c->rate = 59000000;
+ dsi_pll_rate = 59000000;
+ pll_byte_clk_rate = 59000000;
+ pll_pclk_rate = 117000000;
+ dsipll_refcount++;
+ return HANDOFF_ENABLED_CLK;
+ }
+ clk_disable_unprepare(mdss_dsi_ahb_clk);
}
return HANDOFF_DISABLED_CLK;
@@ -320,10 +424,14 @@
/* todo: Adjust these values appropriately */
static enum handoff mdss_dsi_pll_pixel_handoff(struct clk *c)
{
- if (mdss_gdsc_enabled() && mdss_dsi_check_pll_lock()) {
- c->rate = 117000000;
- dsipll_refcount++;
- return HANDOFF_ENABLED_CLK;
+ if (mdss_gdsc_enabled()) {
+ clk_prepare_enable(mdss_dsi_ahb_clk);
+ if (mdss_dsi_check_pll_lock()) {
+ c->rate = 117000000;
+ dsipll_refcount++;
+ return HANDOFF_ENABLED_CLK;
+ }
+ clk_disable_unprepare(mdss_dsi_ahb_clk);
}
return HANDOFF_DISABLED_CLK;
diff --git a/arch/arm/mach-msm/clock-rpm.c b/arch/arm/mach-msm/clock-rpm.c
index ee91a34..3870e2b 100644
--- a/arch/arm/mach-msm/clock-rpm.c
+++ b/arch/arm/mach-msm/clock-rpm.c
@@ -53,12 +53,8 @@
if (rc < 0)
return rc;
- if (!r->branch) {
- r->last_set_khz = iv.value;
- if (!r->active_only)
- r->last_set_sleep_khz = iv.value;
+ if (!r->branch)
r->c.rate = iv.value * r->factor;
- }
return 0;
}
@@ -78,12 +74,8 @@
static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
{
- if (!r->branch) {
- r->last_set_khz = INT_MAX;
- if (!r->active_only)
- r->last_set_sleep_khz = INT_MAX;
- r->c.rate = 1 * r->factor;
- }
+ if (!r->branch)
+ r->c.rate = INT_MAX;
return 0;
}
@@ -113,6 +105,22 @@
static DEFINE_MUTEX(rpm_clock_lock);
+static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate,
+ unsigned long *active_khz, unsigned long *sleep_khz)
+{
+ /* Convert the rate (Hz) to kHz */
+ *active_khz = DIV_ROUND_UP(rate, r->factor);
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep_khz = 0;
+ else
+ *sleep_khz = *active_khz;
+}
+
static int rpm_clk_prepare(struct clk *clk)
{
struct rpm_clk *r = to_rpm_clk(clk);
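
Illustrative worked example, not part of this change: with factor = 1000 (the
kHz factor used by the RPM clock macros in clock-rpm.h), a 19.2 MHz request on
the helper above gives

	to_active_sleep_khz(r, 19200000, &active_khz, &sleep_khz):
		active_khz = DIV_ROUND_UP(19200000, 1000) = 19200
		sleep_khz  = 0      if r->active_only (no vote held while the system sleeps)
		sleep_khz  = 19200  otherwise

rpm_clk_prepare()/rpm_clk_set_rate() then send max(this, peer) of the active
values and, separately, of the sleep values to the RPM.
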
@@ -124,18 +132,16 @@
mutex_lock(&rpm_clock_lock);
- this_khz = r->last_set_khz;
+ to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);
+
/* Don't send requests to the RPM if the rate has not been set. */
if (this_khz == 0)
goto out;
- this_sleep_khz = r->last_set_sleep_khz;
-
/* Take peer clock's rate into account only if it's enabled. */
- if (peer->enabled) {
- peer_khz = peer->last_set_khz;
- peer_sleep_khz = peer->last_set_sleep_khz;
- }
+ if (peer->enabled)
+ to_active_sleep_khz(peer, peer->c.rate,
+ &peer_khz, &peer_sleep_khz);
value = max(this_khz, peer_khz);
if (r->branch)
@@ -171,17 +177,16 @@
mutex_lock(&rpm_clock_lock);
- if (r->last_set_khz) {
+ if (r->c.rate) {
uint32_t value;
struct rpm_clk *peer = r->peer;
unsigned long peer_khz = 0, peer_sleep_khz = 0;
int rc;
/* Take peer clock's rate into account only if it's enabled. */
- if (peer->enabled) {
- peer_khz = peer->last_set_khz;
- peer_sleep_khz = peer->last_set_sleep_khz;
- }
+ if (peer->enabled)
+ to_active_sleep_khz(peer, peer->c.rate,
+ &peer_khz, &peer_sleep_khz);
value = r->branch ? !!peer_khz : peer_khz;
rc = clk_rpmrs_set_rate_active(r, value);
@@ -204,27 +209,19 @@
unsigned long this_khz, this_sleep_khz;
int rc = 0;
- this_khz = DIV_ROUND_UP(rate, r->factor);
-
mutex_lock(&rpm_clock_lock);
- /* Active-only clocks don't care what the rate is during sleep. So,
- * they vote for zero. */
- if (r->active_only)
- this_sleep_khz = 0;
- else
- this_sleep_khz = this_khz;
-
if (r->enabled) {
uint32_t value;
struct rpm_clk *peer = r->peer;
unsigned long peer_khz = 0, peer_sleep_khz = 0;
+ to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);
+
/* Take peer clock's rate into account only if it's enabled. */
- if (peer->enabled) {
- peer_khz = peer->last_set_khz;
- peer_sleep_khz = peer->last_set_sleep_khz;
- }
+ if (peer->enabled)
+ to_active_sleep_khz(peer, peer->c.rate,
+ &peer_khz, &peer_sleep_khz);
value = max(this_khz, peer_khz);
rc = clk_rpmrs_set_rate_active(r, value);
@@ -234,10 +231,6 @@
value = max(this_sleep_khz, peer_sleep_khz);
rc = clk_rpmrs_set_rate_sleep(r, value);
}
- if (!rc) {
- r->last_set_khz = this_khz;
- r->last_set_sleep_khz = this_sleep_khz;
- }
out:
mutex_unlock(&rpm_clock_lock);
diff --git a/arch/arm/mach-msm/clock-rpm.h b/arch/arm/mach-msm/clock-rpm.h
index 8d328e3..b20c3d6 100644
--- a/arch/arm/mach-msm/clock-rpm.h
+++ b/arch/arm/mach-msm/clock-rpm.h
@@ -37,9 +37,6 @@
const int rpm_clk_id;
const int rpm_status_id;
const bool active_only;
- unsigned last_set_khz;
- /* 0 if active_only. Otherwise, same as last_set_khz. */
- unsigned last_set_sleep_khz;
bool enabled;
bool branch; /* true: RPM only accepts 1 for ON and 0 for OFF */
unsigned factor;
@@ -107,8 +104,6 @@
.rpm_status_id = (stat_id), \
.rpm_key = (key), \
.peer = &active, \
- .last_set_khz = ((r) / 1000), \
- .last_set_sleep_khz = ((r) / 1000), \
.factor = 1000, \
.branch = true, \
.rpmrs_data = (rpmrsdata),\
@@ -125,7 +120,6 @@
.rpm_status_id = (stat_id), \
.rpm_key = (key), \
.peer = &name, \
- .last_set_khz = ((r) / 1000), \
.active_only = true, \
.factor = 1000, \
.branch = true, \
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index ecd25fc..044fc2c 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -33,6 +33,12 @@
};
static LIST_HEAD(handoff_list);
+struct handoff_vdd {
+ struct list_head list;
+ struct clk_vdd_class *vdd_class;
+};
+static LIST_HEAD(handoff_vdd_list);
+
/* Find the voltage level required for a given rate. */
int find_vdd_level(struct clk *clk, unsigned long rate)
{
@@ -159,7 +165,7 @@
unvote_vdd_level(clk->vdd_class, level);
}
-/* Returns true if the rate is valid without voting for it */
+/* Check if the rate is within the voltage limits of the clock. */
static bool is_rate_valid(struct clk *clk, unsigned long rate)
{
int level;
@@ -171,6 +177,92 @@
return level >= 0;
}
+/**
+ * __clk_pre_reparent() - Set up the new parent before switching to it and
+ * prevent the enable state of the child clock from changing.
+ * @c: The child clock that's going to switch parents
+ * @new: The new parent that the child clock is going to switch to
+ * @flags: Pointer to scratch space to save spinlock flags
+ *
+ * Cannot be called from atomic context.
+ *
+ * Use this API to set up the @new parent clock to be able to support the
+ * current prepare and enable state of the child clock @c. Once the parent is
+ * set up, the child clock can safely switch to it.
+ *
+ * The caller shall grab the prepare_lock of clock @c before calling this API
+ * and only release it after calling __clk_post_reparent() for clock @c (or
+ * if this API fails). This is necessary to prevent the prepare state of the
+ * child clock @c from changing while the reparenting is in progress. Since
+ * this API takes care of grabbing the enable lock of @c, only atomic
+ * operation are allowed between calls to __clk_pre_reparent and
+ * __clk_post_reparent()
+ *
+ * The scratch space pointed to by @flags should not be altered before
+ * calling __clk_post_reparent() for clock @c.
+ *
+ * See also: __clk_post_reparent()
+ */
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags)
+{
+ int rc;
+
+ if (c->prepare_count) {
+ rc = clk_prepare(new);
+ if (rc)
+ return rc;
+ }
+
+ spin_lock_irqsave(&c->lock, *flags);
+ if (c->count) {
+ rc = clk_enable(new);
+ if (rc) {
+ spin_unlock_irqrestore(&c->lock, *flags);
+ clk_unprepare(new);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+/**
+ * __clk_post_reparent() - Release requirements on old parent after switching
+ * away from it and allow changes to the child clock's enable state.
+ * @c: The child clock that switched parents
+ * @old: The old parent that the child clock switched away from or the new
+ * parent of a failed reparent attempt.
+ * @flags: Pointer to scratch space where spinlock flags were saved
+ *
+ * Cannot be called from atomic context.
+ *
+ * This API works in tandem with __clk_pre_reparent. Use this API to
+ * - Remove prepare and enable requirements from the @old parent after
+ * switching away from it
+ * - Or, undo the effects of __clk_pre_reparent() after a failed attempt to
+ * change parents
+ *
+ * The caller shall release the prepare_lock of @c that was grabbed before
+ * calling __clk_pre_reparent() only after this API is called (or if
+ * __clk_pre_reparent() fails). This is necessary to prevent the prepare
+ * state of the child clock @c from changing while the reparenting is in
+ * progress. Since this API releases the enable lock of @c, the limit to
+ * atomic operations set by __clk_pre_reparent() is no longer present.
+ *
+ * The scratch space pointed to by @flags shall not be altered since the call
+ * to __clk_pre_reparent() for clock @c.
+ *
+ * See also: __clk_pre_reparent()
+ */
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags)
+{
+ if (c->count)
+ clk_disable(old);
+ spin_unlock_irqrestore(&c->lock, *flags);
+
+ if (c->prepare_count)
+ clk_unprepare(old);
+}
+
int clk_prepare(struct clk *clk)
{
int ret = 0;
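
Illustrative sketch, not part of this change: the calling convention described
by the __clk_pre_reparent()/__clk_post_reparent() kerneldoc above, as used from
a clock driver's set_parent op. clk_set_parent() in this patch already holds
the prepare_lock when the op runs; the mux-write helper below is hypothetical,
only the ordering is the point.

	static int example_mux_set_parent(struct clk *c, struct clk *new_parent)
	{
		struct clk *old_parent = c->parent;
		unsigned long flags;
		int rc;

		/* Prepare/enable the new parent and grab c's enable lock. */
		rc = __clk_pre_reparent(c, new_parent, &flags);
		if (rc)
			return rc;

		/* Only atomic work is allowed here: switch the hardware mux. */
		example_write_mux_select(c, new_parent);	/* hypothetical helper */

		/* Drop the enable lock and the votes held on the old parent. */
		__clk_post_reparent(c, old_parent, &flags);

		return 0;
	}
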
@@ -357,6 +449,9 @@
if (!clk->ops->set_rate)
return -ENOSYS;
+ if (!is_rate_valid(clk, rate))
+ return -EINVAL;
+
mutex_lock(&clk->prepare_lock);
/* Return early if the rate isn't going to change */
@@ -364,31 +459,32 @@
goto out;
trace_clock_set_rate(name, rate, raw_smp_processor_id());
+
+ start_rate = clk->rate;
+
+ /* Enforce vdd requirements for target frequency. */
if (clk->prepare_count) {
- start_rate = clk->rate;
- /* Enforce vdd requirements for target frequency. */
rc = vote_rate_vdd(clk, rate);
if (rc)
goto out;
- rc = clk->ops->set_rate(clk, rate);
- if (rc)
- goto err_set_rate;
- /* Release vdd requirements for starting frequency. */
- unvote_rate_vdd(clk, start_rate);
- } else if (is_rate_valid(clk, rate)) {
- rc = clk->ops->set_rate(clk, rate);
- } else {
- rc = -EINVAL;
}
- if (!rc)
- clk->rate = rate;
+ rc = clk->ops->set_rate(clk, rate);
+ if (rc)
+ goto err_set_rate;
+ clk->rate = rate;
+
+ /* Release vdd requirements for starting frequency. */
+ if (clk->prepare_count)
+ unvote_rate_vdd(clk, start_rate);
+
out:
mutex_unlock(&clk->prepare_lock);
return rc;
err_set_rate:
- unvote_rate_vdd(clk, rate);
+ if (clk->prepare_count)
+ unvote_rate_vdd(clk, rate);
goto out;
}
EXPORT_SYMBOL(clk_set_rate);
@@ -419,10 +515,21 @@
int clk_set_parent(struct clk *clk, struct clk *parent)
{
- if (!clk->ops->set_parent)
- return 0;
+ int rc = 0;
- return clk->ops->set_parent(clk, parent);
+ if (!clk->ops->set_parent)
+ return -ENOSYS;
+
+ mutex_lock(&clk->prepare_lock);
+ if (clk->parent == parent)
+ goto out;
+ rc = clk->ops->set_parent(clk, parent);
+ if (!rc)
+ clk->parent = parent;
+out:
+ mutex_unlock(&clk->prepare_lock);
+
+ return rc;
}
EXPORT_SYMBOL(clk_set_parent);
@@ -487,6 +594,38 @@
}
EXPORT_SYMBOL(msm_clock_register);
+
+static void vdd_class_init(struct clk_vdd_class *vdd)
+{
+ struct handoff_vdd *v;
+ int i;
+
+ if (!vdd)
+ return;
+
+ list_for_each_entry(v, &handoff_vdd_list, list) {
+ if (v->vdd_class == vdd)
+ return;
+ }
+
+ pr_debug("voting for vdd_class %s\n", vdd->class_name);
+ if (vote_vdd_level(vdd, vdd->num_levels - 1))
+ pr_err("failed to vote for %s\n", vdd->class_name);
+
+ for (i = 0; i < vdd->num_regulators; i++)
+ regulator_enable(vdd->regulator[i]);
+
+ v = kmalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ pr_err("Unable to kmalloc. %s will be stuck at max.\n",
+ vdd->class_name);
+ return;
+ }
+
+ v->vdd_class = vdd;
+ list_add_tail(&v->list, &handoff_vdd_list);
+}
+
static int __init __handoff_clk(struct clk *clk)
{
enum handoff state = HANDOFF_DISABLED_CLK;
@@ -592,6 +731,16 @@
init_sibling_lists(clock_tbl, num_clocks);
/*
+ * Enable regulators and temporarily set them up at maximum voltage.
+ * Once all the clocks have made their respective vote, this
+ * temporary vote is removed. The removal happens at late_init,
+ * by which time we assume all the clocks would have been
+ * handed off.
+ */
+ for (n = 0; n < num_clocks; n++)
+ vdd_class_init(clock_tbl[n].clk->vdd_class);
+
+ /*
* Detect and preserve initial clock state until clock_late_init() or
* a driver explicitly changes it, whichever is first.
*/
@@ -612,8 +761,12 @@
static int __init clock_late_init(void)
{
struct handoff_clk *h, *h_temp;
+ struct handoff_vdd *v, *v_temp;
int ret = 0;
+ if (clk_init_data->late_init)
+ ret = clk_init_data->late_init();
+
pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
clk_disable_unprepare(h->clk);
@@ -621,8 +774,12 @@
kfree(h);
}
- if (clk_init_data->late_init)
- ret = clk_init_data->late_init();
+ list_for_each_entry_safe(v, v_temp, &handoff_vdd_list, list) {
+ unvote_vdd_level(v->vdd_class, v->vdd_class->num_levels - 1);
+ list_del(&v->list);
+ kfree(v);
+ }
+
return ret;
}
late_initcall(clock_late_init);
diff --git a/arch/arm/mach-msm/cpr-regulator.c b/arch/arm/mach-msm/cpr-regulator.c
index 4e95e4e..08923e4 100644
--- a/arch/arm/mach-msm/cpr-regulator.c
+++ b/arch/arm/mach-msm/cpr-regulator.c
@@ -43,10 +43,16 @@
/* Process voltage variables */
u32 pvs_bin;
u32 pvs_process;
- u32 *process_vmax;
+ u32 *corner_ceiling;
/* APC voltage regulator */
struct regulator *vdd_apc;
+
+ /* Dependency parameters */
+ struct regulator *vdd_mx;
+ int vdd_mx_vmax;
+ int vdd_mx_vmin_method;
+ int vdd_mx_vmin;
};
static int cpr_regulator_is_enabled(struct regulator_dev *rdev)
@@ -59,11 +65,23 @@
static int cpr_regulator_enable(struct regulator_dev *rdev)
{
struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
- int rc;
+ int rc = 0;
+
+ /* Enable dependency power before vdd_apc */
+ if (cpr_vreg->vdd_mx) {
+ rc = regulator_enable(cpr_vreg->vdd_mx);
+ if (rc) {
+ pr_err("regulator_enable: vdd_mx: rc=%d\n", rc);
+ return rc;
+ }
+ }
rc = regulator_enable(cpr_vreg->vdd_apc);
if (!rc)
cpr_vreg->enabled = true;
+ else
+ pr_err("regulator_enable: vdd_apc: rc=%d\n", rc);
+
return rc;
}
@@ -73,8 +91,18 @@
int rc;
rc = regulator_disable(cpr_vreg->vdd_apc);
- if (!rc)
- cpr_vreg->enabled = false;
+ if (!rc) {
+ if (cpr_vreg->vdd_mx)
+ rc = regulator_disable(cpr_vreg->vdd_mx);
+
+ if (rc)
+ pr_err("regulator_disable: vdd_mx: rc=%d\n", rc);
+ else
+ cpr_vreg->enabled = false;
+ } else {
+ pr_err("regulator_disable: vdd_apc: rc=%d\n", rc);
+ }
+
return rc;
}
@@ -83,14 +111,84 @@
{
struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
int rc;
- int vdd_apc_min, vdd_apc_max;
+ int vdd_apc_min, vdd_apc_max, vdd_mx_vmin = 0;
+ int change_dir = 0;
- vdd_apc_min = cpr_vreg->process_vmax[min_uV];
- vdd_apc_max = cpr_vreg->process_vmax[CPR_CORNER_SUPER_TURBO];
+ if (cpr_vreg->vdd_mx) {
+ if (min_uV > cpr_vreg->corner)
+ change_dir = 1;
+ else if (min_uV < cpr_vreg->corner)
+ change_dir = -1;
+ }
+
+ vdd_apc_min = cpr_vreg->corner_ceiling[min_uV];
+ vdd_apc_max = cpr_vreg->corner_ceiling[CPR_CORNER_SUPER_TURBO];
+
+ if (change_dir) {
+ /* Determine the vdd_mx voltage */
+ switch (cpr_vreg->vdd_mx_vmin_method) {
+ case VDD_MX_VMIN_APC:
+ vdd_mx_vmin = vdd_apc_min;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_CEILING:
+ vdd_mx_vmin = vdd_apc_min;
+ break;
+ case VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ vdd_mx_vmin = cpr_vreg->pvs_corner_ceiling
+ [APC_PVS_SLOW][min_uV];
+ break;
+ case VDD_MX_VMIN_MX_VMAX:
+ default:
+ vdd_mx_vmin = cpr_vreg->vdd_mx_vmax;
+ break;
+ }
+ }
+
+ if (change_dir > 0) {
+ if (vdd_mx_vmin < cpr_vreg->vdd_mx_vmin) {
+ /* Check and report the value in case */
+ pr_err("Up: but new %d < old %d uV\n", vdd_mx_vmin,
+ cpr_vreg->vdd_mx_vmin);
+ }
+
+ rc = regulator_set_voltage(cpr_vreg->vdd_mx, vdd_mx_vmin,
+ cpr_vreg->vdd_mx_vmax);
+ if (!rc) {
+ cpr_vreg->vdd_mx_vmin = vdd_mx_vmin;
+ } else {
+ pr_err("set: vdd_mx [%d] = %d uV: rc=%d\n",
+ min_uV, vdd_mx_vmin, rc);
+ return rc;
+ }
+ }
+
rc = regulator_set_voltage(cpr_vreg->vdd_apc,
vdd_apc_min, vdd_apc_max);
- if (!rc)
+ if (!rc) {
cpr_vreg->corner = min_uV;
+ } else {
+ pr_err("set: vdd_apc [%d] = %d uV: rc=%d\n",
+ min_uV, vdd_apc_min, rc);
+ return rc;
+ }
+
+ if (change_dir < 0) {
+ if (vdd_mx_vmin > cpr_vreg->vdd_mx_vmin) {
+ /* Check and report the value in case */
+ pr_err("Down: but new %d >= old %d uV\n", vdd_mx_vmin,
+ cpr_vreg->vdd_mx_vmin);
+ }
+
+ rc = regulator_set_voltage(cpr_vreg->vdd_mx, vdd_mx_vmin,
+ cpr_vreg->vdd_mx_vmax);
+ if (!rc) {
+ cpr_vreg->vdd_mx_vmin = vdd_mx_vmin;
+ } else {
+ pr_err("set: vdd_mx [%d] = %d uV: rc=%d\n",
+ min_uV, vdd_mx_vmin, rc);
+ return rc;
+ }
+ }
pr_debug("set [corner:%d] = %d uV: rc=%d\n", min_uV, vdd_apc_min, rc);
return rc;
@@ -146,7 +244,7 @@
= cpr_vreg->pvs_corner_ceiling[APC_PVS_SLOW]
[CPR_CORNER_SUPER_TURBO];
- cpr_vreg->process_vmax =
+ cpr_vreg->corner_ceiling =
cpr_vreg->pvs_corner_ceiling[cpr_vreg->pvs_process];
iounmap(efuse_base);
@@ -162,19 +260,62 @@
static int __init cpr_regulator_apc_init(struct platform_device *pdev,
struct cpr_regulator *cpr_vreg)
{
+ struct device_node *of_node = pdev->dev.of_node;
+ int rc;
+
cpr_vreg->vdd_apc = devm_regulator_get(&pdev->dev, "vdd-apc");
if (IS_ERR_OR_NULL(cpr_vreg->vdd_apc)) {
- pr_err("devm_regulator_get: rc=%d\n",
- (int)PTR_ERR(cpr_vreg->vdd_apc));
+ rc = PTR_RET(cpr_vreg->vdd_apc);
+ if (rc != -EPROBE_DEFER)
+ pr_err("devm_regulator_get: rc=%d\n", rc);
+ return rc;
}
- return PTR_RET(cpr_vreg->vdd_apc);
+ /* Check dependencies */
+ if (of_property_read_bool(of_node, "vdd-mx-supply")) {
+ cpr_vreg->vdd_mx = devm_regulator_get(&pdev->dev, "vdd-mx");
+ if (IS_ERR_OR_NULL(cpr_vreg->vdd_mx)) {
+ rc = PTR_RET(cpr_vreg->vdd_mx);
+ if (rc != -EPROBE_DEFER)
+ pr_err("devm_regulator_get: vdd-mx: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* Parse dependency parameters */
+ if (cpr_vreg->vdd_mx) {
+ rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmax",
+ &cpr_vreg->vdd_mx_vmax);
+ if (rc < 0) {
+ pr_err("vdd-mx-vmax missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmin-method",
+ &cpr_vreg->vdd_mx_vmin_method);
+ if (rc < 0) {
+ pr_err("vdd-mx-vmin-method missing: rc=%d\n", rc);
+ return rc;
+ }
+ if (cpr_vreg->vdd_mx_vmin_method > VDD_MX_VMIN_MX_VMAX) {
+ pr_err("Invalid vdd-mx-vmin-method(%d)\n",
+ cpr_vreg->vdd_mx_vmin_method);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
static void cpr_regulator_apc_exit(struct cpr_regulator *cpr_vreg)
{
- if (cpr_vreg->enabled)
+ if (cpr_vreg->enabled) {
regulator_disable(cpr_vreg->vdd_apc);
+
+ if (cpr_vreg->vdd_mx)
+ regulator_disable(cpr_vreg->vdd_mx);
+ }
}
static int __init cpr_regulator_parse_dt(struct platform_device *pdev,
@@ -323,10 +464,10 @@
platform_set_drvdata(pdev, cpr_vreg);
pr_info("PVS [%d %d %d %d] uV\n",
- cpr_vreg->process_vmax[CPR_CORNER_SVS],
- cpr_vreg->process_vmax[CPR_CORNER_NORMAL],
- cpr_vreg->process_vmax[CPR_CORNER_TURBO],
- cpr_vreg->process_vmax[CPR_CORNER_SUPER_TURBO]);
+ cpr_vreg->corner_ceiling[CPR_CORNER_SVS],
+ cpr_vreg->corner_ceiling[CPR_CORNER_NORMAL],
+ cpr_vreg->corner_ceiling[CPR_CORNER_TURBO],
+ cpr_vreg->corner_ceiling[CPR_CORNER_SUPER_TURBO]);
return 0;
}
diff --git a/arch/arm/mach-msm/ebi_erp.c b/arch/arm/mach-msm/ebi_erp.c
index eb38101..6b300d8 100644
--- a/arch/arm/mach-msm/ebi_erp.c
+++ b/arch/arm/mach-msm/ebi_erp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,9 @@
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/cpu.h>
+#include <mach/usb_trace.h>
+
+DEFINE_TRACE(usb_daytona_invalid_access);
#define MODULE_NAME "msm_ebi_erp"
@@ -113,6 +116,11 @@
err_cntl |= CNTL_CLEAR_ERR;
writel_relaxed(err_cntl, base + SLV_ERR_CNTL);
mb(); /* Ensure interrupt is cleared before returning */
+
+ if ((err_apacket0 & AMID_MASK) == 0x00000102)
+ trace_usb_daytona_invalid_access(err_addr, err_apacket0,
+ err_apacket1);
+
return IRQ_HANDLED;
}
diff --git a/arch/arm/mach-msm/footswitch-8x60.c b/arch/arm/mach-msm/footswitch-8x60.c
index d5fe866..76ad9b8 100644
--- a/arch/arm/mach-msm/footswitch-8x60.c
+++ b/arch/arm/mach-msm/footswitch-8x60.c
@@ -206,7 +206,7 @@
}
/* Prevent core memory from collapsing when its clock is gated. */
- clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);
+ clk_set_flags(fs->core_clk, CLKFLAG_RETAIN_MEM);
/* Return clocks to their state before this function. */
restore_clocks(fs);
@@ -238,7 +238,7 @@
return rc;
/* Allow core memory to collapse when its clock is gated. */
- clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN);
+ clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN_MEM);
/* Halt all bus ports in the power domain. */
if (fs->bus_port0) {
@@ -292,7 +292,7 @@
err_port2_halt:
msm_bus_axi_portunhalt(fs->bus_port0);
err:
- clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);
+ clk_set_flags(fs->core_clk, CLKFLAG_RETAIN_MEM);
restore_clocks(fs);
return rc;
}
@@ -360,7 +360,7 @@
clk_prepare_enable(fs->core_clk);
/* Prevent core memory from collapsing when its clock is gated. */
- clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);
+ clk_set_flags(fs->core_clk, CLKFLAG_RETAIN_MEM);
/* Return clocks to their state before this function. */
restore_clocks(fs);
@@ -390,7 +390,7 @@
return rc;
/* Allow core memory to collapse when its clock is gated. */
- clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN);
+ clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN_MEM);
/* Halt all bus ports in the power domain. */
if (fs->bus_port0) {
@@ -436,7 +436,7 @@
return 0;
err:
- clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);
+ clk_set_flags(fs->core_clk, CLKFLAG_RETAIN_MEM);
restore_clocks(fs);
return rc;
}
diff --git a/arch/arm/mach-msm/gdsc.c b/arch/arm/mach-msm/gdsc.c
index e5b9d93..6665d66 100644
--- a/arch/arm/mach-msm/gdsc.c
+++ b/arch/arm/mach-msm/gdsc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,8 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
+#include <linux/clk.h>
+#include <mach/clk.h>
#define PWR_ON_MASK BIT(31)
#define EN_REST_WAIT_MASK (0xF << 20)
@@ -42,6 +44,7 @@
struct regulator_dev *rdev;
struct regulator_desc rdesc;
void __iomem *gdscr;
+ struct clk *core_clk;
};
static int gdsc_is_enabled(struct regulator_dev *rdev)
@@ -108,6 +111,7 @@
struct resource *res;
struct gdsc *sc;
uint32_t regval;
+ bool retain_mems;
int ret;
sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
@@ -151,6 +155,16 @@
regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
writel_relaxed(regval, sc->gdscr);
+ retain_mems = of_property_read_bool(pdev->dev.of_node,
+ "qcom,retain-mems");
+ if (retain_mems) {
+ sc->core_clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(sc->core_clk))
+ return PTR_ERR(sc->core_clk);
+ clk_set_flags(sc->core_clk, CLKFLAG_RETAIN_MEM);
+ clk_set_flags(sc->core_clk, CLKFLAG_RETAIN_PERIPH);
+ }
+
sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
pdev->dev.of_node);
if (IS_ERR(sc->rdev)) {
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 35257b2..22f74c8 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -600,7 +600,8 @@
void msm_map_msm7x30_io(void);
void msm_map_fsm9xxx_io(void);
void msm_map_8974_io(void);
-void msm_map_zinc_io(void);
+void msm_map_8084_io(void);
+void msm_map_msmkrypton_io(void);
void msm_map_msm8625_io(void);
void msm_map_msm9625_io(void);
void msm_init_irq(void);
@@ -609,8 +610,9 @@
void msm_8974_reserve(void);
void msm_8974_very_early(void);
void msm_8974_init_gpiomux(void);
-void msmzinc_init_gpiomux(void);
+void apq8084_init_gpiomux(void);
void msm9625_init_gpiomux(void);
+void msmkrypton_init_gpiomux(void);
void msm_map_mpq8092_io(void);
void mpq8092_init_gpiomux(void);
void msm_map_msm8226_io(void);
@@ -651,7 +653,7 @@
void msm_snddev_tx_route_config(void);
void msm_snddev_tx_route_deconfig(void);
-extern unsigned int msm_shared_ram_phys; /* defined in arch/arm/mach-msm/io.c */
+extern phys_addr_t msm_shared_ram_phys; /* defined in arch/arm/mach-msm/io.c */
#endif
diff --git a/arch/arm/mach-msm/include/mach/clk-provider.h b/arch/arm/mach-msm/include/mach/clk-provider.h
index 528e9d5..2a33228 100644
--- a/arch/arm/mach-msm/include/mach/clk-provider.h
+++ b/arch/arm/mach-msm/include/mach/clk-provider.h
@@ -156,6 +156,8 @@
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level);
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level);
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags);
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags);
/* Register clocks with the MSM clock driver */
int msm_clock_register(struct clk_lookup *table, size_t size);
diff --git a/arch/arm/mach-msm/include/mach/clk.h b/arch/arm/mach-msm/include/mach/clk.h
index 1191bb7..1809456 100644
--- a/arch/arm/mach-msm/include/mach/clk.h
+++ b/arch/arm/mach-msm/include/mach/clk.h
@@ -16,8 +16,10 @@
#define CLKFLAG_NOINVERT 0x00000002
#define CLKFLAG_NONEST 0x00000004
#define CLKFLAG_NORESET 0x00000008
-#define CLKFLAG_RETAIN 0x00000040
-#define CLKFLAG_NORETAIN 0x00000080
+#define CLKFLAG_RETAIN_PERIPH 0x00000010
+#define CLKFLAG_NORETAIN_PERIPH 0x00000020
+#define CLKFLAG_RETAIN_MEM 0x00000040
+#define CLKFLAG_NORETAIN_MEM 0x00000080
#define CLKFLAG_SKIP_HANDOFF 0x00000100
#define CLKFLAG_MIN 0x00000400
#define CLKFLAG_MAX 0x00000800
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index f750dc8..23d204a 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -129,6 +129,7 @@
* @iommu_power_off: Turn off power to unit
* @iommu_clk_on: Turn on clks to unit
* @iommu_clk_off: Turn off clks to unit
+ * @iommu_lock_initialize: Initialize the remote lock
* @iommu_lock_acquire: Acquire any locks needed
* @iommu_lock_release: Release locks needed
*/
@@ -137,6 +138,7 @@
void (*iommu_power_off)(struct msm_iommu_drvdata *);
int (*iommu_clk_on)(struct msm_iommu_drvdata *);
void (*iommu_clk_off)(struct msm_iommu_drvdata *);
+ void * (*iommu_lock_initialize)(void);
void (*iommu_lock_acquire)(void);
void (*iommu_lock_release)(void);
};
diff --git a/arch/arm/mach-msm/include/mach/iommu_perfmon.h b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
index dcae83b..dc4671c 100644
--- a/arch/arm/mach-msm/include/mach/iommu_perfmon.h
+++ b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
@@ -63,6 +63,7 @@
* @iommu_dev: pointer to iommu device
* @ops: iommu access operations pointer.
* @hw_ops: iommu pm hw access operations pointer.
+ * @always_on: 1 if iommu is always on, 0 otherwise.
*/
struct iommu_info {
const char *iommu_name;
@@ -71,6 +72,7 @@
struct device *iommu_dev;
struct iommu_access_ops *ops;
struct iommu_pm_hw_ops *hw_ops;
+ unsigned int always_on;
};
/**
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index cc03c48..90757b6 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -557,17 +557,6 @@
int ipa_set_single_ndp_per_mbim(bool enable);
/*
- * rmnet bridge
- */
-int rmnet_bridge_init(void);
-
-int rmnet_bridge_disconnect(void);
-
-int rmnet_bridge_connect(u32 producer_hdl,
- u32 consumer_hdl,
- int wwan_logical_channel_id);
-
-/*
* SW bridge (between IPA and A2)
*/
int ipa_bridge_setup(enum ipa_bridge_dir dir, enum ipa_bridge_type type,
@@ -917,26 +906,6 @@
}
/*
- * rmnet bridge
- */
-static inline int rmnet_bridge_init(void)
-{
- return -EPERM;
-}
-
-static inline int rmnet_bridge_disconnect(void)
-{
- return -EPERM;
-}
-
-static inline int rmnet_bridge_connect(u32 producer_hdl,
- u32 consumer_hdl,
- int wwan_logical_channel_id)
-{
- return -EPERM;
-}
-
-/*
* SW bridge (between IPA and A2)
*/
static inline int ipa_bridge_setup(enum ipa_bridge_dir dir,
diff --git a/arch/arm/mach-msm/include/mach/kgsl.h b/arch/arm/mach-msm/include/mach/kgsl.h
index b68aff8..349dbe7 100644
--- a/arch/arm/mach-msm/include/mach/kgsl.h
+++ b/arch/arm/mach-msm/include/mach/kgsl.h
@@ -20,6 +20,7 @@
#define KGSL_CLK_MEM 0x00000008
#define KGSL_CLK_MEM_IFACE 0x00000010
#define KGSL_CLK_AXI 0x00000020
+#define KGSL_CLK_ALT_MEM_IFACE 0x00000040
#define KGSL_MAX_PWRLEVELS 10
@@ -50,9 +51,19 @@
enum kgsl_iommu_context_id ctx_id;
};
+/*
+ * struct kgsl_device_iommu_data - Struct holding iommu context data obtained
+ * from dtsi file
+ * @iommu_ctxs: Pointer to array of structs holding context name and id
+ * @iommu_ctx_count: Number of contexts defined in the dtsi file
+ * @iommu_halt_enable: Indicates if the smmu halt h/w feature is supported
+ * @physstart: Start of iommu registers physical address
+ * @physend: End of iommu registers physical address
+ */
struct kgsl_device_iommu_data {
const struct kgsl_iommu_ctx *iommu_ctxs;
int iommu_ctx_count;
+ int iommu_halt_enable;
unsigned int physstart;
unsigned int physend;
};
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index 56c4afd..6119a3c 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -70,7 +70,7 @@
#ifndef __ASSEMBLY__
void *allocate_contiguous_ebi(unsigned long, unsigned long, int);
-unsigned long allocate_contiguous_ebi_nomap(unsigned long, unsigned long);
+phys_addr_t allocate_contiguous_ebi_nomap(unsigned long, unsigned long);
void clean_and_invalidate_caches(unsigned long, unsigned long, unsigned long);
void clean_caches(unsigned long, unsigned long, unsigned long);
void invalidate_caches(unsigned long, unsigned long, unsigned long);
diff --git a/arch/arm/mach-msm/include/mach/msm_bus.h b/arch/arm/mach-msm/include/mach/msm_bus.h
index 049cf02..ebc43da 100644
--- a/arch/arm/mach-msm/include/mach/msm_bus.h
+++ b/arch/arm/mach-msm/include/mach/msm_bus.h
@@ -112,6 +112,8 @@
#endif
#if defined(CONFIG_OF) && defined(CONFIG_MSM_BUS_SCALING)
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node);
struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
#else
@@ -121,6 +123,12 @@
return NULL;
}
+static inline struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node)
+{
+ return NULL;
+}
+
static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
{
}
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-zinc.h b/arch/arm/mach-msm/include/mach/msm_iomap-8084.h
similarity index 74%
rename from arch/arm/mach-msm/include/mach/msm_iomap-zinc.h
rename to arch/arm/mach-msm/include/mach/msm_iomap-8084.h
index 0a33055..43f1de0 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-zinc.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8084.h
@@ -11,8 +11,8 @@
*/
-#ifndef __ASM_ARCH_MSM_IOMAP_zinc_H
-#define __ASM_ARCH_MSM_IOMAP_zinc_H
+#ifndef __ASM_ARCH_MSM_IOMAP_8084_H
+#define __ASM_ARCH_MSM_IOMAP_8084_H
/* Physical base address and size of peripherals.
* Ordered by the virtual base addresses they will be mapped at.
@@ -23,15 +23,15 @@
*
*/
-#define MSMZINC_SHARED_RAM_PHYS 0x0FA00000
+#define APQ8084_SHARED_RAM_PHYS 0x0FA00000
-#define MSMZINC_QGIC_DIST_PHYS 0xF9000000
-#define MSMZINC_QGIC_DIST_SIZE SZ_4K
+#define APQ8084_QGIC_DIST_PHYS 0xF9000000
+#define APQ8084_QGIC_DIST_SIZE SZ_4K
-#define MSMZINC_TLMM_PHYS 0xFD510000
-#define MSMZINC_TLMM_SIZE SZ_16K
+#define APQ8084_TLMM_PHYS 0xFD510000
+#define APQ8084_TLMM_SIZE SZ_16K
-#ifdef CONFIG_DEBUG_MSMZINC_UART
+#ifdef CONFIG_DEBUG_APQ8084_UART
#define MSM_DEBUG_UART_BASE IOMEM(0xFA71E000)
#define MSM_DEBUG_UART_PHYS 0xF991E000
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8610.h b/arch/arm/mach-msm/include/mach/msm_iomap-8610.h
index b07ddba..2a62460 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8610.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8610.h
@@ -22,7 +22,7 @@
*
*/
-#define MSM8610_MSM_SHARED_RAM_PHYS 0x0D600000
+#define MSM8610_MSM_SHARED_RAM_PHYS 0x0D900000
#define MSM8610_APCS_GCC_PHYS 0xF9011000
#define MSM8610_APCS_GCC_SIZE SZ_4K
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-9625.h b/arch/arm/mach-msm/include/mach/msm_iomap-9625.h
index 9a8bfc1..31b19b3 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-9625.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-9625.h
@@ -38,8 +38,8 @@
#define MSM9625_MPM2_PSHOLD_SIZE SZ_4K
#ifdef CONFIG_DEBUG_MSM9625_UART
-#define MSM_DEBUG_UART_BASE IOMEM(0xFA71E000)
-#define MSM_DEBUG_UART_PHYS 0xF991E000
+#define MSM_DEBUG_UART_BASE IOMEM(0xFA71F000)
+#define MSM_DEBUG_UART_PHYS 0xF991F000
#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-krypton.h b/arch/arm/mach-msm/include/mach/msm_iomap-krypton.h
new file mode 100644
index 0000000..a8b9da5
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-krypton.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_IOMAP_MSMKRYPTON_H
+#define __ASM_ARCH_MSM_IOMAP_MSMKRYPTON_H
+
+/* Physical base address and size of peripherals.
+ * Ordered by the virtual base addresses they will be mapped at.
+ *
+ * If you add or remove entries here, you'll want to edit the
+ * io desc array in arch/arm/mach-msm/io.c to reflect your
+ * changes.
+ *
+ */
+
+#define MSMKRYPTON_SHARED_RAM_PHYS 0x00000000
+
+#define MSMKRYPTON_TLMM_PHYS 0xFD510000
+#define MSMKRYPTON_TLMM_SIZE SZ_16K
+
+#define MSMKRYPTON_MPM2_PSHOLD_PHYS 0xFC4AB000
+#define MSMKRYPTON_MPM2_PSHOLD_SIZE SZ_4K
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index d3706cd..a90e78a 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -129,11 +129,12 @@
#include "msm_iomap-8064.h"
#include "msm_iomap-9615.h"
#include "msm_iomap-8974.h"
-#include "msm_iomap-zinc.h"
+#include "msm_iomap-8084.h"
#include "msm_iomap-9625.h"
#include "msm_iomap-8092.h"
#include "msm_iomap-8226.h"
#include "msm_iomap-8610.h"
+#include "msm_iomap-krypton.h"
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_memtypes.h b/arch/arm/mach-msm/include/mach/msm_memtypes.h
index 264dad5..3bf05e6 100644
--- a/arch/arm/mach-msm/include/mach/msm_memtypes.h
+++ b/arch/arm/mach-msm/include/mach/msm_memtypes.h
@@ -45,9 +45,9 @@
#define MEMTYPE_FLAGS_1M_ALIGN 0x2
struct memtype_reserve {
- unsigned long start;
- unsigned long size;
- unsigned long limit;
+ phys_addr_t start;
+ phys_addr_t size;
+ phys_addr_t limit;
int flags;
};
@@ -55,7 +55,7 @@
struct memtype_reserve *memtype_reserve_table;
void (*calculate_reserve_sizes)(void);
void (*reserve_fixed_area)(unsigned long);
- int (*paddr_to_memtype)(unsigned int);
+ int (*paddr_to_memtype)(phys_addr_t);
unsigned long low_unstable_address;
unsigned long max_unstable_size;
unsigned long bank_size;
diff --git a/arch/arm/mach-msm/include/mach/msm_pcie.h b/arch/arm/mach-msm/include/mach/msm_pcie.h
index 790a390..99d1a4d 100644
--- a/arch/arm/mach-msm/include/mach/msm_pcie.h
+++ b/arch/arm/mach-msm/include/mach/msm_pcie.h
@@ -37,6 +37,8 @@
uint32_t axi_size;
uint32_t wake_n;
uint32_t vreg_n;
+ uint32_t parf_deemph;
+ uint32_t parf_swing;
};
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_serial_hs.h b/arch/arm/mach-msm/include/mach/msm_serial_hs.h
index dd53911..e6b677e 100644
--- a/arch/arm/mach-msm/include/mach/msm_serial_hs.h
+++ b/arch/arm/mach-msm/include/mach/msm_serial_hs.h
@@ -51,6 +51,7 @@
unsigned int msm_hs_tx_empty(struct uart_port *uport);
void msm_hs_request_clock_off(struct uart_port *uport);
void msm_hs_request_clock_on(struct uart_port *uport);
+struct uart_port *msm_hs_get_uart_port(int port_index);
void msm_hs_set_mctrl(struct uart_port *uport,
unsigned int mctrl);
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
index a8c7bb7..d155c6f 100644
--- a/arch/arm/mach-msm/include/mach/msm_smd.h
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -279,6 +279,17 @@
*/
int smd_write_end(smd_channel_t *ch);
+/**
+ * smd_write_segment_avail() - available write space for packet transactions
+ * @ch: channel to write packet to
+ * @returns: number of bytes available to write, or -ENODEV for an invalid ch
+ *
+ * This is a version of smd_write_avail() intended for use with packet
+ * transactions. This version correctly accounts for any internal reserved
+ * space at all stages of the transaction.
+ */
+int smd_write_segment_avail(smd_channel_t *ch);
+
/*
* Returns a pointer to the subsystem name or NULL if no
* subsystem name is available.
@@ -441,6 +452,11 @@
return -ENODEV;
}
+static inline int smd_write_segment_avail(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
static inline const char *smd_edge_to_subsystem(uint32_t type)
{
return NULL;
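
Illustrative sketch, not part of this change: how smd_write_segment_avail() is
meant to be used inside a packet write transaction. smd_write_start() and
smd_write_segment() are assumed to keep the signatures they already have
elsewhere in this tree; they are not shown in this hunk.

	static int example_smd_send_packet(smd_channel_t *ch, void *buf, int len)
	{
		int written = 0;
		int rc = smd_write_start(ch, len);	/* assumed existing helper */

		if (rc < 0)
			return rc;

		while (written < len) {
			int avail = smd_write_segment_avail(ch);

			if (avail < 0)
				return avail;	/* -ENODEV: channel is gone */
			if (!avail)
				continue;	/* a real caller would sleep or wait for an event */

			rc = smd_write_segment(ch, (char *)buf + written,	/* assumed existing helper */
					min(avail, len - written), 0);
			if (rc < 0)
				return rc;
			written += rc;
		}

		return smd_write_end(ch);
	}
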
diff --git a/arch/arm/mach-msm/include/mach/msm_smsm.h b/arch/arm/mach-msm/include/mach/msm_smsm.h
index d983ce5..81a6399 100644
--- a/arch/arm/mach-msm/include/mach/msm_smsm.h
+++ b/arch/arm/mach-msm/include/mach/msm_smsm.h
@@ -256,6 +256,18 @@
int smsm_check_for_modem_crash(void);
void *smem_find(unsigned id, unsigned size);
void *smem_get_entry(unsigned id, unsigned *size);
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Virtual address returned by smem_alloc()/smem_alloc2()
+ * @returns: Physical address (or 0 if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address);
+
#else
static inline void *smem_alloc(unsigned id, unsigned size)
{
@@ -339,5 +351,9 @@
{
return NULL;
}
+static inline phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+ return 0;
+}
#endif
#endif
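
Illustrative sketch, not part of this change: the intended use of
smem_virt_to_phys() declared above, converting an SMEM item's virtual address
into a physical address that can be programmed into a DMA descriptor. The item
id and size are the caller's; nothing here is specific to this patch.

	static int example_smem_addr_for_dma(unsigned id, unsigned size,
			phys_addr_t *paddr_out)
	{
		void *vaddr = smem_alloc(id, size);	/* or smem_alloc2() */
		phys_addr_t paddr;

		if (!vaddr)
			return -ENOMEM;

		paddr = smem_virt_to_phys(vaddr);
		if (!paddr)
			return -EINVAL;

		/* paddr may now be handed to a DMA engine. */
		*paddr_out = paddr;
		return 0;
	}
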
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index abb5653..8539dcc 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -85,6 +85,12 @@
atomic_long_t z_stat[NR_OCMEM_ZSTAT_ITEMS];
struct gen_pool *z_pool;
struct ocmem_zone_ops *z_ops;
+ unsigned int max_alloc_time;
+ unsigned int min_alloc_time;
+ u64 total_alloc_time;
+ unsigned int max_free_time;
+ unsigned int min_free_time;
+ u64 total_free_time;
};
enum op_code {
diff --git a/arch/arm/mach-msm/include/mach/qdsp6v2/usf.h b/arch/arm/mach-msm/include/mach/qdsp6v2/usf.h
index ff39929..2e15cae 100644
--- a/arch/arm/mach-msm/include/mach/qdsp6v2/usf.h
+++ b/arch/arm/mach-msm/include/mach/qdsp6v2/usf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -97,6 +97,10 @@
/* Max size of the client name */
#define USF_MAX_CLIENT_NAME_SIZE 20
+
+/* Max number of the ports (mics/speakers) */
+#define USF_MAX_PORT_NUM 8
+
/* Info structure common for TX and RX */
struct us_xx_info_type {
/* Input: general info */
@@ -115,7 +119,7 @@
/* Number of the microphones (TX) or speakers(RX) */
uint16_t port_cnt;
/* Microphones(TX) or speakers(RX) indexes in their enumeration */
- uint8_t port_id[4];
+ uint8_t port_id[USF_MAX_PORT_NUM];
/* Bits per sample 16 or 32 */
uint16_t bits_per_sample;
/* Input: Transparent info for encoder in the LPASS */
diff --git a/arch/arm/mach-msm/include/mach/qseecomi.h b/arch/arm/mach-msm/include/mach/qseecomi.h
index 3a13af8..e889242 100644
--- a/arch/arm/mach-msm/include/mach/qseecomi.h
+++ b/arch/arm/mach-msm/include/mach/qseecomi.h
@@ -16,6 +16,16 @@
#include <linux/qseecom.h>
+#define QSEECOM_KEY_ID_SIZE 32
+
+#define QSEOS_RESULT_FAIL_LOAD_KS -48
+#define QSEOS_RESULT_FAIL_SAVE_KS -49
+#define QSEOS_RESULT_FAIL_MAX_KEYS -50
+#define QSEOS_RESULT_FAIL_KEY_ID_EXISTS -51
+#define QSEOS_RESULT_FAIL_KEY_ID_DNE -52
+#define QSEOS_RESULT_FAIL_KS_OP -53
+#define QSEOS_RESULT_FAIL_CE_PIPE_INVALID -54
+
enum qseecom_command_scm_resp_type {
QSEOS_APP_ID = 0xEE01,
QSEOS_LISTENER_ID
@@ -36,6 +46,8 @@
QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
QSEOS_APP_REGION_NOTIFICATION,
QSEOS_REGISTER_LOG_BUF_COMMAND,
+ QSEE_RPMB_PROVISION_KEY_COMMAND,
+ QSEE_RPMB_ERASE_COMMAND,
QSEOS_CMD_MAX = 0xEFFFFFFF
};
@@ -45,6 +57,22 @@
QSEOS_RESULT_FAILURE = 0xFFFFFFFF
};
+/* Key Management requests */
+enum qseecom_qceos_key_gen_cmd_id {
+ QSEOS_GENERATE_KEY = 0x11,
+ QSEOS_DELETE_KEY,
+ QSEOS_MAX_KEY_COUNT,
+ QSEOS_SET_KEY,
+ QSEOS_KEY_CMD_MAX = 0xEFFFFFFF
+};
+
+enum qseecom_pipe_type {
+ QSEOS_PIPE_ENC = 0,
+ QSEOS_PIPE_ENC_XTS,
+ QSEOS_PIPE_AUTH,
+ QSEOS_PIPE_ENUM_FILL = 0x7FFFFFFF
+};
+
__packed struct qsee_apps_region_info_ireq {
uint32_t qsee_cmd_id;
uint32_t addr;
@@ -127,4 +155,52 @@
unsigned int data;
};
+struct qseecom_rpmb_provision_key {
+ uint32_t key_type;
+};
+
+__packed struct qseecom_client_send_service_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t key_type; /* in */
+ unsigned int req_len; /* in */
+ void *rsp_ptr; /* in/out */
+ unsigned int rsp_len; /* in/out */
+};
+
+__packed struct qseecom_key_generate_ireq {
+ uint32_t qsee_command_id;
+ uint32_t flags;
+ uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+};
+
+__packed struct qseecom_key_select_ireq {
+ uint32_t qsee_command_id;
+ uint32_t ce;
+ uint32_t pipe;
+ uint32_t pipe_type;
+ uint32_t flags;
+ uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+ unsigned char hash[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_delete_ireq {
+ uint32_t qsee_command_id;
+ uint32_t flags;
+ uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+};
+
+__packed struct qseecom_key_max_count_query_ireq {
+ uint32_t flags;
+};
+
+__packed struct qseecom_key_max_count_query_irsp {
+ uint32_t max_key_count;
+};
+
+struct key_id_info {
+ uint32_t ce_hw;
+ uint32_t pipe;
+ bool flags;
+};
+
#endif /* __QSEECOMI_H_ */
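The packed key-management requests above are plain wire structures, so their layout can be checked outside the kernel. Below is a standalone sketch — not part of the patch — that mirrors two of the request layouts, fills a generate-key request, and prints the packed sizes. QSEECOM_HASH_SIZE is assumed to be 32 here purely for illustration; its real definition lives in linux/qseecom.h.

/*
 * Standalone sketch: mirrors the packed key-management request layouts
 * added above to sanity-check their wire sizes. QSEECOM_HASH_SIZE is an
 * assumption for this sketch only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QSEECOM_KEY_ID_SIZE	32
#define QSEECOM_HASH_SIZE	32	/* assumed value, see linux/qseecom.h */
#define QSEOS_GENERATE_KEY	0x11

struct __attribute__((packed)) key_generate_ireq {
	uint32_t qsee_command_id;
	uint32_t flags;
	uint8_t  key_id[QSEECOM_KEY_ID_SIZE];
};

struct __attribute__((packed)) key_select_ireq {
	uint32_t qsee_command_id;
	uint32_t ce;
	uint32_t pipe;
	uint32_t pipe_type;
	uint32_t flags;
	uint8_t  key_id[QSEECOM_KEY_ID_SIZE];
	unsigned char hash[QSEECOM_HASH_SIZE];
};

int main(void)
{
	struct key_generate_ireq req;

	memset(&req, 0, sizeof(req));
	req.qsee_command_id = QSEOS_GENERATE_KEY;
	req.flags = 0;
	memcpy(req.key_id, "example-key-id", sizeof("example-key-id"));

	printf("generate ireq: %zu bytes\n", sizeof(req));	/* 4 + 4 + 32 = 40 */
	printf("select ireq:   %zu bytes\n",
	       sizeof(struct key_select_ireq));		/* 20 + 32 + 32 = 84 */
	return 0;
}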
diff --git a/arch/arm/mach-msm/ramdump.h b/arch/arm/mach-msm/include/mach/ramdump.h
similarity index 100%
rename from arch/arm/mach-msm/ramdump.h
rename to arch/arm/mach-msm/include/mach/ramdump.h
diff --git a/arch/arm/mach-msm/include/mach/scm.h b/arch/arm/mach-msm/include/mach/scm.h
index 0cc7bbf..4186603 100644
--- a/arch/arm/mach-msm/include/mach/scm.h
+++ b/arch/arm/mach-msm/include/mach/scm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
#define SCM_SVC_PWR 0x9
#define SCM_SVC_MP 0xC
#define SCM_SVC_DCVS 0xD
+#define SCM_SVC_ES 0x10
#define SCM_SVC_TZSCHEDULER 0xFC
#ifdef CONFIG_MSM_SCM
@@ -31,6 +32,7 @@
extern s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1);
extern s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2);
+extern s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3);
extern s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
u32 arg4, u32 *ret1, u32 *ret2);
@@ -58,6 +60,12 @@
return 0;
}
+static inline s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+ u32 arg3)
+{
+ return 0;
+}
+
static inline s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
{
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 45f2646..d4ea4ac 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -32,77 +32,39 @@
#define SOCINFO_VERSION_MINOR(ver) (ver & 0x0000ffff)
#ifdef CONFIG_OF
-#define early_machine_is_msm8974() \
- of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8974")
-#define machine_is_msm8974() \
- of_machine_is_compatible("qcom,msm8974")
-#define machine_is_msm8974_sim() \
- of_machine_is_compatible("qcom,msm8974-sim")
-#define machine_is_msm8974_rumi() \
- of_machine_is_compatible("qcom,msm8974-rumi")
-#define machine_is_msm8974_fluid() \
- of_machine_is_compatible("qcom,msm8974-fluid")
-#define early_machine_is_msm9625() \
- of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm9625")
-#define machine_is_msm9625() \
- of_machine_is_compatible("qcom,msm9625")
-#define early_machine_is_mpq8092() \
- of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mpq8092")
-#define machine_is_mpq8092_sim() \
- of_machine_is_compatible("qcom,mpq8092-sim")
-#define early_machine_is_msm8226() \
- of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8226")
-#define machine_is_msm8226() \
- of_machine_is_compatible("qcom,msm8226")
-#define machine_is_msm8226_sim() \
- of_machine_is_compatible("qcom,msm8226-sim")
-#define machine_is_msm8226_rumi() \
- of_machine_is_compatible("qcom,msm8226-rumi")
-#define machine_is_msm8226_cdp() \
- of_machine_is_compatible("qcom,msm8226-cdp")
-#define machine_is_msm8226_fluid() \
- of_machine_is_compatible("qcom,msm8226-fluid")
-#define machine_is_msm8226_mtp() \
- of_machine_is_compatible("qcom,msm8226-mtp")
-#define machine_is_msm8226_qrd() \
- of_machine_is_compatible("qcom,msm8226-qrd")
+#define of_board_is_sim() of_machine_is_compatible("qcom,sim")
+#define of_board_is_rumi() of_machine_is_compatible("qcom,rumi")
+#define of_board_is_fluid() of_machine_is_compatible("qcom,fluid")
+#define of_board_is_liquid() of_machine_is_compatible("qcom,liquid")
+
+#define machine_is_msm8974() of_machine_is_compatible("qcom,msm8974")
+#define machine_is_msm9625() of_machine_is_compatible("qcom,msm9625")
+#define machine_is_msm8610() of_machine_is_compatible("qcom,msm8610")
+#define machine_is_msm8226() of_machine_is_compatible("qcom,msm8226")
+
#define early_machine_is_msm8610() \
of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8610")
-#define machine_is_msm8610() \
- of_machine_is_compatible("qcom,msm8610")
-#define machine_is_msm8610_sim() \
- of_machine_is_compatible("qcom,msm8610-sim")
-#define machine_is_msm8610_rumi() \
- of_machine_is_compatible("qcom,msm8610-rumi")
-#define machine_is_msm8610_mtp() \
- of_machine_is_compatible("qcom,msm8610-mtp")
-#define machine_is_msm8610_cdp() \
- of_machine_is_compatible("qcom,msm8610-cdp")
-#define early_machine_is_msmzinc() \
- of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmzinc")
-#define machine_is_msmzinc_sim() \
- of_machine_is_compatible("qcom,msmzinc-sim")
+#define early_machine_is_mpq8092() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mpq8092")
+#define early_machine_is_apq8084() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apq8084")
+#define early_machine_is_msmkrypton() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmkrypton")
#else
-#define early_machine_is_msm8974() 0
-#define machine_is_msm8974() 0
-#define machine_is_msm8974_sim() 0
-#define machine_is_msm8974_rumi() 0
-#define machine_is_msm8974_fluid() 0
-#define early_machine_is_msm9625() 0
-#define machine_is_msm9625() 0
-#define early_machine_is_mpq8092() 0
-#define machine_is_mpq8092_sim() 0
-#define early_machine_is_msm8226() 0
-#define machine_is_msm8226() 0
-#define machine_is_msm8226_sim() 0
-#define machine_is_msm8226_rumi() 0
-#define early_machine_is_msm8610() 0
-#define machine_is_msm8610() 0
-#define machine_is_msm8610_sim() 0
-#define machine_is_msm8610_rumi() 0
-#define early_machine_is_msmzinc() 0
-#define machine_is_msmzinc_sim() 0
+#define of_board_is_sim() 0
+#define of_board_is_rumi() 0
+#define of_board_is_fluid() 0
+#define of_board_is_liquid() 0
+#define machine_is_msm8974() 0
+#define machine_is_msm9625() 0
+#define machine_is_msm8610() 0
+#define machine_is_msm8226() 0
+
+#define early_machine_is_msm8610() 0
+#define early_machine_is_mpq8092() 0
+#define early_machine_is_apq8084() 0
+#define early_machine_is_msmkrypton() 0
#endif
#define PLATFORM_SUBTYPE_SGLTE 6
@@ -140,7 +102,8 @@
MSM_CPU_8226,
MSM_CPU_8610,
MSM_CPU_8625Q,
- MSM_CPU_ZINC,
+ MSM_CPU_8084,
+ MSM_CPU_KRYPTON,
};
enum pmic_model {
diff --git a/arch/arm/mach-msm/include/mach/sps.h b/arch/arm/mach-msm/include/mach/sps.h
index c20576a..c5ad35d 100644
--- a/arch/arm/mach-msm/include/mach/sps.h
+++ b/arch/arm/mach-msm/include/mach/sps.h
@@ -146,6 +146,8 @@
SPS_O_WRITE_NWD = 0x00040000,
/* Options to enable software features */
+ /* Do not disable a pipe during disconnection */
+ SPS_O_NO_DISABLE = 0x00800000,
/* Transfer operation should be polled */
SPS_O_POLL = 0x01000000,
/* Disable queuing of transfer events for the connection end point */
@@ -256,6 +258,7 @@
enum sps_callback_case {
SPS_CALLBACK_BAM_ERROR_IRQ = 1, /* BAM ERROR IRQ */
SPS_CALLBACK_BAM_HRESP_ERR_IRQ, /* Erroneous HResponse */
+ SPS_CALLBACK_BAM_TIMER_IRQ, /* Inactivity timer */
};
/*
@@ -408,6 +411,11 @@
u32 sec_config;
struct sps_bam_sec_config_props *p_sec_config_props;
+
+ /* Logging control */
+
+ bool constrained_logging;
+ u32 logging_number;
};
/**
diff --git a/arch/arm/mach-msm/include/mach/usb_trace.h b/arch/arm/mach-msm/include/mach/usb_trace.h
new file mode 100644
index 0000000..02ca8ca
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/usb_trace.h
@@ -0,0 +1,27 @@
+/* arch/arm/mach-msm/include/mach/usb_trace.h
+ *
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _USB_TRACE_H_
+#define _USB_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+DECLARE_TRACE(usb_daytona_invalid_access,
+ TP_PROTO(unsigned int ebi_addr,
+ unsigned int ebi_apacket0, unsigned int ebi_apacket1),
+ TP_ARGS(ebi_addr, ebi_apacket0, ebi_apacket1));
+
+#endif
+
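The new header only declares the tracepoint; a C file still has to instantiate it and some caller has to fire it. The fragment below is a hedged sketch of that wiring — the function and probe names are hypothetical — using the stock DEFINE_TRACE/register_trace_* mechanics that DECLARE_TRACE generates.

/*
 * Hedged sketch, not part of the patch: how a driver might instantiate
 * and fire the tracepoint declared in usb_trace.h. Names are made up.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <mach/usb_trace.h>

/* One compilation unit must instantiate the tracepoint. */
DEFINE_TRACE(usb_daytona_invalid_access);

static void report_invalid_ebi_access(unsigned int addr,
				      unsigned int pkt0, unsigned int pkt1)
{
	/* No-op unless a probe has been registered against the tracepoint. */
	trace_usb_daytona_invalid_access(addr, pkt0, pkt1);
}

/* A debug module could attach a probe along these lines: */
static void probe_invalid_access(void *ignore, unsigned int addr,
				 unsigned int pkt0, unsigned int pkt1)
{
	pr_err("EBI access fault: addr=0x%x pkt0=0x%x pkt1=0x%x\n",
	       addr, pkt0, pkt1);
}

static int __init usb_trace_probe_init(void)
{
	return register_trace_usb_daytona_invalid_access(probe_invalid_access,
							 NULL);
}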
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 19c7acd..37dbbab 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -45,7 +45,7 @@
/* msm_shared_ram_phys default value of 0x00100000 is the most common value
* and should work as-is for any target without stacked memory.
*/
-unsigned int msm_shared_ram_phys = 0x00100000;
+phys_addr_t msm_shared_ram_phys = 0x00100000;
static void __init msm_map_io(struct map_desc *io_desc, int size)
{
@@ -321,27 +321,27 @@
}
#endif /* CONFIG_ARCH_MSM8974 */
-#ifdef CONFIG_ARCH_MSMZINC
-static struct map_desc msm_zinc_io_desc[] __initdata = {
- MSM_CHIP_DEVICE(QGIC_DIST, MSMZINC),
- MSM_CHIP_DEVICE(TLMM, MSMZINC),
+#ifdef CONFIG_ARCH_APQ8084
+static struct map_desc msm_8084_io_desc[] __initdata = {
+ MSM_CHIP_DEVICE(QGIC_DIST, APQ8084),
+ MSM_CHIP_DEVICE(TLMM, APQ8084),
{
.virtual = (unsigned long) MSM_SHARED_RAM_BASE,
.length = MSM_SHARED_RAM_SIZE,
.type = MT_DEVICE,
},
-#ifdef CONFIG_DEBUG_MSMZINC_UART
+#ifdef CONFIG_DEBUG_APQ8084_UART
MSM_DEVICE(DEBUG_UART),
#endif
};
-void __init msm_map_zinc_io(void)
+void __init msm_map_8084_io(void)
{
- msm_shared_ram_phys = MSMZINC_SHARED_RAM_PHYS;
- msm_map_io(msm_zinc_io_desc, ARRAY_SIZE(msm_zinc_io_desc));
+ msm_shared_ram_phys = APQ8084_SHARED_RAM_PHYS;
+ msm_map_io(msm_8084_io_desc, ARRAY_SIZE(msm_8084_io_desc));
of_scan_flat_dt(msm_scan_dt_map_imem, NULL);
}
-#endif /* CONFIG_ARCH_MSMZINC */
+#endif /* CONFIG_ARCH_APQ8084 */
#ifdef CONFIG_ARCH_MSM7X30
static struct map_desc msm7x30_io_desc[] __initdata = {
@@ -507,6 +507,25 @@
}
#endif /* CONFIG_ARCH_MSM9625 */
+#ifdef CONFIG_ARCH_MSMKRYPTON
+static struct map_desc msmkrypton_io_desc[] __initdata = {
+ MSM_CHIP_DEVICE(TLMM, MSMKRYPTON),
+ MSM_CHIP_DEVICE(MPM2_PSHOLD, MSMKRYPTON),
+ {
+ .virtual = (unsigned long) MSM_SHARED_RAM_BASE,
+ .length = MSM_SHARED_RAM_SIZE,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init msm_map_msmkrypton_io(void)
+{
+ msm_shared_ram_phys = MSMKRYPTON_SHARED_RAM_PHYS;
+ msm_map_io(msmkrypton_io_desc, ARRAY_SIZE(msmkrypton_io_desc));
+ of_scan_flat_dt(msm_scan_dt_map_imem, NULL);
+}
+#endif /* CONFIG_ARCH_MSMKRYPTON */
+
#ifdef CONFIG_ARCH_MPQ8092
static struct map_desc mpq8092_io_desc[] __initdata = {
MSM_CHIP_DEVICE(QGIC_DIST, MPQ8092),
diff --git a/arch/arm/mach-msm/ipc_router.c b/arch/arm/mach-msm/ipc_router.c
index d81dbb4..0d617a6 100644
--- a/arch/arm/mach-msm/ipc_router.c
+++ b/arch/arm/mach-msm/ipc_router.c
@@ -1587,9 +1587,11 @@
if (!rport_ptr)
pr_err("%s: Remote port create "
"failed\n", __func__);
- rport_ptr->sec_rule =
- msm_ipc_get_security_rule(
- msg->srv.service, msg->srv.instance);
+ else
+ rport_ptr->sec_rule =
+ msm_ipc_get_security_rule(
+ msg->srv.service,
+ msg->srv.instance);
}
wake_up(&newserver_wait);
}
@@ -1890,6 +1892,7 @@
head_skb = skb_peek(pkt->pkt_fragment_q);
if (!head_skb) {
pr_err("%s: pkt_fragment_q is empty\n", __func__);
+ release_pkt(pkt);
return -EINVAL;
}
hdr = (struct rr_header *)skb_push(head_skb, IPC_ROUTER_HDR_SIZE);
diff --git a/arch/arm/mach-msm/ipc_router_smd_xprt.c b/arch/arm/mach-msm/ipc_router_smd_xprt.c
index 88ab8e0..b2ec816 100644
--- a/arch/arm/mach-msm/ipc_router_smd_xprt.c
+++ b/arch/arm/mach-msm/ipc_router_smd_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -146,11 +146,11 @@
skb_queue_walk(pkt->pkt_fragment_q, ipc_rtr_pkt) {
offset = 0;
while (offset < ipc_rtr_pkt->len) {
- if (!smd_write_avail(smd_xprtp->channel))
+ if (!smd_write_segment_avail(smd_xprtp->channel))
smd_enable_read_intr(smd_xprtp->channel);
wait_event(smd_xprtp->write_avail_wait_q,
- (smd_write_avail(smd_xprtp->channel) ||
+ (smd_write_segment_avail(smd_xprtp->channel) ||
smd_xprtp->ss_reset));
smd_disable_read_intr(smd_xprtp->channel);
spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
@@ -175,11 +175,11 @@
}
if (align_sz) {
- if (smd_write_avail(smd_xprtp->channel) < align_sz)
+ if (smd_write_segment_avail(smd_xprtp->channel) < align_sz)
smd_enable_read_intr(smd_xprtp->channel);
wait_event(smd_xprtp->write_avail_wait_q,
- ((smd_write_avail(smd_xprtp->channel) >=
+ ((smd_write_segment_avail(smd_xprtp->channel) >=
align_sz) || smd_xprtp->ss_reset));
smd_disable_read_intr(smd_xprtp->channel);
spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
@@ -357,7 +357,7 @@
if (smd_read_avail(smd_xprtp->channel))
queue_delayed_work(smd_xprtp->smd_xprt_wq,
&smd_xprtp->read_work, 0);
- if (smd_write_avail(smd_xprtp->channel))
+ if (smd_write_segment_avail(smd_xprtp->channel))
wake_up(&smd_xprtp->write_avail_wait_q);
break;
diff --git a/arch/arm/mach-msm/ipc_socket.c b/arch/arm/mach-msm/ipc_socket.c
index f40bd5d..342663e 100644
--- a/arch/arm/mach-msm/ipc_socket.c
+++ b/arch/arm/mach-msm/ipc_socket.c
@@ -194,7 +194,7 @@
static int msm_ipc_router_extract_msg(struct msghdr *m,
struct sk_buff_head *msg_head)
{
- struct sockaddr_msm_ipc *addr = (struct sockaddr_msm_ipc *)m->msg_name;
+ struct sockaddr_msm_ipc *addr;
struct rr_header *hdr;
struct sk_buff *temp;
int offset = 0, data_len = 0, copy_len;
@@ -203,10 +203,11 @@
pr_err("%s: Invalid pointers passed\n", __func__);
return -EINVAL;
}
+ addr = (struct sockaddr_msm_ipc *)m->msg_name;
temp = skb_peek(msg_head);
hdr = (struct rr_header *)(temp->data);
- if (addr || (hdr->src_port_id != IPC_ROUTER_ADDRESS)) {
+ if (addr && (hdr->src_port_id != IPC_ROUTER_ADDRESS)) {
addr->family = AF_MSM_IPC;
addr->address.addrtype = MSM_IPC_ADDR_ID;
addr->address.addr.port_addr.node_id = hdr->src_node_id;
@@ -366,7 +367,8 @@
if (port_ptr->type == CLIENT_PORT)
wait_for_irsc_completion();
ipc_buf = skb_peek(msg);
- msm_ipc_router_ipc_log(IPC_SEND, ipc_buf, port_ptr);
+ if (ipc_buf)
+ msm_ipc_router_ipc_log(IPC_SEND, ipc_buf, port_ptr);
ret = msm_ipc_router_send_to(port_ptr, msg, &dest->address);
if (ret == (IPC_ROUTER_HDR_SIZE + total_len))
ret = total_len;
@@ -428,7 +430,8 @@
ret = msm_ipc_router_extract_msg(m, msg);
ipc_buf = skb_peek(msg);
- msm_ipc_router_ipc_log(IPC_RECV, ipc_buf, port_ptr);
+ if (ipc_buf)
+ msm_ipc_router_ipc_log(IPC_RECV, ipc_buf, port_ptr);
msm_ipc_router_release_msg(msg);
msg = NULL;
release_sock(sk);
diff --git a/arch/arm/mach-msm/krait-regulator.c b/arch/arm/mach-msm/krait-regulator.c
index 953f941d..52d20e3 100644
--- a/arch/arm/mach-msm/krait-regulator.c
+++ b/arch/arm/mach-msm/krait-regulator.c
@@ -61,9 +61,7 @@
#define PMIC_VOLTAGE_MAX 1355000
#define LV_RANGE_STEP 5000
-#define LOAD_PER_PHASE 3200000
-
-#define CORE_VOLTAGE_MIN 900000
+#define CORE_VOLTAGE_BOOTUP 900000
#define KRAIT_LDO_VOLTAGE_MIN 465000
#define KRAIT_LDO_VOLTAGE_OFFSET 465000
@@ -146,7 +144,10 @@
* regulator's callback functions to prevent
* simultaneous updates to the pmic's phase
* voltage.
- * @apcs_gcc_base virtual address of the APCS GCC registers
+ * @apcs_gcc_base: virtual address of the APCS GCC registers
+ * @manage_phases: begin phase control
+ * @pfm_threshold: the sum of coefficients below which PFM can be
+ * enabled
*/
struct pmic_gang_vreg {
const char *name;
@@ -159,6 +160,8 @@
bool retention_enabled;
bool use_phase_switching;
void __iomem *apcs_gcc_base;
+ bool manage_phases;
+ int pfm_threshold;
};
static struct pmic_gang_vreg *the_gang;
@@ -168,6 +171,9 @@
LDO_MODE = REGULATOR_MODE_IDLE,
};
+#define WAIT_FOR_LOAD 0x2
+#define WAIT_FOR_VOLTAGE 0x1
+
struct krait_power_vreg {
struct list_head link;
struct regulator_desc desc;
@@ -175,7 +181,7 @@
const char *name;
struct pmic_gang_vreg *pvreg;
int uV;
- int load_uA;
+ int load;
enum krait_supply_mode mode;
void __iomem *reg_base;
void __iomem *mdd_base;
@@ -185,7 +191,10 @@
int ldo_threshold_uV;
int ldo_delta_uV;
int cpu_num;
+ int coeff1;
+ int coeff2;
bool online;
+ int online_at_probe;
};
DEFINE_PER_CPU(struct krait_power_vreg *, krait_vregs);
@@ -293,6 +302,249 @@
return 0;
}
+#define COEFF2_UV_THRESHOLD 850000
+static int get_coeff2(int krait_uV)
+{
+ int coeff2 = 0;
+ int krait_mV = krait_uV / 1000;
+
+ if (krait_uV <= COEFF2_UV_THRESHOLD)
+ coeff2 = (612229 * krait_mV) / 1000 - 211258;
+ else
+ coeff2 = (892564 * krait_mV) / 1000 - 449543;
+
+ return coeff2;
+}
+
+static int get_coeff1(int actual_uV, int requested_uV, int load)
+{
+ int ratio = actual_uV * 1000 / requested_uV;
+ int coeff1 = 330 * load + (load * 673 * ratio / 1000);
+
+ return coeff1;
+}
+
+static int get_coeff_total(struct krait_power_vreg *from)
+{
+ int coeff_total = 0;
+ struct krait_power_vreg *kvreg;
+ struct pmic_gang_vreg *pvreg = from->pvreg;
+
+ list_for_each_entry(kvreg, &pvreg->krait_power_vregs, link) {
+ if (!kvreg->online)
+ continue;
+
+ if (kvreg->mode == LDO_MODE) {
+ kvreg->coeff1 =
+ get_coeff1(kvreg->uV - kvreg->ldo_delta_uV,
+ kvreg->uV, kvreg->load);
+ kvreg->coeff2 =
+ get_coeff2(kvreg->uV - kvreg->ldo_delta_uV);
+ } else {
+ kvreg->coeff1 =
+ get_coeff1(pvreg->pmic_vmax_uV,
+ kvreg->uV, kvreg->load);
+ kvreg->coeff2 = get_coeff2(pvreg->pmic_vmax_uV);
+ }
+ coeff_total += kvreg->coeff1 + kvreg->coeff2;
+ }
+
+ return coeff_total;
+}
+
+static int set_pmic_gang_phases(struct pmic_gang_vreg *pvreg, int phase_count)
+{
+ pr_debug("programming phase_count = %d\n", phase_count);
+ if (pvreg->use_phase_switching)
+ /*
+ * note the PMIC sets the phase count to one more than
+ * the value in the register - hence subtract 1 from it
+ */
+ return msm_spm_apcs_set_phase(phase_count - 1);
+ else
+ return 0;
+}
+
+static int num_online(struct pmic_gang_vreg *pvreg)
+{
+ int online_total = 0;
+ struct krait_power_vreg *kvreg;
+
+ list_for_each_entry(kvreg, &pvreg->krait_power_vregs, link) {
+ if (kvreg->online)
+ online_total++;
+ }
+ return online_total;
+}
+
+static int get_total_load(struct krait_power_vreg *from)
+{
+ int load_total = 0;
+ struct krait_power_vreg *kvreg;
+ struct pmic_gang_vreg *pvreg = from->pvreg;
+
+ list_for_each_entry(kvreg, &pvreg->krait_power_vregs, link) {
+ if (!kvreg->online)
+ continue;
+ load_total += kvreg->load;
+ }
+
+ return load_total;
+}
+
+static bool enable_phase_management(struct pmic_gang_vreg *pvreg)
+{
+ struct krait_power_vreg *kvreg;
+
+ list_for_each_entry(kvreg, &pvreg->krait_power_vregs, link) {
+ pr_debug("%s online_at_probe:0x%x\n", kvreg->name,
+ kvreg->online_at_probe);
+ if (kvreg->online_at_probe)
+ return false;
+ }
+ return true;
+}
+
+#define PMIC_FTS_MODE_PFM 0x00
+#define PMIC_FTS_MODE_PWM 0x80
+#define ONE_PHASE_COEFF 1000000
+#define TWO_PHASE_COEFF 2000000
+
+#define PWM_SETTLING_TIME_US 50
+#define PHASE_SETTLING_TIME_US 50
+static unsigned int pmic_gang_set_phases(struct krait_power_vreg *from,
+ int coeff_total)
+{
+ struct pmic_gang_vreg *pvreg = from->pvreg;
+ int phase_count;
+ int rc = 0;
+ int n_online = num_online(pvreg);
+ int load_total;
+
+ load_total = get_total_load(from);
+
+ if (pvreg->manage_phases == false) {
+ if (enable_phase_management(pvreg))
+ pvreg->manage_phases = true;
+ else
+ return 0;
+ }
+
+ /* First check if the coeff is low for PFM mode */
+ if (load_total <= pvreg->pfm_threshold && n_online == 1) {
+ if (!pvreg->pfm_mode) {
+ rc = msm_spm_enable_fts_lpm(PMIC_FTS_MODE_PFM);
+ if (rc) {
+ pr_err("%s PFM en failed load_t %d rc = %d\n",
+ from->name, load_total, rc);
+ return rc;
+ } else {
+ pvreg->pfm_mode = true;
+ }
+ }
+ return rc;
+ }
+
+ /* coeff is high switch to PWM mode before changing phases */
+ if (pvreg->pfm_mode) {
+ rc = msm_spm_enable_fts_lpm(PMIC_FTS_MODE_PWM);
+ if (rc) {
+ pr_err("%s PFM exit failed load %d rc = %d\n",
+ from->name, coeff_total, rc);
+ return rc;
+ } else {
+ pvreg->pfm_mode = false;
+ udelay(PWM_SETTLING_TIME_US);
+ }
+ }
+
+ /* calculate phases */
+ if (coeff_total < ONE_PHASE_COEFF)
+ phase_count = 1;
+ else if (coeff_total < TWO_PHASE_COEFF)
+ phase_count = 2;
+ else
+ phase_count = 4;
+
+ /* don't increase the phase count higher than number of online cpus */
+ if (phase_count > n_online)
+ phase_count = n_online;
+
+ if (phase_count != pvreg->pmic_phase_count) {
+ rc = set_pmic_gang_phases(pvreg, phase_count);
+ if (rc < 0) {
+ pr_err("%s failed set phase %d rc = %d\n",
+ from->name, phase_count, rc);
+ return rc;
+ }
+
+ /* complete the writes before the delay */
+ mb();
+
+ /*
+ * delay until the phases are settled when
+ * the count is raised
+ */
+ if (phase_count > pvreg->pmic_phase_count)
+ udelay(PHASE_SETTLING_TIME_US);
+
+ pvreg->pmic_phase_count = phase_count;
+ }
+
+ return rc;
+}
+
+static unsigned int _get_optimum_mode(struct regulator_dev *rdev,
+ int input_uV, int output_uV, int load)
+{
+ struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
+ int coeff_total;
+ int rc;
+
+ kvreg->online_at_probe &= ~WAIT_FOR_LOAD;
+ coeff_total = get_coeff_total(kvreg);
+
+ rc = pmic_gang_set_phases(kvreg, coeff_total);
+ if (rc < 0) {
+ dev_err(&rdev->dev, "%s failed set mode %d rc = %d\n",
+ kvreg->name, coeff_total, rc);
+ }
+
+ return kvreg->mode;
+}
+
+static unsigned int krait_power_get_optimum_mode(struct regulator_dev *rdev,
+ int input_uV, int output_uV, int load_uA)
+{
+ struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
+ struct pmic_gang_vreg *pvreg = kvreg->pvreg;
+ int rc;
+
+ mutex_lock(&pvreg->krait_power_vregs_lock);
+ kvreg->load = load_uA;
+ if (!kvreg->online) {
+ mutex_unlock(&pvreg->krait_power_vregs_lock);
+ return kvreg->mode;
+ }
+
+ rc = _get_optimum_mode(rdev, input_uV, output_uV, load_uA);
+ mutex_unlock(&pvreg->krait_power_vregs_lock);
+
+ return rc;
+}
+
+static int krait_power_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ return 0;
+}
+
+static unsigned int krait_power_get_mode(struct regulator_dev *rdev)
+{
+ struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
+
+ return kvreg->mode;
+}
+
static int switch_to_using_hs(struct krait_power_vreg *kvreg)
{
if (kvreg->mode == HS_MODE)
@@ -368,19 +620,6 @@
return 0;
}
-static int set_pmic_gang_phases(struct pmic_gang_vreg *pvreg, int phase_count)
-{
- pr_debug("programming phase_count = %d\n", phase_count);
- if (pvreg->use_phase_switching)
- /*
- * note the PMIC sets the phase count to one more than
- * the value in the register - hence subtract 1 from it
- */
- return msm_spm_apcs_set_phase(phase_count - 1);
- else
- return 0;
-}
-
static int set_pmic_gang_voltage(struct pmic_gang_vreg *pvreg, int uV)
{
int setpoint;
@@ -524,46 +763,6 @@
return rc;
}
-#define PHASE_SETTLING_TIME_US 10
-static unsigned int pmic_gang_set_phases(struct krait_power_vreg *from,
- int load_uA)
-{
- struct pmic_gang_vreg *pvreg = from->pvreg;
- int phase_count = DIV_ROUND_UP(load_uA, LOAD_PER_PHASE);
- int rc = 0;
-
- if (phase_count <= 0)
- phase_count = 1;
-
- /* Increase phases if it is less than the number of cpus online */
- if (phase_count < num_online_cpus()) {
- phase_count = num_online_cpus();
- }
-
- if (phase_count != pvreg->pmic_phase_count) {
- rc = set_pmic_gang_phases(pvreg, phase_count);
- if (rc < 0) {
- dev_err(&from->rdev->dev,
- "%s failed set phase %d rc = %d\n",
- pvreg->name, phase_count, rc);
- return rc;
- }
-
- /* complete the writes before the delay */
- mb();
-
- /*
- * delay until the phases are settled when
- * the count is raised
- */
- if (phase_count > pvreg->pmic_phase_count)
- udelay(PHASE_SETTLING_TIME_US);
-
- pvreg->pmic_phase_count = phase_count;
- }
- return rc;
-}
-
static int krait_power_get_voltage(struct regulator_dev *rdev)
{
struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
@@ -590,21 +789,6 @@
return vmax;
}
-static int get_total_load(struct krait_power_vreg *from)
-{
- int load_total = 0;
- struct krait_power_vreg *kvreg;
- struct pmic_gang_vreg *pvreg = from->pvreg;
-
- list_for_each_entry(kvreg, &pvreg->krait_power_vregs, link) {
- if (!kvreg->online)
- continue;
- load_total += kvreg->load_uA;
- }
-
- return load_total;
-}
-
#define ROUND_UP_VOLTAGE(v, res) (DIV_ROUND_UP(v, res) * res)
static int _set_voltage(struct regulator_dev *rdev,
int orig_krait_uV, int requested_uV)
@@ -613,6 +797,7 @@
struct pmic_gang_vreg *pvreg = kvreg->pvreg;
int rc;
int vmax;
+ int coeff_total;
pr_debug("%s: %d to %d\n", kvreg->name, orig_krait_uV, requested_uV);
/*
@@ -636,6 +821,11 @@
kvreg->name, requested_uV, orig_krait_uV, rc);
}
+ kvreg->online_at_probe &= ~WAIT_FOR_VOLTAGE;
+ coeff_total = get_coeff_total(kvreg);
+ /* adjust the phases since coeff2 would have changed */
+ rc = pmic_gang_set_phases(kvreg, coeff_total);
+
return rc;
}
@@ -670,89 +860,6 @@
return rc;
}
-#define PMIC_FTS_MODE_PFM 0x00
-#define PMIC_FTS_MODE_PWM 0x80
-#define PFM_LOAD_UA 500000
-static unsigned int _get_optimum_mode(struct regulator_dev *rdev,
- int input_uV, int output_uV, int load_uA)
-{
- struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
- struct pmic_gang_vreg *pvreg = kvreg->pvreg;
- int rc;
- int load_total_uA;
-
- load_total_uA = get_total_load(kvreg);
-
- if (load_total_uA < PFM_LOAD_UA) {
- if (!pvreg->pfm_mode) {
- rc = msm_spm_enable_fts_lpm(PMIC_FTS_MODE_PFM);
- if (rc) {
- dev_err(&rdev->dev,
- "%s enter PFM failed load %d rc = %d\n",
- kvreg->name, load_total_uA, rc);
- goto out;
- } else {
- pvreg->pfm_mode = true;
- }
- }
- return kvreg->mode;
- }
-
- if (pvreg->pfm_mode) {
- rc = msm_spm_enable_fts_lpm(PMIC_FTS_MODE_PWM);
- if (rc) {
- dev_err(&rdev->dev,
- "%s exit PFM failed load %d rc = %d\n",
- kvreg->name, load_total_uA, rc);
- goto out;
- } else {
- pvreg->pfm_mode = false;
- }
- }
-
- rc = pmic_gang_set_phases(kvreg, load_total_uA);
- if (rc < 0) {
- dev_err(&rdev->dev, "%s failed set mode %d rc = %d\n",
- kvreg->name, load_total_uA, rc);
- goto out;
- }
-
-out:
- return kvreg->mode;
-}
-
-static unsigned int krait_power_get_optimum_mode(struct regulator_dev *rdev,
- int input_uV, int output_uV, int load_uA)
-{
- struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
- struct pmic_gang_vreg *pvreg = kvreg->pvreg;
- int rc;
-
- mutex_lock(&pvreg->krait_power_vregs_lock);
- kvreg->load_uA = load_uA;
- if (!kvreg->online) {
- mutex_unlock(&pvreg->krait_power_vregs_lock);
- return kvreg->mode;
- }
-
- rc = _get_optimum_mode(rdev, input_uV, output_uV, load_uA);
- mutex_unlock(&pvreg->krait_power_vregs_lock);
-
- return rc;
-}
-
-static int krait_power_set_mode(struct regulator_dev *rdev, unsigned int mode)
-{
- return 0;
-}
-
-static unsigned int krait_power_get_mode(struct regulator_dev *rdev)
-{
- struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
-
- return kvreg->mode;
-}
-
static int krait_power_is_enabled(struct regulator_dev *rdev)
{
struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
@@ -769,7 +876,7 @@
mutex_lock(&pvreg->krait_power_vregs_lock);
__krait_power_mdd_enable(kvreg, true);
kvreg->online = true;
- rc = _get_optimum_mode(rdev, kvreg->uV, kvreg->uV, kvreg->load_uA);
+ rc = _get_optimum_mode(rdev, kvreg->uV, kvreg->uV, kvreg->load);
if (rc < 0)
goto en_err;
/*
@@ -791,8 +898,7 @@
mutex_lock(&pvreg->krait_power_vregs_lock);
kvreg->online = false;
- rc = _get_optimum_mode(rdev, kvreg->uV, kvreg->uV,
- kvreg->load_uA);
+ rc = _get_optimum_mode(rdev, kvreg->uV, kvreg->uV, kvreg->load);
if (rc < 0)
goto dis_err;
@@ -851,8 +957,10 @@
DEFINE_SIMPLE_ATTRIBUTE(retention_fops,
get_retention_dbg_uV, set_retention_dbg_uV, "%llu\n");
+#define CPU_PWR_CTL_ONLINE_MASK 0x80
static void kvreg_hw_init(struct krait_power_vreg *kvreg)
{
+ int online;
/*
* bhs_cnt value sets the ramp-up time from power collapse,
* initialize the ramp up time
@@ -865,6 +973,10 @@
/* Enable MDD */
writel_relaxed(0x00000002, kvreg->mdd_base + MDD_MODE);
mb();
+ online = CPU_PWR_CTL_ONLINE_MASK
+ & readl_relaxed(kvreg->reg_base + CPU_PWR_CTL);
+ kvreg->online_at_probe
+ = online ? (WAIT_FOR_LOAD | WAIT_FOR_VOLTAGE) : 0x0;
}
static void glb_init(void __iomem *apcs_gcc_base)
@@ -1012,7 +1124,7 @@
kvreg->desc.ops = &krait_power_ops;
kvreg->desc.type = REGULATOR_VOLTAGE;
kvreg->desc.owner = THIS_MODULE;
- kvreg->uV = CORE_VOLTAGE_MIN;
+ kvreg->uV = CORE_VOLTAGE_BOOTUP;
kvreg->mode = HS_MODE;
kvreg->desc.ops = &krait_power_ops;
kvreg->headroom_uV = headroom_uV;
@@ -1111,6 +1223,7 @@
{
int rc;
bool use_phase_switching = false;
+ int pfm_threshold;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct pmic_gang_vreg *pvreg;
@@ -1123,6 +1236,13 @@
use_phase_switching = of_property_read_bool(node,
"qcom,use-phase-switching");
+
+ rc = of_property_read_u32(node, "qcom,pfm-threshold", &pfm_threshold);
+ if (rc < 0) {
+ dev_err(dev, "pfm-threshold missing rc=%d, pfm disabled\n", rc);
+ return -EINVAL;
+ }
+
pvreg = devm_kzalloc(&pdev->dev,
sizeof(struct pmic_gang_vreg), GFP_KERNEL);
if (!pvreg) {
@@ -1148,6 +1268,7 @@
pvreg->retention_enabled = true;
pvreg->pmic_min_uV_for_retention = INT_MAX;
pvreg->use_phase_switching = use_phase_switching;
+ pvreg->pfm_threshold = pfm_threshold;
mutex_init(&pvreg->krait_power_vregs_lock);
INIT_LIST_HEAD(&pvreg->krait_power_vregs);
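The regulator now decides phase count from a per-CPU coefficient model instead of the old LOAD_PER_PHASE heuristic: each online Krait contributes coeff1 (a function of its load and the ratio of actual to requested voltage) plus coeff2 (a function of the rail voltage), and the sum is compared against ONE_PHASE_COEFF/TWO_PHASE_COEFF before being clamped to the number of online CPUs. The standalone sketch below reproduces that arithmetic so the thresholds are easy to exercise; the constants are copied from the patch, while the sample voltages and load figure are made up and kept small enough that the 32-bit math in the sketch cannot overflow.

/*
 * Standalone illustration of the coefficient-based phase selection added
 * above. Constants mirror the patch; the sample inputs are illustrative.
 */
#include <stdio.h>

#define COEFF2_UV_THRESHOLD	850000
#define ONE_PHASE_COEFF		1000000
#define TWO_PHASE_COEFF		2000000

static int get_coeff2(int krait_uV)
{
	int krait_mV = krait_uV / 1000;

	if (krait_uV <= COEFF2_UV_THRESHOLD)
		return (612229 * krait_mV) / 1000 - 211258;
	return (892564 * krait_mV) / 1000 - 449543;
}

static int get_coeff1(int actual_uV, int requested_uV, int load)
{
	int ratio = actual_uV * 1000 / requested_uV;

	return 330 * load + (load * 673 * ratio / 1000);
}

static int phases_for(int coeff_total, int n_online)
{
	int phase_count;

	if (coeff_total < ONE_PHASE_COEFF)
		phase_count = 1;
	else if (coeff_total < TWO_PHASE_COEFF)
		phase_count = 2;
	else
		phase_count = 4;

	/* never program more phases than there are online CPUs */
	return phase_count > n_online ? n_online : phase_count;
}

int main(void)
{
	/* one CPU in BHS mode: rail at 1.05 V, request 0.9 V, made-up load of 800 */
	int coeff = get_coeff1(1050000, 900000, 800) + get_coeff2(1050000);

	printf("coeff_total=%d -> %d phase(s) with 4 CPUs online\n",
	       coeff, phases_for(coeff, 4));
	return 0;
}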
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index edfb45b..1680993 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
+#include <asm/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
@@ -151,8 +152,8 @@
if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
if (mt->size > mt->limit) {
- pr_warning("%lx size for %s too large, setting to %lx\n",
- mt->size, memtype_name[i], mt->limit);
+ pr_warning("%pa size for %s too large, setting to %pa\n",
+ &mt->size, memtype_name[i], &mt->limit);
mt->size = mt->limit;
}
}
@@ -160,42 +161,18 @@
static void __init reserve_memory_for_mempools(void)
{
- int memtype, memreg_type;
+ int memtype;
struct memtype_reserve *mt;
- struct memblock_region *mr, *mr_candidate = NULL;
- int ret;
+ phys_addr_t alignment;
mt = &reserve_info->memtype_reserve_table[0];
for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
continue;
-
- /* Choose the memory block with the highest physical
- * address which is large enough, so that we will not
- * take memory from the lowest memory bank which the kernel
- * is in (and cause boot problems) and so that we might
- * be able to steal memory that would otherwise become
- * highmem.
- */
- for_each_memblock(memory, mr) {
- memreg_type =
- reserve_info->paddr_to_memtype(mr->base);
- if (memtype != memreg_type)
- continue;
- if (mr->size >= mt->size
- && (mr_candidate == NULL
- || mr->base > mr_candidate->base))
- mr_candidate = mr;
- }
- BUG_ON(mr_candidate == NULL);
- /* bump mt up against the top of the region */
- mt->start = mr_candidate->base + mr_candidate->size - mt->size;
- ret = memblock_reserve(mt->start, mt->size);
- BUG_ON(ret);
- ret = memblock_free(mt->start, mt->size);
- BUG_ON(ret);
- ret = memblock_remove(mt->start, mt->size);
- BUG_ON(ret);
+ alignment = (mt->flags & MEMTYPE_FLAGS_1M_ALIGN) ?
+ SZ_1M : PAGE_SIZE;
+ mt->start = arm_memblock_steal(mt->size, alignment);
+ BUG_ON(!mt->start);
}
}
@@ -257,7 +234,7 @@
}
EXPORT_SYMBOL(allocate_contiguous_ebi);
-unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
+phys_addr_t allocate_contiguous_ebi_nomap(unsigned long size,
unsigned long align)
{
return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
diff --git a/arch/arm/mach-msm/mpm-of.c b/arch/arm/mach-msm/mpm-of.c
index 09f784d..5c654b0 100644
--- a/arch/arm/mach-msm/mpm-of.c
+++ b/arch/arm/mach-msm/mpm-of.c
@@ -70,9 +70,6 @@
#define MSM_MPM_IRQ_INDEX(irq) (irq / 32)
#define MSM_MPM_IRQ_MASK(irq) BIT(irq % 32)
-#define MSM_MPM_DETECT_CTL_INDEX(irq) (irq / 16)
-#define MSM_MPM_DETECT_CTL_SHIFT(irq) ((irq % 16) * 2)
-
#define hashfn(val) (val % MSM_MPM_NR_MPM_IRQS)
#define SCLK_HZ (32768)
#define ARCH_TIMER_HZ (19200000)
@@ -81,8 +78,8 @@
enum mpm_reg_offsets {
MSM_MPM_REG_WAKEUP,
MSM_MPM_REG_ENABLE,
- MSM_MPM_REG_DETECT_CTL,
- MSM_MPM_REG_DETECT_CTL1,
+ MSM_MPM_REG_FALLING_EDGE,
+ MSM_MPM_REG_RISING_EDGE,
MSM_MPM_REG_POLARITY,
MSM_MPM_REG_STATUS,
};
@@ -91,7 +88,8 @@
static uint32_t msm_mpm_enabled_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_wake_irq[MSM_MPM_REG_WIDTH];
-static uint32_t msm_mpm_detect_ctl[MSM_MPM_REG_WIDTH * 2];
+static uint32_t msm_mpm_falling_edge[MSM_MPM_REG_WIDTH];
+static uint32_t msm_mpm_rising_edge[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_polarity[MSM_MPM_REG_WIDTH];
enum {
@@ -174,11 +172,11 @@
reg = MSM_MPM_REG_ENABLE;
msm_mpm_write(reg, i, irqs[i]);
- reg = MSM_MPM_REG_DETECT_CTL;
- msm_mpm_write(reg, i, msm_mpm_detect_ctl[i]);
+ reg = MSM_MPM_REG_FALLING_EDGE;
+ msm_mpm_write(reg, i, msm_mpm_falling_edge[i]);
- reg = MSM_MPM_REG_DETECT_CTL1;
- msm_mpm_write(reg, i, msm_mpm_detect_ctl[2+i]);
+ reg = MSM_MPM_REG_RISING_EDGE;
+ msm_mpm_write(reg, i, msm_mpm_rising_edge[i]);
reg = MSM_MPM_REG_POLARITY;
msm_mpm_write(reg, i, msm_mpm_polarity[i]);
@@ -264,23 +262,24 @@
return 0;
}
-static void msm_mpm_set_detect_ctl(int pin, unsigned int flow_type)
+static void msm_mpm_set_edge_ctl(int pin, unsigned int flow_type)
{
uint32_t index;
- uint32_t val = 0;
- uint32_t shift;
+ uint32_t mask;
- index = MSM_MPM_DETECT_CTL_INDEX(pin);
- shift = MSM_MPM_DETECT_CTL_SHIFT(pin);
-
- if (flow_type & IRQ_TYPE_EDGE_RISING)
- val |= 0x02;
+ index = MSM_MPM_IRQ_INDEX(pin);
+ mask = MSM_MPM_IRQ_MASK(pin);
if (flow_type & IRQ_TYPE_EDGE_FALLING)
- val |= 0x01;
+ msm_mpm_falling_edge[index] |= mask;
+ else
+ msm_mpm_falling_edge[index] &= ~mask;
- msm_mpm_detect_ctl[index] &= ~(0x3 << shift);
- msm_mpm_detect_ctl[index] |= (val & 0x03) << shift;
+ if (flow_type & IRQ_TYPE_EDGE_RISING)
+ msm_mpm_rising_edge[index] |= mask;
+ else
+ msm_mpm_rising_edge[index] &= ~mask;
+
}
static int msm_mpm_set_irq_type_exclusive(
@@ -300,7 +299,7 @@
if (index >= MSM_MPM_REG_WIDTH)
return -EFAULT;
- msm_mpm_set_detect_ctl(mpm_irq, flow_type);
+ msm_mpm_set_edge_ctl(mpm_irq, flow_type);
if (flow_type & IRQ_TYPE_LEVEL_HIGH)
msm_mpm_polarity[index] |= mask;
@@ -378,7 +377,7 @@
if (!msm_mpm_is_initialized())
return -EINVAL;
- if (pin > MSM_MPM_NR_MPM_IRQS)
+ if (pin >= MSM_MPM_NR_MPM_IRQS)
return -EINVAL;
spin_lock_irqsave(&msm_mpm_lock, flags);
@@ -429,7 +428,7 @@
spin_lock_irqsave(&msm_mpm_lock, flags);
- msm_mpm_set_detect_ctl(pin, flow_type);
+ msm_mpm_set_edge_ctl(pin, flow_type);
if (flow_type & IRQ_TYPE_LEVEL_HIGH)
msm_mpm_polarity[index] |= mask;
@@ -768,7 +767,7 @@
return;
failed_malloc:
- for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++) {
+ for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
mpm_of_map[i].chip->irq_mask = NULL;
mpm_of_map[i].chip->irq_unmask = NULL;
mpm_of_map[i].chip->irq_disable = NULL;
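The detect-control rework trades one packed 2-bit field per MPM pin (sixteen pins per 32-bit register) for two single-bit maps, one register set for falling edges and one for rising edges, indexed the same way as the enable and polarity registers. The standalone sketch below computes both encodings for the same pin so the index and mask arithmetic can be compared directly; the pin number is arbitrary.

/*
 * Standalone comparison of the old 2-bit DETECT_CTL encoding and the new
 * separate falling/rising edge bitmaps, for an arbitrary MPM pin.
 */
#include <stdint.h>
#include <stdio.h>

#define IRQ_TYPE_EDGE_RISING	0x1
#define IRQ_TYPE_EDGE_FALLING	0x2

/* old scheme: 2 bits per pin, 16 pins per 32-bit register */
#define OLD_INDEX(pin)	((pin) / 16)
#define OLD_SHIFT(pin)	(((pin) % 16) * 2)

/* new scheme: 1 bit per pin per register, 32 pins per register */
#define NEW_INDEX(pin)	((pin) / 32)
#define NEW_MASK(pin)	(1u << ((pin) % 32))

int main(void)
{
	unsigned int pin = 37, flow = IRQ_TYPE_EDGE_FALLING;
	uint32_t old_val = 0;
	uint32_t falling[4] = {0}, rising[4] = {0};

	/* old: rising in bit 1, falling in bit 0 of the pin's 2-bit field */
	old_val |= ((flow & IRQ_TYPE_EDGE_RISING) ? 0x2u : 0) << OLD_SHIFT(pin);
	old_val |= ((flow & IRQ_TYPE_EDGE_FALLING) ? 0x1u : 0) << OLD_SHIFT(pin);

	/* new: one bit in each dedicated edge register */
	if (flow & IRQ_TYPE_EDGE_FALLING)
		falling[NEW_INDEX(pin)] |= NEW_MASK(pin);
	if (flow & IRQ_TYPE_EDGE_RISING)
		rising[NEW_INDEX(pin)] |= NEW_MASK(pin);

	printf("pin %u old: DETECT_CTL[%u] = 0x%08x\n",
	       pin, OLD_INDEX(pin), old_val);
	printf("pin %u new: FALLING[%u] = 0x%08x, RISING[%u] = 0x%08x\n",
	       pin, NEW_INDEX(pin), falling[NEW_INDEX(pin)],
	       NEW_INDEX(pin), rising[NEW_INDEX(pin)]);
	return 0;
}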
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index 3c8348d..cd6693e 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -477,9 +477,11 @@
#define M_MODE_ADDR(b, n) \
(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
enum bimc_m_mode {
- M_MODE_RMSK = 0xf0000001,
+ M_MODE_RMSK = 0xf0000011,
M_MODE_WR_GATHER_BEATS_BMSK = 0xf0000000,
M_MODE_WR_GATHER_BEATS_SHFT = 0x1c,
+ M_MODE_NARROW_WR_BMSK = 0x10,
+ M_MODE_NARROW_WR_SHFT = 0x4,
M_MODE_ORDERING_MODEL_BMSK = 0x1,
M_MODE_ORDERING_MODEL_SHFT = 0x0,
};
@@ -1526,10 +1528,10 @@
reg_val = readl_relaxed(M_PRIOLVL_OVERRIDE_ADDR(binfo->
base, mas_index)) & M_PRIOLVL_OVERRIDE_RMSK;
val = qmode->fixed.prio_level <<
- M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT;
+ M_PRIOLVL_OVERRIDE_SHFT;
writel_relaxed(((reg_val &
- ~(M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK)) | (val
- & M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK)),
+ ~(M_PRIOLVL_OVERRIDE_BMSK)) | (val
+ & M_PRIOLVL_OVERRIDE_BMSK)),
M_PRIOLVL_OVERRIDE_ADDR(binfo->base, mas_index));
reg_val = readl_relaxed(M_RD_CMD_OVERRIDE_ADDR(binfo->
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_core.h b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
index fd2dbb5..98419a4 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_core.h
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
@@ -36,7 +36,7 @@
(((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0)
#define INTERLEAVED_BW(fab_pdata, bw, ports) \
- ((fab_pdata->il_flag) ? msm_bus_div64((bw), (ports)) : (bw))
+ ((fab_pdata->il_flag) ? msm_bus_div64((ports), (bw)) : (bw))
#define INTERLEAVED_VAL(fab_pdata, n) \
((fab_pdata->il_flag) ? (n) : 1)
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_of.c b/arch/arm/mach-msm/msm_bus/msm_bus_of.c
index 489eb5c..af3537c 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_of.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_of.c
@@ -42,20 +42,9 @@
return -EINVAL;
}
-/**
- * msm_bus_cl_get_pdata() - Generate bus client data from device tree
- * provided by clients.
- *
- * of_node: Device tree node to extract information from
- *
- * The function returns a valid pointer to the allocated bus-scale-pdata
- * if the vectors were correctly read from the client's device node.
- * Any error in reading or parsing the device node will return NULL
- * to the caller.
- */
-struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev)
+static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev,
+ struct device_node *of_node)
{
- struct device_node *of_node;
struct msm_bus_scale_pdata *pdata = NULL;
struct msm_bus_paths *usecase = NULL;
int i = 0, j, ret, num_usecases = 0, num_paths, len;
@@ -67,7 +56,6 @@
return NULL;
}
- of_node = pdev->dev.of_node;
pdata = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_scale_pdata),
GFP_KERNEL);
if (!pdata) {
@@ -91,9 +79,10 @@
}
pdata->num_usecases = num_usecases;
- ret = of_property_read_u32(of_node, "qcom,msm-bus,active-only",
- &pdata->active_only);
- if (ret) {
+
+ if (of_property_read_bool(of_node, "qcom,msm-bus,active-only"))
+ pdata->active_only = 1;
+ else {
pr_debug("active_only flag absent.\n");
pr_debug("Using dual context by default\n");
}
@@ -154,9 +143,81 @@
return NULL;
}
+
+/**
+ * msm_bus_cl_get_pdata() - Generate bus client data from device tree
+ * provided by clients.
+ *
+ * of_node: Device tree node to extract information from
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *of_node;
+ struct msm_bus_scale_pdata *pdata = NULL;
+
+ if (!pdev) {
+ pr_err("Error: Null Platform device\n");
+ return NULL;
+ }
+
+ of_node = pdev->dev.of_node;
+ pdata = get_pdata(pdev, of_node);
+ if (!pdata) {
+ pr_err("Error getting bus pdata!\n");
+ return NULL;
+ }
+
+ return pdata;
+}
EXPORT_SYMBOL(msm_bus_cl_get_pdata);
/**
+ * msm_bus_cl_pdata_from_node() - Generate bus client data from device tree
+ * node provided by clients. This function should be used when a client
+ * driver needs to register multiple bus-clients from a single device-tree
+ * node associated with the platform-device.
+ *
+ * of_node: The subnode containing information about the bus scaling
+ * data
+ *
+ * pdev: Platform device associated with the device-tree node
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node)
+{
+ struct msm_bus_scale_pdata *pdata = NULL;
+
+ if (!pdev) {
+ pr_err("Error: Null Platform device\n");
+ return NULL;
+ }
+
+ if (!of_node) {
+ pr_err("Error: Null of_node passed to bus driver\n");
+ return NULL;
+ }
+
+ pdata = get_pdata(pdev, of_node);
+ if (!pdata) {
+ pr_err("Error getting bus pdata!\n");
+ return NULL;
+ }
+
+ return pdata;
+}
+EXPORT_SYMBOL(msm_bus_pdata_from_node);
+
+/**
* msm_bus_cl_clear_pdata() - Clear pdata allocated from device-tree
* of_node: Device tree node to extract information from
*/
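With the parsing split into get_pdata(), a driver whose single device-tree node carries several scaling tables can hand each subnode to msm_bus_pdata_from_node() and register one bus client per table. The fragment below is a hedged sketch of that pattern — the child-node filter is hypothetical, and the register/update calls assume the existing msm_bus_scale client API.

/*
 * Hedged sketch of registering one bus client per child node using the
 * new msm_bus_pdata_from_node() helper. Not part of the patch.
 */
#include <linux/of.h>
#include <linux/platform_device.h>
#include <mach/msm_bus.h>

static int example_register_bus_clients(struct platform_device *pdev)
{
	struct device_node *child;
	struct msm_bus_scale_pdata *pdata;
	uint32_t handle;

	for_each_child_of_node(pdev->dev.of_node, child) {
		/* hypothetical filter: only subnodes that carry vectors */
		if (!of_find_property(child, "qcom,msm-bus,name", NULL))
			continue;

		pdata = msm_bus_pdata_from_node(pdev, child);
		if (!pdata) {
			of_node_put(child);
			return -EINVAL;
		}

		handle = msm_bus_scale_register_client(pdata);
		if (!handle) {
			of_node_put(child);
			return -EINVAL;
		}

		/* vote for use case 1 of this client as an example */
		msm_bus_scale_client_update_request(handle, 1);
	}
	return 0;
}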
diff --git a/arch/arm/mach-msm/msm_dsps.c b/arch/arm/mach-msm/msm_dsps.c
index db67f7d..0ada902 100644
--- a/arch/arm/mach-msm/msm_dsps.c
+++ b/arch/arm/mach-msm/msm_dsps.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,8 +36,8 @@
#include <mach/msm_smsm.h>
#include <mach/msm_dsps.h>
#include <mach/subsystem_restart.h>
+#include <mach/ramdump.h>
-#include "ramdump.h"
#include "timer.h"
#define DRV_NAME "msm_dsps"
diff --git a/arch/arm/mach-msm/msm_watchdog.c b/arch/arm/mach-msm/msm_watchdog.c
index b1c8b30..dcfe13c 100644
--- a/arch/arm/mach-msm/msm_watchdog.c
+++ b/arch/arm/mach-msm/msm_watchdog.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
#include <linux/suspend.h>
#include <linux/percpu.h>
#include <linux/interrupt.h>
+#include <linux/reboot.h>
#include <asm/fiq.h>
#include <asm/hardware/gic.h>
#include <mach/msm_iomap.h>
@@ -64,6 +65,12 @@
module_param(enable, int, 0);
/*
+ * Watchdog bark reboot timeout in seconds.
+ * Can be specified in kernel command line.
+ */
+static int reboot_bark_timeout = 22;
+module_param(reboot_bark_timeout, int, 0644);
+/*
* If the watchdog is enabled at bootup (enable=1),
* the runtime_disable sysfs node at
* /sys/module/msm_watchdog/runtime_disable
@@ -154,6 +161,27 @@
.notifier_call = panic_wdog_handler,
};
+#define get_sclk_hz(t_ms) ((t_ms / 1000) * WDT_HZ)
+#define get_reboot_bark_timeout(t_s) ((t_s * MSEC_PER_SEC) < bark_time ? \
+ get_sclk_hz(bark_time) : get_sclk_hz(t_s * MSEC_PER_SEC))
+
+static int msm_watchdog_reboot_notifier(struct notifier_block *this,
+ unsigned long code, void *unused)
+{
+
+ u64 timeout = get_reboot_bark_timeout(reboot_bark_timeout);
+ __raw_writel(timeout, msm_wdt_base + WDT_BARK_TIME);
+ __raw_writel(timeout + 3 * WDT_HZ,
+ msm_wdt_base + WDT_BITE_TIME);
+ __raw_writel(1, msm_wdt_base + WDT_RST);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_reboot_notifier = {
+ .notifier_call = msm_watchdog_reboot_notifier,
+};
+
struct wdog_disable_work_data {
struct work_struct work;
struct completion complete;
@@ -177,6 +205,7 @@
}
enable = 0;
atomic_notifier_chain_unregister(&panic_notifier_list, &panic_blk);
+ unregister_reboot_notifier(&msm_reboot_notifier);
cancel_delayed_work(&dogwork_struct);
/* may be suspended after the first write above */
__raw_writel(0, msm_wdt_base + WDT_EN);
@@ -373,6 +402,10 @@
atomic_notifier_chain_register(&panic_notifier_list,
&panic_blk);
+ ret = register_reboot_notifier(&msm_reboot_notifier);
+ if (ret)
+ pr_err("Failed to register reboot notifier\n");
+
__raw_writel(1, msm_wdt_base + WDT_EN);
__raw_writel(1, msm_wdt_base + WDT_RST);
last_pet = sched_clock();
@@ -395,6 +428,11 @@
}
bark_time = pdata->bark_time;
+ /* reboot_bark_timeout (in seconds) might have been supplied as
+ * module parameter.
+ */
+ if ((reboot_bark_timeout * MSEC_PER_SEC) < bark_time)
+ reboot_bark_timeout = (bark_time / MSEC_PER_SEC);
has_vic = pdata->has_vic;
if (!pdata->has_secure) {
appsbark = 1;
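On reboot the new notifier reprograms the bark and bite registers in 32.768 kHz sleep-clock ticks, and the reboot timeout is clamped so it can never be shorter than the platform's normal bark time. The standalone sketch below reproduces that conversion, with WDT_HZ assumed to be the 32768 Hz sleep clock and an illustrative 11 s bark time, and prints the tick values that would be written.

/*
 * Standalone check of the reboot-time bark/bite programming added above.
 * WDT_HZ is assumed to be the 32.768 kHz sleep clock; the bark time and
 * module-parameter default shown here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define WDT_HZ		32768
#define MSEC_PER_SEC	1000

#define get_sclk_hz(t_ms)	(((t_ms) / 1000) * WDT_HZ)
#define get_reboot_bark_timeout(t_s, bark_ms) \
	(((t_s) * MSEC_PER_SEC) < (bark_ms) ? \
		get_sclk_hz(bark_ms) : get_sclk_hz((t_s) * MSEC_PER_SEC))

int main(void)
{
	unsigned int bark_time_ms = 11000;	/* illustrative platform bark time */
	int reboot_bark_timeout_s = 22;		/* module parameter default */

	uint64_t bark_ticks = get_reboot_bark_timeout(reboot_bark_timeout_s,
						      bark_time_ms);
	uint64_t bite_ticks = bark_ticks + 3 * WDT_HZ;	/* bite trails bark by 3 s */

	printf("bark = %llu ticks (%u s), bite = %llu ticks\n",
	       (unsigned long long)bark_ticks,
	       (unsigned int)(bark_ticks / WDT_HZ),
	       (unsigned long long)bite_ticks);
	return 0;
}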
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index 4685f02..d31f3c4 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -587,6 +587,34 @@
.release = seq_release,
};
+static int ocmem_timing_show(struct seq_file *f, void *dummy)
+{
+ unsigned i = 0;
+ for (i = OCMEM_GRAPHICS; i < OCMEM_CLIENT_MAX; i++) {
+ struct ocmem_zone *z = get_zone(i);
+ if (z && z->active == true)
+ seq_printf(f, "zone %s\t: alloc_delay:[max:%d, min:%d, total:%llu,cnt:%lu] free_delay:[max:%d, min:%d, total:%llu, cnt:%lu]\n",
+ get_name(z->owner), z->max_alloc_time,
+ z->min_alloc_time, z->total_alloc_time,
+ get_ocmem_stat(z, 1), z->max_free_time,
+ z->min_free_time, z->total_free_time,
+ get_ocmem_stat(z, 6));
+ }
+ return 0;
+}
+
+static int ocmem_timing_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ocmem_timing_show, inode->i_private);
+}
+
+static const struct file_operations timing_show_fops = {
+ .open = ocmem_timing_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static int ocmem_zone_init(struct platform_device *pdev)
{
@@ -656,6 +684,13 @@
zone->max_regions = 0;
INIT_LIST_HEAD(&zone->req_list);
zone->z_ops = z_ops;
+ zone->max_alloc_time = 0;
+ zone->min_alloc_time = 0xFFFFFFFF;
+ zone->total_alloc_time = 0;
+ zone->max_free_time = 0;
+ zone->min_free_time = 0xFFFFFFFF;
+ zone->total_free_time = 0;
+
if (part->p_tail) {
z_ops->allocate = allocate_tail;
z_ops->free = free_tail;
@@ -688,6 +723,12 @@
return -EBUSY;
}
+ if (!debugfs_create_file("timing", S_IRUGO, pdata->debug_node,
+ NULL, &timing_show_fops)) {
+ dev_err(dev, "Unable to create debugfs node for timing\n");
+ return -EBUSY;
+ }
+
dev_dbg(dev, "Total active zones = %d\n", active_zones);
return 0;
}
diff --git a/arch/arm/mach-msm/ocmem_api.c b/arch/arm/mach-msm/ocmem_api.c
index 16dd8b8..13c4c1a 100644
--- a/arch/arm/mach-msm/ocmem_api.c
+++ b/arch/arm/mach-msm/ocmem_api.c
@@ -105,6 +105,11 @@
{
bool can_block = false;
bool can_wait = true;
+ struct ocmem_buf *buffer;
+ struct timeval start_time;
+ struct timeval end_time;
+ unsigned int delay;
+ struct ocmem_zone *zone;
if (!check_id(client_id)) {
pr_err("ocmem: Invalid client id: %d\n", client_id);
@@ -129,8 +134,33 @@
return NULL;
}
- return __ocmem_allocate_range(client_id, size, size,
+ zone = get_zone(client_id);
+ if (!zone) {
+ pr_err("ocmem: Zone not found for client %d\n", client_id);
+ return NULL;
+ }
+
+ do_gettimeofday(&start_time);
+
+ buffer = __ocmem_allocate_range(client_id, size, size,
size, can_block, can_wait);
+
+ do_gettimeofday(&end_time);
+
+ if (!buffer)
+ return NULL;
+
+ delay = (end_time.tv_sec * USEC_PER_SEC + end_time.tv_usec)
+ - (start_time.tv_sec * USEC_PER_SEC + start_time.tv_usec);
+
+ if (delay > zone->max_alloc_time)
+ zone->max_alloc_time = delay;
+ if (delay < zone->min_alloc_time)
+ zone->min_alloc_time = delay;
+ zone->total_alloc_time += delay;
+ inc_ocmem_stat(zone, NR_SYNC_ALLOCATIONS);
+
+ return buffer;
}
EXPORT_SYMBOL(ocmem_allocate);
@@ -250,6 +280,12 @@
int ocmem_free(int client_id, struct ocmem_buf *buffer)
{
+ int rc;
+ struct timeval start_time;
+ struct timeval end_time;
+ unsigned int delay;
+ struct ocmem_zone *zone;
+
if (!check_id(client_id)) {
pr_err("ocmem: Invalid client id: %d\n", client_id);
return -EINVAL;
@@ -261,12 +297,38 @@
return -EINVAL;
}
+ zone = get_zone(client_id);
+ if (!zone) {
+ pr_err("ocmem: Zone not found for client %d\n", client_id);
+ return -EINVAL;
+ }
+
if (!buffer) {
pr_err("ocmem: Invalid buffer\n");
return -EINVAL;
}
- return __ocmem_free(client_id, buffer);
+ do_gettimeofday(&start_time);
+
+ rc = __ocmem_free(client_id, buffer);
+
+ do_gettimeofday(&end_time);
+
+ if (rc < 0)
+ return rc;
+
+ delay = (end_time.tv_sec * USEC_PER_SEC + end_time.tv_usec)
+ - (start_time.tv_sec * USEC_PER_SEC + start_time.tv_usec);
+
+ if (delay > zone->max_free_time)
+ zone->max_free_time = delay;
+ if (delay < zone->min_free_time)
+ zone->min_free_time = delay;
+ zone->total_free_time += delay;
+ inc_ocmem_stat(zone, NR_FREES);
+
+ return rc;
+
}
EXPORT_SYMBOL(ocmem_free);
@@ -473,6 +535,7 @@
}
return process_quota(client_id);
}
+EXPORT_SYMBOL(get_max_quota);
/* Synchronous eviction/restore calls */
/* Only a single eviction or restoration is allowed */
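Both ocmem_allocate() and ocmem_free() now bracket the underlying operation with do_gettimeofday() and fold the microsecond delta into the per-zone min/max/total counters that the new debugfs "timing" node reports. The userspace sketch below shows the same bookkeeping pattern around an arbitrary operation, with clock_gettime() standing in for do_gettimeofday().

/*
 * Userspace sketch of the per-zone latency bookkeeping added above:
 * time an operation, then fold the microsecond delta into min/max/total.
 * clock_gettime() stands in for the kernel's do_gettimeofday().
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct zone_timing {
	unsigned int max_alloc_time;	/* microseconds */
	unsigned int min_alloc_time;
	uint64_t total_alloc_time;
};

static void record_alloc_delay(struct zone_timing *z, unsigned int delay_us)
{
	if (delay_us > z->max_alloc_time)
		z->max_alloc_time = delay_us;
	if (delay_us < z->min_alloc_time)
		z->min_alloc_time = delay_us;
	z->total_alloc_time += delay_us;
}

int main(void)
{
	struct zone_timing zone = { .min_alloc_time = 0xFFFFFFFF };
	struct timespec start, end;
	unsigned int delay_us;

	clock_gettime(CLOCK_MONOTONIC, &start);
	usleep(1500);			/* stand-in for __ocmem_allocate_range() */
	clock_gettime(CLOCK_MONOTONIC, &end);

	delay_us = (unsigned int)((end.tv_sec - start.tv_sec) * 1000000L +
				  (end.tv_nsec - start.tv_nsec) / 1000);
	record_alloc_delay(&zone, delay_us);

	printf("alloc delay: %u us (min %u, max %u, total %llu)\n",
	       delay_us, zone.min_alloc_time, zone.max_alloc_time,
	       (unsigned long long)zone.total_alloc_time);
	return 0;
}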
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index 6305abc..c2ba6c1 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -528,8 +528,8 @@
msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);
/* PARF programming */
- writel_relaxed(0x282828, dev->parf + PCIE20_PARF_PCS_DEEMPH);
- writel_relaxed(0x7F7F, dev->parf + PCIE20_PARF_PCS_SWING);
+ writel_relaxed(dev->parf_deemph, dev->parf + PCIE20_PARF_PCS_DEEMPH);
+ writel_relaxed(dev->parf_swing, dev->parf + PCIE20_PARF_PCS_SWING);
writel_relaxed((4<<24), dev->parf + PCIE20_PARF_CONFIG_BITS);
/* ensure that hardware registers the PARF configuration */
wmb();
@@ -621,6 +621,8 @@
msm_pcie_dev.gpio = pdata->gpio;
msm_pcie_dev.wake_n = pdata->wake_n;
msm_pcie_dev.vreg_n = pdata->vreg_n;
+ msm_pcie_dev.parf_deemph = pdata->parf_deemph;
+ msm_pcie_dev.parf_swing = pdata->parf_swing;
msm_pcie_dev.vreg = msm_pcie_vreg_info;
msm_pcie_dev.clk = msm_pcie_clk_info;
msm_pcie_dev.res = msm_pcie_res_info;
diff --git a/arch/arm/mach-msm/pcie.h b/arch/arm/mach-msm/pcie.h
index 31371c2..051e475 100644
--- a/arch/arm/mach-msm/pcie.h
+++ b/arch/arm/mach-msm/pcie.h
@@ -71,6 +71,8 @@
uint32_t wake_n;
uint32_t vreg_n;
+ uint32_t parf_deemph;
+ uint32_t parf_swing;
};
extern uint32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev);
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index 4e8674c..475e8a1 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -36,9 +36,9 @@
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <mach/msm_iomap.h>
+#include <mach/ramdump.h>
#include "peripheral-loader.h"
-#include "ramdump.h"
#define pil_err(desc, fmt, ...) \
dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
@@ -290,10 +290,12 @@
static void pil_dump_segs(const struct pil_priv *priv)
{
struct pil_seg *seg;
+ phys_addr_t seg_h_paddr;
list_for_each_entry(seg, &priv->segs, list) {
- pil_info(priv->desc, "%d: %#08zx %#08lx\n", seg->num,
- seg->paddr, seg->paddr + seg->sz);
+ seg_h_paddr = seg->paddr + seg->sz;
+ pil_info(priv->desc, "%d: %pa %pa\n", seg->num,
+ &seg->paddr, &seg_h_paddr);
}
}
@@ -322,7 +324,7 @@
return 0;
}
}
- pil_err(priv->desc, "entry address %08zx not within range\n", entry);
+ pil_err(priv->desc, "entry address %pa not within range\n", &entry);
pil_dump_segs(priv);
return -EADDRNOTAVAIL;
}
@@ -335,6 +337,14 @@
unsigned int mask;
size_t size = max_addr - min_addr;
+ /* Don't reallocate due to fragmentation concerns, just sanity check */
+ if (priv->region) {
+ if (WARN(priv->region_end - priv->region_start < size,
+ "Can't reuse PIL memory, too small\n"))
+ return -ENOMEM;
+ return 0;
+ }
+
if (!ion) {
WARN_ON_ONCE("No ION client, can't support relocation\n");
return -ENOMEM;
@@ -471,9 +481,6 @@
writeq(0, &priv->info->start);
writel_relaxed(0, &priv->info->size);
- if (priv->region)
- ion_free(ion, priv->region);
- priv->region = NULL;
list_for_each_entry_safe(p, tmp, &priv->segs, list) {
list_del(&p->list);
kfree(p);
@@ -484,7 +491,8 @@
static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
{
- int ret = 0, count, paddr;
+ int ret = 0, count;
+ phys_addr_t paddr;
char fw_name[30];
const struct firmware *fw = NULL;
const u8 *data;
@@ -661,8 +669,13 @@
release_firmware(fw);
out:
up_read(&pil_pm_rwsem);
- if (ret)
+ if (ret) {
+ if (priv->region) {
+ ion_free(ion, priv->region);
+ priv->region = NULL;
+ }
pil_release_mmap(desc);
+ }
return ret;
}
EXPORT_SYMBOL(pil_boot);
diff --git a/arch/arm/mach-msm/peripheral-loader.h b/arch/arm/mach-msm/peripheral-loader.h
index ff10fe5..5aeeaf3 100644
--- a/arch/arm/mach-msm/peripheral-loader.h
+++ b/arch/arm/mach-msm/peripheral-loader.h
@@ -55,7 +55,8 @@
int (*init_image)(struct pil_desc *pil, const u8 *metadata,
size_t size);
int (*mem_setup)(struct pil_desc *pil, phys_addr_t addr, size_t size);
- int (*verify_blob)(struct pil_desc *pil, u32 phy_addr, size_t size);
+ int (*verify_blob)(struct pil_desc *pil, phys_addr_t phy_addr,
+ size_t size);
int (*proxy_vote)(struct pil_desc *pil);
int (*auth_and_reset)(struct pil_desc *pil);
void (*proxy_unvote)(struct pil_desc *pil);
diff --git a/arch/arm/mach-msm/pil-dsps.c b/arch/arm/mach-msm/pil-dsps.c
index 65d60d6..df5ea35 100644
--- a/arch/arm/mach-msm/pil-dsps.c
+++ b/arch/arm/mach-msm/pil-dsps.c
@@ -21,10 +21,10 @@
#include <mach/subsystem_restart.h>
#include <mach/msm_smsm.h>
+#include <mach/ramdump.h>
#include "peripheral-loader.h"
#include "scm-pas.h"
-#include "ramdump.h"
#define PPSS_RESET 0x2594
#define PPSS_RESET_PROC_RESET 0x2
diff --git a/arch/arm/mach-msm/pil-gss.c b/arch/arm/mach-msm/pil-gss.c
index c9e2e0d..65f86bc 100644
--- a/arch/arm/mach-msm/pil-gss.c
+++ b/arch/arm/mach-msm/pil-gss.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,11 +29,11 @@
#include <mach/msm_bus_board.h>
#include <mach/msm_bus.h>
#include <mach/subsystem_restart.h>
+#include <mach/ramdump.h>
#include "peripheral-loader.h"
#include "scm-pas.h"
#include "smd_private.h"
-#include "ramdump.h"
#define GSS_CSR_AHB_CLK_SEL 0x0
#define GSS_CSR_RESET 0x4
@@ -203,7 +203,7 @@
{
struct gss_data *drv = dev_get_drvdata(pil->dev);
void __iomem *base = drv->base;
- unsigned long start_addr = pil_get_entry_addr(pil);
+ phys_addr_t start_addr = pil_get_entry_addr(pil);
void __iomem *cbase = drv->cbase;
int ret;
diff --git a/arch/arm/mach-msm/pil-modem.c b/arch/arm/mach-msm/pil-modem.c
index e95fae8..8398206 100644
--- a/arch/arm/mach-msm/pil-modem.c
+++ b/arch/arm/mach-msm/pil-modem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,11 +24,11 @@
#include <mach/subsystem_restart.h>
#include <mach/msm_smsm.h>
+#include <mach/ramdump.h>
#include "modem_notifier.h"
#include "peripheral-loader.h"
#include "scm-pas.h"
-#include "ramdump.h"
#define MARM_BOOT_CONTROL 0x0010
#define MARM_RESET 0x2BD4
@@ -93,7 +93,7 @@
{
u32 reg;
const struct modem_data *drv = dev_get_drvdata(pil->dev);
- unsigned long start_addr = pil_get_entry_addr(pil);
+ phys_addr_t start_addr = pil_get_entry_addr(pil);
/* Put modem AHB0,1,2 clocks into reset */
writel_relaxed(BIT(0) | BIT(1), drv->cbase + MAHB0_SFAB_PORT_RESET);
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index edaa60c..cf29cf1 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -29,10 +29,10 @@
#include <mach/subsystem_restart.h>
#include <mach/msm_smsm.h>
+#include <mach/ramdump.h>
#include "peripheral-loader.h"
#include "scm-pas.h"
-#include "ramdump.h"
#define PRONTO_PMU_COMMON_GDSCR 0x24
#define PRONTO_PMU_COMMON_GDSCR_SW_COLLAPSE BIT(0)
@@ -123,7 +123,7 @@
int rc;
struct pronto_data *drv = dev_get_drvdata(pil->dev);
void __iomem *base = drv->base;
- unsigned long start_addr = pil_get_entry_addr(pil);
+ phys_addr_t start_addr = pil_get_entry_addr(pil);
/* Deassert reset to subsystem and wait for propagation */
reg = readl_relaxed(drv->reset_base);
@@ -410,6 +410,15 @@
int ret, err_fatal_gpio, irq;
uint32_t regval;
+ int clk_ready = of_get_named_gpio(pdev->dev.of_node,
+ "qcom,gpio-proxy-unvote", 0);
+ if (clk_ready < 0)
+ return clk_ready;
+
+ clk_ready = gpio_to_irq(clk_ready);
+ if (clk_ready < 0)
+ return clk_ready;
+
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
return -ENOMEM;
@@ -460,6 +469,7 @@
desc->dev = &pdev->dev;
desc->owner = THIS_MODULE;
desc->proxy_timeout = 10000;
+ desc->proxy_unvote_irq = clk_ready;
if (pas_supported(PAS_WCNSS) > 0) {
desc->ops = &pil_pronto_ops_trusted;
diff --git a/arch/arm/mach-msm/pil-q6v3.c b/arch/arm/mach-msm/pil-q6v3.c
index 66adc2b..a369878 100644
--- a/arch/arm/mach-msm/pil-q6v3.c
+++ b/arch/arm/mach-msm/pil-q6v3.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,8 +23,8 @@
#include <mach/subsystem_restart.h>
#include <mach/scm.h>
+#include <mach/ramdump.h>
-#include "ramdump.h"
#include "peripheral-loader.h"
#include "scm-pas.h"
@@ -116,7 +116,7 @@
{
u32 reg;
struct q6v3_data *drv = dev_get_drvdata(pil->dev);
- unsigned long start_addr = pil_get_entry_addr(pil);
+ phys_addr_t start_addr = pil_get_entry_addr(pil);
/* Put Q6 into reset */
reg = readl_relaxed(drv->cbase + LCC_Q6_FUNC);
diff --git a/arch/arm/mach-msm/pil-q6v4-lpass.c b/arch/arm/mach-msm/pil-q6v4-lpass.c
index 1387433..f05bcdb 100644
--- a/arch/arm/mach-msm/pil-q6v4-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v4-lpass.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012,2013 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,9 +24,9 @@
#include <mach/scm.h>
#include <mach/subsystem_restart.h>
#include <mach/subsystem_notif.h>
+#include <mach/ramdump.h>
#include "smd_private.h"
-#include "ramdump.h"
#include "sysmon.h"
#include "peripheral-loader.h"
#include "pil-q6v4.h"
diff --git a/arch/arm/mach-msm/pil-q6v4-mss.c b/arch/arm/mach-msm/pil-q6v4-mss.c
index f2b090f..1821ab1 100644
--- a/arch/arm/mach-msm/pil-q6v4-mss.c
+++ b/arch/arm/mach-msm/pil-q6v4-mss.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012,2013 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,9 +22,9 @@
#include <mach/subsystem_restart.h>
#include <mach/msm_smsm.h>
+#include <mach/ramdump.h>
#include "smd_private.h"
-#include "ramdump.h"
#include "peripheral-loader.h"
#include "pil-q6v4.h"
#include "scm-pas.h"
diff --git a/arch/arm/mach-msm/pil-q6v4.c b/arch/arm/mach-msm/pil-q6v4.c
index 29d14dd..51f7aa2 100644
--- a/arch/arm/mach-msm/pil-q6v4.c
+++ b/arch/arm/mach-msm/pil-q6v4.c
@@ -130,7 +130,7 @@
{
u32 reg, err;
const struct q6v4_data *drv = pil_to_q6v4_data(pil);
- unsigned long start_addr = pil_get_entry_addr(pil);
+ phys_addr_t start_addr = pil_get_entry_addr(pil);
/* Enable Q6 ACLK */
writel_relaxed(0x10, drv->aclk_reg);
diff --git a/arch/arm/mach-msm/pil-q6v5-lpass.c b/arch/arm/mach-msm/pil-q6v5-lpass.c
index ef13c34..04c1be3 100644
--- a/arch/arm/mach-msm/pil-q6v5-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v5-lpass.c
@@ -22,16 +22,17 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
+#include <linux/of_gpio.h>
#include <mach/clk.h>
#include <mach/subsystem_restart.h>
#include <mach/subsystem_notif.h>
#include <mach/scm.h>
+#include <mach/ramdump.h>
#include "peripheral-loader.h"
#include "pil-q6v5.h"
#include "scm-pas.h"
-#include "ramdump.h"
#include "sysmon.h"
#define QDSP6SS_RST_EVB 0x010
@@ -47,9 +48,11 @@
void *ramdump_dev;
int wdog_irq;
struct work_struct work;
- void *riva_notif_hdle;
+ void *wcnss_notif_hdle;
void *modem_notif_hdle;
int crash_shutdown;
+ unsigned int err_fatal_irq;
+ int force_stop_gpio;
};
#define subsys_to_drv(d) container_of(d, struct lpass_data, subsys_desc)
@@ -124,7 +127,7 @@
static int pil_lpass_reset(struct pil_desc *pil)
{
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
- unsigned long start_addr = pil_get_entry_addr(pil);
+ phys_addr_t start_addr = pil_get_entry_addr(pil);
int ret;
/* Deassert reset to subsystem and wait for propagation */
@@ -189,24 +192,19 @@
.shutdown = pil_lpass_shutdown_trusted,
};
-static int riva_notifier_cb(struct notifier_block *this, unsigned long code,
+static int wcnss_notifier_cb(struct notifier_block *this, unsigned long code,
void *ss_handle)
{
int ret;
- switch (code) {
- case SUBSYS_BEFORE_SHUTDOWN:
- pr_debug("%s: R-Notify: Shutdown started\n", __func__);
- ret = sysmon_send_event(SYSMON_SS_LPASS, "wcnss",
- SUBSYS_BEFORE_SHUTDOWN);
- if (ret < 0)
- pr_err("%s: sysmon_send_event error %d", __func__, ret);
- break;
- }
+ pr_debug("%s: W-Notify: event %lu\n", __func__, code);
+ ret = sysmon_send_event(SYSMON_SS_LPASS, "wcnss", code);
+ if (ret < 0)
+ pr_err("%s: sysmon_send_event error %d", __func__, ret);
return NOTIFY_DONE;
}
-static struct notifier_block rnb = {
- .notifier_call = riva_notifier_cb,
+static struct notifier_block wnb = {
+ .notifier_call = wcnss_notifier_cb,
};
static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
@@ -264,20 +262,19 @@
restart_adsp(drv);
}
-static void adsp_smsm_state_cb(void *data, uint32_t old_state,
- uint32_t new_state)
+static irqreturn_t adsp_err_fatal_intr_handler(int irq, void *dev_id)
{
- struct lpass_data *drv = data;
+ struct lpass_data *drv = dev_id;
- /* Ignore if we're the one that set SMSM_RESET */
+ /* Ignore if we're the one that set the force stop bit in the outbound
+ * entry
+ */
if (drv->crash_shutdown)
- return;
+ return IRQ_HANDLED;
- if (new_state & SMSM_RESET) {
- pr_err("%s: ADSP SMSM state changed to SMSM_RESET, new_state = %#x, old_state = %#x\n",
- __func__, new_state, old_state);
- restart_adsp(drv);
- }
+ pr_err("Fatal error on the ADSP!\n");
+ restart_adsp(drv);
+ return IRQ_HANDLED;
}
#define SCM_Q6_NMI_CMD 0x1
@@ -361,6 +358,7 @@
struct lpass_data *drv = subsys_to_lpass(subsys);
drv->crash_shutdown = 1;
+ gpio_set_value(drv->force_stop_gpio, 1);
send_q6_nmi();
}
@@ -393,7 +391,7 @@
struct q6v5_data *q6;
struct pil_desc *desc;
struct resource *res;
- int ret;
+ int ret, gpio_clk_ready;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -404,6 +402,23 @@
if (drv->wdog_irq < 0)
return drv->wdog_irq;
+ ret = gpio_to_irq(of_get_named_gpio(pdev->dev.of_node,
+ "qcom,gpio-err-fatal", 0));
+ if (ret < 0)
+ return ret;
+ drv->err_fatal_irq = ret;
+
+ ret = gpio_to_irq(of_get_named_gpio(pdev->dev.of_node,
+ "qcom,gpio-proxy-unvote", 0));
+ if (ret < 0)
+ return ret;
+ gpio_clk_ready = ret;
+
+ drv->force_stop_gpio = of_get_named_gpio(pdev->dev.of_node,
+ "qcom,gpio-force-stop", 0);
+ if (drv->force_stop_gpio < 0)
+ return drv->force_stop_gpio;
+
q6 = pil_q6v5_init(pdev);
if (IS_ERR(q6))
return PTR_ERR(q6);
@@ -412,6 +427,7 @@
desc = &q6->desc;
desc->owner = THIS_MODULE;
desc->proxy_timeout = PROXY_TIMEOUT_MS;
+ desc->proxy_unvote_irq = gpio_clk_ready;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
q6->restart_reg = devm_request_and_ioremap(&pdev->dev, res);
@@ -475,15 +491,17 @@
if (ret)
goto err_irq;
- ret = smsm_state_cb_register(SMSM_Q6_STATE, SMSM_RESET,
- adsp_smsm_state_cb, drv);
- if (ret < 0)
- goto err_smsm;
+ ret = devm_request_irq(&pdev->dev, drv->err_fatal_irq,
+ adsp_err_fatal_intr_handler,
+ IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev), drv);
+ if (ret)
+ goto err_irq;
- drv->riva_notif_hdle = subsys_notif_register_notifier("riva", &rnb);
- if (IS_ERR(drv->riva_notif_hdle)) {
- ret = PTR_ERR(drv->riva_notif_hdle);
- goto err_notif_riva;
+ drv->wcnss_notif_hdle = subsys_notif_register_notifier("wcnss", &wnb);
+ if (IS_ERR(drv->wcnss_notif_hdle)) {
+ ret = PTR_ERR(drv->wcnss_notif_hdle);
+ goto err_notif_wcnss;
}
drv->modem_notif_hdle = subsys_notif_register_notifier("modem", &mnb);
@@ -510,11 +528,8 @@
err_kobj:
kobject_put(lpass_status);
err_notif_modem:
- subsys_notif_unregister_notifier(drv->riva_notif_hdle, &rnb);
-err_notif_riva:
- smsm_state_cb_deregister(SMSM_Q6_STATE, SMSM_RESET,
- adsp_smsm_state_cb, drv);
-err_smsm:
+ subsys_notif_unregister_notifier(drv->wcnss_notif_hdle, &wnb);
+err_notif_wcnss:
err_irq:
subsys_unregister(drv->subsys);
err_subsys:
@@ -527,10 +542,8 @@
static int __devexit pil_lpass_driver_exit(struct platform_device *pdev)
{
struct lpass_data *drv = platform_get_drvdata(pdev);
- subsys_notif_unregister_notifier(drv->riva_notif_hdle, &rnb);
+ subsys_notif_unregister_notifier(drv->wcnss_notif_hdle, &wnb);
subsys_notif_unregister_notifier(drv->modem_notif_hdle, &mnb);
- smsm_state_cb_deregister(SMSM_Q6_STATE, SMSM_RESET,
- adsp_smsm_state_cb, drv);
subsys_unregister(drv->subsys);
destroy_ramdump_device(drv->ramdump_dev);
pil_desc_release(&drv->q6->desc);
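
[Annotation] The SMSM_RESET state callback is replaced here by a dedicated error-fatal interrupt: the ADSP raises a GPIO, the handler restarts the subsystem on the rising edge, and crash_shutdown asserts the force-stop GPIO in the other direction. A condensed sketch of the request side, assuming the IRQ was resolved from qcom,gpio-err-fatal as in the probe path above:

#include <linux/interrupt.h>
#include <linux/device.h>

/* Hypothetical restart hook standing in for restart_adsp(). */
static void example_restart(void *ctx) { }

static irqreturn_t example_err_fatal_handler(int irq, void *dev_id)
{
	/* dev_id carries the driver state; a real handler also checks a
	 * crash_shutdown flag so a self-initiated stop is not treated as
	 * a remote fatal error.
	 */
	example_restart(dev_id);
	return IRQ_HANDLED;
}

static int example_request_err_fatal(struct device *dev, unsigned int irq,
				     void *drv)
{
	return devm_request_irq(dev, irq, example_err_fatal_handler,
				IRQF_TRIGGER_RISING, dev_name(dev), drv);
}
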
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index 06de8cc..c1c3100 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -30,10 +30,10 @@
#include <mach/subsystem_restart.h>
#include <mach/clk.h>
#include <mach/msm_smsm.h>
+#include <mach/ramdump.h>
#include "peripheral-loader.h"
#include "pil-q6v5.h"
-#include "ramdump.h"
#include "sysmon.h"
/* Q6 Register Offsets */
@@ -243,7 +243,7 @@
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
struct platform_device *pdev = to_platform_device(pil->dev);
struct mba_data *mba = platform_get_drvdata(pdev);
- unsigned long start_addr = pil_get_entry_addr(pil);
+ phys_addr_t start_addr = pil_get_entry_addr(pil);
int ret;
/*
@@ -402,7 +402,7 @@
return ret;
}
-static int pil_mba_verify_blob(struct pil_desc *pil, u32 phy_addr,
+static int pil_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
size_t size)
{
struct mba_data *drv = dev_get_drvdata(pil->dev);
@@ -608,8 +608,15 @@
if (ret)
return ret;
ret = pil_boot(&drv->desc);
- if (ret)
+ if (ret) {
pil_shutdown(&drv->q6->desc);
+ /*
+ * We know now that the unvote interrupt is not coming.
+ * Remove the proxy votes immediately.
+ */
+ if (drv->q6->desc.proxy_unvote_irq)
+ pil_q6v5_mss_remove_proxy_votes(&drv->q6->desc);
+ }
return ret;
}
@@ -709,6 +716,15 @@
struct resource *res;
int ret;
+ int clk_ready = of_get_named_gpio(pdev->dev.of_node,
+ "qcom,gpio-proxy-unvote", 0);
+ if (clk_ready < 0)
+ return clk_ready;
+
+ clk_ready = gpio_to_irq(clk_ready);
+ if (clk_ready < 0)
+ return clk_ready;
+
q6 = pil_q6v5_init(pdev);
if (IS_ERR(q6))
return PTR_ERR(q6);
@@ -718,6 +734,7 @@
q6_desc->ops = &pil_mss_ops;
q6_desc->owner = THIS_MODULE;
q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
+ q6_desc->proxy_unvote_irq = clk_ready;
drv->self_auth = of_property_read_bool(pdev->dev.of_node,
"qcom,pil-self-auth");
@@ -781,6 +798,7 @@
mba_desc->ops = &pil_mba_ops;
mba_desc->owner = THIS_MODULE;
mba_desc->proxy_timeout = PROXY_TIMEOUT_MS;
+ mba_desc->proxy_unvote_irq = clk_ready;
ret = pil_desc_init(mba_desc);
if (ret)
diff --git a/arch/arm/mach-msm/pil-q6v5.c b/arch/arm/mach-msm/pil-q6v5.c
index 0263faf..c6add8f 100644
--- a/arch/arm/mach-msm/pil-q6v5.c
+++ b/arch/arm/mach-msm/pil-q6v5.c
@@ -46,7 +46,9 @@
#define Q6SS_CLK_ENA BIT(1)
/* QDSP6SS_PWR_CTL */
-#define Q6SS_L2DATA_SLP_NRET_N (BIT(0)|BIT(1)|BIT(2))
+#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
#define Q6SS_ETB_SLP_NRET_N BIT(17)
#define Q6SS_L2DATA_STBY_N BIT(18)
@@ -160,7 +162,8 @@
writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
/* Turn off Q6 memories */
- val &= ~(Q6SS_L2DATA_SLP_NRET_N | Q6SS_SLP_RET_N |
+ val &= ~(Q6SS_L2DATA_SLP_NRET_N_0 | Q6SS_L2DATA_SLP_NRET_N_1 |
+ Q6SS_L2DATA_SLP_NRET_N_2 | Q6SS_SLP_RET_N |
Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLP_NRET_N |
Q6SS_L2DATA_STBY_N);
writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
@@ -194,11 +197,19 @@
mb();
udelay(1);
- /* Turn on memories */
+ /*
+ * Turn on memories. L2 banks should be done individually
+ * to minimize inrush current.
+ */
val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
- val |= Q6SS_L2DATA_SLP_NRET_N | Q6SS_SLP_RET_N |
- Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLP_NRET_N |
- Q6SS_L2DATA_STBY_N;
+ val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+ Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_2;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_1;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_0;
writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
/* Remove IO clamp */
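
[Annotation] Turning the three L2 data banks back on one register write at a time bounds the inrush current: writes to the same QDSP6SS_PWR_CTL register are issued in order, so only one bank switches on per step. The same pattern in isolation, with assumed register offset and bit names:

#include <linux/io.h>
#include <linux/bitops.h>

#define EX_PWR_CTL	0x030	/* assumed register offset */
#define EX_L2DATA_BANK0	BIT(0)
#define EX_L2DATA_BANK1	BIT(1)
#define EX_L2DATA_BANK2	BIT(2)

/* Power the L2 data banks up sequentially rather than in one write. */
static void example_power_up_l2(void __iomem *base)
{
	u32 val = readl_relaxed(base + EX_PWR_CTL);

	val |= EX_L2DATA_BANK2;
	writel_relaxed(val, base + EX_PWR_CTL);
	val |= EX_L2DATA_BANK1;
	writel_relaxed(val, base + EX_PWR_CTL);
	val |= EX_L2DATA_BANK0;
	writel_relaxed(val, base + EX_PWR_CTL);
}
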
diff --git a/arch/arm/mach-msm/pil-riva.c b/arch/arm/mach-msm/pil-riva.c
index 33301de..d72b848 100644
--- a/arch/arm/mach-msm/pil-riva.c
+++ b/arch/arm/mach-msm/pil-riva.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,10 +23,10 @@
#include <linux/wcnss_wlan.h>
#include <mach/subsystem_restart.h>
+#include <mach/ramdump.h>
#include "peripheral-loader.h"
#include "scm-pas.h"
-#include "ramdump.h"
#include "smd_private.h"
#define RIVA_PMU_A2XB_CFG 0xB8
@@ -134,7 +134,7 @@
u32 reg, sel;
struct riva_data *drv = dev_get_drvdata(pil->dev);
void __iomem *base = drv->base;
- unsigned long start_addr = pil_get_entry_addr(pil);
+ phys_addr_t start_addr = pil_get_entry_addr(pil);
void __iomem *cbase = drv->cbase;
bool use_cxo = cxo_is_needed(drv);
diff --git a/arch/arm/mach-msm/pil-venus.c b/arch/arm/mach-msm/pil-venus.c
index b0150d4..4e9e54b 100644
--- a/arch/arm/mach-msm/pil-venus.c
+++ b/arch/arm/mach-msm/pil-venus.c
@@ -30,10 +30,10 @@
#include <mach/subsystem_restart.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_bus.h>
+#include <mach/ramdump.h>
#include "peripheral-loader.h"
#include "scm-pas.h"
-#include "ramdump.h"
/* VENUS WRAPPER registers */
#define VENUS_WRAPPER_HW_VERSION 0x0
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index bc40130..f4ca4e3 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -282,10 +282,9 @@
pr_debug("Starting secondary CPU %d\n", cpu);
if (per_cpu(cold_boot_done, cpu) == false) {
- if (machine_is_msm8974_sim() || machine_is_mpq8092_sim())
+ if (of_board_is_sim())
release_secondary_sim(0xf9088000, cpu);
- else if (!machine_is_msm8974_rumi() &&
- !machine_is_msmzinc_sim())
+ else if (!of_board_is_rumi())
msm8974_release_secondary(0xf9088000, cpu);
per_cpu(cold_boot_done, cpu) = true;
@@ -298,9 +297,9 @@
pr_debug("Starting secondary CPU %d\n", cpu);
if (per_cpu(cold_boot_done, cpu) == false) {
- if (machine_is_msm8226_sim() || machine_is_msm8610_sim())
+ if (of_board_is_sim())
release_secondary_sim(0xf9088000, cpu);
- else if (!machine_is_msm8610_rumi())
+ else if (!of_board_is_rumi())
arm_release_secondary(0xf9088000, cpu);
per_cpu(cold_boot_done, cpu) = true;
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index 3c50bc6..a39e38b 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/regulator/krait-regulator.h>
+#include <linux/cpu.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/system.h>
@@ -56,7 +57,6 @@
#include <mach/event_timer.h>
#define CREATE_TRACE_POINTS
#include "trace_msm_low_power.h"
-
#define SCM_L2_RETENTION (0x2)
#define SCM_CMD_TERMINATE_PC (0x2)
@@ -64,7 +64,6 @@
(container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)
#define SCLK_HZ (32768)
-#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)
#define NUM_OF_COUNTERS 3
#define MAX_BUF_SIZE 512
@@ -127,9 +126,9 @@
static bool msm_pm_use_sync_timer;
static struct msm_pm_cp15_save_data cp15_data;
static bool msm_pm_retention_calls_tz;
-static uint32_t msm_pm_max_sleep_time;
static bool msm_no_ramp_down_pc;
static struct msm_pm_sleep_status_data *msm_pm_slp_sts;
+static bool msm_pm_pc_reset_timer;
static int msm_pm_get_pc_mode(struct device_node *node,
const char *key, uint32_t *pc_mode_val)
@@ -404,39 +403,6 @@
return;
}
-/*
- * Convert time from nanoseconds to slow clock ticks, then cap it to the
- * specified limit
- */
-static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
-{
- do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
- return (time_ns > limit) ? limit : time_ns;
-}
-
-/*
- * Set the sleep time for suspend. 0 means infinite sleep time.
- */
-void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
-{
- if (max_sleep_time_ns == 0) {
- msm_pm_max_sleep_time = 0;
- } else {
- msm_pm_max_sleep_time =
- (uint32_t)msm_pm_convert_and_cap_time(
- max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);
-
- if (msm_pm_max_sleep_time == 0)
- msm_pm_max_sleep_time = 1;
- }
-
- if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
- pr_info("%s: Requested %lld ns Giving %u sclk ticks\n",
- __func__, max_sleep_time_ns,
- msm_pm_max_sleep_time);
-}
-EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
-
static void msm_pm_save_cpu_reg(void)
{
int i;
@@ -525,9 +491,14 @@
if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
pr_info("CPU%u: %s: program vector to %p\n",
cpu, __func__, entry);
+ if (from_idle && msm_pm_pc_reset_timer)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
collapsed = msm_pm_collapse();
+ if (from_idle && msm_pm_pc_reset_timer)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+
msm_pm_boot_config_after_pc(cpu);
if (collapsed) {
@@ -863,9 +834,8 @@
int exit_stat = -1;
enum msm_pm_sleep_mode sleep_mode;
void *msm_pm_idle_rs_limits = NULL;
- int sleep_delay = 1;
+ uint32_t sleep_delay = 1;
int ret = -ENODEV;
- int64_t timer_expiration = 0;
int notify_rpm = false;
bool timer_halted = false;
@@ -885,10 +855,8 @@
if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
notify_rpm = true;
- timer_expiration = msm_pm_timer_enter_idle();
+ sleep_delay = (uint32_t)msm_pm_timer_enter_idle();
- sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
- timer_expiration, MSM_PM_SLEEP_TICK_LIMIT);
if (sleep_delay == 0) /* 0 would mean infinite time */
sleep_delay = 1;
}
@@ -1075,6 +1043,7 @@
void *rs_limits = NULL;
int ret = -ENODEV;
uint32_t power;
+ uint32_t msm_pm_max_sleep_time = 0;
if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
pr_info("%s: power collapse\n", __func__);
@@ -1084,8 +1053,8 @@
if (msm_pm_sleep_time_override > 0) {
int64_t ns = NSEC_PER_SEC *
(int64_t) msm_pm_sleep_time_override;
- msm_pm_set_max_sleep_time(ns);
- msm_pm_sleep_time_override = 0;
+ do_div(ns, NSEC_PER_SEC / SCLK_HZ);
+ msm_pm_max_sleep_time = (uint32_t) ns;
}
if (pm_sleep_ops.lowest_limits)
@@ -1229,18 +1198,11 @@
},
};
-static int __devinit msm_pm_init(void)
+static int __init msm_pm_setup_saved_state(void)
{
pgd_t *pc_pgd;
pmd_t *pmd;
unsigned long pmdval;
- enum msm_pm_time_stats_id enable_stats[] = {
- MSM_PM_STAT_IDLE_WFI,
- MSM_PM_STAT_RETENTION,
- MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
- MSM_PM_STAT_IDLE_POWER_COLLAPSE,
- MSM_PM_STAT_SUSPEND,
- };
unsigned long exit_phys;
/* Page table for cores to come back up safely. */
@@ -1280,12 +1242,63 @@
clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
virt_to_phys(&msm_pm_pc_pgd));
+ return 0;
+}
+core_initcall(msm_pm_setup_saved_state);
+
+static void setup_broadcast_timer(void *arg)
+{
+ unsigned long reason = (unsigned long)arg;
+ int cpu = smp_processor_id();
+
+ reason = reason ?
+ CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+ clockevents_notify(reason, &cpu);
+}
+
+static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
+ unsigned long action, void *hcpu)
+{
+ int hotcpu = (unsigned long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ smp_call_function_single(hotcpu, setup_broadcast_timer,
+ (void *)true, 1);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block setup_broadcast_notifier = {
+ .notifier_call = setup_broadcast_cpuhp_notify,
+};
+
+static int __init msm_pm_init(void)
+{
+ enum msm_pm_time_stats_id enable_stats[] = {
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_RETENTION,
+ MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ };
msm_pm_mode_sysfs_add();
msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
suspend_set_ops(&msm_pm_ops);
hrtimer_init(&pm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
msm_cpuidle_init();
+ if (msm_pm_pc_reset_timer) {
+ get_cpu();
+ smp_call_function_many(cpu_online_mask, setup_broadcast_timer,
+ (void *)true, 1);
+ put_cpu();
+ register_cpu_notifier(&setup_broadcast_notifier);
+ }
+
return 0;
}
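
[Annotation] When qcom,pc-resets-timer is set, the per-CPU local timer does not survive power collapse, so the CPU hands its timer duty to the broadcast clockevent around msm_pm_collapse() and re-arms broadcast mode on each CPU as it comes online. A reduced sketch of the same clockevents wiring (pre-4.x notifier API, as used in this file):

#include <linux/clockchips.h>
#include <linux/smp.h>

/* Enter/exit the broadcast clockevent around a power-collapse window. */
static void example_collapse_with_broadcast(int cpu)
{
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
	/* ... power collapse of this CPU happens here ... */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}

/* Switch the calling CPU's broadcast mode on or off; suitable for use
 * with smp_call_function_single()/smp_call_function_many().
 */
static void example_setup_broadcast(void *arg)
{
	int cpu = smp_processor_id();
	unsigned long on = (unsigned long)arg;

	clockevents_notify(on ? CLOCK_EVT_NOTIFY_BROADCAST_ON
			      : CLOCK_EVT_NOTIFY_BROADCAST_OFF, &cpu);
}
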
@@ -1470,6 +1483,10 @@
key = "qcom,saw-turns-off-pll";
msm_no_ramp_down_pc = of_property_read_bool(pdev->dev.of_node,
key);
+
+ key = "qcom,pc-resets-timer";
+ msm_pm_pc_reset_timer = of_property_read_bool(
+ pdev->dev.of_node, key);
}
if (pdata_local.cp15_data.reg_data &&
diff --git a/arch/arm/mach-msm/pm-data.c b/arch/arm/mach-msm/pm-data.c
index ccc2519..249032f 100644
--- a/arch/arm/mach-msm/pm-data.c
+++ b/arch/arm/mach-msm/pm-data.c
@@ -46,7 +46,7 @@
.idle_supported = 0,
.suspend_supported = 1,
.idle_enabled = 0,
- .suspend_enabled = 0,
+ .suspend_enabled = 1,
},
[MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
@@ -74,7 +74,7 @@
.idle_supported = 0,
.suspend_supported = 1,
.idle_enabled = 0,
- .suspend_enabled = 0,
+ .suspend_enabled = 1,
},
[MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
@@ -102,7 +102,7 @@
.idle_supported = 0,
.suspend_supported = 1,
.idle_enabled = 0,
- .suspend_enabled = 0,
+ .suspend_enabled = 1,
},
[MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
diff --git a/arch/arm/mach-msm/qdsp6v2/Makefile b/arch/arm/mach-msm/qdsp6v2/Makefile
index 88de98b..6bd3efb 100644
--- a/arch/arm/mach-msm/qdsp6v2/Makefile
+++ b/arch/arm/mach-msm/qdsp6v2/Makefile
@@ -12,7 +12,7 @@
obj-$(CONFIG_FB_MSM_HDMI_MSM_PANEL) += lpa_if_hdmi.o
endif
obj-$(CONFIG_MSM_QDSP6_APR) += apr.o apr_v1.o apr_tal.o q6core.o dsp_debug.o
-obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o q6core.o dsp_debug.o
+obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o dsp_debug.o
ifdef CONFIG_ARCH_MSM9615
obj-y += audio_acdb.o
obj-y += rtac.o
@@ -23,7 +23,7 @@
obj-$(CONFIG_MSM_QDSP6_CODECS) += audio_mp3.o audio_amrnb.o audio_amrwb.o audio_amrwbplus.o audio_evrc.o audio_qcelp.o amrwb_in.o
obj-$(CONFIG_MSM_QDSP6V2_CODECS) += aac_in.o qcelp_in.o evrc_in.o amrnb_in.o audio_utils.o
obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_wma.o audio_wmapro.o audio_aac.o audio_multi_aac.o audio_utils_aio.o
-obj-$(CONFIG_MSM_QDSP6V2_CODECS) += q6audio_v2.o q6audio_v2_aio.o
+obj-$(CONFIG_MSM_QDSP6V2_CODECS) += q6audio_v2.o q6audio_v2_aio.o msm_audio_ion.o
obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_mp3.o audio_amrnb.o audio_amrwb.o audio_amrwbplus.o audio_evrc.o audio_qcelp.o amrwb_in.o
obj-$(CONFIG_MSM_ADSP_LOADER) += adsp-loader.o
obj-$(CONFIG_MSM_ULTRASOUND_A) += ultrasound/version_a/
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
index e6b9549..64ee880 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
@@ -24,6 +24,7 @@
#include <linux/atomic.h>
#include <asm/ioctls.h>
#include <linux/debugfs.h>
+#include <linux/msm_audio_ion.h>
#include "audio_utils_aio.h"
#ifdef CONFIG_USE_DEV_CTRL_VOLUME
#include <mach/qdsp6v2/audio_dev_ctl.h>
@@ -374,8 +375,7 @@
list_for_each_safe(ptr, next, &audio->ion_region_queue) {
region = list_entry(ptr, struct audio_aio_ion_region, list);
		list_del(&region->list);
- ion_unmap_kernel(audio->client, region->handle);
- ion_free(audio->client, region->handle);
+ msm_audio_ion_free_legacy(audio->client, region->handle);
kfree(region);
}
@@ -557,7 +557,7 @@
audio_aio_disable(audio);
audio_aio_unmap_ion_region(audio);
audio_aio_reset_ion_region(audio);
- ion_client_destroy(audio->client);
+ msm_audio_ion_client_destroy(audio->client);
audio->event_abort = 1;
wake_up(&audio->event_wait);
audio_aio_reset_event_queue(audio);
@@ -769,14 +769,13 @@
static int audio_aio_ion_add(struct q6audio_aio *audio,
struct msm_audio_ion_info *info)
{
- ion_phys_addr_t paddr;
- size_t len;
- unsigned long kvaddr;
+ ion_phys_addr_t paddr = 0;
+ size_t len = 0;
struct audio_aio_ion_region *region;
int rc = -EINVAL;
- struct ion_handle *handle;
+ struct ion_handle *handle = NULL;
unsigned long ionflag;
- void *temp_ptr;
+ void *kvaddr = NULL;
pr_debug("%s[%p]:\n", __func__, audio);
region = kmalloc(sizeof(*region), GFP_KERNEL);
@@ -786,31 +785,14 @@
goto end;
}
- handle = ion_import_dma_buf(audio->client, info->fd);
- if (IS_ERR_OR_NULL(handle)) {
- pr_err("%s: could not get handle of the given fd\n", __func__);
+ rc = msm_audio_ion_import_legacy("Audio_Dec_Client", audio->client,
+ &handle, info->fd, &ionflag,
+ 0, &paddr, &len, &kvaddr);
+ if (rc) {
+ pr_err("%s: msm audio ion alloc failed\n", __func__);
goto import_error;
}
- rc = ion_handle_get_flags(audio->client, handle, &ionflag);
- if (rc) {
- pr_err("%s: could not get flags for the handle\n", __func__);
- goto flag_error;
- }
-
- temp_ptr = ion_map_kernel(audio->client, handle);
- if (IS_ERR_OR_NULL(temp_ptr)) {
- pr_err("%s: could not get virtual address\n", __func__);
- goto map_error;
- }
- kvaddr = (unsigned long)temp_ptr;
-
- rc = ion_phys(audio->client, handle, &paddr, &len);
- if (rc) {
- pr_err("%s: could not get physical address\n", __func__);
- goto ion_error;
- }
-
rc = audio_aio_ion_check(audio, info->vaddr, len);
if (rc < 0) {
pr_err("%s: audio_aio_ion_check failed\n", __func__);
@@ -821,7 +803,7 @@
region->vaddr = info->vaddr;
region->fd = info->fd;
region->paddr = paddr;
- region->kvaddr = kvaddr;
+ region->kvaddr = (unsigned long)kvaddr;
region->len = len;
region->ref_cnt = 0;
pr_debug("%s[%p]:add region paddr %lx vaddr %p, len %lu kvaddr %lx\n",
@@ -839,10 +821,7 @@
mmap_error:
	list_del(&region->list);
ion_error:
- ion_unmap_kernel(audio->client, handle);
-map_error:
-flag_error:
- ion_free(audio->client, handle);
+ msm_audio_ion_free_legacy(audio->client, handle);
import_error:
kfree(region);
end:
@@ -879,8 +858,8 @@
__func__, audio);
			list_del(&region->list);
- ion_unmap_kernel(audio->client, region->handle);
- ion_free(audio->client, region->handle);
+ msm_audio_ion_free_legacy(audio->client,
+ region->handle);
kfree(region);
rc = 0;
break;
@@ -1167,7 +1146,8 @@
break;
}
}
- audio->client = msm_ion_client_create(UINT_MAX, "Audio_Dec_Client");
+ audio->client = msm_audio_ion_client_create(UINT_MAX,
+ "Audio_Dec_Client");
if (IS_ERR_OR_NULL(audio->client)) {
pr_err("Unable to create ION client\n");
rc = -EACCES;
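
[Annotation] audio_utils_aio.c now delegates the import/map/phys sequence to the new msm_audio_ion helpers, so one call replaces ion_import_dma_buf() + ion_handle_get_flags() + ion_map_kernel() + ion_phys(), and one call undoes it. A sketch of the caller side, assuming the helper signatures added in msm_audio_ion.c below:

#include <linux/msm_audio_ion.h>

/* Map a userspace-provided dma-buf fd for audio use through the shared
 * helper; paddr, len and kvaddr come back filled in on success.
 */
static int example_map_audio_buf(struct ion_client *client, int fd,
				 struct ion_handle **handle,
				 ion_phys_addr_t *paddr, size_t *len,
				 void **kvaddr)
{
	unsigned long flags;

	return msm_audio_ion_import_legacy("Audio_Dec_Client", client,
					   handle, fd, &flags, 0,
					   paddr, len, kvaddr);
}

/* Matching teardown; the legacy path does not destroy the client. */
static void example_unmap_audio_buf(struct ion_client *client,
				    struct ion_handle *handle)
{
	msm_audio_ion_free_legacy(client, handle);
}
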
diff --git a/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c b/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
new file mode 100644
index 0000000..c9bc3d7
--- /dev/null
+++ b/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <mach/subsystem_restart.h>
+#include <mach/qdsp6v2/apr.h>
+#include <linux/of_device.h>
+#include <linux/msm_audio_ion.h>
+
+struct msm_audio_ion_private {
+ bool smmu_enabled;
+ /*u32 group_id;*/
+ /*u32 domain_id;*/
+};
+
+static struct msm_audio_ion_private msm_audio_ion_data = {0,};
+
+
+static int msm_audio_ion_get_phys(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+
+
+int msm_audio_ion_alloc(const char *name, struct ion_client **client,
+ struct ion_handle **handle, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = 0;
+
+ *client = msm_audio_ion_client_create(UINT_MAX, name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client for AUDIO failed\n", __func__);
+ goto err;
+ }
+
+ *handle = ion_alloc(*client, bufsz, SZ_4K, (0x1<<ION_AUDIO_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ pr_err("%s: ION memory allocation for AUDIO failed\n",
+ __func__);
+ goto err_ion_client;
+ }
+
+ rc = msm_audio_ion_get_phys(*client, *handle, paddr, pa_len);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_ion_handle;
+ }
+
+ /*Need to add condition SMMU enable or not */
+ *vaddr = ion_map_kernel(*client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ goto err_ion_handle;
+ }
+
+ if (bufsz != 0)
+ memset((void *)*vaddr, 0, bufsz);
+
+ return 0;
+
+err_ion_handle:
+ ion_free(*client, *handle);
+err_ion_client:
+ msm_audio_ion_client_destroy(*client);
+err:
+ return -EINVAL;
+
+}
+
+int msm_audio_ion_import(const char *name, struct ion_client **client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = 0;
+
+ *client = msm_audio_ion_client_create(UINT_MAX, name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client for AUDIO failed\n", __func__);
+ goto err;
+ }
+
+ /* name should be audio_acdb_client or Audio_Dec_Client,
+ bufsz should be 0 and fd shouldn't be 0 as of now
+ */
+ *handle = ion_import_dma_buf(*client, fd);
+ pr_err("%s: DMA Buf name=%s, fd=%d handle=%p\n", __func__,
+ name, fd, *handle);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ goto err_ion_handle;
+ }
+
+ if (ionflag != NULL) {
+ rc = ion_handle_get_flags(*client, *handle, ionflag);
+ if (rc) {
+ pr_err("%s: could not get flags for the handle\n",
+ __func__);
+ goto err_ion_handle;
+ }
+ }
+
+ rc = msm_audio_ion_get_phys(*client, *handle, paddr, pa_len);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_ion_handle;
+ }
+
+ /*Need to add condition SMMU enable or not */
+ *vaddr = ion_map_kernel(*client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ goto err_ion_handle;
+ }
+
+ if (bufsz != 0)
+ memset((void *)*vaddr, 0, bufsz);
+
+ return 0;
+
+err_ion_handle:
+ ion_free(*client, *handle);
+ msm_audio_ion_client_destroy(*client);
+err:
+ return -EINVAL;
+
+}
+
+int msm_audio_ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ /* add condition for SMMU enabled */
+ ion_unmap_kernel(client, handle);
+
+ ion_free(client, handle);
+ msm_audio_ion_client_destroy(client);
+ return 0;
+}
+
+
+bool msm_audio_ion_is_smmu_available(void)
+{
+ return msm_audio_ion_data.smmu_enabled;
+}
+
+/* move to static section again */
+struct ion_client *msm_audio_ion_client_create(unsigned int heap_mask,
+ const char *name)
+{
+ pr_debug("%s: smmu_enabled = %d\n", __func__,
+ msm_audio_ion_data.smmu_enabled);
+
+
+ return msm_ion_client_create(heap_mask, name);
+}
+
+
+void msm_audio_ion_client_destroy(struct ion_client *client)
+{
+ pr_debug("%s: smmu_enabled = %d\n", __func__,
+ msm_audio_ion_data.smmu_enabled);
+
+ ion_client_destroy(client);
+}
+
+int msm_audio_ion_import_legacy(const char *name, struct ion_client *client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = 0;
+ /* client is already created for legacy and given*/
+ /* name should be audio_acdb_client or Audio_Dec_Client,
+ bufsz should be 0 and fd shouldn't be 0 as of now
+ */
+ *handle = ion_import_dma_buf(client, fd);
+ pr_err("%s: DMA Buf name=%s, fd=%d handle=%p\n", __func__,
+ name, fd, *handle);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ goto err_ion_handle;
+ }
+
+ if (ionflag != NULL) {
+ rc = ion_handle_get_flags(client, *handle, ionflag);
+ if (rc) {
+ pr_err("%s: could not get flags for the handle\n",
+ __func__);
+ goto err_ion_handle;
+ }
+ }
+
+ rc = msm_audio_ion_get_phys(client, *handle, paddr, pa_len);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_ion_handle;
+ }
+
+ /*Need to add condition SMMU enable or not */
+ *vaddr = ion_map_kernel(client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ goto err_ion_handle;
+ }
+
+ if (bufsz != 0)
+ memset((void *)*vaddr, 0, bufsz);
+
+ return 0;
+
+err_ion_handle:
+ ion_free(client, *handle);
+ return -EINVAL;
+
+}
+
+int msm_audio_ion_free_legacy(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ /* To add condition for SMMU enabled */
+ ion_unmap_kernel(client, handle);
+
+ ion_free(client, handle);
+	/* no client_destroy in legacy */
+ return 0;
+}
+
+
+static int msm_audio_ion_get_phys(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ int rc = 0;
+ pr_debug("%s: smmu_enabled = %d\n", __func__,
+ msm_audio_ion_data.smmu_enabled);
+
+ if (msm_audio_ion_data.smmu_enabled) {
+ /* SMMU enabled case ion_map_iommu()*/
+ } else {
+ /* SMMU is disabled*/
+ rc = ion_phys(client, handle, addr, len);
+ }
+ pr_debug("%s: addr= 0x%p, len= %d\n", __func__, addr, *len);
+ return rc;
+}
+
+
+
+
+static int msm_audio_ion_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ const char *msm_audio_ion_dt = "qcom,smmu-enabled";
+ bool smmu_enabled;
+
+ if (pdev->dev.of_node == NULL) {
+ pr_err("%s: device tree is not found\n", __func__);
+ msm_audio_ion_data.smmu_enabled = 0;
+ return 0;
+ }
+
+ smmu_enabled = of_property_read_bool(pdev->dev.of_node,
+ msm_audio_ion_dt);
+ msm_audio_ion_data.smmu_enabled = smmu_enabled;
+
+ pr_debug("%s: SMMU-Enabled = %d\n", __func__, smmu_enabled);
+ return rc;
+}
+
+static int msm_audio_ion_remove(struct platform_device *pdev)
+{
+ pr_debug("%s: msm audio ion is unloaded\n", __func__);
+
+ return 0;
+}
+
+static const struct of_device_id msm_audio_ion_dt_match[] = {
+ { .compatible = "qcom,msm-audio-ion" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);
+
+static struct platform_driver msm_audio_ion_driver = {
+ .driver = {
+ .name = "msm-audio-ion",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_audio_ion_dt_match,
+ },
+ .probe = msm_audio_ion_probe,
+ .remove = __devexit_p(msm_audio_ion_remove),
+};
+
+static int __init msm_audio_ion_init(void)
+{
+ return platform_driver_register(&msm_audio_ion_driver);
+}
+module_init(msm_audio_ion_init);
+
+static void __exit msm_audio_ion_exit(void)
+{
+ platform_driver_unregister(&msm_audio_ion_driver);
+}
+module_exit(msm_audio_ion_exit);
+
+MODULE_DESCRIPTION("MSM Audio ION module");
+MODULE_LICENSE("GPL v2");
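
[Annotation] The non-legacy allocation path bundles client creation, ion_alloc() from the audio heap, the physical-address lookup and the kernel mapping; the caller only keeps the returned client/handle pair for msm_audio_ion_free(). A sketch of the expected usage, with everything other than the helpers themselves assumed:

#include <linux/msm_audio_ion.h>

/* Allocate and map a zeroed buffer for the audio DSP. */
static int example_alloc_dsp_buffer(size_t bytes,
				    struct ion_client **client,
				    struct ion_handle **handle,
				    ion_phys_addr_t *phys, void **virt)
{
	size_t pa_len = 0;
	int rc;

	rc = msm_audio_ion_alloc("example_audio_client", client, handle,
				 bytes, phys, &pa_len, virt);
	if (rc)
		return rc;

	/* ... hand *phys to the DSP, use *virt from the kernel ... */
	return 0;
}
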
diff --git a/arch/arm/mach-msm/qdsp6v2/q6core.c b/arch/arm/mach-msm/qdsp6v2/q6core.c
index f23ba67..fd699df 100644
--- a/arch/arm/mach-msm/qdsp6v2/q6core.c
+++ b/arch/arm/mach-msm/qdsp6v2/q6core.c
@@ -69,12 +69,12 @@
switch (payload1[0]) {
case ADSP_CMD_SET_POWER_COLLAPSE_STATE:
- pr_info("Cmd = ADSP_CMD_SET_POWER_COLLAPSE_STATE"
- " status[0x%x]\n", payload1[1]);
+ pr_info("Cmd = ADSP_CMD_SET_POWER_COLLAPSE_STATE status[0x%x]\n",
+ payload1[1]);
break;
case ADSP_CMD_REMOTE_BUS_BW_REQUEST:
- pr_info("%s: cmd = ADSP_CMD_REMOTE_BUS_BW_REQUEST"
- " status = 0x%x\n", __func__, payload1[1]);
+ pr_info("%s: cmd = ADSP_CMD_REMOTE_BUS_BW_REQUEST status = 0x%x\n",
+ __func__, payload1[1]);
bus_bw_resp_received = 1;
wake_up(&bus_bw_req_wait);
@@ -160,10 +160,9 @@
core_handle_q = apr_register("ADSP", "CORE",
aprv2_core_fn_q, 0xFFFFFFFF, NULL);
}
- pr_info("Open_q %p\n", core_handle_q);
- if (core_handle_q == NULL) {
+ pr_debug("Open_q %p\n", core_handle_q);
+ if (core_handle_q == NULL)
pr_err("%s: Unable to register CORE\n", __func__);
- }
}
int core_req_bus_bandwith(u16 bus_id, u32 ab_bps, u32 ib_bps)
@@ -352,7 +351,7 @@
pc.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
pc.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
- sizeof(uint32_t));;
+ sizeof(uint32_t));
pc.hdr.src_port = 0;
pc.hdr.dest_port = 0;
pc.hdr.token = 0;
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
index 91ea1dc..d37a325 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,8 +27,8 @@
#include "usfcdev.h"
/* The driver version*/
-#define DRV_VERSION "1.4.1"
-#define USF_VERSION_ID 0x0141
+#define DRV_VERSION "1.4.2"
+#define USF_VERSION_ID 0x0142
/* Standard timeout in the asynchronous ops */
#define USF_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */
@@ -430,12 +430,14 @@
{
int rc = 0;
uint16_t data_map_size = 0;
+ uint16_t min_map_size = 0;
if ((usf_xx == NULL) ||
(config == NULL))
return -EINVAL;
data_map_size = sizeof(usf_xx->encdec_cfg.cfg_common.data_map);
+ min_map_size = min(data_map_size, config->port_cnt);
if (config->client_name != NULL) {
if (strncpy_from_user(usf_xx->client_name,
@@ -454,20 +456,13 @@
__func__, config->buf_num, config->stream_format,
config->port_cnt, config->params_data_size);
- pr_debug("%s: p_id[0]=%d, p_id[1]=%d, p_id[2]=%d, p_id[3]=%d\n",
+ pr_debug("%s: id[0]=%d, id[1]=%d, id[2]=%d, id[3]=%d, id[4]=%d\n",
__func__,
config->port_id[0],
config->port_id[1],
config->port_id[2],
- config->port_id[3]);
-
- if (data_map_size < config->port_cnt) {
- pr_err("%s: number of supported ports:%d < requested:%d\n",
- __func__,
- data_map_size,
- config->port_cnt);
- return -EINVAL;
- }
+ config->port_id[3],
+ config->port_id[4]);
/* q6usm allocation & configuration */
usf_xx->buffer_size = config->buf_size;
@@ -481,7 +476,8 @@
usf_xx->encdec_cfg.cfg_common.ch_cfg = config->port_cnt;
memcpy((void *)&usf_xx->encdec_cfg.cfg_common.data_map,
(void *)config->port_id,
- config->port_cnt);
+ min_map_size);
+
if (rc) {
pr_err("%s: ports offsets copy failure\n", __func__);
return -EINVAL;
@@ -897,8 +893,10 @@
rc = q6usm_us_client_buf_alloc(OUT, usf_xx->usc,
usf_xx->buffer_size,
usf_xx->buffer_count);
- if (rc)
+ if (rc) {
+ (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
return rc;
+ }
rc = q6usm_enc_cfg_blk(usf_xx->usc,
&usf_xx->encdec_cfg);
@@ -908,7 +906,9 @@
&config_tx.input_info);
}
- if (!rc)
+ if (rc)
+ (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+ else
usf_xx->usf_state = USF_CONFIGURED_STATE;
return rc;
@@ -948,13 +948,17 @@
usf_xx->usc,
usf_xx->buffer_size,
usf_xx->buffer_count);
- if (rc)
+ if (rc) {
+ (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
return rc;
+ }
}
rc = q6usm_dec_cfg_blk(usf_xx->usc,
&usf_xx->encdec_cfg);
- if (!rc) {
+ if (rc)
+ (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE);
+ else {
init_waitqueue_head(&usf_xx->wait);
usf_xx->usf_state = USF_CONFIGURED_STATE;
}
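
[Annotation] Rather than rejecting a port_cnt larger than the data_map, the copy length is now clamped with min(), so a stale or hostile userspace value can no longer size the memcpy past the destination. The same bounded-copy pattern in isolation (names assumed):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Copy at most dst_size bytes of port ids, whatever userspace claims. */
static void example_copy_port_map(u8 *dst, u16 dst_size,
				  const u8 *src, u16 requested)
{
	u16 n = min(dst_size, requested);

	memcpy(dst, src, n);
}
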
diff --git a/arch/arm/mach-msm/ramdump.c b/arch/arm/mach-msm/ramdump.c
index 7f09a56..be21025 100644
--- a/arch/arm/mach-msm/ramdump.c
+++ b/arch/arm/mach-msm/ramdump.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,7 +25,7 @@
#include <linux/elf.h>
#include <linux/wait.h>
-#include "ramdump.h"
+#include <mach/ramdump.h>
#define RAMDUMP_WAIT_MSECS 120000
diff --git a/arch/arm/mach-msm/remote_spinlock.c b/arch/arm/mach-msm/remote_spinlock.c
index 94923a0..62e3e05 100644
--- a/arch/arm/mach-msm/remote_spinlock.c
+++ b/arch/arm/mach-msm/remote_spinlock.c
@@ -143,6 +143,7 @@
}
/* end dekkers implementation ----------------------------------------------- */
+#ifndef CONFIG_THUMB2_KERNEL
/* swp implementation ------------------------------------------------------- */
static void __raw_remote_swp_spin_lock(raw_remote_spinlock_t *lock)
{
@@ -194,6 +195,7 @@
: "cc");
}
/* end swp implementation --------------------------------------------------- */
+#endif
/* ldrex implementation ----------------------------------------------------- */
static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
@@ -431,6 +433,7 @@
current_ops.owner = __raw_remote_dek_spin_owner;
is_hw_lock_type = 0;
break;
+#ifndef CONFIG_THUMB2_KERNEL
case SWP_MODE:
current_ops.lock = __raw_remote_swp_spin_lock;
current_ops.unlock = __raw_remote_swp_spin_unlock;
@@ -439,6 +442,7 @@
current_ops.owner = __raw_remote_gen_spin_owner;
is_hw_lock_type = 0;
break;
+#endif
case LDREX_MODE:
current_ops.lock = __raw_remote_ex_spin_lock;
current_ops.unlock = __raw_remote_ex_spin_unlock;
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index b84ade9..6ed80f6 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -673,7 +673,7 @@
static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
{
struct list_head *ptr;
- struct msm_rpm_wait_data *elem;
+ struct msm_rpm_wait_data *elem = NULL;
unsigned long flags;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
@@ -739,7 +739,7 @@
static void msm_rpm_process_ack(uint32_t msg_id, int errno)
{
struct list_head *ptr;
- struct msm_rpm_wait_data *elem;
+ struct msm_rpm_wait_data *elem = NULL;
unsigned long flags;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
diff --git a/arch/arm/mach-msm/rpm.c b/arch/arm/mach-msm/rpm.c
index 5128b44..f9ac00f 100644
--- a/arch/arm/mach-msm/rpm.c
+++ b/arch/arm/mach-msm/rpm.c
@@ -310,7 +310,7 @@
unsigned long flags;
uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
uint32_t ctx_mask_ack = 0;
- uint32_t sel_masks_ack[SEL_MASK_SIZE];
+ uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};
int i;
msm_rpm_request_irq_mode.req = req;
@@ -369,7 +369,7 @@
unsigned long flags;
uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
uint32_t ctx_mask_ack = 0;
- uint32_t sel_masks_ack[SEL_MASK_SIZE];
+ uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};
struct irq_chip *irq_chip, *err_chip;
int i;
diff --git a/arch/arm/mach-msm/rpm_log.c b/arch/arm/mach-msm/rpm_log.c
index a2c74a5..53d5752 100644
--- a/arch/arm/mach-msm/rpm_log.c
+++ b/arch/arm/mach-msm/rpm_log.c
@@ -203,11 +203,14 @@
struct msm_rpm_log_buffer *buf;
buf = file->private_data;
- pdata = buf->pdata;
- if (!pdata)
- return -EINVAL;
+
if (!buf)
return -ENOMEM;
+
+ pdata = buf->pdata;
+
+ if (!pdata)
+ return -EINVAL;
if (!buf->data)
return -ENOMEM;
if (!bufu || count < 0)
diff --git a/arch/arm/mach-msm/rpm_stats.c b/arch/arm/mach-msm/rpm_stats.c
index 176c3de..cb8ed19 100644
--- a/arch/arm/mach-msm/rpm_stats.c
+++ b/arch/arm/mach-msm/rpm_stats.c
@@ -63,6 +63,8 @@
u32 count;
u64 last_entered_at;
u64 last_exited_at;
+ u64 accumulated;
+ u32 reserved[4];
};
static inline u64 get_time_in_sec(u64 counter)
@@ -84,6 +86,7 @@
char stat_type[5];
u64 time_in_last_mode;
u64 time_since_last_mode;
+ u64 actual_last_sleep;
stat_type[4] = 0;
memcpy(stat_type, &data->stat_type, sizeof(u32));
@@ -92,12 +95,13 @@
time_in_last_mode = get_time_in_msec(time_in_last_mode);
time_since_last_mode = arch_counter_get_cntpct() - data->last_exited_at;
time_since_last_mode = get_time_in_sec(time_since_last_mode);
+ actual_last_sleep = get_time_in_msec(data->accumulated);
return snprintf(buf , buflength,
"RPM Mode:%s\n\t count:%d\n time in last mode(msec):%llu\n"
- "time since last mode(sec):%llu\n",
+ "time since last mode(sec):%llu\n actual last sleep(msec):%llu\n",
stat_type, data->count, time_in_last_mode,
- time_since_last_mode);
+ time_since_last_mode, actual_last_sleep);
}
static inline u32 msm_rpmstats_read_long_register_v2(void __iomem *regbase,
@@ -140,6 +144,9 @@
i, offsetof(struct msm_rpm_stats_data_v2,
last_exited_at));
+ data.accumulated = msm_rpmstats_read_quad_register_v2(reg,
+ i, offsetof(struct msm_rpm_stats_data_v2,
+ accumulated));
length += msm_rpmstats_append_data_to_buf(prvdata->buf + length,
&data, sizeof(prvdata->buf) - length);
prvdata->read_idx++;
diff --git a/arch/arm/mach-msm/scm-pas.c b/arch/arm/mach-msm/scm-pas.c
index b7271bb..f48b538 100644
--- a/arch/arm/mach-msm/scm-pas.c
+++ b/arch/arm/mach-msm/scm-pas.c
@@ -46,55 +46,6 @@
static struct clk *scm_clocks[NUM_CLKS];
-int pas_init_image(enum pas_id id, const u8 *metadata, size_t size)
-{
- int ret;
- struct pas_init_image_req {
- u32 proc;
- u32 image_addr;
- } request;
- u32 scm_ret = 0;
- /* Make memory physically contiguous */
- void *mdata_buf = kmemdup(metadata, size, GFP_KERNEL);
-
- if (!mdata_buf)
- return -ENOMEM;
-
- request.proc = id;
- request.image_addr = virt_to_phys(mdata_buf);
-
- ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
- sizeof(request), &scm_ret, sizeof(scm_ret));
- kfree(mdata_buf);
-
- if (ret)
- return ret;
- return scm_ret;
-}
-EXPORT_SYMBOL(pas_init_image);
-
-int pas_mem_setup(enum pas_id id, u32 start_addr, u32 len)
-{
- int ret;
- struct pas_init_image_req {
- u32 proc;
- u32 start_addr;
- u32 len;
- } request;
- u32 scm_ret = 0;
-
- request.proc = id;
- request.start_addr = start_addr;
- request.len = len;
-
- ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
- sizeof(request), &scm_ret, sizeof(scm_ret));
- if (ret)
- return ret;
- return scm_ret;
-}
-EXPORT_SYMBOL(pas_mem_setup);
-
static struct msm_bus_paths scm_pas_bw_tbl[] = {
{
.vectors = (struct msm_bus_vectors[]){
@@ -176,18 +127,78 @@
mutex_unlock(&scm_pas_bw_mutex);
}
+int pas_init_image(enum pas_id id, const u8 *metadata, size_t size)
+{
+ int ret;
+ struct pas_init_image_req {
+ u32 proc;
+ u32 image_addr;
+ } request;
+ u32 scm_ret = 0;
+ void *mdata_buf;
+
+ ret = scm_pas_enable_bw();
+ if (ret)
+ return ret;
+
+ /* Make memory physically contiguous */
+ mdata_buf = kmemdup(metadata, size, GFP_KERNEL);
+
+ if (!mdata_buf)
+ return -ENOMEM;
+
+ request.proc = id;
+ request.image_addr = virt_to_phys(mdata_buf);
+
+ ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
+ sizeof(request), &scm_ret, sizeof(scm_ret));
+
+ kfree(mdata_buf);
+ scm_pas_disable_bw();
+
+ if (ret)
+ return ret;
+ return scm_ret;
+}
+EXPORT_SYMBOL(pas_init_image);
+
+int pas_mem_setup(enum pas_id id, u32 start_addr, u32 len)
+{
+ int ret;
+ struct pas_init_image_req {
+ u32 proc;
+ u32 start_addr;
+ u32 len;
+ } request;
+ u32 scm_ret = 0;
+
+ request.proc = id;
+ request.start_addr = start_addr;
+ request.len = len;
+
+ ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
+ sizeof(request), &scm_ret, sizeof(scm_ret));
+ if (ret)
+ return ret;
+ return scm_ret;
+}
+EXPORT_SYMBOL(pas_mem_setup);
+
int pas_auth_and_reset(enum pas_id id)
{
- int ret, bus_ret;
+ int ret;
u32 proc = id, scm_ret = 0;
- bus_ret = scm_pas_enable_bw();
+ ret = scm_pas_enable_bw();
+ if (ret)
+ return ret;
+
ret = scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &proc,
sizeof(proc), &scm_ret, sizeof(scm_ret));
if (ret)
scm_ret = ret;
- if (!bus_ret)
- scm_pas_disable_bw();
+
+ scm_pas_disable_bw();
return scm_ret;
}
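
[Annotation] pas_init_image() and pas_auth_and_reset() now vote for crypto bus bandwidth before issuing the SCM call, drop the vote afterwards, and propagate a failed vote instead of ignoring it. The bracketing pattern, sketched around this file's static helpers:

/* Wrap a secure-world call in a bus-bandwidth vote; if the vote cannot
 * be placed the call is not attempted at all.
 */
static int example_call_with_bw(int (*do_scm_call)(void *), void *arg)
{
	int ret = scm_pas_enable_bw();

	if (ret)
		return ret;

	ret = do_scm_call(arg);
	scm_pas_disable_bw();
	return ret;
}
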
@@ -251,7 +262,7 @@
rate = clk_round_rate(scm_clocks[CORE_CLK_SRC], 1);
clk_set_rate(scm_clocks[CORE_CLK_SRC], rate);
- if (cpu_is_msm8974() || cpu_is_msm8226()) {
+ if (cpu_is_msm8974() || cpu_is_msm8226() || cpu_is_msm8610()) {
scm_pas_bw_tbl[0].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
scm_pas_bw_tbl[1].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
} else {
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index d070efa..6e05177 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -347,6 +347,43 @@
}
EXPORT_SYMBOL(scm_call_atomic2);
+/**
+ * scm_call_atomic3() - Send an atomic SCM command with three arguments
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3)
+{
+ int context_id;
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 3);
+ register u32 r1 asm("r1") = (u32)&context_id;
+ register u32 r2 asm("r2") = arg1;
+ register u32 r3 asm("r3") = arg2;
+ register u32 r4 asm("r4") = arg3;
+
+ asm volatile(
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r0")
+ __asmeq("%2", "r1")
+ __asmeq("%3", "r2")
+ __asmeq("%4", "r3")
+ __asmeq("%5", "r4")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0 @ switch to secure world\n"
+ : "=r" (r0)
+ : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4));
+ return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic3);
+
s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
{
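
[Annotation] scm_call_atomic3() follows the existing atomic2 variant: the command word is built with SCM_ATOMIC(svc, cmd, 3), the three arguments travel in r2..r4, and the secure-world return value comes back in r0. A hedged caller sketch (the service and command IDs are placeholders):

#include <mach/scm.h>	/* declares the scm_call_atomic* variants */

#define EX_SVC_ID	0x1	/* placeholder service id */
#define EX_CMD_ID	0x2	/* placeholder command id */

/* Issue a three-argument atomic SCM call; only for commands known to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 example_atomic3(u32 a, u32 b, u32 c)
{
	return scm_call_atomic3(EX_SVC_ID, EX_CMD_ID, a, b, c);
}
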
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index 40ef20e..3590e6b 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -34,10 +34,10 @@
#include <linux/kfifo.h>
#include <linux/wakelock.h>
#include <linux/notifier.h>
-#include <linux/sort.h>
#include <linux/suspend.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+
#include <mach/msm_smd.h>
#include <mach/msm_iomap.h>
#include <mach/system.h>
@@ -45,11 +45,13 @@
#include <mach/socinfo.h>
#include <mach/proc_comm.h>
#include <mach/msm_ipc_logging.h>
+#include <mach/ramdump.h>
+#include <mach/board.h>
+
#include <asm/cacheflush.h>
#include "smd_private.h"
#include "modem_notifier.h"
-#include "ramdump.h"
#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
|| defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
@@ -74,6 +76,7 @@
#define SMSM_SNAPSHOT_CNT 64
#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
#define RSPIN_INIT_WAIT_MS 1000
+#define SMD_FIFO_FULL_RESERVE 4
uint32_t SMSM_NUM_ENTRIES = 8;
uint32_t SMSM_NUM_HOSTS = 3;
@@ -181,7 +184,7 @@
static struct smem_area *smem_areas;
static struct ramdump_segment *smem_ramdump_segments;
static void *smem_ramdump_dev;
-static void *smem_range_check(phys_addr_t base, unsigned offset);
+static void *smem_phys_to_virt(phys_addr_t base, unsigned offset);
static void *smd_dev;
struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
@@ -241,6 +244,17 @@
#define SMx_POWER_INFO(x...) do { } while (0)
#endif
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ *
+ * @type: type to check for overflow
+ * @a: left value to use
+ * @b: right value to use
+ * @returns: true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+ (((type)~0 - (a)) < (b) ? true : false)
+
static unsigned last_heap_free = 0xffffffff;
static inline void smd_write_intr(unsigned int val,
@@ -1051,8 +1065,16 @@
/* how many bytes we are free to write */
static int smd_stream_write_avail(struct smd_channel *ch)
{
- return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
- ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
+ int bytes_avail;
+
+ bytes_avail = ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
+ ch->half_ch->get_tail(ch->send)) & ch->fifo_mask) + 1;
+
+ if (bytes_avail < SMD_FIFO_FULL_RESERVE)
+ bytes_avail = 0;
+ else
+ bytes_avail -= SMD_FIFO_FULL_RESERVE;
+ return bytes_avail;
}
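
[Annotation] Write-availability is now computed from the full ring size and then shrunk by SMD_FIFO_FULL_RESERVE, so a writer can never advance the head all the way back to the tail; keeping a few bytes unused lets the remote side tell a full ring apart from an empty one. The arithmetic in isolation, as a sketch with assumed names:

/* Bytes the writer may still place in a power-of-two ring of size
 * mask + 1, holding back `reserve` bytes so head never catches tail.
 */
static unsigned example_write_avail(unsigned head, unsigned tail,
				    unsigned mask, unsigned reserve)
{
	unsigned avail = mask - ((head - tail) & mask) + 1;

	return (avail < reserve) ? 0 : avail - reserve;
}
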
static int smd_packet_read_avail(struct smd_channel *ch)
@@ -1174,7 +1196,18 @@
}
}
-/* provide a pointer and length to next free space in the fifo */
+/**
+ * ch_write_buffer() - Provide a pointer and length for the next segment of
+ * free space in the FIFO.
+ * @ch: channel
+ * @ptr: Address to pointer for the next segment write
+ * @returns: Maximum size that can be written until the FIFO is either full
+ * or the end of the FIFO has been reached.
+ *
+ * The returned pointer and length are passed to memcpy, so the next segment is
+ * defined as either the space available between the read index (tail) and the
+ * write index (head) or the space available to the end of the FIFO.
+ */
static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
unsigned head = ch->half_ch->get_head(ch->send);
@@ -1182,10 +1215,11 @@
*ptr = (void *) (ch->send_data + head);
if (head < tail) {
- return tail - head - 1;
+ return tail - head - SMD_FIFO_FULL_RESERVE;
} else {
- if (tail == 0)
- return ch->fifo_size - head - 1;
+ if (tail < SMD_FIFO_FULL_RESERVE)
+ return ch->fifo_size + tail - head
+ - SMD_FIFO_FULL_RESERVE;
else
return ch->fifo_size - head;
}
@@ -2109,6 +2143,29 @@
}
EXPORT_SYMBOL(smd_write_end);
+int smd_write_segment_avail(smd_channel_t *ch)
+{
+ int n;
+
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+ if (!ch->is_pkt_ch) {
+ pr_err("%s: non-packet channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ n = smd_stream_write_avail(ch);
+
+ /* pkt hdr already written, no need to reserve space for it */
+ if (ch->pending_pkt_sz)
+ return n;
+
+ return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
+}
+EXPORT_SYMBOL(smd_write_segment_avail);
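
[Annotation] smd_write_segment_avail() exposes the stream availability to packet-channel clients, subtracting room for the packet header unless one is already pending, so a client can check whether the next segment of a partially written packet will fit. A hedged usage sketch, assuming the existing smd_write_start()/smd_write_segment() packet API:

#include <mach/msm_smd.h>

/* Write the next chunk of an in-progress packet only if it fits. */
static int example_write_next_segment(smd_channel_t *ch, void *buf, int len)
{
	int avail = smd_write_segment_avail(ch);

	if (avail < 0)
		return avail;		/* bad or non-packet channel */
	if (avail < len)
		return -EAGAIN;		/* retry when write space opens up */

	return smd_write_segment(ch, buf, len, 0);
}
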
+
int smd_read(smd_channel_t *ch, void *data, int len)
{
if (!ch) {
@@ -2354,37 +2411,107 @@
/* -------------------------------------------------------------------------- */
-/*
- * Shared Memory Range Check
- *
- * Takes a physical address and an offset and checks if the resulting physical
- * address would fit into one of the aux smem regions. If so, returns the
- * corresponding virtual address. Otherwise returns NULL. Expects the array
- * of smem regions to be in ascending physical address order.
+/**
+ * smem_phys_to_virt() - Convert a physical base and offset to virtual address
*
* @base: physical base address to check
* @offset: offset from the base to get the final address
+ * @returns: virtual SMEM address; NULL for failure
+ *
+ * Takes a physical address and an offset and checks if the resulting physical
+ * address would fit into one of the smem regions. If so, returns the
+ * corresponding virtual address. Otherwise returns NULL.
*/
-static void *smem_range_check(phys_addr_t base, unsigned offset)
+static void *smem_phys_to_virt(phys_addr_t base, unsigned offset)
{
int i;
phys_addr_t phys_addr;
resource_size_t size;
+ if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
+ return NULL;
+
+ if (!smem_areas) {
+ /*
+ * Early boot - no area configuration yet, so default
+ * to using the main memory region.
+ *
+ * To remove the MSM_SHARED_RAM_BASE and the static
+ * mapping of SMEM in the future, add dump_stack()
+ * to identify the early callers of smem_get_entry()
+ * (which calls this function) and replace those calls
+ * with a new function that knows how to lookup the
+ * SMEM base address before SMEM has been probed.
+ */
+ phys_addr = msm_shared_ram_phys;
+ size = MSM_SHARED_RAM_SIZE;
+
+ if (base >= phys_addr && base + offset < phys_addr + size) {
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)MSM_SHARED_RAM_BASE, offset)) {
+ pr_err("%s: overflow %p %x\n", __func__,
+ MSM_SHARED_RAM_BASE, offset);
+ return NULL;
+ }
+
+ return MSM_SHARED_RAM_BASE + offset;
+ } else {
+ return NULL;
+ }
+ }
for (i = 0; i < num_smem_areas; ++i) {
phys_addr = smem_areas[i].phys_addr;
size = smem_areas[i].size;
- if (base < phys_addr)
- return NULL;
- if (base > phys_addr + size)
+
+ if (base < phys_addr || base + offset >= phys_addr + size)
continue;
- if (base >= phys_addr && base + offset < phys_addr + size)
- return smem_areas[i].virt_addr + offset;
+
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_areas[i].virt_addr, offset)) {
+ pr_err("%s: overflow %p %x\n", __func__,
+ smem_areas[i].virt_addr, offset);
+ return NULL;
+ }
+
+ return smem_areas[i].virt_addr + offset;
}
return NULL;
}
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
+ * @returns: Physical address (or NULL if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+ phys_addr_t phys_addr = 0;
+ int i;
+ void *vend;
+
+ if (!smem_areas)
+ return phys_addr;
+
+ for (i = 0; i < num_smem_areas; ++i) {
+ vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);
+
+ if (smem_address >= smem_areas[i].virt_addr &&
+ smem_address < vend) {
+ phys_addr = smem_address - smem_areas[i].virt_addr;
+ phys_addr += smem_areas[i].phys_addr;
+ break;
+ }
+ }
+
+ return phys_addr;
+}
+EXPORT_SYMBOL(smem_virt_to_phys);
+
/* smem_alloc returns the pointer to smem item if it is already allocated.
* Otherwise, it returns NULL.
*/
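
[Annotation] smem_virt_to_phys() is the inverse lookup: given a pointer returned by smem_alloc()/smem_get_entry() it walks the configured SMEM areas and rebuilds the physical address, intended only for handing an SMEM item to a DMA engine. A hedged sketch of that hand-off:

#include <mach/msm_smd.h>	/* assumed home of the smem_* prototypes */

/* Hypothetical: give a DMA engine the physical address of an SMEM item. */
static int example_smem_to_dma(unsigned smem_id, unsigned size,
			       phys_addr_t *out_phys)
{
	void *vaddr = smem_alloc(smem_id, size);

	if (!vaddr)
		return -ENOMEM;

	*out_phys = smem_virt_to_phys(vaddr);
	return *out_phys ? 0 : -EFAULT;
}
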
@@ -2458,14 +2585,15 @@
remote_spin_lock_irqsave(&remote_spinlock, flags);
/* toc is in device memory and cannot be speculatively accessed */
if (toc[id].allocated) {
+ phys_addr_t phys_base;
+
*size = toc[id].size;
barrier();
- if (!(toc[id].reserved & BASE_ADDR_MASK))
- ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
- else
- ret = smem_range_check(
- toc[id].reserved & BASE_ADDR_MASK,
- toc[id].offset);
+
+ phys_base = toc[id].reserved & BASE_ADDR_MASK;
+ if (!phys_base)
+ phys_base = (phys_addr_t)msm_shared_ram_phys;
+ ret = smem_phys_to_virt(phys_base, toc[id].offset);
} else {
*size = 0;
}
@@ -3418,14 +3546,6 @@
return ret;
}
-int sort_cmp_func(const void *a, const void *b)
-{
- struct smem_area *left = (struct smem_area *)(a);
- struct smem_area *right = (struct smem_area *)(b);
-
- return left->phys_addr - right->phys_addr;
-}
-
int smd_core_platform_init(struct platform_device *pdev)
{
int i;
@@ -3436,7 +3556,8 @@
struct smd_subsystem_config *cfg;
int err_ret = 0;
struct smd_smem_regions *smd_smem_areas;
- int smem_idx = 0;
+ struct smem_area *smem_areas_tmp = NULL;
+ int smem_idx;
smd_platform_data = pdev->dev.platform_data;
num_ss = smd_platform_data->num_ss_configs;
@@ -3447,37 +3568,54 @@
smd_ssr_config->disable_smsm_reset_handshake;
smd_smem_areas = smd_platform_data->smd_smem_areas;
- if (smd_smem_areas) {
- num_smem_areas = smd_platform_data->num_smem_areas;
- smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
- GFP_KERNEL);
- if (!smem_areas) {
- pr_err("%s: smem_areas kmalloc failed\n", __func__);
+ num_smem_areas = smd_platform_data->num_smem_areas + 1;
+
+ /* Initialize main SMEM region */
+ smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
+ GFP_KERNEL);
+ if (!smem_areas_tmp) {
+ pr_err("%s: smem_areas kmalloc failed\n", __func__);
+ err_ret = -ENOMEM;
+ goto smem_areas_alloc_fail;
+ }
+
+ smem_areas_tmp[0].phys_addr = msm_shared_ram_phys;
+ smem_areas_tmp[0].size = MSM_SHARED_RAM_SIZE;
+ smem_areas_tmp[0].virt_addr = MSM_SHARED_RAM_BASE;
+
+ /* Configure auxiliary SMEM regions */
+ for (smem_idx = 1; smem_idx < num_smem_areas; ++smem_idx) {
+ smem_areas_tmp[smem_idx].phys_addr =
+ smd_smem_areas[smem_idx].phys_addr;
+ smem_areas_tmp[smem_idx].size =
+ smd_smem_areas[smem_idx].size;
+ smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
+ (unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
+ smem_areas_tmp[smem_idx].size);
+ if (!smem_areas_tmp[smem_idx].virt_addr) {
+ pr_err("%s: ioremap_nocache() of addr: %pa size: %pa\n",
+ __func__,
+ &smem_areas_tmp[smem_idx].phys_addr,
+ &smem_areas_tmp[smem_idx].size);
err_ret = -ENOMEM;
- goto smem_areas_alloc_fail;
+ goto smem_failed;
}
- for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
- smem_areas[smem_idx].phys_addr =
- smd_smem_areas[smem_idx].phys_addr;
- smem_areas[smem_idx].size =
- smd_smem_areas[smem_idx].size;
- smem_areas[smem_idx].virt_addr = ioremap_nocache(
- (unsigned long)(smem_areas[smem_idx].phys_addr),
- smem_areas[smem_idx].size);
- if (!smem_areas[smem_idx].virt_addr) {
- pr_err("%s: ioremap_nocache() of addr: %pa size: %pa\n",
- __func__,
- &smem_areas[smem_idx].phys_addr,
- &smem_areas[smem_idx].size);
- err_ret = -ENOMEM;
- ++smem_idx;
- goto smem_failed;
- }
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
+ smem_areas_tmp[smem_idx].size)) {
+ pr_err("%s: invalid virtual address block %i: %p:%pa\n",
+ __func__, smem_idx,
+ smem_areas_tmp[smem_idx].virt_addr,
+ &smem_areas_tmp[smem_idx].size);
+ ++smem_idx;
+ err_ret = -EINVAL;
+ goto smem_failed;
}
- sort(smem_areas, num_smem_areas,
- sizeof(struct smem_area),
- sort_cmp_func, NULL);
+
+ SMD_DBG("%s: %d = %pa %pa", __func__, smem_idx,
+ &smd_smem_areas[smem_idx].phys_addr,
+ &smd_smem_areas[smem_idx].size);
}
for (i = 0; i < num_ss; i++) {
@@ -3521,8 +3659,9 @@
cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
}
-
SMD_INFO("smd_core_platform_init() done\n");
+
+ smem_areas = smem_areas_tmp;
return 0;
intr_failed:
@@ -3540,9 +3679,12 @@
);
}
smem_failed:
- for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
- iounmap(smem_areas[smem_idx].virt_addr);
- kfree(smem_areas);
+ for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
+ iounmap(smem_areas_tmp[smem_idx].virt_addr);
+
+ num_smem_areas = 0;
+ kfree(smem_areas_tmp);
+
smem_areas_alloc_fail:
return err_ret;
}
@@ -3716,12 +3858,14 @@
resource_size_t aux_mem_size;
int temp_string_size = 11; /* max 3 digit count */
char temp_string[temp_string_size];
- int count;
struct device_node *node;
int ret;
const char *compatible;
- struct ramdump_segment *ramdump_segments_tmp;
+ struct ramdump_segment *ramdump_segments_tmp = NULL;
+ struct smem_area *smem_areas_tmp = NULL;
+ int smem_idx = 0;
int subnode_num = 0;
+ int i;
resource_size_t irq_out_size;
disable_smsm_reset_handshake = 1;
@@ -3741,101 +3885,115 @@
}
SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
- count = 1;
+ num_smem_areas = 1;
while (1) {
- scnprintf(temp_string, temp_string_size, "aux-mem%d", count);
+ scnprintf(temp_string, temp_string_size, "aux-mem%d",
+ num_smem_areas);
r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
temp_string);
if (!r)
break;
++num_smem_areas;
- ++count;
- if (count > 999) {
+ if (num_smem_areas > 999) {
pr_err("%s: max num aux mem regions reached\n",
__func__);
break;
}
}
- /* initialize SSR ramdump regions */
+ /* Initialize main SMEM region and SSR ramdump region */
key = "smem";
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
if (!r) {
pr_err("%s: missing '%s'\n", __func__, key);
return -ENODEV;
}
- ramdump_segments_tmp = kmalloc_array(num_smem_areas + 1,
- sizeof(struct ramdump_segment), GFP_KERNEL);
+ smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
+ GFP_KERNEL);
+ if (!smem_areas_tmp) {
+ pr_err("%s: smem areas kmalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto free_smem_areas;
+ }
+
+ ramdump_segments_tmp = kmalloc_array(num_smem_areas,
+ sizeof(struct ramdump_segment), GFP_KERNEL);
if (!ramdump_segments_tmp) {
pr_err("%s: ramdump segment kmalloc failed\n", __func__);
ret = -ENOMEM;
goto free_smem_areas;
}
- ramdump_segments_tmp[0].address = r->start;
- ramdump_segments_tmp[0].size = resource_size(r);
- if (num_smem_areas) {
+ smem_areas_tmp[smem_idx].phys_addr = r->start;
+ smem_areas_tmp[smem_idx].size = resource_size(r);
+ smem_areas_tmp[smem_idx].virt_addr = MSM_SHARED_RAM_BASE;
- smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
- GFP_KERNEL);
+ ramdump_segments_tmp[smem_idx].address = r->start;
+ ramdump_segments_tmp[smem_idx].size = resource_size(r);
+ ++smem_idx;
- if (!smem_areas) {
- pr_err("%s: smem areas kmalloc failed\n", __func__);
+ /* Configure auxiliary SMEM regions */
+ while (1) {
+ scnprintf(temp_string, temp_string_size, "aux-mem%d",
+ smem_idx);
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ temp_string);
+ if (!r)
+ break;
+ aux_mem_base = r->start;
+ aux_mem_size = resource_size(r);
+
+ ramdump_segments_tmp[smem_idx].address = aux_mem_base;
+ ramdump_segments_tmp[smem_idx].size = aux_mem_size;
+
+ smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
+ smem_areas_tmp[smem_idx].size = aux_mem_size;
+ smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
+ (unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
+ smem_areas_tmp[smem_idx].size);
+ SMD_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
+ &aux_mem_base, &aux_mem_size,
+ smem_areas_tmp[smem_idx].virt_addr);
+
+ if (!smem_areas_tmp[smem_idx].virt_addr) {
+ pr_err("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+ __func__,
+ &smem_areas_tmp[smem_idx].phys_addr,
+ &smem_areas_tmp[smem_idx].size);
ret = -ENOMEM;
goto free_smem_areas;
}
- count = 1;
- while (1) {
- scnprintf(temp_string, temp_string_size, "aux-mem%d",
- count);
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- temp_string);
- if (!r)
- break;
- aux_mem_base = r->start;
- aux_mem_size = resource_size(r);
- /*
- * Add to ram-dumps segments.
- * ramdump_segments_tmp[0] is the main SMEM region,
- * so auxiliary segments are indexed by count
- * instead of count - 1.
- */
- ramdump_segments_tmp[count].address = aux_mem_base;
- ramdump_segments_tmp[count].size = aux_mem_size;
-
- SMD_DBG("%s: %s = %pa %pa", __func__, temp_string,
- &aux_mem_base, &aux_mem_size);
- smem_areas[count - 1].phys_addr = aux_mem_base;
- smem_areas[count - 1].size = aux_mem_size;
- smem_areas[count - 1].virt_addr = ioremap_nocache(
- (unsigned long)(smem_areas[count-1].phys_addr),
- smem_areas[count - 1].size);
- if (!smem_areas[count - 1].virt_addr) {
- pr_err("%s: ioremap_nocache() of addr:%pa size: %pa\n",
- __func__,
- &smem_areas[count - 1].phys_addr,
- &smem_areas[count - 1].size);
- ret = -ENOMEM;
- goto free_smem_areas;
- }
-
- ++count;
- if (count > 999) {
- pr_err("%s: max num aux mem regions reached\n",
- __func__);
- break;
- }
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
+ smem_areas_tmp[smem_idx].size)) {
+ pr_err("%s: invalid virtual address block %i: %p:%pa\n",
+ __func__, smem_idx,
+ smem_areas_tmp[smem_idx].virt_addr,
+ &smem_areas_tmp[smem_idx].size);
+ ++smem_idx;
+ ret = -EINVAL;
+ goto free_smem_areas;
}
- sort(smem_areas, num_smem_areas,
- sizeof(struct smem_area),
- sort_cmp_func, NULL);
+
+ ++smem_idx;
+ if (smem_idx > 999) {
+ pr_err("%s: max num aux mem regions reached\n",
+ __func__);
+ break;
+ }
}
for_each_child_of_node(pdev->dev.of_node, node) {
compatible = of_get_property(node, "compatible", NULL);
+ if (!compatible) {
+ pr_err("%s: invalid child node: compatible null\n",
+ __func__);
+ ret = -ENODEV;
+ goto rollback_subnodes;
+ }
if (!strcmp(compatible, "qcom,smd")) {
ret = parse_smd_devicetree(node, irq_out_base);
if (ret)
@@ -3853,15 +4011,16 @@
++subnode_num;
}
+ smem_areas = smem_areas_tmp;
smem_ramdump_segments = ramdump_segments_tmp;
return 0;
rollback_subnodes:
- count = 0;
+ i = 0;
for_each_child_of_node(pdev->dev.of_node, node) {
- if (count >= subnode_num)
+ if (i >= subnode_num)
break;
- ++count;
+ ++i;
compatible = of_get_property(node, "compatible", NULL);
if (!strcmp(compatible, "qcom,smd"))
unparse_smd_devicetree(node);
@@ -3869,10 +4028,12 @@
unparse_smsm_devicetree(node);
}
free_smem_areas:
+ for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
+ iounmap(smem_areas_tmp[smem_idx].virt_addr);
+
num_smem_areas = 0;
kfree(ramdump_segments_tmp);
- kfree(smem_areas);
- smem_areas = NULL;
+ kfree(smem_areas_tmp);
return ret;
}
diff --git a/arch/arm/mach-msm/smd_pkt.c b/arch/arm/mach-msm/smd_pkt.c
index 7eb9ead..424d310 100644
--- a/arch/arm/mach-msm/smd_pkt.c
+++ b/arch/arm/mach-msm/smd_pkt.c
@@ -509,7 +509,7 @@
do {
prepare_to_wait(&smd_pkt_devp->ch_write_wait_queue,
&write_wait, TASK_UNINTERRUPTIBLE);
- if (!smd_write_avail(smd_pkt_devp->ch) &&
+ if (!smd_write_segment_avail(smd_pkt_devp->ch) &&
!smd_pkt_devp->has_reset) {
smd_enable_read_intr(smd_pkt_devp->ch);
schedule();
@@ -631,7 +631,7 @@
return;
}
- sz = smd_write_avail(smd_pkt_devp->ch);
+ sz = smd_write_segment_avail(smd_pkt_devp->ch);
if (sz) {
D_WRITE("%s: %d bytes write space in smd_pkt_dev id:%d\n",
__func__, sz, smd_pkt_devp->i);
diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c
index 8a2c23f..0b270b7 100644
--- a/arch/arm/mach-msm/smd_tty.c
+++ b/arch/arm/mach-msm/smd_tty.c
@@ -63,7 +63,6 @@
struct tty_port port;
struct device *device_ptr;
struct wake_lock wake_lock;
- int open_count;
struct tasklet_struct tty_tsklt;
struct timer_list buf_req_timer;
struct completion ch_allocated;
@@ -333,112 +332,106 @@
mutex_lock(&smd_tty_lock);
tty->driver_data = info;
- if (info->open_count++ == 0) {
- peripheral = smd_edge_to_subsystem(smd_tty[n].smd->edge);
- if (peripheral) {
- info->pil = subsystem_get(peripheral);
- if (IS_ERR(info->pil)) {
- SMD_TTY_INFO(
- "%s failed on smd_tty device :%s subsystem_get failed for %s",
- __func__, smd_tty[n].smd->port_name,
- peripheral);
- /*
- * Sleep, inorder to reduce the frequency of
- * retry by user-space modules and to avoid
- * possible watchdog bite.
- */
- msleep((smd_tty[n].open_wait * 1000));
- res = PTR_ERR(info->pil);
- goto out;
- }
-
- /* Wait for the modem SMSM to be inited for the SMD
- * Loopback channel to be allocated at the modem. Since
- * the wait need to be done atmost once, using msleep
- * doesn't degrade the performance.
- */
- if (n == LOOPBACK_IDX) {
- if (!is_modem_smsm_inited())
- msleep(5000);
- smsm_change_state(SMSM_APPS_STATE,
- 0, SMSM_SMD_LOOPBACK);
- msleep(100);
- }
-
+ peripheral = smd_edge_to_subsystem(smd_tty[n].smd->edge);
+ if (peripheral) {
+ info->pil = subsystem_get(peripheral);
+ if (IS_ERR(info->pil)) {
+ SMD_TTY_INFO(
+ "%s failed on smd_tty device :%s subsystem_get failed for %s",
+ __func__, smd_tty[n].smd->port_name,
+ peripheral);
/*
- * Wait for a channel to be allocated so we know
- * the modem is ready enough.
+			 * Sleep, in order to reduce the frequency of
+ * retry by user-space modules and to avoid
+ * possible watchdog bite.
*/
- if (smd_tty[n].open_wait) {
- res = wait_for_completion_interruptible_timeout(
+ msleep((smd_tty[n].open_wait * 1000));
+ res = PTR_ERR(info->pil);
+ goto out;
+ }
+
+	/* Wait for the modem SMSM to be initialized so that the SMD
+	 * Loopback channel can be allocated at the modem. Since
+	 * the wait needs to be done at most once, using msleep
+	 * doesn't degrade the performance.
+ */
+ if (n == LOOPBACK_IDX) {
+ if (!is_modem_smsm_inited())
+ msleep(5000);
+ smsm_change_state(SMSM_APPS_STATE,
+ 0, SMSM_SMD_LOOPBACK);
+ msleep(100);
+ }
+
+ /*
+ * Wait for a channel to be allocated so we know
+ * the modem is ready enough.
+ */
+ if (smd_tty[n].open_wait) {
+ res = wait_for_completion_interruptible_timeout(
&info->ch_allocated,
msecs_to_jiffies(smd_tty[n].open_wait *
1000));
- if (res == 0) {
- SMD_TTY_INFO(
- "Timed out waiting for SMD channel %s",
- smd_tty[n].smd->port_name);
- res = -ETIMEDOUT;
- goto release_pil;
- } else if (res < 0) {
- SMD_TTY_INFO(
- "Error waiting for SMD channel %s : %d\n",
- smd_tty[n].smd->port_name, res);
- goto release_pil;
- }
-
- res = 0;
- }
- }
-
- tasklet_init(&info->tty_tsklt, smd_tty_read,
- (unsigned long)info);
- wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND,
- smd_tty[n].smd->port_name);
- scnprintf(info->ra_wake_lock_name,
- MAX_RA_WAKE_LOCK_NAME_LEN,
- "SMD_TTY_%s_RA", smd_tty[n].smd->port_name);
- wake_lock_init(&info->ra_wake_lock, WAKE_LOCK_SUSPEND,
- info->ra_wake_lock_name);
- if (!info->ch) {
- res = smd_named_open_on_edge(smd_tty[n].smd->port_name,
- smd_tty[n].smd->edge,
- &info->ch, info,
- smd_tty_notify);
- if (res < 0) {
+ if (res == 0) {
SMD_TTY_INFO(
- "%s: %s open failed %d\n",
- __func__, smd_tty[n].smd->port_name,
- res);
- goto release_pil;
- }
-
- res = wait_event_interruptible_timeout(
- info->ch_opened_wait_queue,
- info->is_open, (2 * HZ));
- if (res == 0)
+ "Timed out waiting for SMD channel %s",
+ smd_tty[n].smd->port_name);
res = -ETIMEDOUT;
- if (res < 0) {
+ goto release_pil;
+ } else if (res < 0) {
SMD_TTY_INFO(
- "%s: wait for %s smd_open failed %d\n",
- __func__, smd_tty[n].smd->port_name,
- res);
+ "Error waiting for SMD channel %s : %d\n",
+ smd_tty[n].smd->port_name, res);
goto release_pil;
}
- res = 0;
- SMD_TTY_INFO("%s with PID %u opened port %s",
- current->comm, current->pid,
- smd_tty[n].smd->port_name);
}
}
+ tasklet_init(&info->tty_tsklt, smd_tty_read, (unsigned long)info);
+ wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND,
+ smd_tty[n].smd->port_name);
+ scnprintf(info->ra_wake_lock_name, MAX_RA_WAKE_LOCK_NAME_LEN,
+ "SMD_TTY_%s_RA", smd_tty[n].smd->port_name);
+ wake_lock_init(&info->ra_wake_lock, WAKE_LOCK_SUSPEND,
+ info->ra_wake_lock_name);
+
+ res = smd_named_open_on_edge(smd_tty[n].smd->port_name,
+ smd_tty[n].smd->edge, &info->ch, info,
+ smd_tty_notify);
+ if (res < 0) {
+ SMD_TTY_INFO("%s: %s open failed %d\n",
+ __func__, smd_tty[n].smd->port_name, res);
+ goto release_wl_tl;
+ }
+
+ res = wait_event_interruptible_timeout(info->ch_opened_wait_queue,
+ info->is_open, (2 * HZ));
+ if (res == 0)
+ res = -ETIMEDOUT;
+ if (res < 0) {
+ SMD_TTY_INFO("%s: wait for %s smd_open failed %d\n",
+ __func__, smd_tty[n].smd->port_name, res);
+ goto close_ch;
+ }
+ SMD_TTY_INFO("%s with PID %u opened port %s",
+ current->comm, current->pid, smd_tty[n].smd->port_name);
+ smd_disable_read_intr(info->ch);
+ mutex_unlock(&smd_tty_lock);
+ return 0;
+
+close_ch:
+ smd_close(info->ch);
+ info->ch = NULL;
+
+release_wl_tl:
+ tasklet_kill(&info->tty_tsklt);
+ wake_lock_destroy(&info->wake_lock);
+ wake_lock_destroy(&info->ra_wake_lock);
+
release_pil:
- if (res < 0)
- subsystem_put(info->pil);
- else
- smd_disable_read_intr(info->ch);
+ subsystem_put(info->pil);
out:
mutex_unlock(&smd_tty_lock);
@@ -458,26 +451,25 @@
}
mutex_lock(&smd_tty_lock);
- if (--info->open_count == 0) {
- spin_lock_irqsave(&info->reset_lock, flags);
- info->is_open = 0;
- spin_unlock_irqrestore(&info->reset_lock, flags);
- if (tty) {
- tasklet_kill(&info->tty_tsklt);
- wake_lock_destroy(&info->wake_lock);
- wake_lock_destroy(&info->ra_wake_lock);
- }
- SMD_TTY_INFO("%s with PID %u closed port %s",
- current->comm, current->pid,
- info->smd->port_name);
- tty->driver_data = 0;
- del_timer(&info->buf_req_timer);
- if (info->ch) {
- smd_close(info->ch);
- info->ch = 0;
- subsystem_put(info->pil);
- }
- }
+
+ spin_lock_irqsave(&info->reset_lock, flags);
+ info->is_open = 0;
+ spin_unlock_irqrestore(&info->reset_lock, flags);
+
+ tasklet_kill(&info->tty_tsklt);
+ wake_lock_destroy(&info->wake_lock);
+ wake_lock_destroy(&info->ra_wake_lock);
+
+ SMD_TTY_INFO("%s with PID %u closed port %s",
+ current->comm, current->pid,
+ info->smd->port_name);
+ tty->driver_data = NULL;
+ del_timer(&info->buf_req_timer);
+
+ smd_close(info->ch);
+ info->ch = NULL;
+ subsystem_put(info->pil);
+
mutex_unlock(&smd_tty_lock);
tty_kref_put(tty);
}
diff --git a/arch/arm/mach-msm/smem_log.c b/arch/arm/mach-msm/smem_log.c
index 169df1e..361df33 100644
--- a/arch/arm/mach-msm/smem_log.c
+++ b/arch/arm/mach-msm/smem_log.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,8 @@
#include <mach/msm_iomap.h>
#include <mach/smem_log.h>
+#include <asm/arch_timer.h>
+
#include "smd_private.h"
#include "smd_rpc_sym.h"
#include "modem_notifier.h"
@@ -652,13 +654,7 @@
#else
static inline unsigned int read_timestamp(void)
{
- unsigned long long val;
-
- /* SMEM LOG uses a 32.768KHz timestamp */
- val = sched_clock() * 32768U;
- do_div(val, 1000000000U);
-
- return (unsigned int)val;
+ return (unsigned int)(arch_counter_get_cntpct());
}
#endif
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 1f0fa85..12a3ceb 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -328,6 +328,12 @@
/* 8610 IDs */
[147] = MSM_CPU_8610,
+ [161] = MSM_CPU_8610,
+ [162] = MSM_CPU_8610,
+ [163] = MSM_CPU_8610,
+ [164] = MSM_CPU_8610,
+ [165] = MSM_CPU_8610,
+ [166] = MSM_CPU_8610,
/* 8064AB IDs */
[153] = MSM_CPU_8064AB,
@@ -347,8 +353,11 @@
/* 8064AA IDs */
[172] = MSM_CPU_8064AA,
- /* zinc IDs */
- [178] = MSM_CPU_ZINC,
+ /* 8084 IDs */
+ [178] = MSM_CPU_8084,
+
+ /* krypton IDs */
+ [187] = MSM_CPU_KRYPTON,
/* Uninitialized IDs are not known to run Linux.
MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
@@ -845,35 +854,17 @@
static void * __init setup_dummy_socinfo(void)
{
- if (machine_is_msm8960_cdp())
- dummy_socinfo.id = 87;
- else if (machine_is_msm9615_mtp() || machine_is_msm9615_cdp())
- dummy_socinfo.id = 104;
- else if (early_machine_is_msm8974()) {
- dummy_socinfo.id = 126;
- strlcpy(dummy_socinfo.build_id, "msm8974 - ",
- sizeof(dummy_socinfo.build_id));
- } else if (early_machine_is_msm9625()) {
- dummy_socinfo.id = 134;
- strlcpy(dummy_socinfo.build_id, "msm9625 - ",
- sizeof(dummy_socinfo.build_id));
- } else if (early_machine_is_msm8226()) {
- dummy_socinfo.id = 145;
- strlcpy(dummy_socinfo.build_id, "msm8226 - ",
- sizeof(dummy_socinfo.build_id));
- } else if (machine_is_msm8625_rumi3())
- dummy_socinfo.id = 127;
- else if (early_machine_is_mpq8092()) {
+ if (early_machine_is_mpq8092()) {
dummy_socinfo.id = 146;
strlcpy(dummy_socinfo.build_id, "mpq8092 - ",
sizeof(dummy_socinfo.build_id));
- } else if (early_machine_is_msm8610()) {
- dummy_socinfo.id = 147;
- strlcpy(dummy_socinfo.build_id, "msm8610 - ",
- sizeof(dummy_socinfo.build_id));
- } else if (early_machine_is_msmzinc()) {
+ } else if (early_machine_is_apq8084()) {
dummy_socinfo.id = 178;
- strlcpy(dummy_socinfo.build_id, "msmzinc - ",
+ strlcpy(dummy_socinfo.build_id, "apq8084 - ",
+ sizeof(dummy_socinfo.build_id));
+ } else if (early_machine_is_msmkrypton()) {
+ dummy_socinfo.id = 187;
+ strlcpy(dummy_socinfo.build_id, "msmkrypton - ",
sizeof(dummy_socinfo.build_id));
}
strlcat(dummy_socinfo.build_id, "Dummy socinfo",
diff --git a/arch/arm/mach-msm/spm_devices.c b/arch/arm/mach-msm/spm_devices.c
index 233c5a5..174d444 100644
--- a/arch/arm/mach-msm/spm_devices.c
+++ b/arch/arm/mach-msm/spm_devices.c
@@ -74,6 +74,7 @@
info.cpu = cpu;
info.vlevel = vlevel;
+ info.err = -ENODEV;
if (cpu_online(cpu)) {
/**
diff --git a/arch/arm/mach-msm/wdog_debug.c b/arch/arm/mach-msm/wdog_debug.c
index cccca26..95a85f26 100644
--- a/arch/arm/mach-msm/wdog_debug.c
+++ b/arch/arm/mach-msm/wdog_debug.c
@@ -24,7 +24,7 @@
ret = scm_call_atomic2(SCM_SVC_BOOT,
SCM_WDOG_DEBUG_BOOT_PART, 0, BOOT_PART_EN_VAL);
if (ret)
- pr_err("failed to enable wdog debug\n");
+ pr_err("failed to enable wdog debug: %d\n", ret);
}
EXPORT_SYMBOL(msm_enable_wdog_debug);
@@ -35,6 +35,6 @@
ret = scm_call_atomic2(SCM_SVC_BOOT,
SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
if (ret)
- pr_err("failed to disable wdog debug\n");
+ pr_err("failed to disable wdog debug: %d\n", ret);
}
EXPORT_SYMBOL(msm_disable_wdog_debug);
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 21b9e1b..d6f9ee8 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
@@ -135,3 +136,58 @@
return pte_page(get_top_pte(vaddr));
}
+
+#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
+static void kmap_remove_unused_cpu(int cpu)
+{
+ int start_idx, idx, type;
+
+ pagefault_disable();
+ type = kmap_atomic_idx();
+ start_idx = type + 1 + KM_TYPE_NR * cpu;
+
+ for (idx = start_idx; idx < KM_TYPE_NR + KM_TYPE_NR * cpu; idx++) {
+ unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ pte_t ptep;
+
+ ptep = get_top_pte(vaddr);
+ if (ptep)
+ set_top_pte(vaddr, __pte(0));
+ }
+ pagefault_enable();
+}
+
+static void kmap_remove_unused(void *unused)
+{
+ kmap_remove_unused_cpu(smp_processor_id());
+}
+
+void kmap_atomic_flush_unused(void)
+{
+ on_each_cpu(kmap_remove_unused, NULL, 1);
+}
+
+static int hotplug_kmap_atomic_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_DYING:
+ kmap_remove_unused_cpu((int)hcpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block hotplug_kmap_atomic_notifier = {
+ .notifier_call = hotplug_kmap_atomic_callback,
+};
+
+static int __init init_kmap_atomic(void)
+{
+ return register_hotcpu_notifier(&hotplug_kmap_atomic_notifier);
+}
+early_initcall(init_kmap_atomic);
+#endif
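A minimal sketch of intended use, under the assumption that a driver wants every CPU's stale atomic kmap slots torn down (for instance before changing attributes of highmem pages it had mapped); the surrounding scenario is not part of this patch:

    /* Illustrative only: flush unused atomic kmap fixmap slots on all CPUs. */
    static void example_flush_after_highmem_batch(void)
    {
    #ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
            kmap_atomic_flush_unused();
    #endif
    }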
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 66567bb..26b92d4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -361,7 +361,7 @@
BUG_ON(!arm_memblock_steal_permitted);
- phys = memblock_alloc(size, align);
+ phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
memblock_free(phys, size);
memblock_remove(phys, size);
diff --git a/block/blk-core.c b/block/blk-core.c
index bd50c8e..69764df 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1080,6 +1080,16 @@
BUG_ON(blk_queued_rq(rq));
+ if (rq->cmd_flags & REQ_URGENT) {
+ /*
+ * It's not compliant with the design to re-insert
+ * urgent requests. We want to be able to track this
+ * down.
+ */
+ pr_err("%s(): requeueing an URGENT request", __func__);
+ WARN_ON(!q->dispatched_urgent);
+ q->dispatched_urgent = false;
+ }
elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
@@ -1107,6 +1117,16 @@
blk_queue_end_tag(q, rq);
BUG_ON(blk_queued_rq(rq));
+ if (rq->cmd_flags & REQ_URGENT) {
+ /*
+ * It's not compliant with the design to re-insert
+ * urgent requests. We want to be able to track this
+ * down.
+ */
+ pr_err("%s(): requeueing an URGENT request", __func__);
+ WARN_ON(!q->dispatched_urgent);
+ q->dispatched_urgent = false;
+ }
return elv_reinsert_request(q, rq);
}
diff --git a/block/row-iosched.c b/block/row-iosched.c
index 3baec8c..e71f6af 100644
--- a/block/row-iosched.c
+++ b/block/row-iosched.c
@@ -158,6 +158,20 @@
};
/**
+ * struct starvation_data - data for starvation management
+ * @starvation_limit: number of times this priority class
+ * can tolerate being starved
+ * @starvation_counter: number of requests from higher
+ *		priority classes that were dispatched while this
+ *		priority class had requests pending
+ *
+ */
+struct starvation_data {
+ int starvation_limit;
+ int starvation_counter;
+};
+
+/**
* struct row_queue - Per block device rqueue structure
* @dispatch_queue: dispatch rqueue
* @row_queues: array of priority request queues
@@ -170,6 +184,8 @@
* complete.
* @pending_urgent_rq: pointer to the pending urgent request
* @last_served_ioprio_class: I/O priority class that was last dispatched from
+ * @reg_prio_starvation: starvation data for REGULAR priority queues
+ * @low_prio_starvation: starvation data for LOW priority queues
* @cycle_flags: used for marking unserved queueus
*
*/
@@ -183,6 +199,12 @@
bool urgent_in_flight;
struct request *pending_urgent_rq;
int last_served_ioprio_class;
+
+#define ROW_REG_STARVATION_TOLLERANCE 5000
+ struct starvation_data reg_prio_starvation;
+#define ROW_LOW_STARVATION_TOLLERANCE 10000
+ struct starvation_data low_prio_starvation;
+
unsigned int cycle_flags;
};
@@ -258,6 +280,42 @@
return HRTIMER_NORESTART;
}
+/*
+ * row_regular_req_pending() - Check if there are REGULAR priority requests
+ *				pending in the scheduler
+ * @rd: pointer to struct row_data
+ *
+ * Returns true if there are REGULAR priority requests in the scheduler
+ * queues, false otherwise.
+ */
+static inline bool row_regular_req_pending(struct row_data *rd)
+{
+ int i;
+
+ for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++)
+ if (!list_empty(&rd->row_queues[i].fifo))
+ return true;
+ return false;
+}
+
+/*
+ * row_low_req_pending() - Check if there are LOW priority requests
+ *				pending in the scheduler
+ * @rd: pointer to struct row_data
+ *
+ * Returns true if there are LOW priority requests in the scheduler
+ * queues, false otherwise.
+ */
+static inline bool row_low_req_pending(struct row_data *rd)
+{
+ int i;
+
+ for (i = ROWQ_LOW_PRIO_IDX; i < ROWQ_MAX_PRIO; i++)
+ if (!list_empty(&rd->row_queues[i].fifo))
+ return true;
+ return false;
+}
+
/******************* Elevator callback functions *********************/
/*
@@ -272,6 +330,7 @@
struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
struct row_queue *rqueue = RQ_ROWQ(rq);
s64 diff_ms;
+ bool queue_was_empty = list_empty(&rqueue->fifo);
list_add_tail(&rq->queuelist, &rqueue->fifo);
rd->nr_reqs[rq_data_dir(rq)]++;
@@ -316,7 +375,8 @@
!rd->pending_urgent_rq && !rd->urgent_in_flight) {
/* Handle High Priority queues */
if (rqueue->prio < ROWQ_REG_PRIO_IDX &&
- rd->last_served_ioprio_class != IOPRIO_CLASS_RT) {
+ rd->last_served_ioprio_class != IOPRIO_CLASS_RT &&
+ queue_was_empty) {
row_log_rowq(rd, rqueue->prio,
"added (high prio) urgent request");
rq->cmd_flags |= REQ_URGENT;
@@ -472,12 +532,21 @@
row_log_rowq(rd, rqueue->prio,
" Dispatched request %p nr_disp = %d", rq,
rqueue->nr_dispatched);
- if (rqueue->prio < ROWQ_REG_PRIO_IDX)
+ if (rqueue->prio < ROWQ_REG_PRIO_IDX) {
rd->last_served_ioprio_class = IOPRIO_CLASS_RT;
- else if (rqueue->prio < ROWQ_LOW_PRIO_IDX)
+ if (row_regular_req_pending(rd))
+ rd->reg_prio_starvation.starvation_counter++;
+ if (row_low_req_pending(rd))
+ rd->low_prio_starvation.starvation_counter++;
+ } else if (rqueue->prio < ROWQ_LOW_PRIO_IDX) {
rd->last_served_ioprio_class = IOPRIO_CLASS_BE;
- else
+ rd->reg_prio_starvation.starvation_counter = 0;
+ if (row_low_req_pending(rd))
+ rd->low_prio_starvation.starvation_counter++;
+ } else {
rd->last_served_ioprio_class = IOPRIO_CLASS_IDLE;
+ rd->low_prio_starvation.starvation_counter = 0;
+ }
}
/*
@@ -517,7 +586,18 @@
rd->rd_idle_data.idling_queue_idx =
ROWQ_MAX_PRIO;
}
- ret = IOPRIO_CLASS_RT;
+
+ if (row_regular_req_pending(rd) &&
+ (rd->reg_prio_starvation.starvation_counter >=
+ rd->reg_prio_starvation.starvation_limit))
+ ret = IOPRIO_CLASS_BE;
+ else if (row_low_req_pending(rd) &&
+ (rd->low_prio_starvation.starvation_counter >=
+ rd->low_prio_starvation.starvation_limit))
+ ret = IOPRIO_CLASS_IDLE;
+ else
+ ret = IOPRIO_CLASS_RT;
+
goto done;
}
}
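In effect, with the defaults defined earlier in this patch (ROW_REG_STARVATION_TOLLERANCE = 5000, ROW_LOW_STARVATION_TOLLERANCE = 10000), once 5000 requests have been dispatched from the high-priority queues while a REGULAR request is waiting, the next class decision falls back to IOPRIO_CLASS_BE even though RT work is still pending; the REGULAR counter is cleared again as soon as a REGULAR queue is served, and the LOW counter works the same way against both higher classes.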
@@ -546,7 +626,12 @@
!force && row_queues_def[i].idling_enabled)
goto initiate_idling;
} else {
- ret = IOPRIO_CLASS_BE;
+ if (row_low_req_pending(rd) &&
+ (rd->low_prio_starvation.starvation_counter >=
+ rd->low_prio_starvation.starvation_limit))
+ ret = IOPRIO_CLASS_IDLE;
+ else
+ ret = IOPRIO_CLASS_BE;
goto done;
}
}
@@ -716,6 +801,10 @@
ktime_set(0, 0);
}
+ rdata->reg_prio_starvation.starvation_limit =
+ ROW_REG_STARVATION_TOLLERANCE;
+ rdata->low_prio_starvation.starvation_limit =
+ ROW_LOW_STARVATION_TOLLERANCE;
/*
* Currently idling is enabled only for READ queues. If we want to
* enable it for write queues also, note that idling frequency will
@@ -865,42 +954,42 @@
return count;
}
-#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+#define SHOW_FUNCTION(__FUNC, __VAR) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
struct row_data *rowd = e->elevator_data; \
int __data = __VAR; \
- if (__CONV) \
- __data = jiffies_to_msecs(__data); \
return row_var_show(__data, (page)); \
}
SHOW_FUNCTION(row_hp_read_quantum_show,
- rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 0);
+ rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum);
SHOW_FUNCTION(row_rp_read_quantum_show,
- rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, 0);
+ rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum);
SHOW_FUNCTION(row_hp_swrite_quantum_show,
- rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, 0);
+ rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum);
SHOW_FUNCTION(row_rp_swrite_quantum_show,
- rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, 0);
+ rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum);
SHOW_FUNCTION(row_rp_write_quantum_show,
- rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, 0);
+ rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum);
SHOW_FUNCTION(row_lp_read_quantum_show,
- rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, 0);
+ rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum);
SHOW_FUNCTION(row_lp_swrite_quantum_show,
- rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, 0);
-SHOW_FUNCTION(row_rd_idle_data_show, rowd->rd_idle_data.idle_time_ms, 0);
-SHOW_FUNCTION(row_rd_idle_data_freq_show, rowd->rd_idle_data.freq_ms, 0);
+ rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum);
+SHOW_FUNCTION(row_rd_idle_data_show, rowd->rd_idle_data.idle_time_ms);
+SHOW_FUNCTION(row_rd_idle_data_freq_show, rowd->rd_idle_data.freq_ms);
+SHOW_FUNCTION(row_reg_starv_limit_show,
+ rowd->reg_prio_starvation.starvation_limit);
+SHOW_FUNCTION(row_low_starv_limit_show,
+ rowd->low_prio_starvation.starvation_limit);
#undef SHOW_FUNCTION
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
static ssize_t __FUNC(struct elevator_queue *e, \
const char *page, size_t count) \
{ \
struct row_data *rowd = e->elevator_data; \
int __data; \
int ret = row_var_store(&__data, (page), count); \
- if (__CONV) \
- __data = (int)msecs_to_jiffies(__data); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
@@ -909,29 +998,35 @@
return ret; \
}
STORE_FUNCTION(row_hp_read_quantum_store,
-&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 1, INT_MAX, 0);
+&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 1, INT_MAX);
STORE_FUNCTION(row_rp_read_quantum_store,
&rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum,
- 1, INT_MAX, 0);
+ 1, INT_MAX);
STORE_FUNCTION(row_hp_swrite_quantum_store,
&rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum,
- 1, INT_MAX, 0);
+ 1, INT_MAX);
STORE_FUNCTION(row_rp_swrite_quantum_store,
&rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum,
- 1, INT_MAX, 0);
+ 1, INT_MAX);
STORE_FUNCTION(row_rp_write_quantum_store,
&rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum,
- 1, INT_MAX, 0);
+ 1, INT_MAX);
STORE_FUNCTION(row_lp_read_quantum_store,
&rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum,
- 1, INT_MAX, 0);
+ 1, INT_MAX);
STORE_FUNCTION(row_lp_swrite_quantum_store,
&rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum,
- 1, INT_MAX, 0);
+ 1, INT_MAX);
STORE_FUNCTION(row_rd_idle_data_store, &rowd->rd_idle_data.idle_time_ms,
- 1, INT_MAX, 0);
+ 1, INT_MAX);
STORE_FUNCTION(row_rd_idle_data_freq_store, &rowd->rd_idle_data.freq_ms,
- 1, INT_MAX, 0);
+ 1, INT_MAX);
+STORE_FUNCTION(row_reg_starv_limit_store,
+ &rowd->reg_prio_starvation.starvation_limit,
+ 1, INT_MAX);
+STORE_FUNCTION(row_low_starv_limit_store,
+ &rowd->low_prio_starvation.starvation_limit,
+ 1, INT_MAX);
#undef STORE_FUNCTION
@@ -949,6 +1044,8 @@
ROW_ATTR(lp_swrite_quantum),
ROW_ATTR(rd_idle_data),
ROW_ATTR(rd_idle_data_freq),
+ ROW_ATTR(reg_starv_limit),
+ ROW_ATTR(low_starv_limit),
__ATTR_NULL
};
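Because ROW_ATTR() places these entries in the elevator's sysfs attribute table, the two starvation limits are presumably tunable at runtime from the block device's iosched sysfs directory alongside the existing quantum and idle-time knobs; the exact path is an assumption, as it is not shown in this patch.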
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index ed91480..45c9023 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -227,7 +227,7 @@
pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
(unsigned long)base, (unsigned long)size / SZ_1M);
- dma_contiguous_reserve_area(size, &base, 0, name);
+ dma_contiguous_reserve_area(size, &base, MEMBLOCK_ALLOC_ANYWHERE, name);
return 0;
}
diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c
index 8b7259a..0de37c9 100644
--- a/drivers/base/genlock.c
+++ b/drivers/base/genlock.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -712,6 +712,40 @@
}
EXPORT_SYMBOL(genlock_get_handle_fd);
+/*
+ * Get a file descriptor reference to a lock suitable for sharing with
+ * other processes
+ */
+
+int genlock_get_fd_handle(struct genlock_handle *handle)
+{
+ int ret;
+ struct genlock *lock;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ lock = handle->lock;
+
+ if (IS_ERR(lock))
+ return PTR_ERR(lock);
+
+ if (!lock->file) {
+ GENLOCK_LOG_ERR("No file attached to the lock\n");
+ return -EINVAL;
+ }
+
+ ret = get_unused_fd_flags(0);
+
+ if (ret < 0)
+ return ret;
+
+ fd_install(ret, lock->file);
+
+ return ret;
+}
+EXPORT_SYMBOL(genlock_get_fd_handle);
+
#ifdef CONFIG_GENLOCK_MISCDEVICE
static long genlock_dev_ioctl(struct file *filep, unsigned int cmd,
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index d7c69db..13c9080 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -23,38 +23,227 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/delay.h>
+#include <linux/bluetooth-power.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
-static struct of_device_id ar3002_match_table[] = {
+#define BT_PWR_DBG(fmt, arg...) pr_debug("%s: " fmt "\n" , __func__ , ## arg)
+#define BT_PWR_INFO(fmt, arg...) pr_info("%s: " fmt "\n" , __func__ , ## arg)
+#define BT_PWR_ERR(fmt, arg...) pr_err("%s: " fmt "\n" , __func__ , ## arg)
+
+
+static struct of_device_id bt_power_match_table[] = {
{ .compatible = "qca,ar3002" },
{}
};
-static int bt_reset_gpio;
-
+static struct bluetooth_power_platform_data *bt_power_pdata;
+static struct platform_device *btpdev;
static bool previous;
-static int bluetooth_power(int on)
+static int bt_vreg_init(struct bt_power_vreg_data *vreg)
{
- int rc;
+ int rc = 0;
+ struct device *dev = &btpdev->dev;
- pr_debug("%s bt_gpio= %d\n", __func__, bt_reset_gpio);
+ BT_PWR_DBG("vreg_get for : %s", vreg->name);
+
+ /* Get the regulator handle */
+ vreg->reg = regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ rc = PTR_ERR(vreg->reg);
+ pr_err("%s: regulator_get(%s) failed. rc=%d\n",
+ __func__, vreg->name, rc);
+ goto out;
+ }
+
+ if ((regulator_count_voltages(vreg->reg) > 0)
+ && (vreg->low_vol_level) && (vreg->high_vol_level))
+ vreg->set_voltage_sup = 1;
+
+out:
+ return rc;
+}
+
+static int bt_vreg_enable(struct bt_power_vreg_data *vreg)
+{
+ int rc = 0;
+
+ BT_PWR_DBG("vreg_en for : %s", vreg->name);
+
+ if (!vreg->is_enabled) {
+ if (vreg->set_voltage_sup) {
+ rc = regulator_set_voltage(vreg->reg,
+ vreg->low_vol_level,
+ vreg->high_vol_level);
+ if (rc < 0) {
+ BT_PWR_ERR("vreg_set_vol(%s) failed rc=%d\n",
+ vreg->name, rc);
+ goto out;
+ }
+ }
+
+ rc = regulator_enable(vreg->reg);
+ if (rc < 0) {
+ BT_PWR_ERR("regulator_enable(%s) failed. rc=%d\n",
+ vreg->name, rc);
+ goto out;
+ }
+ vreg->is_enabled = true;
+ }
+out:
+ return rc;
+}
+
+static int bt_vreg_disable(struct bt_power_vreg_data *vreg)
+{
+ int rc = 0;
+
+ if (!vreg)
+ return rc;
+
+ BT_PWR_DBG("vreg_disable for : %s", vreg->name);
+
+ if (vreg->is_enabled) {
+ rc = regulator_disable(vreg->reg);
+ if (rc < 0) {
+ BT_PWR_ERR("regulator_disable(%s) failed. rc=%d\n",
+ vreg->name, rc);
+ goto out;
+ }
+ vreg->is_enabled = false;
+
+ if (vreg->set_voltage_sup) {
+ /* Set the min voltage to 0 */
+ rc = regulator_set_voltage(vreg->reg,
+ 0,
+ vreg->high_vol_level);
+ if (rc < 0) {
+ BT_PWR_ERR("vreg_set_vol(%s) failed rc=%d\n",
+ vreg->name, rc);
+ goto out;
+
+ }
+ }
+ }
+out:
+ return rc;
+}
+
+static int bt_configure_vreg(struct bt_power_vreg_data *vreg)
+{
+ int rc = 0;
+
+ BT_PWR_DBG("config %s", vreg->name);
+
+ /* Get the regulator handle for vreg */
+ if (!(vreg->reg)) {
+ rc = bt_vreg_init(vreg);
+ if (rc < 0)
+ return rc;
+ }
+ rc = bt_vreg_enable(vreg);
+
+ return rc;
+}
+
+static int bt_configure_gpios(int on)
+{
+ int rc = 0;
+ int bt_reset_gpio = bt_power_pdata->bt_gpio_sys_rst;
+
+ BT_PWR_DBG("%s bt_gpio= %d on: %d", __func__, bt_reset_gpio, on);
+
if (on) {
+ rc = gpio_request(bt_reset_gpio, "bt_sys_rst_n");
+ if (rc) {
+ BT_PWR_ERR("unable to request gpio %d (%d)\n",
+ bt_reset_gpio, rc);
+ return rc;
+ }
+
+ rc = gpio_direction_output(bt_reset_gpio, 0);
+ if (rc) {
+ BT_PWR_ERR("Unable to set direction\n");
+ return rc;
+ }
+
rc = gpio_direction_output(bt_reset_gpio, 1);
if (rc) {
- pr_err("%s: Unable to set direction\n", __func__);
+ BT_PWR_ERR("Unable to set direction\n");
return rc;
}
msleep(100);
} else {
gpio_set_value(bt_reset_gpio, 0);
+
rc = gpio_direction_input(bt_reset_gpio);
- if (rc) {
- pr_err("%s: Unable to set direction\n", __func__);
- return rc;
- }
+ if (rc)
+ BT_PWR_ERR("Unable to set direction\n");
+
msleep(100);
}
- return 0;
+ return rc;
+}
+
+static int bluetooth_power(int on)
+{
+ int rc = 0;
+
+ BT_PWR_DBG("on: %d", on);
+
+ if (on) {
+ if (bt_power_pdata->bt_vdd_io) {
+ rc = bt_configure_vreg(bt_power_pdata->bt_vdd_io);
+ if (rc < 0) {
+ BT_PWR_ERR("bt_power vddio config failed");
+ goto out;
+ }
+ }
+ if (bt_power_pdata->bt_vdd_ldo) {
+ rc = bt_configure_vreg(bt_power_pdata->bt_vdd_ldo);
+ if (rc < 0) {
+ BT_PWR_ERR("bt_power vddldo config failed");
+ goto vdd_ldo_fail;
+ }
+ }
+ if (bt_power_pdata->bt_vdd_pa) {
+ rc = bt_configure_vreg(bt_power_pdata->bt_vdd_pa);
+ if (rc < 0) {
+ BT_PWR_ERR("bt_power vddpa config failed");
+ goto vdd_pa_fail;
+ }
+ }
+ if (bt_power_pdata->bt_chip_pwd) {
+ rc = bt_configure_vreg(bt_power_pdata->bt_chip_pwd);
+ if (rc < 0) {
+				BT_PWR_ERR("bt_power chip pwd config failed");
+ goto chip_pwd_fail;
+ }
+ }
+ if (bt_power_pdata->bt_gpio_sys_rst) {
+ rc = bt_configure_gpios(on);
+ if (rc < 0) {
+ BT_PWR_ERR("bt_power gpio config failed");
+ goto gpio_fail;
+ }
+ }
+ } else {
+ bt_configure_gpios(on);
+gpio_fail:
+ if (bt_power_pdata->bt_gpio_sys_rst)
+ gpio_free(bt_power_pdata->bt_gpio_sys_rst);
+ bt_vreg_disable(bt_power_pdata->bt_chip_pwd);
+chip_pwd_fail:
+ bt_vreg_disable(bt_power_pdata->bt_vdd_pa);
+vdd_pa_fail:
+ bt_vreg_disable(bt_power_pdata->bt_vdd_ldo);
+vdd_ldo_fail:
+ bt_vreg_disable(bt_power_pdata->bt_vdd_io);
+ }
+
+out:
+ return rc;
}
static int bluetooth_toggle_radio(void *data, bool blocked)
@@ -62,7 +251,9 @@
int ret = 0;
int (*power_control)(int enable);
- power_control = data;
+ power_control =
+ ((struct bluetooth_power_platform_data *)data)->bt_power_setup;
+
if (previous != blocked)
ret = (*power_control)(!blocked);
if (!ret)
@@ -117,47 +308,146 @@
platform_set_drvdata(pdev, NULL);
}
+#define MAX_PROP_SIZE 32
+static int bt_dt_parse_vreg_info(struct device *dev,
+ struct bt_power_vreg_data **vreg_data, const char *vreg_name)
+{
+ int len, ret = 0;
+ const __be32 *prop;
+ char prop_name[MAX_PROP_SIZE];
+ struct bt_power_vreg_data *vreg;
+ struct device_node *np = dev->of_node;
+
+ BT_PWR_DBG("vreg dev tree parse for %s", vreg_name);
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+ if (of_parse_phandle(np, prop_name, 0)) {
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ vreg->name = vreg_name;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-voltage-level", vreg_name);
+ prop = of_get_property(np, prop_name, &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_warn(dev, "%s %s property\n",
+ prop ? "invalid format" : "no", prop_name);
+ } else {
+ vreg->low_vol_level = be32_to_cpup(&prop[0]);
+ vreg->high_vol_level = be32_to_cpup(&prop[1]);
+ }
+
+ *vreg_data = vreg;
+ BT_PWR_DBG("%s: vol=[%d %d]uV\n",
+ vreg->name, vreg->low_vol_level,
+ vreg->high_vol_level);
+ } else
+		BT_PWR_INFO("%s is not provided in device tree", vreg_name);
+
+err:
+ return ret;
+}
+
+static int bt_power_populate_dt_pinfo(struct platform_device *pdev)
+{
+ int rc;
+
+ BT_PWR_DBG("");
+
+ if (!bt_power_pdata)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node) {
+ bt_power_pdata->bt_gpio_sys_rst =
+ of_get_named_gpio(pdev->dev.of_node,
+ "qca,bt-reset-gpio", 0);
+ if (bt_power_pdata->bt_gpio_sys_rst < 0) {
+ BT_PWR_ERR("bt-reset-gpio not provided in device tree");
+ return bt_power_pdata->bt_gpio_sys_rst;
+ }
+
+ rc = bt_dt_parse_vreg_info(&pdev->dev,
+ &bt_power_pdata->bt_vdd_io,
+ "qca,bt-vdd-io");
+ if (rc < 0)
+ return rc;
+
+ rc = bt_dt_parse_vreg_info(&pdev->dev,
+ &bt_power_pdata->bt_vdd_pa,
+ "qca,bt-vdd-pa");
+ if (rc < 0)
+ return rc;
+
+ rc = bt_dt_parse_vreg_info(&pdev->dev,
+ &bt_power_pdata->bt_vdd_ldo,
+ "qca,bt-vdd-ldo");
+ if (rc < 0)
+ return rc;
+
+ rc = bt_dt_parse_vreg_info(&pdev->dev,
+ &bt_power_pdata->bt_chip_pwd,
+ "qca,bt-chip-pwd");
+ if (rc < 0)
+ return rc;
+
+ }
+
+ bt_power_pdata->bt_power_setup = bluetooth_power;
+
+ return 0;
+}
+
static int __devinit bt_power_probe(struct platform_device *pdev)
{
int ret = 0;
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (!pdev->dev.platform_data) {
- /* Update the platform data if the
- device node exists as part of device tree.*/
- if (pdev->dev.of_node) {
- pdev->dev.platform_data = bluetooth_power;
- } else {
- dev_err(&pdev->dev, "device node not set\n");
- return -ENOSYS;
- }
+ bt_power_pdata =
+ kzalloc(sizeof(struct bluetooth_power_platform_data),
+ GFP_KERNEL);
+
+ if (!bt_power_pdata) {
+ BT_PWR_ERR("Failed to allocate memory");
+ return -ENOMEM;
}
+
if (pdev->dev.of_node) {
- bt_reset_gpio = of_get_named_gpio(pdev->dev.of_node,
- "qca,bt-reset-gpio", 0);
- if (bt_reset_gpio < 0) {
- pr_err("bt-reset-gpio not available");
- return bt_reset_gpio;
+ ret = bt_power_populate_dt_pinfo(pdev);
+ if (ret < 0) {
+ BT_PWR_ERR("Failed to populate device tree info");
+ goto free_pdata;
}
+ pdev->dev.platform_data = bt_power_pdata;
+ } else if (pdev->dev.platform_data) {
+ /* Optional data set to default if not provided */
+ if (!((struct bluetooth_power_platform_data *)
+ (pdev->dev.platform_data))->bt_power_setup)
+ ((struct bluetooth_power_platform_data *)
+ (pdev->dev.platform_data))->bt_power_setup =
+ bluetooth_power;
+
+ memcpy(bt_power_pdata, pdev->dev.platform_data,
+ sizeof(struct bluetooth_power_platform_data));
+ } else {
+ BT_PWR_ERR("Failed to get platform data");
+ goto free_pdata;
}
- ret = gpio_request(bt_reset_gpio, "bt sys_rst_n");
- if (ret) {
- pr_err("%s: unable to request gpio %d (%d)\n",
- __func__, bt_reset_gpio, ret);
- return ret;
- }
+ if (bluetooth_power_rfkill_probe(pdev) < 0)
+ goto free_pdata;
- /* When booting up, de-assert BT reset pin */
- ret = gpio_direction_output(bt_reset_gpio, 0);
- if (ret) {
- pr_err("%s: Unable to set direction\n", __func__);
- return ret;
- }
+ btpdev = pdev;
- ret = bluetooth_power_rfkill_probe(pdev);
+ return 0;
+free_pdata:
+ kfree(bt_power_pdata);
return ret;
}
@@ -167,6 +457,11 @@
bluetooth_power_rfkill_remove(pdev);
+ if (bt_power_pdata->bt_chip_pwd->reg)
+ regulator_put(bt_power_pdata->bt_chip_pwd->reg);
+
+ kfree(bt_power_pdata);
+
return 0;
}
@@ -176,7 +471,7 @@
.driver = {
.name = "bt_power",
.owner = THIS_MODULE,
- .of_match_table = ar3002_match_table,
+ .of_match_table = bt_power_match_table,
},
};
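For readability, the layout of the regulator and platform-data structures this file relies on can be inferred from the accesses above. The following is only a sketch of what linux/bluetooth-power.h plausibly declares; the real header is not part of this hunk and may differ:

    /* Inferred from usage in bluetooth-power.c; illustrative only. */
    struct bt_power_vreg_data {
            struct regulator *reg;     /* handle from regulator_get() */
            const char *name;          /* supply name, e.g. "qca,bt-vdd-io" */
            unsigned low_vol_level;    /* minimum voltage in uV */
            unsigned high_vol_level;   /* maximum voltage in uV */
            bool set_voltage_sup;      /* voltage setting supported */
            bool is_enabled;           /* regulator currently enabled */
    };

    struct bluetooth_power_platform_data {
            int bt_gpio_sys_rst;                   /* BT reset GPIO */
            struct bt_power_vreg_data *bt_vdd_io;
            struct bt_power_vreg_data *bt_vdd_pa;
            struct bt_power_vreg_data *bt_vdd_ldo;
            struct bt_power_vreg_data *bt_chip_pwd;
            int (*bt_power_setup)(int on);         /* power on/off hook */
    };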
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 2557983..0383d8f 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -39,6 +39,8 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/proc_fs.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -47,8 +49,10 @@
#include <mach/msm_serial_hs.h>
#endif
-unsigned int enableuartsleep = 1;
-module_param(enableuartsleep, uint, 0644);
+static int enableuartsleep = 1;
+module_param(enableuartsleep, int, 0644);
+MODULE_PARM_DESC(enableuartsleep, "Enable Atheros Sleep Protocol");
+
/*
* Global variables
*/
@@ -62,6 +66,9 @@
/** Global state flags */
static unsigned long flags;
+/** To Check LPM is enabled */
+static bool is_lpm_enabled;
+
/** Workqueue to respond to change in hostwake line */
static void wakeup_host_work(struct work_struct *work);
@@ -72,6 +79,8 @@
/** Lock for state transitions */
static spinlock_t rw_lock;
+#define PROC_DIR "bluetooth/sleep"
+
#define POLARITY_LOW 0
#define POLARITY_HIGH 1
@@ -80,8 +89,11 @@
unsigned ext_wake; /* wake up device */
unsigned host_wake_irq;
int irq_polarity;
+ struct uart_port *uport;
};
+struct work_struct ws_sleep;
+
/* 1 second timeout */
#define TX_TIMER_INTERVAL 1
@@ -99,23 +111,24 @@
struct sk_buff_head txq;
struct work_struct ctxtsw;
- struct work_struct ws_sleep;
};
-static void hsuart_serial_clock_on(struct tty_struct *tty)
+static void hsuart_serial_clock_on(struct uart_port *port)
{
- struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
BT_DBG("");
- msm_hs_request_clock_on(port);
+ if (port)
+ msm_hs_request_clock_on(port);
+ else
+ BT_INFO("Uart has not voted for Clock ON");
}
-static void hsuart_serial_clock_off(struct tty_struct *tty)
+static void hsuart_serial_clock_off(struct uart_port *port)
{
- struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
BT_DBG("");
- msm_hs_request_clock_off(port);
+ if (port)
+ msm_hs_request_clock_off(port);
+ else
+ BT_INFO("Uart has not voted for Clock OFF");
}
static void modify_timer_task(void)
@@ -127,31 +140,31 @@
}
-static int ath_wakeup_ar3k(struct tty_struct *tty)
+static int ath_wakeup_ar3k(void)
{
int status = 0;
if (test_bit(BT_TXEXPIRED, &flags)) {
- hsuart_serial_clock_on(tty);
- BT_INFO("wakeup device\n");
+ hsuart_serial_clock_on(bsi->uport);
+ BT_DBG("wakeup device\n");
gpio_set_value(bsi->ext_wake, 0);
msleep(20);
gpio_set_value(bsi->ext_wake, 1);
}
- modify_timer_task();
+ if (!is_lpm_enabled)
+ modify_timer_task();
return status;
}
static void wakeup_host_work(struct work_struct *work)
{
- struct ath_struct *ath =
- container_of(work, struct ath_struct, ws_sleep);
- BT_INFO("wake up host");
+ BT_DBG("wake up host");
if (test_bit(BT_SLEEPENABLE, &flags)) {
if (test_bit(BT_TXEXPIRED, &flags))
- hsuart_serial_clock_on(ath->hu->tty);
+ hsuart_serial_clock_on(bsi->uport);
}
- modify_timer_task();
+ if (!is_lpm_enabled)
+ modify_timer_task();
}
static void ath_hci_uart_work(struct work_struct *work)
@@ -159,16 +172,14 @@
int status;
struct ath_struct *ath;
struct hci_uart *hu;
- struct tty_struct *tty;
ath = container_of(work, struct ath_struct, ctxtsw);
hu = ath->hu;
- tty = hu->tty;
/* verify and wake up controller */
if (test_bit(BT_SLEEPENABLE, &flags))
- status = ath_wakeup_ar3k(tty);
+ status = ath_wakeup_ar3k();
/* Ready to send Data */
clear_bit(HCI_UART_SENDING, &hu->tx_state);
hci_uart_tx_wakeup(hu);
@@ -176,15 +187,15 @@
static irqreturn_t bluesleep_hostwake_isr(int irq, void *dev_id)
{
- /* schedule a tasklet to handle the change in the host wake line */
- struct ath_struct *ath = (struct ath_struct *)dev_id;
-
- schedule_work(&ath->ws_sleep);
+ /* schedule a work to global shared workqueue to handle
+ * the change in the host wake line
+ */
+ schedule_work(&ws_sleep);
return IRQ_HANDLED;
}
-static int ath_bluesleep_gpio_config(struct ath_struct *ath, int on)
+static int ath_bluesleep_gpio_config(int on)
{
int ret = 0;
@@ -232,16 +243,16 @@
/* Initialize timer */
init_timer(&tx_timer);
tx_timer.function = bluesleep_tx_timer_expire;
- tx_timer.data = (u_long)ath->hu;
+ tx_timer.data = 0;
if (bsi->irq_polarity == POLARITY_LOW) {
ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
IRQF_DISABLED | IRQF_TRIGGER_FALLING,
- "bluetooth hostwake", (void *)ath);
+ "bluetooth hostwake", NULL);
} else {
ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
IRQF_DISABLED | IRQF_TRIGGER_RISING,
- "bluetooth hostwake", (void *)ath);
+ "bluetooth hostwake", NULL);
}
if (ret < 0) {
BT_ERR("Couldn't acquire BT_HOST_WAKE IRQ");
@@ -257,7 +268,7 @@
return 0;
free_host_wake_irq:
- free_irq(bsi->host_wake_irq, (void *)ath);
+ free_irq(bsi->host_wake_irq, NULL);
delete_timer:
del_timer(&tx_timer);
gpio_ext_wake:
@@ -268,26 +279,76 @@
return ret;
}
+static int ath_lpm_start(void)
+{
+ BT_DBG("Start LPM mode");
+
+ if (!bsi) {
+ BT_ERR("HCIATH3K bluesleep info does not exist");
+ return -EIO;
+ }
+
+ bsi->uport = msm_hs_get_uart_port(0);
+ if (!bsi->uport) {
+ BT_ERR("UART Port is not available");
+ return -ENODEV;
+ }
+
+ INIT_WORK(&ws_sleep, wakeup_host_work);
+
+ if (ath_bluesleep_gpio_config(1) < 0) {
+ BT_ERR("HCIATH3K GPIO Config failed");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath_lpm_stop(void)
+{
+ BT_DBG("Stop LPM mode");
+ cancel_work_sync(&ws_sleep);
+
+ if (bsi) {
+ bsi->uport = NULL;
+ ath_bluesleep_gpio_config(0);
+ }
+
+ return 0;
+}
+
/* Initialize protocol */
static int ath_open(struct hci_uart *hu)
{
struct ath_struct *ath;
+ struct uart_state *state;
BT_DBG("hu %p, bsi %p", hu, bsi);
- if (!bsi)
+ if (!bsi) {
+ BT_ERR("HCIATH3K bluesleep info does not exist");
return -EIO;
+ }
ath = kzalloc(sizeof(*ath), GFP_ATOMIC);
- if (!ath)
+ if (!ath) {
+ BT_ERR("HCIATH3K Memory not enough to init driver");
return -ENOMEM;
+ }
skb_queue_head_init(&ath->txq);
hu->priv = ath;
ath->hu = hu;
+ state = hu->tty->driver_data;
- if (ath_bluesleep_gpio_config(ath, 1) < 0) {
+ if (!state) {
+ BT_ERR("HCIATH3K tty driver data does not exist");
+ return -ENXIO;
+ }
+ bsi->uport = state->uart_port;
+
+ if (ath_bluesleep_gpio_config(1) < 0) {
BT_ERR("HCIATH3K GPIO Config failed");
hu->priv = NULL;
kfree(ath);
@@ -300,7 +361,7 @@
modify_timer_task();
}
INIT_WORK(&ath->ctxtsw, ath_hci_uart_work);
- INIT_WORK(&ath->ws_sleep, wakeup_host_work);
+ INIT_WORK(&ws_sleep, wakeup_host_work);
return 0;
}
@@ -327,12 +388,13 @@
cancel_work_sync(&ath->ctxtsw);
- cancel_work_sync(&ath->ws_sleep);
+ cancel_work_sync(&ws_sleep);
if (bsi)
- ath_bluesleep_gpio_config(ath, 0);
+ ath_bluesleep_gpio_config(0);
hu->priv = NULL;
+ bsi->uport = NULL;
kfree(ath);
return 0;
@@ -423,14 +485,13 @@
static void bluesleep_tx_timer_expire(unsigned long data)
{
- struct hci_uart *hu = (struct hci_uart *) data;
if (!test_bit(BT_SLEEPENABLE, &flags))
return;
BT_INFO("Tx timer expired\n");
set_bit(BT_TXEXPIRED, &flags);
- hsuart_serial_clock_off(hu->tty);
+ hsuart_serial_clock_off(bsi->uport);
}
static struct hci_uart_proto athp = {
@@ -443,6 +504,88 @@
.flush = ath_flush,
};
+static int lpm_enabled;
+
+static int bluesleep_lpm_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+
+ if (ret) {
+ BT_ERR("HCIATH3K: lpm enable parameter set failed");
+ return ret;
+ }
+
+ BT_DBG("lpm : %d", lpm_enabled);
+
+ if ((lpm_enabled == 0) && is_lpm_enabled) {
+ ath_lpm_stop();
+ clear_bit(BT_SLEEPENABLE, &flags);
+ is_lpm_enabled = false;
+ } else if ((lpm_enabled == 1) && !is_lpm_enabled) {
+ if (ath_lpm_start() < 0) {
+ BT_ERR("HCIATH3K LPM mode failed");
+ return -EIO;
+ }
+ set_bit(BT_SLEEPENABLE, &flags);
+ is_lpm_enabled = true;
+ } else {
+ BT_ERR("HCIATH3K invalid lpm value");
+ return -EINVAL;
+ }
+ return 0;
+
+}
+
+static struct kernel_param_ops bluesleep_lpm_ops = {
+ .set = bluesleep_lpm_set,
+ .get = param_get_int,
+};
+
+module_param_cb(ath_lpm, &bluesleep_lpm_ops,
+ &lpm_enabled, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ath_lpm, "Enable Atheros LPM sleep Protocol");
+
+static int lpm_btwrite;
+
+static int bluesleep_lpm_btwrite(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+
+ if (ret) {
+ BT_ERR("HCIATH3K: lpm btwrite parameter set failed");
+ return ret;
+ }
+
+ BT_DBG("btwrite : %d", lpm_btwrite);
+ if (is_lpm_enabled) {
+ if (lpm_btwrite == 0) {
+			/* Set the TXEXPIRED bit to remain
+			 * compatible with the current solution.
+			 */
+ set_bit(BT_TXEXPIRED, &flags);
+ hsuart_serial_clock_off(bsi->uport);
+ } else if (lpm_btwrite == 1) {
+ ath_wakeup_ar3k();
+ clear_bit(BT_TXEXPIRED, &flags);
+ } else {
+ BT_ERR("HCIATH3K invalid btwrite value");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static struct kernel_param_ops bluesleep_lpm_btwrite_ops = {
+ .set = bluesleep_lpm_btwrite,
+ .get = param_get_int,
+};
+
+module_param_cb(ath_btwrite, &bluesleep_lpm_btwrite_ops,
+ &lpm_btwrite, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ath_btwrite, "Assert/Deassert the sleep");
static int bluesleep_populate_dt_pinfo(struct platform_device *pdev)
{
@@ -581,5 +724,6 @@
int __exit ath_deinit(void)
{
platform_driver_unregister(&bluesleep_driver);
+
return hci_uart_unregister_proto(&athp);
}
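Because both new knobs are registered via module_param_cb() with S_IWUSR, userspace presumably drives low-power mode by writing 0 or 1 to the ath_lpm and ath_btwrite parameters under the module's /sys/module/.../parameters/ directory; the exact module directory name depends on how hci_ath is built, so the path is an assumption rather than something shown in this patch.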
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index d78327f..51578e0 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -527,11 +527,9 @@
{
struct fastrpc_apps *me = &gfa;
- if (me->chan)
- (void)smd_close(me->chan);
+ smd_close(me->chan);
context_list_dtor(&me->clst);
- if (me->iclient)
- ion_client_destroy(me->iclient);
+ ion_client_destroy(me->iclient);
me->iclient = 0;
me->chan = 0;
}
@@ -584,25 +582,32 @@
INIT_HLIST_HEAD(&me->htbl[i]);
VERIFY(err, 0 == context_list_ctor(&me->clst, SZ_4K));
if (err)
- goto bail;
+ goto context_list_bail;
me->iclient = msm_ion_client_create(ION_HEAP_CARVEOUT_MASK,
DEVICE_NAME);
VERIFY(err, 0 == IS_ERR_OR_NULL(me->iclient));
if (err)
- goto bail;
+ goto ion_bail;
VERIFY(err, 0 == smd_named_open_on_edge(FASTRPC_SMD_GUID,
SMD_APPS_QDSP, &me->chan,
me, smd_event_handler));
if (err)
- goto bail;
+ goto smd_bail;
VERIFY(err, 0 != wait_for_completion_timeout(&me->work,
RPC_TIMEOUT));
if (err)
- goto bail;
+ goto completion_bail;
}
- bail:
- if (err)
- fastrpc_deinit();
+
+ return 0;
+
+completion_bail:
+ smd_close(me->chan);
+smd_bail:
+ ion_client_destroy(me->iclient);
+ion_bail:
+ context_list_dtor(&me->clst);
+context_list_bail:
return err;
}
@@ -1090,35 +1095,37 @@
VERIFY(err, 0 == fastrpc_init());
if (err)
- goto bail;
+ goto fastrpc_bail;
VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, 1, DEVICE_NAME));
if (err)
- goto bail;
+ goto alloc_chrdev_bail;
cdev_init(&me->cdev, &fops);
me->cdev.owner = THIS_MODULE;
VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), 1));
if (err)
- goto bail;
+ goto cdev_init_bail;
me->class = class_create(THIS_MODULE, "chardrv");
VERIFY(err, !IS_ERR(me->class));
if (err)
- goto bail;
+ goto class_create_bail;
me->dev = device_create(me->class, NULL, MKDEV(MAJOR(me->dev_no), 0),
NULL, DEVICE_NAME);
VERIFY(err, !IS_ERR(me->dev));
if (err)
- goto bail;
+ goto device_create_bail;
pr_info("'created /dev/%s c %d 0'\n", DEVICE_NAME, MAJOR(me->dev_no));
- bail:
- if (err) {
- if (me->dev_no)
- unregister_chrdev_region(me->dev_no, 1);
- if (me->class)
- class_destroy(me->class);
- if (me->cdev.owner)
- cdev_del(&me->cdev);
- fastrpc_deinit();
- }
+
+ return 0;
+
+device_create_bail:
+ class_destroy(me->class);
+class_create_bail:
+ cdev_del(&me->cdev);
+cdev_init_bail:
+ unregister_chrdev_region(me->dev_no, 1);
+alloc_chrdev_bail:
+ fastrpc_deinit();
+fastrpc_bail:
return err;
}
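
Editor's note: the adsprpc changes above replace a single catch-all "bail" label with one label per acquired resource, so a failure unwinds exactly what was set up before it. Below is a compact sketch of that unwind shape; the acquire_*/release_* helpers are illustrative stand-ins for the real context-list/ion/smd calls.

/* illustrative stand-ins for the real resource setup/teardown calls */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int example_init(void)
{
	int err;

	err = acquire_a();	/* e.g. the context list */
	if (err)
		goto a_bail;
	err = acquire_b();	/* e.g. the ion client */
	if (err)
		goto b_bail;
	err = acquire_c();	/* e.g. the smd channel */
	if (err)
		goto c_bail;

	return 0;

c_bail:
	release_b();
b_bail:
	release_a();
a_bail:
	return err;
}
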
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 6d28042..682d876 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1090,6 +1090,12 @@
diag_smd_destructor(&driver->smd_dci[i]);
platform_driver_unregister(&msm_diag_dci_driver);
+
+ if (driver->dci_client_tbl) {
+ for (i = 0; i < MAX_DCI_CLIENTS; i++)
+ kfree(driver->dci_client_tbl[i].dci_data);
+ }
+
kfree(driver->req_tracking_tbl);
kfree(driver->dci_client_tbl);
kfree(driver->apps_dci_buf);
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 6f37608..684f11d 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -37,6 +37,7 @@
#define HDLC_OUT_BUF_SIZE 8192
#define POOL_TYPE_COPY 1
#define POOL_TYPE_HDLC 2
+#define POOL_TYPE_USER 3
#define POOL_TYPE_WRITE_STRUCT 4
#define POOL_TYPE_HSIC 5
#define POOL_TYPE_HSIC_2 6
@@ -55,7 +56,7 @@
#define MSG_MASK_SIZE 10000
#define LOG_MASK_SIZE 8000
#define EVENT_MASK_SIZE 1000
-#define USER_SPACE_DATA 8000
+#define USER_SPACE_DATA 8192
#define PKT_SIZE 4096
#define MAX_EQUIP_ID 15
#define DIAG_CTRL_MSG_LOG_MASK 9
@@ -234,16 +235,20 @@
unsigned int poolsize;
unsigned int itemsize_hdlc;
unsigned int poolsize_hdlc;
+ unsigned int itemsize_user;
+ unsigned int poolsize_user;
unsigned int itemsize_write_struct;
unsigned int poolsize_write_struct;
unsigned int debug_flag;
/* State for the mempool for the char driver */
mempool_t *diagpool;
mempool_t *diag_hdlc_pool;
+ mempool_t *diag_user_pool;
mempool_t *diag_write_struct_pool;
struct mutex diagmem_mutex;
int count;
int count_hdlc_pool;
+ int count_user_pool;
int count_write_struct_pool;
int used;
/* Buffers for masks */
@@ -259,7 +264,6 @@
struct diag_smd_info smd_dci[NUM_SMD_DCI_CHANNELS];
unsigned char *usb_buf_out;
unsigned char *apps_rsp_buf;
- unsigned char *user_space_data;
/* buffer for updating mask to peripherals */
unsigned char *buf_msg_mask_update;
unsigned char *buf_log_mask_update;
@@ -276,6 +280,8 @@
struct usb_diag_ch *legacy_ch;
struct work_struct diag_proc_hdlc_work;
struct work_struct diag_read_work;
+ struct work_struct diag_usb_connect_work;
+ struct work_struct diag_usb_disconnect_work;
#endif
struct workqueue_struct *diag_wq;
struct work_struct diag_drain_work;
@@ -312,6 +318,7 @@
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
spinlock_t hsic_ready_spinlock;
/* common for all bridges */
+ struct work_struct diag_connect_work;
struct work_struct diag_disconnect_work;
/* SGLTE variables */
int lcid;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index a0c32f5..2ebae71 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -56,13 +56,16 @@
/* The following variables can be specified by module options */
/* for copy buffer */
static unsigned int itemsize = 4096; /*Size of item in the mempool */
-static unsigned int poolsize = 10; /*Number of items in the mempool */
+static unsigned int poolsize = 12; /*Number of items in the mempool */
/* for hdlc buffer */
static unsigned int itemsize_hdlc = 8192; /*Size of item in the mempool */
-static unsigned int poolsize_hdlc = 8; /*Number of items in the mempool */
+static unsigned int poolsize_hdlc = 10; /*Number of items in the mempool */
+/* for user buffer */
+static unsigned int itemsize_user = 8192; /*Size of item in the mempool */
+static unsigned int poolsize_user = 8; /*Number of items in the mempool */
/* for write structure buffer */
static unsigned int itemsize_write_struct = 20; /*Size of item in the mempool */
-static unsigned int poolsize_write_struct = 8; /* Num of items in the mempool */
+static unsigned int poolsize_write_struct = 10;/* Num of items in the mempool */
/* This is the max number of user-space clients supported at initialization*/
static unsigned int max_clients = 15;
static unsigned int threshold_client_limit = 30;
@@ -781,10 +784,26 @@
{
int i, temp, success = -EINVAL, status;
int temp_realtime_mode = driver->real_time_mode;
+ int requested_mode = (int)ioarg;
+
+ switch (requested_mode) {
+ case USB_MODE:
+ case MEMORY_DEVICE_MODE:
+ case NO_LOGGING_MODE:
+ case UART_MODE:
+ case SOCKET_MODE:
+ case CALLBACK_MODE:
+ case MEMORY_DEVICE_MODE_NRT:
+ break;
+ default:
+ pr_err("diag: In %s, request to switch to invalid mode: %d\n",
+ __func__, requested_mode);
+ return -EINVAL;
+ }
mutex_lock(&driver->diagchar_mutex);
temp = driver->logging_mode;
- driver->logging_mode = (int)ioarg;
+ driver->logging_mode = requested_mode;
if (driver->logging_mode == MEMORY_DEVICE_MODE_NRT) {
diag_send_diag_mode_update(MODE_NONREALTIME);
@@ -1013,6 +1032,8 @@
current->tgid)
driver->req_tracking_tbl[i].pid = 0;
driver->dci_client_tbl[result].client = NULL;
+ kfree(driver->dci_client_tbl[result].dci_data);
+ driver->dci_client_tbl[result].dci_data = NULL;
driver->num_dci_client--;
}
mutex_unlock(&driver->dci_mutex);
@@ -1362,6 +1383,7 @@
struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
void *buf_copy = NULL;
+ void *user_space_data = NULL;
unsigned int payload_size;
index = 0;
@@ -1388,31 +1410,50 @@
}
#endif /* DIAG over USB */
if (pkt_type == DCI_DATA_TYPE) {
- err = copy_from_user(driver->user_space_data, buf + 4,
- payload_size);
+ user_space_data = diagmem_alloc(driver, payload_size,
+ POOL_TYPE_USER);
+ if (!user_space_data) {
+ driver->dropped_count++;
+ return -ENOMEM;
+ }
+ err = copy_from_user(user_space_data, buf + 4, payload_size);
if (err) {
pr_alert("diag: copy failed for DCI data\n");
return DIAG_DCI_SEND_DATA_FAIL;
}
- err = diag_process_dci_transaction(driver->user_space_data,
+ err = diag_process_dci_transaction(user_space_data,
payload_size);
+ diagmem_free(driver, user_space_data, POOL_TYPE_USER);
return err;
}
if (pkt_type == CALLBACK_DATA_TYPE) {
- err = copy_from_user(driver->user_space_data, buf + 4,
- payload_size);
- if (err) {
+ if (payload_size > itemsize) {
+ pr_err("diag: Dropping packet, packet payload size crosses 4KB limit. Current payload size %d\n",
+ payload_size);
+ driver->dropped_count++;
+ return -EBADMSG;
+ }
+
+ buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY);
+ if (!buf_copy) {
+ driver->dropped_count++;
+ return -ENOMEM;
+ }
+
+ err = copy_from_user(buf_copy, buf + 4, payload_size);
+ if (err) {
pr_err("diag: copy failed for user space data\n");
return -EIO;
}
/* Check for proc_type */
- remote_proc = diag_get_remote(*(int *)driver->user_space_data);
+ remote_proc = diag_get_remote(*(int *)buf_copy);
if (!remote_proc) {
wait_event_interruptible(driver->wait_q,
(driver->in_busy_pktdata == 0));
- return diag_process_apps_pkt(driver->user_space_data,
- payload_size);
+ ret = diag_process_apps_pkt(buf_copy, payload_size);
+ diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+ return ret;
}
/* The packet is for the remote processor */
token_offset = 4;
@@ -1420,8 +1461,8 @@
buf += 4;
/* Perform HDLC encoding on incoming data */
send.state = DIAG_STATE_START;
- send.pkt = (void *)(driver->user_space_data + token_offset);
- send.last = (void *)(driver->user_space_data + token_offset -
+ send.pkt = (void *)(buf_copy + token_offset);
+ send.last = (void *)(buf_copy + token_offset -
1 + payload_size);
send.terminate = 1;
@@ -1503,21 +1544,30 @@
}
}
#endif
+ diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
+ buf_copy = NULL;
buf_hdlc = NULL;
driver->used = 0;
mutex_unlock(&driver->diagchar_mutex);
return ret;
}
if (pkt_type == USER_SPACE_DATA_TYPE) {
- err = copy_from_user(driver->user_space_data, buf + 4,
+ user_space_data = diagmem_alloc(driver, payload_size,
+ POOL_TYPE_USER);
+ if (!user_space_data) {
+ driver->dropped_count++;
+ return -ENOMEM;
+ }
+ err = copy_from_user(user_space_data, buf + 4,
payload_size);
if (err) {
pr_err("diag: copy failed for user space data\n");
+ diagmem_free(driver, user_space_data, POOL_TYPE_USER);
return -EIO;
}
/* Check for proc_type */
- remote_proc = diag_get_remote(*(int *)driver->user_space_data);
+ remote_proc = diag_get_remote(*(int *)user_space_data);
if (remote_proc) {
token_offset = 4;
@@ -1527,9 +1577,11 @@
/* Check masks for On-Device logging */
if (driver->mask_check) {
- if (!mask_request_validate(driver->user_space_data +
+ if (!mask_request_validate(user_space_data +
token_offset)) {
pr_alert("diag: mask request Invalid\n");
+ diagmem_free(driver, user_space_data,
+ POOL_TYPE_USER);
return -EFAULT;
}
}
@@ -1537,7 +1589,7 @@
#ifdef DIAG_DEBUG
pr_debug("diag: user space data %d\n", payload_size);
for (i = 0; i < payload_size; i++)
- pr_debug("\t %x", *((driver->user_space_data
+ pr_debug("\t %x", *((user_space_data
+ token_offset)+i));
#endif
#ifdef CONFIG_DIAG_SDIO_PIPE
@@ -1548,7 +1600,7 @@
payload_size));
if (driver->sdio_ch && (payload_size > 0)) {
sdio_write(driver->sdio_ch, (void *)
- (driver->user_space_data + token_offset),
+ (user_space_data + token_offset),
payload_size);
}
}
@@ -1578,8 +1630,8 @@
diag_hsic[index].in_busy_hsic_read_on_device =
0;
err = diag_bridge_write(index,
- driver->user_space_data +
- token_offset, payload_size);
+ user_space_data + token_offset,
+ payload_size);
if (err) {
pr_err("diag: err sending mask to MDM: %d\n",
err);
@@ -1600,11 +1652,13 @@
&& driver->lcid) {
if (payload_size > 0) {
err = msm_smux_write(driver->lcid, NULL,
- driver->user_space_data + token_offset,
+ user_space_data + token_offset,
payload_size);
if (err) {
pr_err("diag:send mask to MDM err %d",
err);
+ diagmem_free(driver, user_space_data,
+ POOL_TYPE_USER);
return err;
}
}
@@ -1613,8 +1667,8 @@
/* send masks to 8k now */
if (!remote_proc)
diag_process_hdlc((void *)
- (driver->user_space_data + token_offset),
- payload_size);
+ (user_space_data + token_offset), payload_size);
+ diagmem_free(driver, user_space_data, POOL_TYPE_USER);
return 0;
}
@@ -1885,6 +1939,11 @@
}
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void diag_connect_work_fn(struct work_struct *w)
+{
+ diagfwd_connect_bridge(1);
+}
+
static void diag_disconnect_work_fn(struct work_struct *w)
{
diagfwd_disconnect_bridge(1);
@@ -1944,6 +2003,8 @@
driver->poolsize = poolsize;
driver->itemsize_hdlc = itemsize_hdlc;
driver->poolsize_hdlc = poolsize_hdlc;
+ driver->itemsize_user = itemsize_user;
+ driver->poolsize_user = poolsize_user;
driver->itemsize_write_struct = itemsize_write_struct;
driver->poolsize_write_struct = poolsize_write_struct;
driver->num_clients = max_clients;
@@ -1969,6 +2030,8 @@
pr_err("diag: could not register HSIC device, ret: %d\n",
ret);
diagfwd_bridge_init(SMUX);
+ INIT_WORK(&(driver->diag_connect_work),
+ diag_connect_work_fn);
INIT_WORK(&(driver->diag_disconnect_work),
diag_disconnect_work_fn);
#endif
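
Editor's note: the USER-pool changes above move from one long-lived user_space_data buffer to a per-request buffer drawn from a dedicated mempool and returned on every exit path. The following is a minimal sketch of that flow under the assumption that example_user_pool and example_process() stand in for the real diag helpers, and that payload_size has already been validated against the pool's item size.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/uaccess.h>

static mempool_t *example_user_pool;	/* hypothetical, created elsewhere */

static int example_process(void *data, size_t len)
{
	return 0;	/* stand-in for the real packet handling */
}

static int example_handle_write(const char __user *buf, size_t payload_size)
{
	void *data;
	int err;

	data = mempool_alloc(example_user_pool, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (copy_from_user(data, buf, payload_size)) {
		err = -EFAULT;
		goto out;
	}

	err = example_process(data, payload_size);
out:
	/* give the buffer back to the pool on every exit path */
	mempool_free(data, example_user_pool);
	return err;
}
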
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 5b929d7..151e304 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -402,7 +402,7 @@
return;
}
if (pkt_len > r) {
- pr_err("diag: In %s, SMD sending partial pkt %d %d %d %d %d %d\n",
+ pr_debug("diag: In %s, SMD sending partial pkt %d %d %d %d %d %d\n",
__func__, pkt_len, r, total_recd, loop_count,
smd_info->peripheral, smd_info->type);
}
@@ -693,8 +693,7 @@
diag_update_sleeping_process(entry.process_id, PKT_TYPE);
} else {
if (len > 0) {
- if ((entry.client_id >= 0) &&
- (entry.client_id < NUM_SMD_DATA_CHANNELS)) {
+ if (entry.client_id < NUM_SMD_DATA_CHANNELS) {
int index = entry.client_id;
if (driver->smd_data[index].ch) {
if ((index == MODEM_DATA) &&
@@ -907,94 +906,186 @@
/* bld time masks */
switch (ssid_first) {
case MSG_SSID_0:
+ if (ssid_range > sizeof(msg_bld_masks_0)) {
+ pr_warning("diag: truncating ssid range for ssid 0");
+ ssid_range = sizeof(msg_bld_masks_0);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_0[i/4];
break;
case MSG_SSID_1:
+ if (ssid_range > sizeof(msg_bld_masks_1)) {
+ pr_warning("diag: truncating ssid range for ssid 1");
+ ssid_range = sizeof(msg_bld_masks_1);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_1[i/4];
break;
case MSG_SSID_2:
+ if (ssid_range > sizeof(msg_bld_masks_2)) {
+ pr_warning("diag: truncating ssid range for ssid 2");
+ ssid_range = sizeof(msg_bld_masks_2);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_2[i/4];
break;
case MSG_SSID_3:
+ if (ssid_range > sizeof(msg_bld_masks_3)) {
+ pr_warning("diag: truncating ssid range for ssid 3");
+ ssid_range = sizeof(msg_bld_masks_3);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_3[i/4];
break;
case MSG_SSID_4:
+ if (ssid_range > sizeof(msg_bld_masks_4)) {
+ pr_warning("diag: truncating ssid range for ssid 4");
+ ssid_range = sizeof(msg_bld_masks_4);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_4[i/4];
break;
case MSG_SSID_5:
+ if (ssid_range > sizeof(msg_bld_masks_5)) {
+ pr_warning("diag: truncating ssid range for ssid 5");
+ ssid_range = sizeof(msg_bld_masks_5);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_5[i/4];
break;
case MSG_SSID_6:
+ if (ssid_range > sizeof(msg_bld_masks_6)) {
+ pr_warning("diag: truncating ssid range for ssid 6");
+ ssid_range = sizeof(msg_bld_masks_6);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_6[i/4];
break;
case MSG_SSID_7:
+ if (ssid_range > sizeof(msg_bld_masks_7)) {
+ pr_warning("diag: truncating ssid range for ssid 7");
+ ssid_range = sizeof(msg_bld_masks_7);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_7[i/4];
break;
case MSG_SSID_8:
+ if (ssid_range > sizeof(msg_bld_masks_8)) {
+ pr_warning("diag: truncating ssid range for ssid 8");
+ ssid_range = sizeof(msg_bld_masks_8);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_8[i/4];
break;
case MSG_SSID_9:
+ if (ssid_range > sizeof(msg_bld_masks_9)) {
+ pr_warning("diag: truncating ssid range for ssid 9");
+ ssid_range = sizeof(msg_bld_masks_9);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_9[i/4];
break;
case MSG_SSID_10:
+ if (ssid_range > sizeof(msg_bld_masks_10)) {
+ pr_warning("diag: truncating ssid range for ssid 10");
+ ssid_range = sizeof(msg_bld_masks_10);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_10[i/4];
break;
case MSG_SSID_11:
+ if (ssid_range > sizeof(msg_bld_masks_11)) {
+ pr_warning("diag: truncating ssid range for ssid 11");
+ ssid_range = sizeof(msg_bld_masks_11);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_11[i/4];
break;
case MSG_SSID_12:
+ if (ssid_range > sizeof(msg_bld_masks_12)) {
+ pr_warning("diag: truncating ssid range for ssid 12");
+ ssid_range = sizeof(msg_bld_masks_12);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_12[i/4];
break;
case MSG_SSID_13:
+ if (ssid_range > sizeof(msg_bld_masks_13)) {
+ pr_warning("diag: truncating ssid range for ssid 13");
+ ssid_range = sizeof(msg_bld_masks_13);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_13[i/4];
break;
case MSG_SSID_14:
+ if (ssid_range > sizeof(msg_bld_masks_14)) {
+ pr_warning("diag: truncating ssid range for ssid 14");
+ ssid_range = sizeof(msg_bld_masks_14);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_14[i/4];
break;
case MSG_SSID_15:
+ if (ssid_range > sizeof(msg_bld_masks_15)) {
+ pr_warning("diag: truncating ssid range for ssid 15");
+ ssid_range = sizeof(msg_bld_masks_15);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_15[i/4];
break;
case MSG_SSID_16:
+ if (ssid_range > sizeof(msg_bld_masks_16)) {
+ pr_warning("diag: truncating ssid range for ssid 16");
+ ssid_range = sizeof(msg_bld_masks_16);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_16[i/4];
break;
case MSG_SSID_17:
+ if (ssid_range > sizeof(msg_bld_masks_17)) {
+ pr_warning("diag: truncating ssid range for ssid 17");
+ ssid_range = sizeof(msg_bld_masks_17);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_17[i/4];
break;
case MSG_SSID_18:
+ if (ssid_range > sizeof(msg_bld_masks_18)) {
+ pr_warning("diag: truncating ssid range for ssid 18");
+ ssid_range = sizeof(msg_bld_masks_18);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_18[i/4];
break;
case MSG_SSID_19:
+ if (ssid_range > sizeof(msg_bld_masks_19)) {
+ pr_warning("diag: truncating ssid range for ssid 19");
+ ssid_range = sizeof(msg_bld_masks_19);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_19[i/4];
break;
case MSG_SSID_20:
+ if (ssid_range > sizeof(msg_bld_masks_20)) {
+ pr_warning("diag: truncating ssid range for ssid 20");
+ ssid_range = sizeof(msg_bld_masks_20);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_20[i/4];
break;
case MSG_SSID_21:
+ if (ssid_range > sizeof(msg_bld_masks_21)) {
+ pr_warning("diag: truncating ssid range for ssid 21");
+ ssid_range = sizeof(msg_bld_masks_21);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_21[i/4];
break;
case MSG_SSID_22:
+ if (ssid_range > sizeof(msg_bld_masks_22)) {
+ pr_warning("diag: truncating ssid range for ssid 22");
+ ssid_range = sizeof(msg_bld_masks_22);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_22[i/4];
break;
@@ -1191,6 +1282,16 @@
#define N_LEGACY_WRITE (driver->poolsize + 6)
#define N_LEGACY_READ 1
+static void diag_usb_connect_work_fn(struct work_struct *w)
+{
+ diagfwd_connect();
+}
+
+static void diag_usb_disconnect_work_fn(struct work_struct *w)
+{
+ diagfwd_disconnect();
+}
+
int diagfwd_connect(void)
{
int err;
@@ -1357,10 +1458,12 @@
{
switch (event) {
case USB_DIAG_CONNECT:
- diagfwd_connect();
+ queue_work(driver->diag_wq,
+ &driver->diag_usb_connect_work);
break;
case USB_DIAG_DISCONNECT:
- diagfwd_disconnect();
+ queue_work(driver->diag_wq,
+ &driver->diag_usb_disconnect_work);
break;
case USB_DIAG_READ_DONE:
diagfwd_read_complete(d_req);
@@ -1692,11 +1795,6 @@
&& (driver->hdlc_buf = kzalloc(HDLC_MAX, GFP_KERNEL)) == NULL)
goto err;
kmemleak_not_leak(driver->hdlc_buf);
- if (driver->user_space_data == NULL)
- driver->user_space_data = kzalloc(USER_SPACE_DATA, GFP_KERNEL);
- if (driver->user_space_data == NULL)
- goto err;
- kmemleak_not_leak(driver->user_space_data);
if (driver->client_map == NULL &&
(driver->client_map = kzalloc
((driver->num_clients) * sizeof(struct diag_client_map),
@@ -1741,6 +1839,10 @@
}
driver->diag_wq = create_singlethread_workqueue("diag_wq");
#ifdef CONFIG_DIAG_OVER_USB
+ INIT_WORK(&(driver->diag_usb_connect_work),
+ diag_usb_connect_work_fn);
+ INIT_WORK(&(driver->diag_usb_disconnect_work),
+ diag_usb_disconnect_work_fn);
INIT_WORK(&(driver->diag_proc_hdlc_work), diag_process_hdlc_fn);
INIT_WORK(&(driver->diag_read_work), diag_read_work_fn);
driver->legacy_ch = usb_diag_open(DIAG_LEGACY, driver,
@@ -1771,7 +1873,6 @@
kfree(driver->pkt_buf);
kfree(driver->usb_read_ptr);
kfree(driver->apps_rsp_buf);
- kfree(driver->user_space_data);
if (driver->diag_wq)
destroy_workqueue(driver->diag_wq);
}
@@ -1804,6 +1905,5 @@
kfree(driver->pkt_buf);
kfree(driver->usb_read_ptr);
kfree(driver->apps_rsp_buf);
- kfree(driver->user_space_data);
destroy_workqueue(driver->diag_wq);
}
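
Editor's note: the diagfwd change above stops calling diagfwd_connect()/diagfwd_disconnect() directly from the USB notification callback and instead queues work so the heavy lifting runs in process context. Below is a short sketch of that deferral; all example_* names and the EXAMPLE_CONNECT event code are hypothetical.

#include <linux/errno.h>
#include <linux/workqueue.h>

#define EXAMPLE_CONNECT 1	/* illustrative event code */

static struct workqueue_struct *example_wq;
static struct work_struct example_connect_work;

static void example_do_connect(void)
{
	/* the real connect handling (which may sleep) would live here */
}

static void example_connect_work_fn(struct work_struct *w)
{
	example_do_connect();	/* runs later, in process context */
}

static int example_setup(void)
{
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_connect_work, example_connect_work_fn);
	return 0;
}

static void example_usb_notify(unsigned long event)
{
	/* the notifier itself only queues work and returns quickly */
	if (event == EXAMPLE_CONNECT)
		queue_work(example_wq, &example_connect_work);
}
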
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
index 475f5ba..8c07219b 100644
--- a/drivers/char/diag/diagfwd_bridge.c
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -233,7 +233,8 @@
switch (event) {
case USB_DIAG_CONNECT:
- diagfwd_connect_bridge(1);
+ queue_work(driver->diag_wq,
+ &driver->diag_connect_work);
break;
case USB_DIAG_DISCONNECT:
queue_work(driver->diag_wq,
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index 0cd8267..bd339e2 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -45,6 +45,15 @@
GFP_ATOMIC);
}
}
+ } else if (pool_type == POOL_TYPE_USER) {
+ if (driver->diag_user_pool) {
+ if (driver->count_user_pool < driver->poolsize_user) {
+ atomic_add(1,
+ (atomic_t *)&driver->count_user_pool);
+ buf = mempool_alloc(driver->diag_user_pool,
+ GFP_ATOMIC);
+ }
+ }
} else if (pool_type == POOL_TYPE_WRITE_STRUCT) {
if (driver->diag_write_struct_pool) {
if (driver->count_write_struct_pool <
@@ -98,8 +107,9 @@
mempool_destroy(driver->diagpool);
driver->diagpool = NULL;
} else if (driver->ref_count == 0 && pool_type ==
- POOL_TYPE_ALL)
- printk(KERN_ALERT "Unable to destroy COPY mempool");
+ POOL_TYPE_ALL) {
+ pr_err("diag: Unable to destroy COPY mempool");
+ }
}
if (driver->diag_hdlc_pool) {
@@ -107,8 +117,19 @@
mempool_destroy(driver->diag_hdlc_pool);
driver->diag_hdlc_pool = NULL;
} else if (driver->ref_count == 0 && pool_type ==
- POOL_TYPE_ALL)
- printk(KERN_ALERT "Unable to destroy HDLC mempool");
+ POOL_TYPE_ALL) {
+ pr_err("diag: Unable to destroy HDLC mempool");
+ }
+ }
+
+ if (driver->diag_user_pool) {
+ if (driver->count_user_pool == 0 && driver->ref_count == 0) {
+ mempool_destroy(driver->diag_user_pool);
+ driver->diag_user_pool = NULL;
+ } else if (driver->ref_count == 0 && pool_type ==
+ POOL_TYPE_ALL) {
+ pr_err("diag: Unable to destroy USER mempool");
+ }
}
if (driver->diag_write_struct_pool) {
@@ -119,8 +140,9 @@
mempool_destroy(driver->diag_write_struct_pool);
driver->diag_write_struct_pool = NULL;
} else if (driver->ref_count == 0 && pool_type ==
- POOL_TYPE_ALL)
- printk(KERN_ALERT "Unable to destroy STRUCT mempool");
+ POOL_TYPE_ALL) {
+ pr_err("diag: Unable to destroy STRUCT mempool");
+ }
}
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
for (index = 0; index < MAX_HSIC_CH; index++) {
@@ -163,16 +185,25 @@
mempool_free(buf, driver->diagpool);
atomic_add(-1, (atomic_t *)&driver->count);
} else
- pr_err("diag: Attempt to free up DIAG driver "
- "mempool memory which is already free %d", driver->count);
+ pr_err("diag: Attempt to free up DIAG driver mempool memory which is already free %d",
+ driver->count);
} else if (pool_type == POOL_TYPE_HDLC) {
if (driver->diag_hdlc_pool != NULL &&
driver->count_hdlc_pool > 0) {
mempool_free(buf, driver->diag_hdlc_pool);
atomic_add(-1, (atomic_t *)&driver->count_hdlc_pool);
} else
- pr_err("diag: Attempt to free up DIAG driver "
- "HDLC mempool which is already free %d ", driver->count_hdlc_pool);
+ pr_err("diag: Attempt to free up DIAG driver HDLC mempool which is already free %d ",
+ driver->count_hdlc_pool);
+ } else if (pool_type == POOL_TYPE_USER) {
+ if (driver->diag_user_pool != NULL &&
+ driver->count_user_pool > 0) {
+ mempool_free(buf, driver->diag_user_pool);
+ atomic_add(-1, (atomic_t *)&driver->count_user_pool);
+ } else {
+ pr_err("diag: Attempt to free up DIAG driver USER mempool which is already free %d ",
+ driver->count_user_pool);
+ }
} else if (pool_type == POOL_TYPE_WRITE_STRUCT) {
if (driver->diag_write_struct_pool != NULL &&
driver->count_write_struct_pool > 0) {
@@ -180,9 +211,8 @@
atomic_add(-1,
(atomic_t *)&driver->count_write_struct_pool);
} else
- pr_err("diag: Attempt to free up DIAG driver "
- "USB structure mempool which is already free %d ",
- driver->count_write_struct_pool);
+ pr_err("diag: Attempt to free up DIAG driver USB structure mempool which is already free %d ",
+ driver->count_write_struct_pool);
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
} else if (pool_type == POOL_TYPE_HSIC ||
pool_type == POOL_TYPE_HSIC_2) {
@@ -229,18 +259,25 @@
driver->diag_hdlc_pool = mempool_create_kmalloc_pool(
driver->poolsize_hdlc, driver->itemsize_hdlc);
+ if (driver->count_user_pool == 0)
+ driver->diag_user_pool = mempool_create_kmalloc_pool(
+ driver->poolsize_user, driver->itemsize_user);
+
if (driver->count_write_struct_pool == 0)
driver->diag_write_struct_pool = mempool_create_kmalloc_pool(
driver->poolsize_write_struct, driver->itemsize_write_struct);
if (!driver->diagpool)
- printk(KERN_INFO "Cannot allocate diag mempool\n");
+ pr_err("diag: Cannot allocate diag mempool\n");
if (!driver->diag_hdlc_pool)
- printk(KERN_INFO "Cannot allocate diag HDLC mempool\n");
+ pr_err("diag: Cannot allocate diag HDLC mempool\n");
+
+ if (!driver->diag_user_pool)
+ pr_err("diag: Cannot allocate diag USER mempool\n");
if (!driver->diag_write_struct_pool)
- printk(KERN_INFO "Cannot allocate diag USB struct mempool\n");
+ pr_err("diag: Cannot allocate diag USB struct mempool\n");
}
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
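
Editor's note: the diagmem changes above add a third kmalloc-backed pool alongside the COPY and HDLC pools. Creating and tearing such a pool down is a two-call affair; here is a minimal sketch with an illustrative reserve of 8 buffers of 8 KB each.

#include <linux/errno.h>
#include <linux/mempool.h>

static mempool_t *example_user_pool;

static int example_pool_init(void)
{
	/* kmalloc-backed pool: guarantees at least 8 elements of 8192 bytes */
	example_user_pool = mempool_create_kmalloc_pool(8, 8192);
	if (!example_user_pool)
		return -ENOMEM;
	return 0;
}

static void example_pool_exit(void)
{
	mempool_destroy(example_user_pool);
	example_user_pool = NULL;
}
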
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index e946b42..9a43ea4 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -181,8 +181,7 @@
pr_err("ion_import_dma_buf() failed\n");
return PTR_ERR(*pihdl);
}
- pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl,
- ion_share_dma_buf(msm_rotator_dev->client, *pihdl));
+ pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl, mem_id);
if (rot_iommu_split_domain) {
if (secure) {
diff --git a/drivers/coresight/coresight-csr.c b/drivers/coresight/coresight-csr.c
index 988d1c9..1c2ab25 100644
--- a/drivers/coresight/coresight-csr.c
+++ b/drivers/coresight/coresight-csr.c
@@ -102,7 +102,7 @@
CSR_LOCK(drvdata);
}
-EXPORT_SYMBOL_GPL(msm_qdss_csr_enable_bam_to_usb);
+EXPORT_SYMBOL(msm_qdss_csr_enable_bam_to_usb);
void msm_qdss_csr_disable_bam_to_usb(void)
{
@@ -117,7 +117,7 @@
CSR_LOCK(drvdata);
}
-EXPORT_SYMBOL_GPL(msm_qdss_csr_disable_bam_to_usb);
+EXPORT_SYMBOL(msm_qdss_csr_disable_bam_to_usb);
void msm_qdss_csr_disable_flush(void)
{
@@ -132,7 +132,7 @@
CSR_LOCK(drvdata);
}
-EXPORT_SYMBOL_GPL(msm_qdss_csr_disable_flush);
+EXPORT_SYMBOL(msm_qdss_csr_disable_flush);
static int __devinit csr_probe(struct platform_device *pdev)
{
diff --git a/drivers/coresight/coresight-etm.c b/drivers/coresight/coresight-etm.c
index 2777769..5a5c0cf 100644
--- a/drivers/coresight/coresight-etm.c
+++ b/drivers/coresight/coresight-etm.c
@@ -276,23 +276,32 @@
}
/*
- * Memory mapped writes to clear os lock are not supported on Krait v1, v2
- * and OS lock must be unlocked before any memory mapped access, otherwise
- * memory mapped reads/writes will be invalid.
+ * Unlock OS lock to allow memory mapped access on Krait and in general
+ * so that ETMSR[1] can be polled while clearing the ETMCR[10] prog bit
+ * since ETMSR[1] is set when prog bit is set or OS lock is set.
*/
static void etm_os_unlock(void *info)
{
struct etm_drvdata *drvdata = (struct etm_drvdata *) info;
- ETM_UNLOCK(drvdata);
+ /*
+ * Memory mapped writes to clear os lock are not supported on Krait v1,
+ * v2 and OS lock must be unlocked before any memory mapped access,
+ * otherwise memory mapped reads/writes will be invalid.
+ */
if (cpu_is_krait()) {
etm_writel_cp14(0x0, ETMOSLAR);
+ /* ensure os lock is unlocked before we return */
isb();
- } else if (etm_os_lock_present(drvdata)) {
- etm_writel(drvdata, 0x0, ETMOSLAR);
- mb();
+ } else {
+ ETM_UNLOCK(drvdata);
+ if (etm_os_lock_present(drvdata)) {
+ etm_writel(drvdata, 0x0, ETMOSLAR);
+ /* ensure os lock is unlocked before we return */
+ mb();
+ }
+ ETM_LOCK(drvdata);
}
- ETM_LOCK(drvdata);
}
/*
@@ -1876,11 +1885,26 @@
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ static bool clk_disable[NR_CPUS];
+ int ret;
if (!etmdrvdata[cpu])
goto out;
switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_UP_PREPARE:
+ if (!etmdrvdata[cpu]->os_unlock) {
+ ret = clk_prepare_enable(etmdrvdata[cpu]->clk);
+ if (ret) {
+ dev_err(etmdrvdata[cpu]->dev,
+ "ETM clk enable during hotplug failed"
+ "for cpu: %d, ret: %d\n", cpu, ret);
+ return notifier_from_errno(ret);
+ }
+ clk_disable[cpu] = true;
+ }
+ break;
+
case CPU_STARTING:
spin_lock(&etmdrvdata[cpu]->spinlock);
if (!etmdrvdata[cpu]->os_unlock) {
@@ -1894,6 +1918,11 @@
break;
case CPU_ONLINE:
+ if (clk_disable[cpu]) {
+ clk_disable_unprepare(etmdrvdata[cpu]->clk);
+ clk_disable[cpu] = false;
+ }
+
if (etmdrvdata[cpu]->boot_enable &&
!etmdrvdata[cpu]->sticky_enable)
coresight_enable(etmdrvdata[cpu]->csdev);
@@ -1903,6 +1932,13 @@
__etm_store_pcsave(etmdrvdata[cpu], 1);
break;
+ case CPU_UP_CANCELED:
+ if (clk_disable[cpu]) {
+ clk_disable_unprepare(etmdrvdata[cpu]->clk);
+ clk_disable[cpu] = false;
+ }
+ break;
+
case CPU_DYING:
spin_lock(&etmdrvdata[cpu]->spinlock);
if (etmdrvdata[cpu]->enable && etmdrvdata[cpu]->round_robin)
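
Editor's note: the ETM hotplug changes above take an extra clock reference in CPU_UP_PREPARE (where sleeping is allowed) and release it again in CPU_ONLINE or CPU_UP_CANCELED. Below is a stripped-down sketch of that balance only; the example_clk[] array is hypothetical and the other per-state work of the real notifier is omitted.

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/threads.h>

static struct clk *example_clk[NR_CPUS];	/* hypothetical per-CPU clocks */

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	static bool clk_on[NR_CPUS];
	int ret;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* process context: clk_prepare_enable() may sleep */
		ret = clk_prepare_enable(example_clk[cpu]);
		if (ret)
			return notifier_from_errno(ret);
		clk_on[cpu] = true;
		break;
	case CPU_ONLINE:
	case CPU_UP_CANCELED:
		/* bring-up finished or was aborted: drop the extra reference */
		if (clk_on[cpu]) {
			clk_disable_unprepare(example_clk[cpu]);
			clk_on[cpu] = false;
		}
		break;
	}
	return NOTIFY_OK;
}
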
diff --git a/drivers/coresight/coresight-stm.c b/drivers/coresight/coresight-stm.c
index 87cf63a..7d4dabe 100644
--- a/drivers/coresight/coresight-stm.c
+++ b/drivers/coresight/coresight-stm.c
@@ -595,7 +595,7 @@
return __stm_trace(options, entity_id, proto_id, data, size);
}
-EXPORT_SYMBOL_GPL(stm_trace);
+EXPORT_SYMBOL(stm_trace);
static ssize_t stm_write(struct file *file, const char __user *data,
size_t size, loff_t *ppos)
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
index aef3d26..e237fb7 100644
--- a/drivers/coresight/coresight.c
+++ b/drivers/coresight/coresight.c
@@ -368,7 +368,7 @@
pr_err("coresight: enable failed\n");
return ret;
}
-EXPORT_SYMBOL_GPL(coresight_enable);
+EXPORT_SYMBOL(coresight_enable);
void coresight_disable(struct coresight_device *csdev)
{
@@ -391,7 +391,7 @@
out:
up(&coresight_mutex);
}
-EXPORT_SYMBOL_GPL(coresight_disable);
+EXPORT_SYMBOL(coresight_disable);
void coresight_abort(void)
{
@@ -415,7 +415,7 @@
out:
up(&coresight_mutex);
}
-EXPORT_SYMBOL_GPL(coresight_abort);
+EXPORT_SYMBOL(coresight_abort);
static ssize_t coresight_show_type(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -681,7 +681,7 @@
err_kzalloc_csdev:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(coresight_register);
+EXPORT_SYMBOL(coresight_register);
void coresight_unregister(struct coresight_device *csdev)
{
@@ -693,7 +693,7 @@
put_device(&csdev->dev);
}
}
-EXPORT_SYMBOL_GPL(coresight_unregister);
+EXPORT_SYMBOL(coresight_unregister);
static int __init coresight_init(void)
{
diff --git a/drivers/coresight/of_coresight.c b/drivers/coresight/of_coresight.c
index 1eccd09..8b8c0d62 100644
--- a/drivers/coresight/of_coresight.c
+++ b/drivers/coresight/of_coresight.c
@@ -97,7 +97,7 @@
"coresight-default-sink");
return pdata;
}
-EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
+EXPORT_SYMBOL(of_get_coresight_platform_data);
struct coresight_cti_data *of_get_coresight_cti_data(
struct device *dev, struct device_node *node)
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 63cdc68..7d1952c 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -20,97 +20,94 @@
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/input.h>
#include <asm/cputime.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>
-static atomic_t active_count = ATOMIC_INIT(0);
+static int active_count;
struct cpufreq_interactive_cpuinfo {
struct timer_list cpu_timer;
- int timer_idlecancel;
+ struct timer_list cpu_slack_timer;
+ spinlock_t load_lock; /* protects the next 4 fields */
u64 time_in_idle;
- u64 idle_exit_time;
- u64 timer_run_time;
- int idling;
- u64 target_set_time;
- u64 target_set_time_in_idle;
+ u64 time_in_idle_timestamp;
+ u64 cputime_speedadj;
+ u64 cputime_speedadj_timestamp;
struct cpufreq_policy *policy;
struct cpufreq_frequency_table *freq_table;
unsigned int target_freq;
unsigned int floor_freq;
u64 floor_validate_time;
u64 hispeed_validate_time;
+ struct rw_semaphore enable_sem;
int governor_enabled;
};
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
-/* Workqueues handle frequency scaling */
-static struct task_struct *up_task;
-static struct workqueue_struct *down_wq;
-static struct work_struct freq_scale_down_work;
-static cpumask_t up_cpumask;
-static spinlock_t up_cpumask_lock;
-static cpumask_t down_cpumask;
-static spinlock_t down_cpumask_lock;
-static struct mutex set_speed_lock;
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
/* Hi speed to bump to from lo speed when load burst (default max) */
-static u64 hispeed_freq;
+static unsigned int hispeed_freq;
/* Go to hi speed when CPU load at or above this value. */
-#define DEFAULT_GO_HISPEED_LOAD 85
-static unsigned long go_hispeed_load;
+#define DEFAULT_GO_HISPEED_LOAD 99
+static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+
+/* Target load. Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+static spinlock_t target_loads_lock;
+static unsigned int *target_loads = default_target_loads;
+static int ntarget_loads = ARRAY_SIZE(default_target_loads);
/*
* The minimum amount of time to spend at a frequency before we can ramp down.
*/
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
-static unsigned long min_sample_time;
+static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
/*
* The sample rate of the timer used to increase frequency
*/
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
-static unsigned long timer_rate;
+static unsigned long timer_rate = DEFAULT_TIMER_RATE;
/*
* Wait this long before raising speed above hispeed, by default a single
* timer interval.
*/
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
-static unsigned long above_hispeed_delay_val;
+static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
-/*
- * Boost pulse to hispeed on touchscreen input.
- */
-
-static int input_boost_val;
-
-struct cpufreq_interactive_inputopen {
- struct input_handle *handle;
- struct work_struct inputopen_work;
-};
-
-static struct cpufreq_interactive_inputopen inputopen;
-
-/*
- * Non-zero means longer-term speed boost active.
- */
-
+/* Non-zero means indefinite speed boost active */
static int boost_val;
+/* Duration of a boost pulse in usecs */
+static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+/* End time of boost pulse in ktime converted to usecs */
+static u64 boostpulse_endtime;
+
+/*
+ * Max additional time to wait in idle, beyond timer_rate, at speeds above
+ * minimum before wakeup to reduce speed, or -1 if unnecessary.
+ */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+static int timer_slack_val = DEFAULT_TIMER_SLACK;
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event);
@@ -125,104 +122,210 @@
.owner = THIS_MODULE,
};
-static void cpufreq_interactive_timer(unsigned long data)
+static void cpufreq_interactive_timer_resched(
+ struct cpufreq_interactive_cpuinfo *pcpu)
{
- unsigned int delta_idle;
- unsigned int delta_time;
- int cpu_load;
- int load_since_change;
- u64 time_in_idle;
- u64 idle_exit_time;
- struct cpufreq_interactive_cpuinfo *pcpu =
- &per_cpu(cpuinfo, data);
- u64 now_idle;
- unsigned int new_freq;
- unsigned int index;
+ unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
unsigned long flags;
- smp_rmb();
+ mod_timer_pinned(&pcpu->cpu_timer, expires);
+ if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
+ expires += usecs_to_jiffies(timer_slack_val);
+ mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
+ }
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ pcpu->time_in_idle =
+ get_cpu_idle_time_us(smp_processor_id(),
+ &pcpu->time_in_idle_timestamp);
+ pcpu->cputime_speedadj = 0;
+ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+static unsigned int freq_to_targetload(unsigned int freq)
+{
+ int i;
+ unsigned int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+
+ for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
+ ;
+
+ ret = target_loads[i];
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return ret;
+}
+
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+
+static unsigned int choose_freq(
+ struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
+{
+ unsigned int freq = pcpu->policy->cur;
+ unsigned int prevfreq, freqmin, freqmax;
+ unsigned int tl;
+ int index;
+
+ freqmin = 0;
+ freqmax = UINT_MAX;
+
+ do {
+ prevfreq = freq;
+ tl = freq_to_targetload(freq);
+
+ /*
+ * Find the lowest frequency where the computed load is less
+ * than or equal to the target load.
+ */
+
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+ CPUFREQ_RELATION_L, &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ if (freq > prevfreq) {
+ /* The previous frequency is too low. */
+ freqmin = prevfreq;
+
+ if (freq >= freqmax) {
+ /*
+ * Find the highest frequency that is less
+ * than freqmax.
+ */
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmax - 1, CPUFREQ_RELATION_H,
+ &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ if (freq == freqmin) {
+ /*
+ * The first frequency below freqmax
+ * has already been found to be too
+ * low. freqmax is the lowest speed
+ * we found that is fast enough.
+ */
+ freq = freqmax;
+ break;
+ }
+ }
+ } else if (freq < prevfreq) {
+ /* The previous frequency is high enough. */
+ freqmax = prevfreq;
+
+ if (freq <= freqmin) {
+ /*
+ * Find the lowest frequency that is higher
+ * than freqmin.
+ */
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmin + 1, CPUFREQ_RELATION_L,
+ &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ /*
+ * If freqmax is the first frequency above
+ * freqmin then we have already found that
+ * this speed is fast enough.
+ */
+ if (freq == freqmax)
+ break;
+ }
+ }
+
+ /* If same frequency chosen as previous then done. */
+ } while (freq != prevfreq);
+
+ return freq;
+}
+
+static u64 update_load(int cpu)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+ u64 now;
+ u64 now_idle;
+ unsigned int delta_idle;
+ unsigned int delta_time;
+ u64 active_time;
+
+ now_idle = get_cpu_idle_time_us(cpu, &now);
+ delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+ delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+ active_time = delta_time - delta_idle;
+ pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+ pcpu->time_in_idle = now_idle;
+ pcpu->time_in_idle_timestamp = now;
+ return now;
+}
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+ u64 now;
+ unsigned int delta_time;
+ u64 cputime_speedadj;
+ int cpu_load;
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, data);
+ unsigned int new_freq;
+ unsigned int loadadjfreq;
+ unsigned int index;
+ unsigned long flags;
+ bool boosted;
+
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
if (!pcpu->governor_enabled)
goto exit;
- /*
- * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
- * this lets idle exit know the current idle time sample has
- * been processed, and idle exit can generate a new sample and
- * re-arm the timer. This prevents a concurrent idle
- * exit on that CPU from writing a new set of info at the same time
- * the timer function runs (the timer function can't use that info
- * until more time passes).
- */
- time_in_idle = pcpu->time_in_idle;
- idle_exit_time = pcpu->idle_exit_time;
- now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
- smp_wmb();
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ now = update_load(data);
+ delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+ cputime_speedadj = pcpu->cputime_speedadj;
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
- /* If we raced with cancelling a timer, skip. */
- if (!idle_exit_time)
- goto exit;
-
- delta_idle = (unsigned int)(now_idle - time_in_idle);
- delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);
-
- /*
- * If timer ran less than 1ms after short-term sample started, retry.
- */
- if (delta_time < 1000)
+ if (WARN_ON_ONCE(!delta_time))
goto rearm;
- if (delta_idle > delta_time)
- cpu_load = 0;
- else
- cpu_load = 100 * (delta_time - delta_idle) / delta_time;
+ do_div(cputime_speedadj, delta_time);
+ loadadjfreq = (unsigned int)cputime_speedadj * 100;
+ cpu_load = loadadjfreq / pcpu->target_freq;
+ boosted = boost_val || now < boostpulse_endtime;
- delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
- delta_time = (unsigned int)(pcpu->timer_run_time -
- pcpu->target_set_time);
-
- if ((delta_time == 0) || (delta_idle > delta_time))
- load_since_change = 0;
- else
- load_since_change =
- 100 * (delta_time - delta_idle) / delta_time;
-
- /*
- * Choose greater of short-term load (since last idle timer
- * started or timer function re-armed itself) or long-term load
- * (since last frequency change).
- */
- if (load_since_change > cpu_load)
- cpu_load = load_since_change;
-
- if (cpu_load >= go_hispeed_load || boost_val) {
- if (pcpu->target_freq <= pcpu->policy->min) {
+ if (cpu_load >= go_hispeed_load || boosted) {
+ if (pcpu->target_freq < hispeed_freq) {
new_freq = hispeed_freq;
} else {
- new_freq = pcpu->policy->max * cpu_load / 100;
+ new_freq = choose_freq(pcpu, loadadjfreq);
if (new_freq < hispeed_freq)
new_freq = hispeed_freq;
-
- if (pcpu->target_freq == hispeed_freq &&
- new_freq > hispeed_freq &&
- pcpu->timer_run_time - pcpu->hispeed_validate_time
- < above_hispeed_delay_val) {
- trace_cpufreq_interactive_notyet(data, cpu_load,
- pcpu->target_freq,
- new_freq);
- goto rearm;
- }
}
} else {
- new_freq = pcpu->policy->max * cpu_load / 100;
+ new_freq = choose_freq(pcpu, loadadjfreq);
}
- if (new_freq <= hispeed_freq)
- pcpu->hispeed_validate_time = pcpu->timer_run_time;
+ if (pcpu->target_freq >= hispeed_freq &&
+ new_freq > pcpu->target_freq &&
+ now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm;
+ }
+
+ pcpu->hispeed_validate_time = now;
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
- new_freq, CPUFREQ_RELATION_H,
+ new_freq, CPUFREQ_RELATION_L,
&index)) {
pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
(int) data);
@@ -236,41 +339,42 @@
* floor frequency for the minimum sample time since last validated.
*/
if (new_freq < pcpu->floor_freq) {
- if (pcpu->timer_run_time - pcpu->floor_validate_time
- < min_sample_time) {
- trace_cpufreq_interactive_notyet(data, cpu_load,
- pcpu->target_freq, new_freq);
+ if (now - pcpu->floor_validate_time < min_sample_time) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
goto rearm;
}
}
- pcpu->floor_freq = new_freq;
- pcpu->floor_validate_time = pcpu->timer_run_time;
+ /*
+ * Update the timestamp for checking whether speed has been held at
+ * or above the selected frequency for a minimum of min_sample_time,
+ * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
+ * allow the speed to drop as soon as the boostpulse duration expires
+ * (or the indefinite boost is turned off).
+ */
+
+ if (!boosted || new_freq > hispeed_freq) {
+ pcpu->floor_freq = new_freq;
+ pcpu->floor_validate_time = now;
+ }
if (pcpu->target_freq == new_freq) {
- trace_cpufreq_interactive_already(data, cpu_load,
- pcpu->target_freq, new_freq);
+ trace_cpufreq_interactive_already(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
goto rearm_if_notmax;
}
trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
- new_freq);
- pcpu->target_set_time_in_idle = now_idle;
- pcpu->target_set_time = pcpu->timer_run_time;
+ pcpu->policy->cur, new_freq);
- if (new_freq < pcpu->target_freq) {
- pcpu->target_freq = new_freq;
- spin_lock_irqsave(&down_cpumask_lock, flags);
- cpumask_set_cpu(data, &down_cpumask);
- spin_unlock_irqrestore(&down_cpumask_lock, flags);
- queue_work(down_wq, &freq_scale_down_work);
- } else {
- pcpu->target_freq = new_freq;
- spin_lock_irqsave(&up_cpumask_lock, flags);
- cpumask_set_cpu(data, &up_cpumask);
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
- wake_up_process(up_task);
- }
+ pcpu->target_freq = new_freq;
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+ cpumask_set_cpu(data, &speedchange_cpumask);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+ wake_up_process(speedchange_task);
rearm_if_notmax:
/*
@@ -281,28 +385,11 @@
goto exit;
rearm:
- if (!timer_pending(&pcpu->cpu_timer)) {
- /*
- * If already at min: if that CPU is idle, don't set timer.
- * Else cancel the timer if that CPU goes idle. We don't
- * need to re-evaluate speed until the next idle exit.
- */
- if (pcpu->target_freq == pcpu->policy->min) {
- smp_rmb();
-
- if (pcpu->idling)
- goto exit;
-
- pcpu->timer_idlecancel = 1;
- }
-
- pcpu->time_in_idle = get_cpu_idle_time_us(
- data, &pcpu->idle_exit_time);
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
- }
+ if (!timer_pending(&pcpu->cpu_timer))
+ cpufreq_interactive_timer_resched(pcpu);
exit:
+ up_read(&pcpu->enable_sem);
return;
}
@@ -312,15 +399,16 @@
&per_cpu(cpuinfo, smp_processor_id());
int pending;
- if (!pcpu->governor_enabled)
+ if (!down_read_trylock(&pcpu->enable_sem))
return;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return;
+ }
- pcpu->idling = 1;
- smp_wmb();
pending = timer_pending(&pcpu->cpu_timer);
if (pcpu->target_freq != pcpu->policy->min) {
-#ifdef CONFIG_SMP
/*
* Entering idle while not at lowest speed. On some
* platforms this can hold the other CPU(s) at that speed
@@ -329,33 +417,11 @@
* min indefinitely. This should probably be a quirk of
* the CPUFreq driver.
*/
- if (!pending) {
- pcpu->time_in_idle = get_cpu_idle_time_us(
- smp_processor_id(), &pcpu->idle_exit_time);
- pcpu->timer_idlecancel = 0;
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
- }
-#endif
- } else {
- /*
- * If at min speed and entering idle after load has
- * already been evaluated, and a timer has been set just in
- * case the CPU suddenly goes busy, cancel that timer. The
- * CPU didn't go busy; we'll recheck things upon idle exit.
- */
- if (pending && pcpu->timer_idlecancel) {
- del_timer(&pcpu->cpu_timer);
- /*
- * Ensure last timer run time is after current idle
- * sample start time, so next idle exit will always
- * start a new idle sampling period.
- */
- pcpu->idle_exit_time = 0;
- pcpu->timer_idlecancel = 0;
- }
+ if (!pending)
+ cpufreq_interactive_timer_resched(pcpu);
}
+ up_read(&pcpu->enable_sem);
}
static void cpufreq_interactive_idle_end(void)
@@ -363,34 +429,26 @@
struct cpufreq_interactive_cpuinfo *pcpu =
&per_cpu(cpuinfo, smp_processor_id());
- pcpu->idling = 0;
- smp_wmb();
-
- /*
- * Arm the timer for 1-2 ticks later if not already, and if the timer
- * function has already processed the previous load sampling
- * interval. (If the timer is not pending but has not processed
- * the previous interval, it is probably racing with us on another
- * CPU. Let it compute load based on the previous sample and then
- * re-arm the timer for another interval when it's done, rather
- * than updating the interval start time to be "now", which doesn't
- * give the timer function enough time to make a decision on this
- * run.)
- */
- if (timer_pending(&pcpu->cpu_timer) == 0 &&
- pcpu->timer_run_time >= pcpu->idle_exit_time &&
- pcpu->governor_enabled) {
- pcpu->time_in_idle =
- get_cpu_idle_time_us(smp_processor_id(),
- &pcpu->idle_exit_time);
- pcpu->timer_idlecancel = 0;
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return;
}
+ /* Arm the timer for 1-2 ticks later if not already. */
+ if (!timer_pending(&pcpu->cpu_timer)) {
+ cpufreq_interactive_timer_resched(pcpu);
+ } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+ del_timer(&pcpu->cpu_timer);
+ del_timer(&pcpu->cpu_slack_timer);
+ cpufreq_interactive_timer(smp_processor_id());
+ }
+
+ up_read(&pcpu->enable_sem);
}
-static int cpufreq_interactive_up_task(void *data)
+static int cpufreq_interactive_speedchange_task(void *data)
{
unsigned int cpu;
cpumask_t tmp_mask;
@@ -399,34 +457,35 @@
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&up_cpumask_lock, flags);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
- if (cpumask_empty(&up_cpumask)) {
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ if (cpumask_empty(&speedchange_cpumask)) {
+ spin_unlock_irqrestore(&speedchange_cpumask_lock,
+ flags);
schedule();
if (kthread_should_stop())
break;
- spin_lock_irqsave(&up_cpumask_lock, flags);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
}
set_current_state(TASK_RUNNING);
- tmp_mask = up_cpumask;
- cpumask_clear(&up_cpumask);
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ tmp_mask = speedchange_cpumask;
+ cpumask_clear(&speedchange_cpumask);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
for_each_cpu(cpu, &tmp_mask) {
unsigned int j;
unsigned int max_freq = 0;
pcpu = &per_cpu(cpuinfo, cpu);
- smp_rmb();
-
- if (!pcpu->governor_enabled)
+ if (!down_read_trylock(&pcpu->enable_sem))
continue;
-
- mutex_lock(&set_speed_lock);
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ continue;
+ }
for_each_cpu(j, pcpu->policy->cpus) {
struct cpufreq_interactive_cpuinfo *pjcpu =
@@ -440,57 +499,17 @@
__cpufreq_driver_target(pcpu->policy,
max_freq,
CPUFREQ_RELATION_H);
- mutex_unlock(&set_speed_lock);
- trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
+ trace_cpufreq_interactive_setspeed(cpu,
+ pcpu->target_freq,
pcpu->policy->cur);
+
+ up_read(&pcpu->enable_sem);
}
}
return 0;
}
-static void cpufreq_interactive_freq_down(struct work_struct *work)
-{
- unsigned int cpu;
- cpumask_t tmp_mask;
- unsigned long flags;
- struct cpufreq_interactive_cpuinfo *pcpu;
-
- spin_lock_irqsave(&down_cpumask_lock, flags);
- tmp_mask = down_cpumask;
- cpumask_clear(&down_cpumask);
- spin_unlock_irqrestore(&down_cpumask_lock, flags);
-
- for_each_cpu(cpu, &tmp_mask) {
- unsigned int j;
- unsigned int max_freq = 0;
-
- pcpu = &per_cpu(cpuinfo, cpu);
- smp_rmb();
-
- if (!pcpu->governor_enabled)
- continue;
-
- mutex_lock(&set_speed_lock);
-
- for_each_cpu(j, pcpu->policy->cpus) {
- struct cpufreq_interactive_cpuinfo *pjcpu =
- &per_cpu(cpuinfo, j);
-
- if (pjcpu->target_freq > max_freq)
- max_freq = pjcpu->target_freq;
- }
-
- if (max_freq != pcpu->policy->cur)
- __cpufreq_driver_target(pcpu->policy, max_freq,
- CPUFREQ_RELATION_H);
-
- mutex_unlock(&set_speed_lock);
- trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
- pcpu->policy->cur);
- }
-}
-
static void cpufreq_interactive_boost(void)
{
int i;
@@ -498,17 +517,16 @@
unsigned long flags;
struct cpufreq_interactive_cpuinfo *pcpu;
- spin_lock_irqsave(&up_cpumask_lock, flags);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
for_each_online_cpu(i) {
pcpu = &per_cpu(cpuinfo, i);
if (pcpu->target_freq < hispeed_freq) {
pcpu->target_freq = hispeed_freq;
- cpumask_set_cpu(i, &up_cpumask);
- pcpu->target_set_time_in_idle =
- get_cpu_idle_time_us(i, &pcpu->target_set_time);
- pcpu->hispeed_validate_time = pcpu->target_set_time;
+ cpumask_set_cpu(i, &speedchange_cpumask);
+ pcpu->hispeed_validate_time =
+ ktime_to_us(ktime_get());
anyboost = 1;
}
@@ -521,106 +539,126 @@
pcpu->floor_validate_time = ktime_to_us(ktime_get());
}
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
if (anyboost)
- wake_up_process(up_task);
+ wake_up_process(speedchange_task);
}
-/*
- * Pulsed boost on input event raises CPUs to hispeed_freq and lets
- * usual algorithm of min_sample_time decide when to allow speed
- * to drop.
- */
-
-static void cpufreq_interactive_input_event(struct input_handle *handle,
- unsigned int type,
- unsigned int code, int value)
+static int cpufreq_interactive_notifier(
+ struct notifier_block *nb, unsigned long val, void *data)
{
- if (input_boost_val && type == EV_SYN && code == SYN_REPORT) {
- trace_cpufreq_interactive_boost("input");
- cpufreq_interactive_boost();
+ struct cpufreq_freqs *freq = data;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ int cpu;
+ unsigned long flags;
+
+ if (val == CPUFREQ_POSTCHANGE) {
+ pcpu = &per_cpu(cpuinfo, freq->cpu);
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return 0;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return 0;
+ }
+
+ for_each_cpu(cpu, pcpu->policy->cpus) {
+ struct cpufreq_interactive_cpuinfo *pjcpu =
+ &per_cpu(cpuinfo, cpu);
+ spin_lock_irqsave(&pjcpu->load_lock, flags);
+ update_load(cpu);
+ spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+ }
+
+ up_read(&pcpu->enable_sem);
}
-}
-
-static void cpufreq_interactive_input_open(struct work_struct *w)
-{
- struct cpufreq_interactive_inputopen *io =
- container_of(w, struct cpufreq_interactive_inputopen,
- inputopen_work);
- int error;
-
- error = input_open_device(io->handle);
- if (error)
- input_unregister_handle(io->handle);
-}
-
-static int cpufreq_interactive_input_connect(struct input_handler *handler,
- struct input_dev *dev,
- const struct input_device_id *id)
-{
- struct input_handle *handle;
- int error;
-
- pr_info("%s: connect to %s\n", __func__, dev->name);
- handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->dev = dev;
- handle->handler = handler;
- handle->name = "cpufreq_interactive";
-
- error = input_register_handle(handle);
- if (error)
- goto err;
-
- inputopen.handle = handle;
- queue_work(down_wq, &inputopen.inputopen_work);
return 0;
-err:
- kfree(handle);
- return error;
}
-static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
+static struct notifier_block cpufreq_notifier_block = {
+ .notifier_call = cpufreq_interactive_notifier,
+};
+
+static ssize_t show_target_loads(
+ struct kobject *kobj, struct attribute *attr, char *buf)
{
- input_close_device(handle);
- input_unregister_handle(handle);
- kfree(handle);
+ int i;
+ ssize_t ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+
+ for (i = 0; i < ntarget_loads; i++)
+ ret += sprintf(buf + ret, "%u%s", target_loads[i],
+ i & 0x1 ? ":" : " ");
+
+ ret += sprintf(buf + ret, "\n");
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return ret;
}
-static const struct input_device_id cpufreq_interactive_ids[] = {
- {
- .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
- INPUT_DEVICE_ID_MATCH_ABSBIT,
- .evbit = { BIT_MASK(EV_ABS) },
- .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
- BIT_MASK(ABS_MT_POSITION_X) |
- BIT_MASK(ABS_MT_POSITION_Y) },
- }, /* multi-touch touchscreen */
- {
- .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
- INPUT_DEVICE_ID_MATCH_ABSBIT,
- .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
- .absbit = { [BIT_WORD(ABS_X)] =
- BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
- }, /* touchpad */
- { },
-};
+static ssize_t store_target_loads(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ const char *cp;
+ unsigned int *new_target_loads = NULL;
+ int ntokens = 1;
+ int i;
+ unsigned long flags;
-static struct input_handler cpufreq_interactive_input_handler = {
- .event = cpufreq_interactive_input_event,
- .connect = cpufreq_interactive_input_connect,
- .disconnect = cpufreq_interactive_input_disconnect,
- .name = "cpufreq_interactive",
- .id_table = cpufreq_interactive_ids,
-};
+ cp = buf;
+ while ((cp = strpbrk(cp + 1, " :")))
+ ntokens++;
+
+ if (!(ntokens & 0x1))
+ goto err_inval;
+
+ new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+ if (!new_target_loads) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cp = buf;
+ i = 0;
+ while (i < ntokens) {
+ if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
+ goto err_inval;
+
+ cp = strpbrk(cp, " :");
+ if (!cp)
+ break;
+ cp++;
+ }
+
+ if (i != ntokens)
+ goto err_inval;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+ if (target_loads != default_target_loads)
+ kfree(target_loads);
+ target_loads = new_target_loads;
+ ntarget_loads = ntokens;
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return count;
+
+err_inval:
+ ret = -EINVAL;
+err:
+ kfree(new_target_loads);
+ return ret;
+}
+
+static struct global_attr target_loads_attr =
+ __ATTR(target_loads, S_IRUGO | S_IWUSR,
+ show_target_loads, store_target_loads);
static ssize_t show_hispeed_freq(struct kobject *kobj,
struct attribute *attr, char *buf)
{
- return sprintf(buf, "%llu\n", hispeed_freq);
+ return sprintf(buf, "%u\n", hispeed_freq);
}
static ssize_t store_hispeed_freq(struct kobject *kobj,
@@ -628,9 +666,9 @@
size_t count)
{
int ret;
- u64 val;
+ long unsigned int val;
- ret = strict_strtoull(buf, 0, &val);
+ ret = strict_strtoul(buf, 0, &val);
if (ret < 0)
return ret;
hispeed_freq = val;
@@ -729,26 +767,28 @@
static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
show_timer_rate, store_timer_rate);
-static ssize_t show_input_boost(struct kobject *kobj, struct attribute *attr,
- char *buf)
+static ssize_t show_timer_slack(
+ struct kobject *kobj, struct attribute *attr, char *buf)
{
- return sprintf(buf, "%u\n", input_boost_val);
+ return sprintf(buf, "%d\n", timer_slack_val);
}
-static ssize_t store_input_boost(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
+static ssize_t store_timer_slack(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
{
int ret;
unsigned long val;
- ret = strict_strtoul(buf, 0, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret < 0)
return ret;
- input_boost_val = val;
+
+ timer_slack_val = val;
return count;
}
-define_one_global_rw(input_boost);
+define_one_global_rw(timer_slack);
static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -790,6 +830,7 @@
if (ret < 0)
return ret;
+ boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
trace_cpufreq_interactive_boost("pulse");
cpufreq_interactive_boost();
return count;
@@ -798,15 +839,40 @@
static struct global_attr boostpulse =
__ATTR(boostpulse, 0200, NULL, store_boostpulse);
+static ssize_t show_boostpulse_duration(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ boostpulse_duration_val = val;
+ return count;
+}
+
+define_one_global_rw(boostpulse_duration);
+
static struct attribute *interactive_attributes[] = {
+ &target_loads_attr.attr,
&hispeed_freq_attr.attr,
&go_hispeed_load_attr.attr,
&above_hispeed_delay.attr,
&min_sample_time_attr.attr,
&timer_rate_attr.attr,
- &input_boost.attr,
+ &timer_slack.attr,
&boost.attr,
&boostpulse.attr,
+ &boostpulse_duration.attr,
NULL,
};
@@ -815,102 +881,6 @@
.name = "interactive",
};
-static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
- unsigned int event)
-{
- int rc;
- unsigned int j;
- struct cpufreq_interactive_cpuinfo *pcpu;
- struct cpufreq_frequency_table *freq_table;
-
- switch (event) {
- case CPUFREQ_GOV_START:
- if (!cpu_online(policy->cpu))
- return -EINVAL;
-
- freq_table =
- cpufreq_frequency_get_table(policy->cpu);
-
- for_each_cpu(j, policy->cpus) {
- pcpu = &per_cpu(cpuinfo, j);
- pcpu->policy = policy;
- pcpu->target_freq = policy->cur;
- pcpu->freq_table = freq_table;
- pcpu->target_set_time_in_idle =
- get_cpu_idle_time_us(j,
- &pcpu->target_set_time);
- pcpu->floor_freq = pcpu->target_freq;
- pcpu->floor_validate_time =
- pcpu->target_set_time;
- pcpu->hispeed_validate_time =
- pcpu->target_set_time;
- pcpu->governor_enabled = 1;
- pcpu->idle_exit_time = pcpu->target_set_time;
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
- smp_wmb();
- }
-
- if (!hispeed_freq)
- hispeed_freq = policy->max;
-
- /*
- * Do not register the idle hook and create sysfs
- * entries if we have already done so.
- */
- if (atomic_inc_return(&active_count) > 1)
- return 0;
-
- rc = sysfs_create_group(cpufreq_global_kobject,
- &interactive_attr_group);
- if (rc)
- return rc;
-
- rc = input_register_handler(&cpufreq_interactive_input_handler);
- if (rc)
- pr_warn("%s: failed to register input handler\n",
- __func__);
-
- break;
-
- case CPUFREQ_GOV_STOP:
- for_each_cpu(j, policy->cpus) {
- pcpu = &per_cpu(cpuinfo, j);
- pcpu->governor_enabled = 0;
- smp_wmb();
- del_timer_sync(&pcpu->cpu_timer);
-
- /*
- * Reset idle exit time since we may cancel the timer
- * before it can run after the last idle exit time,
- * to avoid tripping the check in idle exit for a timer
- * that is trying to run.
- */
- pcpu->idle_exit_time = 0;
- }
-
- flush_work(&freq_scale_down_work);
- if (atomic_dec_return(&active_count) > 0)
- return 0;
-
- input_unregister_handler(&cpufreq_interactive_input_handler);
- sysfs_remove_group(cpufreq_global_kobject,
- &interactive_attr_group);
-
- break;
-
- case CPUFREQ_GOV_LIMITS:
- if (policy->max < policy->cur)
- __cpufreq_driver_target(policy,
- policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy,
- policy->min, CPUFREQ_RELATION_L);
- break;
- }
- return 0;
-}
-
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
unsigned long val,
void *data)
@@ -931,57 +901,148 @@
.notifier_call = cpufreq_interactive_idle_notifier,
};
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ int rc;
+ unsigned int j;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ struct cpufreq_frequency_table *freq_table;
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if (!cpu_online(policy->cpu))
+ return -EINVAL;
+
+ mutex_lock(&gov_lock);
+
+ freq_table =
+ cpufreq_frequency_get_table(policy->cpu);
+ if (!hispeed_freq)
+ hispeed_freq = policy->max;
+
+ for_each_cpu(j, policy->cpus) {
+ unsigned long expires;
+
+ pcpu = &per_cpu(cpuinfo, j);
+ pcpu->policy = policy;
+ pcpu->target_freq = policy->cur;
+ pcpu->freq_table = freq_table;
+ pcpu->floor_freq = pcpu->target_freq;
+ pcpu->floor_validate_time =
+ ktime_to_us(ktime_get());
+ pcpu->hispeed_validate_time =
+ pcpu->floor_validate_time;
+ down_write(&pcpu->enable_sem);
+ expires = jiffies + usecs_to_jiffies(timer_rate);
+ pcpu->cpu_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_timer, j);
+ if (timer_slack_val >= 0) {
+ expires += usecs_to_jiffies(timer_slack_val);
+ pcpu->cpu_slack_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_slack_timer, j);
+ }
+ pcpu->governor_enabled = 1;
+ up_write(&pcpu->enable_sem);
+ }
+
+ /*
+ * Do not register the idle hook and create sysfs
+ * entries if we have already done so.
+ */
+ if (++active_count > 1) {
+ mutex_unlock(&gov_lock);
+ return 0;
+ }
+
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &interactive_attr_group);
+ if (rc) {
+ mutex_unlock(&gov_lock);
+ return rc;
+ }
+
+ idle_notifier_register(&cpufreq_interactive_idle_nb);
+ cpufreq_register_notifier(
+ &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
+ mutex_unlock(&gov_lock);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ mutex_lock(&gov_lock);
+ for_each_cpu(j, policy->cpus) {
+ pcpu = &per_cpu(cpuinfo, j);
+ down_write(&pcpu->enable_sem);
+ pcpu->governor_enabled = 0;
+ del_timer_sync(&pcpu->cpu_timer);
+ del_timer_sync(&pcpu->cpu_slack_timer);
+ up_write(&pcpu->enable_sem);
+ }
+
+ if (--active_count > 0) {
+ mutex_unlock(&gov_lock);
+ return 0;
+ }
+
+ cpufreq_unregister_notifier(
+ &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
+ idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+ sysfs_remove_group(cpufreq_global_kobject,
+ &interactive_attr_group);
+ mutex_unlock(&gov_lock);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy,
+ policy->min, CPUFREQ_RELATION_L);
+ break;
+ }
+ return 0;
+}
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
static int __init cpufreq_interactive_init(void)
{
unsigned int i;
struct cpufreq_interactive_cpuinfo *pcpu;
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
- min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
- above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
- timer_rate = DEFAULT_TIMER_RATE;
-
/* Initalize per-cpu timers */
for_each_possible_cpu(i) {
pcpu = &per_cpu(cpuinfo, i);
- init_timer(&pcpu->cpu_timer);
+ init_timer_deferrable(&pcpu->cpu_timer);
pcpu->cpu_timer.function = cpufreq_interactive_timer;
pcpu->cpu_timer.data = i;
+ init_timer(&pcpu->cpu_slack_timer);
+ pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
+ spin_lock_init(&pcpu->load_lock);
+ init_rwsem(&pcpu->enable_sem);
}
- up_task = kthread_create(cpufreq_interactive_up_task, NULL,
- "kinteractiveup");
- if (IS_ERR(up_task))
- return PTR_ERR(up_task);
+ spin_lock_init(&target_loads_lock);
+ spin_lock_init(&speedchange_cpumask_lock);
+ mutex_init(&gov_lock);
+ speedchange_task =
+ kthread_create(cpufreq_interactive_speedchange_task, NULL,
+ "cfinteractive");
+ if (IS_ERR(speedchange_task))
+ return PTR_ERR(speedchange_task);
- sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
- get_task_struct(up_task);
+ sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+ get_task_struct(speedchange_task);
- /* No rescuer thread, bind to CPU queuing the work for possibly
- warm cache (probably doesn't matter much). */
- down_wq = alloc_workqueue("knteractive_down", 0, 1);
+ /* NB: wake up so the thread does not look hung to the freezer */
+ wake_up_process(speedchange_task);
- if (!down_wq)
- goto err_freeuptask;
-
- INIT_WORK(&freq_scale_down_work,
- cpufreq_interactive_freq_down);
-
- spin_lock_init(&up_cpumask_lock);
- spin_lock_init(&down_cpumask_lock);
- mutex_init(&set_speed_lock);
-
- /* Kick the kthread to idle */
- wake_up_process(up_task);
-
- idle_notifier_register(&cpufreq_interactive_idle_nb);
- INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);
return cpufreq_register_governor(&cpufreq_gov_interactive);
-
-err_freeuptask:
- put_task_struct(up_task);
- return -ENOMEM;
}
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
@@ -993,9 +1054,8 @@
static void __exit cpufreq_interactive_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_interactive);
- kthread_stop(up_task);
- put_task_struct(up_task);
- destroy_workqueue(down_wq);
+ kthread_stop(speedchange_task);
+ put_task_struct(speedchange_task);
}
module_exit(cpufreq_interactive_exit);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index c758b3a..99ace44 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -306,7 +306,7 @@
config CRYPTO_DEV_QCE
tristate "Qualcomm Crypto Engine (QCE) module"
select CRYPTO_DEV_QCE40 if ARCH_MSM8960 || ARCH_MSM9615
- select CRYPTO_DEV_QCE50 if ARCH_MSM8974 || ARCH_MSM9625
+ select CRYPTO_DEV_QCE50 if ARCH_MSM8974 || ARCH_MSM9625 || ARCH_MSM8226
default n
help
This driver supports Qualcomm Crypto Engine in MSM7x30, MSM8660
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 24cf30a..7778477 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -2203,6 +2203,18 @@
}
EXPORT_SYMBOL(qce_process_sha_req);
+int qce_enable_clk(void *handle)
+{
+ return 0;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+ return 0;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
/*
* crypto engine open function.
*/
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index 3ff84cf..51a74b6 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -160,5 +160,7 @@
int qce_ablk_cipher_req(void *handle, struct qce_req *req);
int qce_hw_support(void *handle, struct ce_hw_support *support);
int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+int qce_enable_clk(void *handle);
+int qce_disable_clk(void *handle);
#endif /* __CRYPTO_MSM_QCE_H */
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
index 7b0964d..5249917 100644
--- a/drivers/crypto/msm/qce40.c
+++ b/drivers/crypto/msm/qce40.c
@@ -2426,6 +2426,18 @@
}
EXPORT_SYMBOL(qce_process_sha_req);
+int qce_enable_clk(void *handle)
+{
+ return 0;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+ return 0;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 8545a5c..245272b 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -39,8 +39,7 @@
#include "qcryptohw_50.h"
#define CRYPTO_CONFIG_RESET 0xE001F
-#define QCE_MAX_NUM_DSCR 0x400
-#define QCE_SIZE_BAM_DSCR 0x08
+#define QCE_MAX_NUM_DSCR 0x500
#define QCE_SECTOR_SIZE 0x200
static DEFINE_MUTEX(bam_register_cnt);
@@ -611,7 +610,8 @@
/* write xts du size */
pce = cmdlistinfo->encr_xts_du_size;
if (use_pipe_key == true)
- pce->data = QCE_SECTOR_SIZE;
+ pce->data = min((unsigned int)QCE_SECTOR_SIZE,
+ creq->cryptlen);
else
pce->data = creq->cryptlen;
}
@@ -919,17 +919,23 @@
iovec->flags |= flag;
}
-static void _qce_sps_add_data(uint32_t addr, uint32_t len,
+static int _qce_sps_add_data(uint32_t addr, uint32_t len,
struct sps_transfer *sps_bam_pipe)
{
struct sps_iovec *iovec = sps_bam_pipe->iovec +
sps_bam_pipe->iovec_count;
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+ pr_err("Num of descrptor %d exceed max (%d)",
+ sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
if (len) {
iovec->size = len;
iovec->addr = addr;
iovec->flags = 0;
sps_bam_pipe->iovec_count++;
}
+ return 0;
}
static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
@@ -947,6 +953,12 @@
if (pce_dev->ce_sps.minor_version == 0)
len = ALIGN(len, pce_dev->ce_sps.ce_burst_size);
while (len > 0) {
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+ pr_err("Num of descrptor %d exceed max (%d)",
+ sps_bam_pipe->iovec_count,
+ (uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
if (len > SPS_MAX_PKT_SIZE) {
data_cnt = SPS_MAX_PKT_SIZE;
iovec->size = data_cnt;
@@ -1095,7 +1107,8 @@
* descriptor memory (256 bytes + 8 bytes). But in order to be
* in power of 2, we are allocating 512 bytes of memory.
*/
- sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * QCE_SIZE_BAM_DSCR;
+ sps_connect_info->desc.size = QCE_MAX_NUM_DSCR *
+ sizeof(struct sps_iovec);
sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
sps_connect_info->desc.size,
&sps_connect_info->desc.phys_base,
@@ -1324,19 +1337,6 @@
}
};
-static void _aead_sps_consumer_callback(struct sps_event_notify *notify)
-{
- struct qce_device *pce_dev = (struct qce_device *)
- ((struct sps_event_notify *)notify)->user;
-
- pce_dev->ce_sps.notify = *notify;
- pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
- notify->event_id,
- notify->data.transfer.iovec.addr,
- notify->data.transfer.iovec.size,
- notify->data.transfer.iovec.flags);
-};
-
static void _sha_sps_producer_callback(struct sps_event_notify *notify)
{
struct qce_device *pce_dev = (struct qce_device *)
@@ -1352,19 +1352,6 @@
_sha_complete(pce_dev);
};
-static void _sha_sps_consumer_callback(struct sps_event_notify *notify)
-{
- struct qce_device *pce_dev = (struct qce_device *)
- ((struct sps_event_notify *)notify)->user;
-
- pce_dev->ce_sps.notify = *notify;
- pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
- notify->event_id,
- notify->data.transfer.iovec.addr,
- notify->data.transfer.iovec.size,
- notify->data.transfer.iovec.flags);
-};
-
static void _ablk_cipher_sps_producer_callback(struct sps_event_notify *notify)
{
struct qce_device *pce_dev = (struct qce_device *)
@@ -1399,19 +1386,6 @@
}
};
-static void _ablk_cipher_sps_consumer_callback(struct sps_event_notify *notify)
-{
- struct qce_device *pce_dev = (struct qce_device *)
- ((struct sps_event_notify *)notify)->user;
-
- pce_dev->ce_sps.notify = *notify;
- pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
- notify->event_id,
- notify->data.transfer.iovec.addr,
- notify->data.transfer.iovec.size,
- notify->data.transfer.iovec.flags);
-};
-
static void qce_add_cmd_element(struct qce_device *pdev,
struct sps_command_element **cmd_ptr, u32 addr,
u32 data, struct sps_command_element **populate)
@@ -2216,12 +2190,12 @@
pce_dev->ce_sps.in_transfer.iovec = (struct sps_iovec *)vaddr;
pce_dev->ce_sps.in_transfer.iovec_phys =
(uint32_t)GET_PHYS_ADDR(vaddr);
- vaddr += MAX_BAM_DESCRIPTORS * 8;
+ vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec);
pce_dev->ce_sps.out_transfer.iovec = (struct sps_iovec *)vaddr;
pce_dev->ce_sps.out_transfer.iovec_phys =
(uint32_t)GET_PHYS_ADDR(vaddr);
- vaddr += MAX_BAM_DESCRIPTORS * 8;
+ vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec);
qce_setup_cmdlistptrs(pce_dev, &vaddr);
vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr),
@@ -2358,31 +2332,23 @@
pr_err("Producer callback registration failed rc = %d\n", rc);
goto bad;
}
- /* Register callback event for EOT (End of transfer) event. */
- pce_dev->ce_sps.consumer.event.callback = _aead_sps_consumer_callback;
- pce_dev->ce_sps.consumer.event.options = SPS_O_DESC_DONE;
- rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
- &pce_dev->ce_sps.consumer.event);
- if (rc) {
- pr_err("Consumer callback registration failed rc = %d\n", rc);
- goto bad;
- }
-
_qce_sps_iovec_count_init(pce_dev);
_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
&pce_dev->ce_sps.in_transfer);
if (pce_dev->ce_sps.minor_version == 0) {
- _qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
- &pce_dev->ce_sps.in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
- _qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
areq->assoclen + hw_pad_out,
- &pce_dev->ce_sps.out_transfer);
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
if (totallen_in > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
@@ -2390,42 +2356,52 @@
SPS_O_DESC_DONE;
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
- _qce_sps_add_data(GET_PHYS_ADDR(
+ if (_qce_sps_add_data(GET_PHYS_ADDR(
pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
- &pce_dev->ce_sps.out_transfer);
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
}
} else {
- _qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
- &pce_dev->ce_sps.in_transfer);
- _qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize,
- &pce_dev->ce_sps.in_transfer);
- _qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
- &pce_dev->ce_sps.in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
+ if (_qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
/* Pass through to ignore associated (+iv, if applicable) data*/
- _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
(ivsize + areq->assoclen),
- &pce_dev->ce_sps.out_transfer);
- _qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
- &pce_dev->ce_sps.out_transfer);
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
/* Pass through to ignore hw_pad (padding of the MAC data) */
- _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
- hw_pad_out, &pce_dev->ce_sps.out_transfer);
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
+ hw_pad_out, &pce_dev->ce_sps.out_transfer))
+ goto bad;
if (totallen_in > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
- _qce_sps_add_data(
+ if (_qce_sps_add_data(
GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
- &pce_dev->ce_sps.out_transfer);
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
@@ -2514,37 +2490,30 @@
pr_err("Producer callback registration failed rc = %d\n", rc);
goto bad;
}
- /* Register callback event for EOT (End of transfer) event. */
- pce_dev->ce_sps.consumer.event.callback =
- _ablk_cipher_sps_consumer_callback;
- pce_dev->ce_sps.consumer.event.options = SPS_O_DESC_DONE;
- rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
- &pce_dev->ce_sps.consumer.event);
- if (rc) {
- pr_err("Consumer callback registration failed rc = %d\n", rc);
- goto bad;
- }
-
_qce_sps_iovec_count_init(pce_dev);
_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
&pce_dev->ce_sps.in_transfer);
- _qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
- &pce_dev->ce_sps.in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
- _qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
- &pce_dev->ce_sps.out_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
if (areq->nbytes > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
- _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
- CRYPTO_RESULT_DUMP_SIZE,
- &pce_dev->ce_sps.out_transfer);
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
}
@@ -2597,29 +2566,20 @@
pr_err("Producer callback registration failed rc = %d\n", rc);
goto bad;
}
-
- /* Register callback event for EOT (End of transfer) event. */
- pce_dev->ce_sps.consumer.event.callback = _sha_sps_consumer_callback;
- pce_dev->ce_sps.consumer.event.options = SPS_O_DESC_DONE;
- rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
- &pce_dev->ce_sps.consumer.event);
- if (rc) {
- pr_err("Consumer callback registration failed rc = %d\n", rc);
- goto bad;
- }
-
_qce_sps_iovec_count_init(pce_dev);
_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
&pce_dev->ce_sps.in_transfer);
- _qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
- &pce_dev->ce_sps.in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
- _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+ if (_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
- &pce_dev->ce_sps.out_transfer);
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT);
rc = _qce_sps_transfer(pce_dev);
if (rc)
@@ -2799,7 +2759,7 @@
}
}
-static int __qce_enable_clk(void *handle)
+int qce_enable_clk(void *handle)
{
struct qce_device *pce_dev = (struct qce_device *) handle;
int rc = 0;
@@ -2812,6 +2772,7 @@
return rc;
}
}
+
/* Enable CE clk */
if (pce_dev->ce_clk != NULL) {
rc = clk_prepare_enable(pce_dev->ce_clk);
@@ -2833,8 +2794,9 @@
}
return rc;
}
+EXPORT_SYMBOL(qce_enable_clk);
-static int __qce_disable_clk(void *handle)
+int qce_disable_clk(void *handle)
{
struct qce_device *pce_dev = (struct qce_device *) handle;
int rc = 0;
@@ -2848,6 +2810,7 @@
return rc;
}
+EXPORT_SYMBOL(qce_disable_clk);
/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
@@ -2885,19 +2848,20 @@
if (*rc)
goto err_mem;
- *rc = __qce_enable_clk(pce_dev);
+ *rc = qce_enable_clk(pce_dev);
if (*rc)
goto err;
if (_probe_ce_engine(pce_dev)) {
*rc = -ENXIO;
- __qce_disable_clk(pce_dev);
goto err;
}
*rc = 0;
qce_setup_ce_sps_data(pce_dev);
qce_sps_init(pce_dev);
+ qce_disable_clk(pce_dev);
+
return pce_dev;
err:
__qce_deinit_clk(pce_dev);
@@ -2931,7 +2895,7 @@
dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
pce_dev->coh_vmem, pce_dev->coh_pmem);
- __qce_disable_clk(pce_dev);
+ qce_disable_clk(pce_dev);
__qce_deinit_clk(pce_dev);
qce_sps_exit(pce_dev);
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
index dc4ccf7..f5123df 100644
--- a/drivers/crypto/msm/qce50.h
+++ b/drivers/crypto/msm/qce50.h
@@ -17,7 +17,6 @@
/* MAX Data xfer block size between BAM and CE */
#define MAX_CE_BAM_BURST_SIZE 0x40
#define QCEBAM_BURST_SIZE MAX_CE_BAM_BURST_SIZE
-#define MAX_BAM_DESCRIPTORS (0x40 - 1)
#define GET_VIRT_ADDR(x) \
((uint32_t)pce_dev->coh_vmem + \
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 2440404..e91dcaa 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -98,7 +98,7 @@
};
static DEFINE_MUTEX(send_cmd_lock);
-static DEFINE_MUTEX(sent_bw_req);
+static DEFINE_MUTEX(qcedev_sent_bw_req);
/**********************************************************************
* Register ourselves as a misc device to be able to access the dev driver
* from userspace. */
@@ -177,25 +177,51 @@
{
int ret = 0;
- mutex_lock(&sent_bw_req);
+ mutex_lock(&qcedev_sent_bw_req);
if (high_bw_req) {
- if (podev->high_bw_req_count == 0)
+ if (podev->high_bw_req_count == 0) {
+ ret = qce_enable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable enable clk\n", __func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
ret = msm_bus_scale_client_update_request(
podev->bus_scale_handle, 1);
- if (ret)
- pr_err("%s Unable to set to high bandwidth\n",
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
__func__);
+ ret = qce_disable_clk(podev->qce);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ }
podev->high_bw_req_count++;
} else {
- if (podev->high_bw_req_count == 1)
+ if (podev->high_bw_req_count == 1) {
ret = msm_bus_scale_client_update_request(
podev->bus_scale_handle, 0);
- if (ret)
- pr_err("%s Unable to set to low bandwidth\n",
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
__func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ ret = qce_disable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable disable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret)
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ }
podev->high_bw_req_count--;
}
- mutex_unlock(&sent_bw_req);
+ mutex_unlock(&qcedev_sent_bw_req);
}
@@ -1530,6 +1556,45 @@
}
+static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
+ struct qcedev_control *podev)
+{
+ /* if intending to use HW key make sure key fields are set
+ * correctly and HW key is indeed supported in target
+ */
+ if (req->encklen == 0) {
+ int i;
+ for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
+ if (req->enckey[i])
+ goto error;
+ if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
+ (req->op != QCEDEV_OPER_DEC_NO_KEY))
+ if (!podev->platform_support.hw_key_support)
+ goto error;
+ } else {
+ if (req->encklen == QCEDEV_AES_KEY_192) {
+ if (!podev->ce_support.aes_key_192)
+ goto error;
+ } else {
+ /* if not using HW key make sure key
+ * length is valid
+ */
+ if ((req->mode == QCEDEV_AES_MODE_XTS)) {
+ if (!((req->encklen == QCEDEV_AES_KEY_128*2) ||
+ (req->encklen == QCEDEV_AES_KEY_256*2)))
+ goto error;
+ } else {
+ if (!((req->encklen == QCEDEV_AES_KEY_128) ||
+ (req->encklen == QCEDEV_AES_KEY_256)))
+ goto error;
+ }
+ }
+ }
+ return 0;
+error:
+ return -EINVAL;
+}
+
static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
struct qcedev_control *podev)
{
@@ -1542,36 +1607,13 @@
if ((req->alg >= QCEDEV_ALG_LAST) ||
(req->mode >= QCEDEV_AES_DES_MODE_LAST))
goto error;
- if (req->alg == QCEDEV_ALG_AES) {
- if ((req->mode == QCEDEV_AES_MODE_XTS) &&
- (!podev->ce_support.aes_xts))
- goto error;
- /* if intending to use HW key make sure key fields are set
- * correctly and HW key is indeed supported in target
- */
- if (req->encklen == 0) {
- int i;
- for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
- if (req->enckey[i])
+
+ if ((req->mode == QCEDEV_AES_MODE_XTS) && (!podev->ce_support.aes_xts))
goto error;
- if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
- (req->op != QCEDEV_OPER_DEC_NO_KEY))
- if (!podev->platform_support.hw_key_support)
+
+ if (req->alg == QCEDEV_ALG_AES)
+ if (qcedev_check_cipher_key(req, podev))
goto error;
- } else {
- if (req->encklen == QCEDEV_AES_KEY_192) {
- if (!podev->ce_support.aes_key_192)
- goto error;
- } else {
- /* if not using HW key make sure key
- * length is valid
- */
- if (!((req->encklen == QCEDEV_AES_KEY_128) ||
- (req->encklen == QCEDEV_AES_KEY_256)))
- goto error;
- }
- }
- }
/* if using a byteoffset, make sure it is CTR mode using vbuf */
if (req->byteoffset) {
if (req->mode != QCEDEV_AES_MODE_CTR)
@@ -1607,6 +1649,11 @@
if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST)
goto sha_error;
+ if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
+ (req->alg == QCEDEV_ALG_SHA256_HMAC)) {
+ if (req->authklen == 0)
+ goto sha_error;
+ }
return 0;
sha_error:
return -EINVAL;
@@ -1833,6 +1880,14 @@
podev->platform_support.hw_key_support = 0;
podev->platform_support.bus_scale_table = NULL;
podev->platform_support.sha_hmac = 1;
+
+ if (podev->ce_support.is_shared == false) {
+ podev->platform_support.bus_scale_table =
+ (struct msm_bus_scale_pdata *)
+ msm_bus_cl_get_pdata(pdev);
+ if (!podev->platform_support.bus_scale_table)
+ pr_err("bus_scale_table is NULL\n");
+ }
} else {
platform_support =
(struct msm_ce_hw_support *)pdev->dev.platform_data;
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 05ef87c..40fb29ac 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -121,7 +121,7 @@
#define NUM_RETRY 1000
#define CE_BUSY 55
-static DEFINE_MUTEX(sent_bw_req);
+static DEFINE_MUTEX(qcrypto_sent_bw_req);
static int qcrypto_scm_cmd(int resource, int cmd, int *response)
{
@@ -346,25 +346,51 @@
{
int ret = 0;
- mutex_lock(&sent_bw_req);
+ mutex_lock(&qcrypto_sent_bw_req);
if (high_bw_req) {
- if (cp->high_bw_req_count == 0)
+ if (cp->high_bw_req_count == 0) {
+ ret = qce_enable_clk(cp->qce);
+ if (ret) {
+ pr_err("%s Unable enable clk\n", __func__);
+ mutex_unlock(&qcrypto_sent_bw_req);
+ return;
+ }
ret = msm_bus_scale_client_update_request(
- cp->bus_scale_handle, 1);
- if (ret)
- pr_err("%s Unable to set to high bandwidth\n",
+ cp->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
__func__);
+ qce_disable_clk(cp->qce);
+ mutex_unlock(&qcrypto_sent_bw_req);
+ return;
+ }
+ }
cp->high_bw_req_count++;
} else {
- if (cp->high_bw_req_count == 1)
+ if (cp->high_bw_req_count == 1) {
ret = msm_bus_scale_client_update_request(
- cp->bus_scale_handle, 0);
- if (ret)
- pr_err("%s Unable to set to low bandwidth\n",
+ cp->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
__func__);
+ mutex_unlock(&qcrypto_sent_bw_req);
+ return;
+ }
+ ret = qce_disable_clk(cp->qce);
+ if (ret) {
+ pr_err("%s Unable disable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ cp->bus_scale_handle, 1);
+ if (ret)
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ mutex_unlock(&qcrypto_sent_bw_req);
+ return;
+ }
+ }
cp->high_bw_req_count--;
}
- mutex_unlock(&sent_bw_req);
+ mutex_unlock(&qcrypto_sent_bw_req);
}
static int _start_qcrypto_process(struct crypto_priv *cp);
@@ -677,12 +703,10 @@
return 0;
};
-static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
- unsigned int len)
+
+static int _qcrypto_check_aes_keylen(struct crypto_ablkcipher *cipher,
+ struct crypto_priv *cp, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_priv *cp = ctx->cp;
switch (len) {
case AES_KEYSIZE_128:
@@ -695,8 +719,40 @@
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
};
- ctx->enc_key_len = len;
- memcpy(ctx->enc_key, key, len);
+
+ return 0;
+}
+
+static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if (_qcrypto_check_aes_keylen(cipher, cp, len)) {
+ return -EINVAL;
+ } else {
+ ctx->enc_key_len = len;
+ memcpy(ctx->enc_key, key, len);
+ }
+ return 0;
+};
+
+static int _qcrypto_setkey_aes_xts(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+
+ if (_qcrypto_check_aes_keylen(cipher, cp, len/2)) {
+ return -EINVAL;
+ } else {
+ ctx->enc_key_len = len;
+ memcpy(ctx->enc_key, key, len);
+ }
return 0;
};
@@ -3124,7 +3180,7 @@
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .setkey = _qcrypto_setkey_aes,
+ .setkey = _qcrypto_setkey_aes_xts,
.encrypt = _qcrypto_enc_aes_xts,
.decrypt = _qcrypto_dec_aes_xts,
},
@@ -3306,6 +3362,14 @@
cp->platform_support.hw_key_support = 0;
cp->platform_support.bus_scale_table = NULL;
cp->platform_support.sha_hmac = 1;
+
+ if (cp->ce_support.is_shared == false) {
+ cp->platform_support.bus_scale_table =
+ (struct msm_bus_scale_pdata *)
+ msm_bus_cl_get_pdata(pdev);
+ if (!cp->platform_support.bus_scale_table)
+ pr_warn("bus_scale_table is NULL\n");
+ }
} else {
platform_support =
(struct msm_ce_hw_support *)pdev->dev.platform_data;
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index 60a6b81..d7ff73a 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,4 +1,6 @@
-obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o ion_cp_heap.o
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o \
+ ion_iommu_heap.o ion_cp_heap.o ion_removed_heap.o \
+ ion_page_pool.o ion_chunk_heap.o
obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o
obj-$(CONFIG_ION_TEGRA) += tegra/
obj-$(CONFIG_ION_MSM) += msm/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index ce25bfd..fbe4da0 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -1,4 +1,5 @@
/*
+
* drivers/gpu/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
@@ -18,15 +19,18 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
+#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
+#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
+#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
@@ -43,16 +47,18 @@
/**
* struct ion_device - the metadata of the ion device node
* @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @lock: lock protecting the buffers & heaps trees
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
* @heaps: list of all the heaps in the system
* @user_clients: list of all the clients created from userspace
*/
struct ion_device {
struct miscdevice dev;
struct rb_root buffers;
- struct mutex lock;
- struct rb_root heaps;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
unsigned long arg);
struct rb_root clients;
@@ -65,7 +71,6 @@
* @dev: backpointer to ion device
* @handles: an rb tree of all the handles in this client
* @lock: lock protecting the tree of handles
- * @heap_mask: mask of all supported heaps
* @name: used for debugging
* @task: used for debugging
*
@@ -78,7 +83,7 @@
struct ion_device *dev;
struct rb_root handles;
struct mutex lock;
- unsigned int heap_mask;
+ unsigned int heap_type_mask;
char *name;
struct task_struct *task;
pid_t pid;
@@ -112,7 +117,10 @@
!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}
-static void ion_iommu_release(struct kref *kref);
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+ return !!(buffer->flags & ION_FLAG_CACHED);
+}
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
@@ -140,63 +148,9 @@
rb_insert_color(&buffer->node, &dev->buffers);
}
-static void ion_iommu_add(struct ion_buffer *buffer,
- struct ion_iommu_map *iommu)
-{
- struct rb_node **p = &buffer->iommu_maps.rb_node;
- struct rb_node *parent = NULL;
- struct ion_iommu_map *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_iommu_map, node);
-
- if (iommu->key < entry->key) {
- p = &(*p)->rb_left;
- } else if (iommu->key > entry->key) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: buffer %p already has mapping for domain %d"
- " and partition %d\n", __func__,
- buffer,
- iommu_map_domain(iommu),
- iommu_map_partition(iommu));
- BUG();
- }
- }
-
- rb_link_node(&iommu->node, parent, p);
- rb_insert_color(&iommu->node, &buffer->iommu_maps);
-
-}
-
-static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
- unsigned int domain_no,
- unsigned int partition_no)
-{
- struct rb_node **p = &buffer->iommu_maps.rb_node;
- struct rb_node *parent = NULL;
- struct ion_iommu_map *entry;
- uint64_t key = domain_no;
- key = key << 32 | partition_no;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_iommu_map, node);
-
- if (key < entry->key)
- p = &(*p)->rb_left;
- else if (key > entry->key)
- p = &(*p)->rb_right;
- else
- return entry;
- }
-
- return NULL;
-}
-
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
+static bool ion_heap_drain_freelist(struct ion_heap *heap);
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
@@ -218,9 +172,16 @@
kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
if (ret) {
- kfree(buffer);
- return ERR_PTR(ret);
+ if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+ goto err2;
+
+ ion_heap_drain_freelist(heap);
+ ret = heap->ops->allocate(heap, buffer, len, align,
+ flags);
+ if (ret)
+ goto err2;
}
buffer->dev = dev;
@@ -265,74 +226,58 @@
if (sg_dma_address(sg) == 0)
sg_dma_address(sg) = sg_phys(sg);
}
+ mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
+ mutex_unlock(&dev->buffer_lock);
return buffer;
err:
heap->ops->unmap_dma(heap, buffer);
heap->ops->free(buffer);
+err2:
kfree(buffer);
return ERR_PTR(ret);
}
-/**
- * Check for delayed IOMMU unmapping. Also unmap any outstanding
- * mappings which would otherwise have been leaked.
- */
-static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
-{
- struct ion_iommu_map *iommu_map;
- struct rb_node *node;
- const struct rb_root *rb = &(buffer->iommu_maps);
- unsigned long ref_count;
- unsigned int delayed_unmap;
-
- mutex_lock(&buffer->lock);
-
- while ((node = rb_first(rb)) != 0) {
- iommu_map = rb_entry(node, struct ion_iommu_map, node);
- ref_count = atomic_read(&iommu_map->ref.refcount);
- delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;
-
- if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
- pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
- __func__, iommu_map->domain_info[DI_DOMAIN_NUM],
- iommu_map->domain_info[DI_PARTITION_NUM]);
- }
- /* set ref count to 1 to force release */
- kref_init(&iommu_map->ref);
- kref_put(&iommu_map->ref, ion_iommu_release);
- }
-
- mutex_unlock(&buffer->lock);
-}
-
static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
if (buffer->heap->ops->unsecure_buffer)
buffer->heap->ops->unsecure_buffer(buffer, 1);
}
-static void ion_buffer_destroy(struct kref *kref)
+static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
- struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
- struct ion_device *dev = buffer->dev;
-
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
ion_delayed_unsecure(buffer);
- ion_iommu_delayed_unmap(buffer);
buffer->heap->ops->free(buffer);
- mutex_lock(&dev->lock);
- rb_erase(&buffer->node, &dev->buffers);
- mutex_unlock(&dev->lock);
if (buffer->flags & ION_FLAG_CACHED)
kfree(buffer->dirty);
kfree(buffer);
}
+static void ion_buffer_destroy(struct kref *kref)
+{
+ struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+ struct ion_heap *heap = buffer->heap;
+ struct ion_device *dev = buffer->dev;
+
+ mutex_lock(&dev->buffer_lock);
+ rb_erase(&buffer->node, &dev->buffers);
+ mutex_unlock(&dev->buffer_lock);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
+ rt_mutex_lock(&heap->lock);
+ list_add(&buffer->list, &heap->free_list);
+ rt_mutex_unlock(&heap->lock);
+ wake_up(&heap->waitqueue);
+ return;
+ }
+ _ion_buffer_destroy(buffer);
+}
+
static void ion_buffer_get(struct ion_buffer *buffer)
{
kref_get(&buffer->ref);
@@ -343,6 +288,37 @@
return kref_put(&buffer->ref, ion_buffer_destroy);
}
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+ mutex_lock(&buffer->lock);
+ buffer->handle_count++;
+ mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+ /*
+ * when a buffer is removed from a handle, if it is not in
+ * any other handles, copy the taskcomm and the pid of the
+ * process it's being removed from into the buffer. At this
+ * point there will be no way to track what processes this buffer is
+ * being used by, it only exists as a dma_buf file descriptor.
+ * The taskcomm and pid can provide a debug hint as to where this fd
+ * is in the system
+ */
+ mutex_lock(&buffer->lock);
+ buffer->handle_count--;
+ BUG_ON(buffer->handle_count < 0);
+ if (!buffer->handle_count) {
+ struct task_struct *task;
+
+ task = current->group_leader;
+ get_task_comm(buffer->task_comm, task);
+ buffer->pid = task_pid_nr(task);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
static struct ion_handle *ion_handle_create(struct ion_client *client,
struct ion_buffer *buffer)
{
@@ -355,6 +331,7 @@
rb_init_node(&handle->node);
handle->client = client;
ion_buffer_get(buffer);
+ ion_buffer_add_to_handle(buffer);
handle->buffer = buffer;
return handle;
@@ -376,7 +353,9 @@
if (!RB_EMPTY_NODE(&handle->node))
rb_erase(&handle->node, &client->handles);
+ ion_buffer_remove_from_handle(buffer);
ion_buffer_put(buffer);
+
kfree(handle);
}
@@ -449,14 +428,14 @@
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_mask,
+ size_t align, unsigned int heap_id_mask,
unsigned int flags)
{
- struct rb_node *n;
struct ion_handle *handle;
struct ion_device *dev = client->dev;
struct ion_buffer *buffer = NULL;
- unsigned long secure_allocation = flags & ION_SECURE;
+ struct ion_heap *heap;
+ unsigned long secure_allocation = flags & ION_FLAG_SECURE;
const unsigned int MAX_DBG_STR_LEN = 64;
char dbg_str[MAX_DBG_STR_LEN];
unsigned int dbg_str_idx = 0;
@@ -471,8 +450,8 @@
*/
flags |= ION_FLAG_CACHED_NEEDS_SYNC;
- pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
- align, heap_mask, flags);
+ pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
+ len, align, heap_id_mask, flags);
/*
* traverse the list of heaps available in this system in priority
* order. If the heap type is supported by the client, and matches the
@@ -484,29 +463,26 @@
len = PAGE_ALIGN(len);
- mutex_lock(&dev->lock);
- for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
- struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
- /* if the client doesn't support this heap type */
- if (!((1 << heap->type) & client->heap_mask))
- continue;
- /* if the caller didn't specify this heap type */
- if (!((1 << heap->id) & heap_mask))
+ down_read(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ /* if the caller didn't specify this heap id */
+ if (!((1 << heap->id) & heap_id_mask))
continue;
/* Do not allow un-secure heap if secure is specified */
if (secure_allocation &&
!ion_heap_allow_secure_allocation(heap->type))
continue;
trace_ion_alloc_buffer_start(client->name, heap->name, len,
- heap_mask, flags);
+ heap_id_mask, flags);
buffer = ion_buffer_create(heap, dev, len, align, flags);
trace_ion_alloc_buffer_end(client->name, heap->name, len,
- heap_mask, flags);
+ heap_id_mask, flags);
if (!IS_ERR_OR_NULL(buffer))
break;
trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
- heap_mask, flags, PTR_ERR(buffer));
+ heap_id_mask, flags,
+ PTR_ERR(buffer));
if (dbg_str_idx < MAX_DBG_STR_LEN) {
unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
int ret_value = snprintf(&dbg_str[dbg_str_idx],
@@ -523,21 +499,21 @@
}
}
}
- mutex_unlock(&dev->lock);
+ up_read(&dev->lock);
if (buffer == NULL) {
trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
- heap_mask, flags, -ENODEV);
+ heap_id_mask, flags, -ENODEV);
return ERR_PTR(-ENODEV);
}
if (IS_ERR(buffer)) {
trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
- heap_mask, flags, PTR_ERR(buffer));
+ heap_id_mask, flags,
+ PTR_ERR(buffer));
pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
- "0x%x) from heap(s) %sfor client %s with heap "
- "mask 0x%x\n",
- len, align, dbg_str, client->name, client->heap_mask);
+ "0x%x) from heap(s) %sfor client %s\n",
+ len, align, dbg_str, client->name);
return ERR_PTR(PTR_ERR(buffer));
}
@@ -654,212 +630,6 @@
ion_buffer_kmap_put(buffer);
}
-static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
- int domain_num, int partition_num, unsigned long align,
- unsigned long iova_length, unsigned long flags,
- unsigned long *iova)
-{
- struct ion_iommu_map *data;
- int ret;
-
- data = kmalloc(sizeof(*data), GFP_ATOMIC);
-
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- data->buffer = buffer;
- iommu_map_domain(data) = domain_num;
- iommu_map_partition(data) = partition_num;
-
- ret = buffer->heap->ops->map_iommu(buffer, data,
- domain_num,
- partition_num,
- align,
- iova_length,
- flags);
-
- if (ret)
- goto out;
-
- kref_init(&data->ref);
- *iova = data->iova_addr;
-
- ion_iommu_add(buffer, data);
-
- return data;
-
-out:
- kfree(data);
- return ERR_PTR(ret);
-}
-
-int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
- int domain_num, int partition_num, unsigned long align,
- unsigned long iova_length, unsigned long *iova,
- unsigned long *buffer_size,
- unsigned long flags, unsigned long iommu_flags)
-{
- struct ion_buffer *buffer;
- struct ion_iommu_map *iommu_map;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: client pointer is invalid\n", __func__);
- return -EINVAL;
- }
- if (IS_ERR_OR_NULL(handle)) {
- pr_err("%s: handle pointer is invalid\n", __func__);
- return -EINVAL;
- }
- if (IS_ERR_OR_NULL(handle->buffer)) {
- pr_err("%s: buffer pointer is invalid\n", __func__);
- return -EINVAL;
- }
-
- if (ION_IS_CACHED(flags)) {
- pr_err("%s: Cannot map iommu as cached.\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
-
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
-
- if (!handle->buffer->heap->ops->map_iommu) {
- pr_err("%s: map_iommu is not implemented by this heap.\n",
- __func__);
- ret = -ENODEV;
- goto out;
- }
-
- /*
- * If clients don't want a custom iova length, just use whatever
- * the buffer size is
- */
- if (!iova_length)
- iova_length = buffer->size;
-
- if (buffer->size > iova_length) {
- pr_debug("%s: iova length %lx is not at least buffer size"
- " %x\n", __func__, iova_length, buffer->size);
- ret = -EINVAL;
- goto out;
- }
-
- if (buffer->size & ~PAGE_MASK) {
- pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
- buffer->size, PAGE_SIZE);
- ret = -EINVAL;
- goto out;
- }
-
- if (iova_length & ~PAGE_MASK) {
- pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
- iova_length, PAGE_SIZE);
- ret = -EINVAL;
- goto out;
- }
-
- iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
- if (!iommu_map) {
- iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
- align, iova_length, flags, iova);
- if (!IS_ERR_OR_NULL(iommu_map)) {
- iommu_map->flags = iommu_flags;
-
- if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
- kref_get(&iommu_map->ref);
- } else {
- ret = PTR_ERR(iommu_map);
- }
- } else {
- if (iommu_map->flags != iommu_flags) {
- pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
- __func__, handle,
- iommu_map->flags, iommu_flags);
- ret = -EINVAL;
- } else if (iommu_map->mapped_size != iova_length) {
- pr_err("%s: handle %p is already mapped with length"
- " %x, trying to map with length %lx\n",
- __func__, handle, iommu_map->mapped_size,
- iova_length);
- ret = -EINVAL;
- } else {
- kref_get(&iommu_map->ref);
- *iova = iommu_map->iova_addr;
- }
- }
- if (!ret)
- buffer->iommu_map_cnt++;
- *buffer_size = buffer->size;
-out:
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return ret;
-}
-EXPORT_SYMBOL(ion_map_iommu);
-
-static void ion_iommu_release(struct kref *kref)
-{
- struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
- ref);
- struct ion_buffer *buffer = map->buffer;
-
- rb_erase(&map->node, &buffer->iommu_maps);
- buffer->heap->ops->unmap_iommu(map);
- kfree(map);
-}
-
-void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
- int domain_num, int partition_num)
-{
- struct ion_iommu_map *iommu_map;
- struct ion_buffer *buffer;
-
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: client pointer is invalid\n", __func__);
- return;
- }
- if (IS_ERR_OR_NULL(handle)) {
- pr_err("%s: handle pointer is invalid\n", __func__);
- return;
- }
- if (IS_ERR_OR_NULL(handle->buffer)) {
- pr_err("%s: buffer pointer is invalid\n", __func__);
- return;
- }
-
- mutex_lock(&client->lock);
- buffer = handle->buffer;
-
- mutex_lock(&buffer->lock);
-
- iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
-
- if (!iommu_map) {
- WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
- domain_num, partition_num, buffer);
- goto out;
- }
-
- kref_put(&iommu_map->ref, ion_iommu_release);
-
- buffer->iommu_map_cnt--;
-out:
- mutex_unlock(&buffer->lock);
-
- mutex_unlock(&client->lock);
-
-}
-EXPORT_SYMBOL(ion_unmap_iommu);
-
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
struct ion_buffer *buffer;
@@ -903,52 +673,10 @@
}
EXPORT_SYMBOL(ion_unmap_kernel);
-int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
- void *uaddr, unsigned long offset, unsigned long len,
- unsigned int cmd)
-{
- struct ion_buffer *buffer;
- int ret = -EINVAL;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to do_cache_op.\n",
- __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
-
- if (!ION_IS_CACHED(buffer->flags)) {
- ret = 0;
- goto out;
- }
-
- if (!handle->buffer->heap->ops->cache_op) {
- pr_err("%s: cache_op is not implemented by this heap.\n",
- __func__);
- ret = -ENODEV;
- goto out;
- }
-
-
- ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
- offset, len, cmd);
-
-out:
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return ret;
-
-}
-EXPORT_SYMBOL(ion_do_cache_op);
-
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
struct ion_client *client = s->private;
struct rb_node *n;
- struct rb_node *n2;
seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
"heap_name", "size_in_bytes", "handle refcount",
@@ -958,6 +686,7 @@
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
+
enum ion_heap_type type = handle->buffer->heap->type;
seq_printf(s, "%16.16s: %16x : %16d : %12p",
@@ -973,19 +702,9 @@
else
seq_printf(s, " : %12s", "N/A");
- for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
- n2 = rb_next(n2)) {
- struct ion_iommu_map *imap =
- rb_entry(n2, struct ion_iommu_map, node);
- seq_printf(s, " : [%d,%d] - %8lx",
- imap->domain_info[DI_DOMAIN_NUM],
- imap->domain_info[DI_PARTITION_NUM],
- imap->iova_addr);
- }
seq_printf(s, "\n");
}
mutex_unlock(&client->lock);
-
return 0;
}
@@ -1002,7 +721,6 @@
};
struct ion_client *ion_client_create(struct ion_device *dev,
- unsigned int heap_mask,
const char *name)
{
struct ion_client *client;
@@ -1052,11 +770,10 @@
strlcpy(client->name, name, name_len+1);
}
- client->heap_mask = heap_mask;
client->task = task;
client->pid = pid;
- mutex_lock(&dev->lock);
+ down_write(&dev->lock);
p = &dev->clients.rb_node;
while (*p) {
parent = *p;
@@ -1074,96 +791,16 @@
client->debug_root = debugfs_create_file(name, 0664,
dev->debug_root, client,
&debug_client_fops);
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
return client;
}
-
-/**
- * ion_mark_dangling_buffers_locked() - Mark dangling buffers
- * @dev: the ion device whose buffers will be searched
- *
- * Sets marked=1 for all known buffers associated with `dev' that no
- * longer have a handle pointing to them. dev->lock should be held
- * across a call to this function (and should only be unlocked after
- * checking for marked buffers).
- */
-static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
-{
- struct rb_node *n, *n2;
- /* mark all buffers as 1 */
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
- node);
-
- buf->marked = 1;
- }
-
- /* now see which buffers we can access */
- for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
-
- mutex_lock(&client->lock);
- for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
- struct ion_handle *handle
- = rb_entry(n2, struct ion_handle, node);
-
- handle->buffer->marked = 0;
-
- }
- mutex_unlock(&client->lock);
-
- }
-}
-
-#ifdef CONFIG_ION_LEAK_CHECK
-static u32 ion_debug_check_leaks_on_destroy;
-
-static int ion_check_for_and_print_leaks(struct ion_device *dev)
-{
- struct rb_node *n;
- int num_leaks = 0;
-
- if (!ion_debug_check_leaks_on_destroy)
- return 0;
-
- /* check for leaked buffers (those that no longer have a
- * handle pointing to them) */
- ion_mark_dangling_buffers_locked(dev);
-
- /* Anyone still marked as a 1 means a leaked handle somewhere */
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
- node);
-
- if (buf->marked == 1) {
- pr_info("Leaked ion buffer at %p\n", buf);
- num_leaks++;
- }
- }
- return num_leaks;
-}
-static void setup_ion_leak_check(struct dentry *debug_root)
-{
- debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
- &ion_debug_check_leaks_on_destroy);
-}
-#else
-static int ion_check_for_and_print_leaks(struct ion_device *dev)
-{
- return 0;
-}
-static void setup_ion_leak_check(struct dentry *debug_root)
-{
-}
-#endif
+EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
struct ion_device *dev = client->dev;
struct rb_node *n;
- int num_leaks;
pr_debug("%s: %d\n", __func__, __LINE__);
while ((n = rb_first(&client->handles))) {
@@ -1171,25 +808,13 @@
node);
ion_handle_destroy(&handle->ref);
}
- mutex_lock(&dev->lock);
+ down_write(&dev->lock);
if (client->task)
put_task_struct(client->task);
rb_erase(&client->node, &dev->clients);
debugfs_remove_recursive(client->debug_root);
- num_leaks = ion_check_for_and_print_leaks(dev);
-
- mutex_unlock(&dev->lock);
-
- if (num_leaks) {
- struct task_struct *current_task = current;
- char current_task_name[TASK_COMM_LEN];
- get_task_comm(current_task_name, current_task);
- WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
- __func__, num_leaks, num_leaks == 1 ? "" : "s");
- pr_info("task name at time of leak: %s, pid: %d\n",
- current_task_name, current_task->pid);
- }
+ up_write(&dev->lock);
kfree(client->name);
kfree(client);
@@ -1462,7 +1087,7 @@
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
struct ion_buffer *buffer = dmabuf->priv;
- return buffer->vaddr + offset;
+ return buffer->vaddr + offset * PAGE_SIZE;
}
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
@@ -1518,19 +1143,19 @@
.kunmap = ion_dma_buf_kunmap,
};
-int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle)
{
struct ion_buffer *buffer;
struct dma_buf *dmabuf;
bool valid_handle;
- int fd;
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
mutex_unlock(&client->lock);
if (!valid_handle) {
WARN(1, "%s: invalid handle passed to share.\n", __func__);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
@@ -1538,15 +1163,29 @@
dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
if (IS_ERR(dmabuf)) {
ion_buffer_put(buffer);
- return PTR_ERR(dmabuf);
+ return dmabuf;
}
+
+ return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+ struct dma_buf *dmabuf;
+ int fd;
+
+ dmabuf = ion_share_dma_buf(client, handle);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
if (fd < 0)
dma_buf_put(dmabuf);
return fd;
}
-EXPORT_SYMBOL(ion_share_dma_buf);
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
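The hunk above splits sharing into ion_share_dma_buf(), which hands back the dma_buf itself (or an ERR_PTR-encoded error), and ion_share_dma_buf_fd(), a thin wrapper that converts that dma_buf into a file descriptor for the ioctl path below. As a minimal standalone sketch of that error-pointer wrapper pattern -- using hypothetical names (struct obj, obj_share, obj_share_fd) and local stand-ins for the kernel's ERR_PTR helpers, not the real kernel API:

  #include <errno.h>
  #include <stdio.h>

  /* Local stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
  static inline void *ERR_PTR(long error) { return (void *)error; }
  static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
  static inline int IS_ERR(const void *ptr)
  {
          return (unsigned long)ptr >= (unsigned long)-4095;
  }

  struct obj { int id; };

  /* Core helper: returns the object itself, or an ERR_PTR on failure. */
  static struct obj *obj_share(int valid)
  {
          static struct obj o = { .id = 42 };

          if (!valid)
                  return ERR_PTR(-EINVAL);
          return &o;
  }

  /* Thin wrapper: converts the object into an integer handle ("fd"). */
  static int obj_share_fd(int valid)
  {
          struct obj *o = obj_share(valid);

          if (IS_ERR(o))
                  return PTR_ERR(o);
          return o->id;
  }

  int main(void)
  {
          printf("ok case:  %d\n", obj_share_fd(1));  /* 42 */
          printf("err case: %d\n", obj_share_fd(0));  /* -EINVAL, i.e. -22 */
          return 0;
  }

The split lets in-kernel users work with the dma_buf directly while userspace keeps getting a plain fd.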
@@ -1655,7 +1294,8 @@
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
- data.fd = ion_share_dma_buf(client, data.handle);
+ data.fd = ion_share_dma_buf_fd(client, data.handle);
+
if (copy_to_user((void __user *)arg, &data, sizeof(data)))
return -EFAULT;
if (data.fd < 0)
@@ -1735,7 +1375,7 @@
pr_debug("%s: %d\n", __func__, __LINE__);
snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
- client = ion_client_create(dev, -1, debug_name);
+ client = ion_client_create(dev, debug_name);
if (IS_ERR_OR_NULL(client))
return PTR_ERR(client);
file->private_data = client;
@@ -1917,9 +1557,12 @@
struct ion_heap *heap = s->private;
struct ion_device *dev = heap->dev;
struct rb_node *n;
+ size_t total_size = 0;
+ size_t total_orphaned_size = 0;
- mutex_lock(&dev->lock);
+ mutex_lock(&dev->buffer_lock);
seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+ seq_printf(s, "----------------------------------------------------\n");
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
@@ -1938,8 +1581,28 @@
client->pid, size);
}
}
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "orphaned allocations (info is from last known client):"
+ "\n");
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+ if (buffer->heap->type == heap->type)
+ total_size += buffer->size;
+ if (!buffer->handle_count) {
+ seq_printf(s, "%16.s %16u %16u\n", buffer->task_comm,
+ buffer->pid, buffer->size);
+ total_orphaned_size += buffer->size;
+ }
+ }
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "%16.s %16u\n", "total orphaned",
+ total_orphaned_size);
+ seq_printf(s, "%16.s %16u\n", "total ", total_size);
+ seq_printf(s, "----------------------------------------------------\n");
+
ion_heap_print_debug(s, heap);
- mutex_unlock(&dev->lock);
+ mutex_unlock(&dev->buffer_lock);
return 0;
}
@@ -1955,40 +1618,90 @@
.release = single_release,
};
+static bool ion_heap_free_list_is_empty(struct ion_heap *heap)
+{
+ bool is_empty;
+
+ rt_mutex_lock(&heap->lock);
+ is_empty = list_empty(&heap->free_list);
+ rt_mutex_unlock(&heap->lock);
+
+ return is_empty;
+}
+
+static int ion_heap_deferred_free(void *data)
+{
+ struct ion_heap *heap = data;
+
+ while (true) {
+ struct ion_buffer *buffer;
+
+ wait_event_freezable(heap->waitqueue,
+ !ion_heap_free_list_is_empty(heap));
+
+ rt_mutex_lock(&heap->lock);
+ if (list_empty(&heap->free_list)) {
+ rt_mutex_unlock(&heap->lock);
+ continue;
+ }
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ rt_mutex_unlock(&heap->lock);
+ _ion_buffer_destroy(buffer);
+ }
+
+ return 0;
+}
+
+static bool ion_heap_drain_freelist(struct ion_heap *heap)
+{
+ struct ion_buffer *buffer, *tmp;
+
+ if (ion_heap_free_list_is_empty(heap))
+ return false;
+ rt_mutex_lock(&heap->lock);
+ list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
+ list_del(&buffer->list);
+ _ion_buffer_destroy(buffer);
+ }
+ BUG_ON(!list_empty(&heap->free_list));
+ rt_mutex_unlock(&heap->lock);
+
+ return true;
+}
+
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
- struct rb_node **p = &dev->heaps.rb_node;
- struct rb_node *parent = NULL;
- struct ion_heap *entry;
+ struct sched_param param = { .sched_priority = 0 };
if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
!heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
- heap->dev = dev;
- mutex_lock(&dev->lock);
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_heap, node);
-
- if (heap->id < entry->id) {
- p = &(*p)->rb_left;
- } else if (heap->id > entry->id ) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: can not insert multiple heaps with "
- "id %d\n", __func__, heap->id);
- goto end;
- }
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
+ INIT_LIST_HEAD(&heap->free_list);
+ rt_mutex_init(&heap->lock);
+ init_waitqueue_head(&heap->waitqueue);
+		heap->task = kthread_run(ion_heap_deferred_free, heap,
+					 "%s", heap->name);
+		if (IS_ERR(heap->task))
+			pr_err("%s: creating thread for deferred free failed\n",
+			       __func__);
+		else
+			sched_setscheduler(heap->task, SCHED_IDLE, &param);
}
- rb_link_node(&heap->node, parent, p);
- rb_insert_color(&heap->node, &dev->heaps);
+ heap->dev = dev;
+ down_write(&dev->lock);
+ /* use negative heap->id to reverse the priority -- when traversing
+ the list later attempt higher id numbers first */
+ plist_node_init(&heap->node, -heap->id);
+ plist_add(&heap->node, &dev->heaps);
debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
&debug_heap_fops);
-end:
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
}
int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
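ion_device_add_heap() above replaces the heap rbtree with a plist and stores -heap->id as the node priority, so a plain ascending-priority walk visits higher heap ids first. A small standalone sketch of the same negation trick, with made-up ids and qsort standing in for the kernel's plist:

  #include <stdio.h>
  #include <stdlib.h>

  struct heap_node {
          int id;
          int prio;       /* negated id, mirroring plist_node_init(&node, -id) */
  };

  static int by_prio(const void *a, const void *b)
  {
          return ((const struct heap_node *)a)->prio -
                 ((const struct heap_node *)b)->prio;
  }

  int main(void)
  {
          struct heap_node heaps[] = { { 3, -3 }, { 25, -25 }, { 9, -9 } };
          size_t i, n = sizeof(heaps) / sizeof(heaps[0]);

          qsort(heaps, n, sizeof(heaps[0]), by_prio);

          /* Ascending priority order now tries higher ids first: 25, 9, 3. */
          for (i = 0; i < n; i++)
                  printf("heap id %d (prio %d)\n", heaps[i].id, heaps[i].prio);
          return 0;
  }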
@@ -2061,16 +1774,15 @@
int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
void *data)
{
- struct rb_node *n;
int ret_val = 0;
+ struct ion_heap *heap;
/*
* traverse the list of heaps available in this system
* and find the heap that is specified.
*/
- mutex_lock(&dev->lock);
- for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
- struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+ down_write(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
if (!ion_heap_allow_heap_secure(heap->type))
continue;
if (ION_HEAP(heap->id) != heap_id)
@@ -2081,7 +1793,7 @@
ret_val = -EINVAL;
break;
}
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
return ret_val;
}
EXPORT_SYMBOL(ion_secure_heap);
@@ -2089,16 +1801,15 @@
int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
void *data)
{
- struct rb_node *n;
int ret_val = 0;
+ struct ion_heap *heap;
/*
* traverse the list of heaps available in this system
* and find the heap that is specified.
*/
- mutex_lock(&dev->lock);
- for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
- struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+ down_write(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
if (!ion_heap_allow_heap_secure(heap->type))
continue;
if (ION_HEAP(heap->id) != heap_id)
@@ -2109,50 +1820,11 @@
ret_val = -EINVAL;
break;
}
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
return ret_val;
}
EXPORT_SYMBOL(ion_unsecure_heap);
-static int ion_debug_leak_show(struct seq_file *s, void *unused)
-{
- struct ion_device *dev = s->private;
- struct rb_node *n;
-
- seq_printf(s, "%16.s %16.s %16.s %16.s\n", "buffer", "heap", "size",
- "ref cnt");
-
- mutex_lock(&dev->lock);
- ion_mark_dangling_buffers_locked(dev);
-
- /* Anyone still marked as a 1 means a leaked handle somewhere */
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
- node);
-
- if (buf->marked == 1)
- seq_printf(s, "%16.x %16.s %16.x %16.d\n",
- (int)buf, buf->heap->name, buf->size,
- atomic_read(&buf->ref.refcount));
- }
- mutex_unlock(&dev->lock);
- return 0;
-}
-
-static int ion_debug_leak_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ion_debug_leak_show, inode->i_private);
-}
-
-static const struct file_operations debug_leak_fops = {
- .open = ion_debug_leak_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-
struct ion_device *ion_device_create(long (*custom_ioctl)
(struct ion_client *client,
unsigned int cmd,
@@ -2181,13 +1853,10 @@
idev->custom_ioctl = custom_ioctl;
idev->buffers = RB_ROOT;
- mutex_init(&idev->lock);
- idev->heaps = RB_ROOT;
+ mutex_init(&idev->buffer_lock);
+ init_rwsem(&idev->lock);
+ plist_head_init(&idev->heaps);
idev->clients = RB_ROOT;
- debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
- &debug_leak_fops);
-
- setup_ion_leak_check(idev->debug_root);
return idev;
}
@@ -2200,16 +1869,35 @@
void __init ion_reserve(struct ion_platform_data *data)
{
- int i, ret;
+ int i;
for (i = 0; i < data->nr; i++) {
if (data->heaps[i].size == 0)
continue;
- ret = memblock_reserve(data->heaps[i].base,
- data->heaps[i].size);
- if (ret)
- pr_err("memblock reserve of %x@%pa failed\n",
- data->heaps[i].size,
- &data->heaps[i].base);
+
+ if (data->heaps[i].base == 0) {
+ phys_addr_t paddr;
+ paddr = memblock_alloc_base(data->heaps[i].size,
+ data->heaps[i].align,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!paddr) {
+ pr_err("%s: error allocating memblock for "
+ "heap %d\n",
+ __func__, i);
+ continue;
+ }
+ data->heaps[i].base = paddr;
+ } else {
+ int ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %x@%pa failed\n",
+ data->heaps[i].size,
+ &data->heaps[i].base);
+ }
+ pr_info("%s: %s reserved base %pa size %d\n", __func__,
+ data->heaps[i].name,
+ &data->heaps[i].base,
+ data->heaps[i].size);
}
}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 9610dfe..0dd3054 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -24,11 +24,9 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/iommu.h>
#include <linux/seq_file.h>
#include "ion_priv.h"
-#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>
@@ -39,10 +37,6 @@
ion_phys_addr_t base;
unsigned long allocated_bytes;
unsigned long total_size;
- int (*request_region)(void *);
- int (*release_region)(void *);
- atomic_t map_count;
- void *bus_id;
unsigned int has_outer_cache;
};
@@ -130,79 +124,33 @@
buffer->sg_table = 0;
}
-static int ion_carveout_request_region(struct ion_carveout_heap *carveout_heap)
-{
- int ret_value = 0;
- if (atomic_inc_return(&carveout_heap->map_count) == 1) {
- if (carveout_heap->request_region) {
- ret_value = carveout_heap->request_region(
- carveout_heap->bus_id);
- if (ret_value) {
- pr_err("Unable to request SMI region");
- atomic_dec(&carveout_heap->map_count);
- }
- }
- }
- return ret_value;
-}
-
-static int ion_carveout_release_region(struct ion_carveout_heap *carveout_heap)
-{
- int ret_value = 0;
- if (atomic_dec_and_test(&carveout_heap->map_count)) {
- if (carveout_heap->release_region) {
- ret_value = carveout_heap->release_region(
- carveout_heap->bus_id);
- if (ret_value)
- pr_err("Unable to release SMI region");
- }
- }
- return ret_value;
-}
-
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
void *ret_value;
- if (ion_carveout_request_region(carveout_heap))
- return NULL;
-
if (ION_IS_CACHED(buffer->flags))
ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
else
ret_value = ioremap(buffer->priv_phys, buffer->size);
- if (!ret_value)
- ion_carveout_release_region(carveout_heap);
return ret_value;
}
void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
-
__arm_iounmap(buffer->vaddr);
buffer->vaddr = NULL;
- ion_carveout_release_region(carveout_heap);
return;
}
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
int ret_value = 0;
- if (ion_carveout_request_region(carveout_heap))
- return -EINVAL;
-
if (!ION_IS_CACHED(buffer->flags))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -211,104 +159,9 @@
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
- if (ret_value)
- ion_carveout_release_region(carveout_heap);
return ret_value;
}
-void ion_carveout_heap_unmap_user(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
- ion_carveout_release_region(carveout_heap);
-}
-
-int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
- unsigned int size_to_vmap, total_size;
- int i, j;
- void *ptr = NULL;
- ion_phys_addr_t buff_phys = buffer->priv_phys;
-
- if (!vaddr) {
- /*
- * Split the vmalloc space into smaller regions in
- * order to clean and/or invalidate the cache.
- */
- size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
- total_size = buffer->size;
-
- for (i = 0; i < total_size; i += size_to_vmap) {
- size_to_vmap = min(size_to_vmap, total_size - i);
- for (j = 0; j < 10 && size_to_vmap; ++j) {
- ptr = ioremap(buff_phys, size_to_vmap);
- if (ptr) {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- buff_phys += size_to_vmap;
- break;
- } else {
- size_to_vmap >>= 1;
- }
- }
- if (!ptr) {
- pr_err("Couldn't io-remap the memory\n");
- return -EINVAL;
- }
- iounmap(ptr);
- }
- } else {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- }
-
- if (carveout_heap->has_outer_cache) {
- unsigned long pstart = buffer->priv_phys + offset;
- outer_cache_op(pstart, pstart + length);
- }
- return 0;
-}
-
static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map)
{
@@ -363,124 +216,16 @@
return 0;
}
-int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- struct iommu_domain *domain;
- int ret = 0;
- unsigned long extra;
- struct scatterlist *sglist = 0;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = buffer->priv_phys;
- return 0;
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- sglist = vmalloc(sizeof(*sglist));
- if (!sglist)
- goto out1;
-
- sg_init_table(sglist, 1);
- sglist->length = buffer->size;
- sglist->offset = 0;
- sglist->dma_address = buffer->priv_phys;
-
- ret = iommu_map_range(domain, data->iova_addr, sglist,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(sglist);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- vfree(sglist);
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- vfree(sglist);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
-out:
-
- return ret;
-}
-
-void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
.phys = ion_carveout_heap_phys,
.map_user = ion_carveout_heap_map_user,
.map_kernel = ion_carveout_heap_map_kernel,
- .unmap_user = ion_carveout_heap_unmap_user,
.unmap_kernel = ion_carveout_heap_unmap_kernel,
.map_dma = ion_carveout_heap_map_dma,
.unmap_dma = ion_carveout_heap_unmap_dma,
- .cache_op = ion_carveout_cache_ops,
.print_debug = ion_carveout_print_debug,
- .map_iommu = ion_carveout_heap_map_iommu,
- .unmap_iommu = ion_carveout_heap_unmap_iommu,
};
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
@@ -511,19 +256,6 @@
carveout_heap->total_size = heap_data->size;
carveout_heap->has_outer_cache = heap_data->has_outer_cache;
- if (heap_data->extra_data) {
- struct ion_co_heap_pdata *extra_data =
- heap_data->extra_data;
-
- if (extra_data->setup_region)
- carveout_heap->bus_id = extra_data->setup_region();
- if (extra_data->request_region)
- carveout_heap->request_region =
- extra_data->request_region;
- if (extra_data->release_region)
- carveout_heap->release_region =
- extra_data->release_region;
- }
return &carveout_heap->heap;
}
diff --git a/drivers/gpu/ion/ion_chunk_heap.c b/drivers/gpu/ion/ion_chunk_heap.c
new file mode 100644
index 0000000..b76f898
--- /dev/null
+++ b/drivers/gpu/ion/ion_chunk_heap.c
@@ -0,0 +1,180 @@
+/*
+ * drivers/gpu/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+//#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+
+#include <asm/mach/map.h>
+
+struct ion_chunk_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+ unsigned long chunk_size;
+ unsigned long size;
+ unsigned long allocated;
+};
+
+static int ion_chunk_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret, i;
+ unsigned long num_chunks;
+
+ if (ion_buffer_fault_user_mappings(buffer))
+ return -ENOMEM;
+
+ num_chunks = ALIGN(size, chunk_heap->chunk_size) /
+ chunk_heap->chunk_size;
+ buffer->size = num_chunks * chunk_heap->chunk_size;
+
+ if (buffer->size > chunk_heap->size - chunk_heap->allocated)
+ return -ENOMEM;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ret;
+ }
+
+ sg = table->sgl;
+ for (i = 0; i < num_chunks; i++) {
+ unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
+ chunk_heap->chunk_size);
+ if (!paddr)
+ goto err;
+ sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
+ sg = sg_next(sg);
+ }
+
+ buffer->priv_virt = table;
+ chunk_heap->allocated += buffer->size;
+ return 0;
+err:
+ sg = table->sgl;
+ for (i -= 1; i >= 0; i--) {
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg_dma_len(sg));
+ sg = sg_next(sg);
+ }
+ sg_free_table(table);
+ kfree(table);
+ return -ENOMEM;
+}
+
+static void ion_chunk_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ int i;
+
+ ion_heap_buffer_zero(buffer);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, sg, 1, DMA_BIDIRECTIONAL);
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg_dma_len(sg));
+ }
+ chunk_heap->allocated -= buffer->size;
+ sg_free_table(table);
+ kfree(table);
+}
+
+struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static struct ion_heap_ops chunk_heap_ops = {
+ .allocate = ion_chunk_heap_allocate,
+ .free = ion_chunk_heap_free,
+ .map_dma = ion_chunk_heap_map_dma,
+ .unmap_dma = ion_chunk_heap_unmap_dma,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_chunk_heap *chunk_heap;
+ struct scatterlist sg;
+
+ chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+ if (!chunk_heap)
+ return ERR_PTR(-ENOMEM);
+
+ chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+ chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+ PAGE_SHIFT, -1);
+ if (!chunk_heap->pool) {
+ kfree(chunk_heap);
+ return ERR_PTR(-ENOMEM);
+ }
+ chunk_heap->base = heap_data->base;
+ chunk_heap->size = heap_data->size;
+ chunk_heap->allocated = 0;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, phys_to_page(heap_data->base), heap_data->size, 0);
+ dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
+ gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+ chunk_heap->heap.ops = &chunk_heap_ops;
+ chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+ chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ pr_info("%s: base %pa size %zd align %pa\n", __func__,
+ &chunk_heap->base, heap_data->size, &heap_data->align);
+
+ return &chunk_heap->heap;
+}
+
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+
+ gen_pool_destroy(chunk_heap->pool);
+ kfree(chunk_heap);
+ chunk_heap = NULL;
+}
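ion_chunk_heap_allocate() above rounds every request up to a whole number of fixed-size chunks and charges the rounded size against the heap. A worked example of that rounding, assuming a 64 KiB chunk size (the real value comes from heap_data->priv) and a 100 KiB request:

  #include <stdio.h>

  /* a must be a power of two, as the chunk size is in practice */
  #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

  int main(void)
  {
          unsigned long chunk_size = 64 * 1024;   /* assumed chunk size */
          unsigned long request = 100 * 1024;     /* caller asks for 100 KiB */

          unsigned long num_chunks = ALIGN(request, chunk_size) / chunk_size;
          unsigned long charged = num_chunks * chunk_size;

          /* 100 KiB rounds up to 2 chunks, so 128 KiB is charged to the heap. */
          printf("chunks=%lu charged=%lu bytes\n", num_chunks, charged);
          return 0;
  }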
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
index 4f12e38..193f4d4 100644
--- a/drivers/gpu/ion/ion_cma_heap.c
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -47,7 +47,7 @@
int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size)
{
- struct page *page = virt_to_page(cpu_addr);
+ struct page *page = phys_to_page(handle);
int ret;
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -178,148 +178,6 @@
return;
}
-int ion_cma_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- unsigned long extra_iova_addr;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
- struct sg_table *table = info->table;
- int prot = IOMMU_WRITE | IOMMU_READ;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = info->handle;
- return 0;
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -EINVAL;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, table->sgl,
- buffer->size, prot);
-
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- extra_iova_addr = data->iova_addr + buffer->size;
- if (extra) {
- unsigned long phys_addr = sg_phys(table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-
-void ion_cma_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
-int ion_cma_cache_ops(struct ion_heap *heap,
- struct ion_buffer *buffer, void *vaddr,
- unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- if (!vaddr)
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- else
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- if (!vaddr)
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- else
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- if (!vaddr) {
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- } else {
- dmac_flush_range(vaddr, vaddr + length);
- }
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- if (cma_heap_has_outer_cache) {
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- outer_cache_op(info->handle, info->handle + length);
- }
-
- return 0;
-}
-
static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map)
{
@@ -358,9 +216,6 @@
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
- .map_iommu = ion_cma_map_iommu,
- .unmap_iommu = ion_cma_unmap_iommu,
- .cache_op = ion_cma_cache_ops,
.print_debug = ion_cma_print_debug,
};
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
index d7a5920..e1b3eea 100644
--- a/drivers/gpu/ion/ion_cma_secure_heap.c
+++ b/drivers/gpu/ion/ion_cma_secure_heap.c
@@ -44,7 +44,6 @@
bool is_cached;
};
-static int cma_heap_has_outer_cache;
/*
* Create scatter-list for the already allocated DMA buffer.
* This function could be replace by dma_common_get_sgtable
@@ -53,7 +52,7 @@
int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size)
{
- struct page *page = virt_to_page(cpu_addr);
+ struct page *page = phys_to_page(handle);
int ret;
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -117,7 +116,7 @@
unsigned long len, unsigned long align,
unsigned long flags)
{
- unsigned long secure_allocation = flags & ION_SECURE;
+ unsigned long secure_allocation = flags & ION_FLAG_SECURE;
struct ion_secure_cma_buffer_info *buf = NULL;
if (!secure_allocation) {
@@ -212,110 +211,6 @@
return;
}
-int ion_secure_cma_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- unsigned long extra_iova_addr;
- struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
- struct sg_table *table = info->table;
- int prot = IOMMU_WRITE | IOMMU_READ;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = info->handle;
- return 0;
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -EINVAL;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, table->sgl,
- buffer->size, prot);
-
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- extra_iova_addr = data->iova_addr + buffer->size;
- if (extra) {
- unsigned long phys_addr = sg_phys(table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-
-void ion_secure_cma_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
-int ion_secure_cma_cache_ops(struct ion_heap *heap,
- struct ion_buffer *buffer, void *vaddr,
- unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- pr_info("%s: cache operations disallowed from secure heap %s\n",
- __func__, heap->name);
- return -EINVAL;
-}
-
static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map)
{
@@ -354,9 +249,6 @@
.map_user = ion_secure_cma_mmap,
.map_kernel = ion_secure_cma_map_kernel,
.unmap_kernel = ion_secure_cma_unmap_kernel,
- .map_iommu = ion_secure_cma_map_iommu,
- .unmap_iommu = ion_secure_cma_unmap_iommu,
- .cache_op = ion_secure_cma_cache_ops,
.print_debug = ion_secure_cma_print_debug,
.secure_buffer = ion_cp_secure_buffer,
.unsecure_buffer = ion_cp_unsecure_buffer,
@@ -376,7 +268,6 @@
* used to make the link with reserved CMA memory */
heap->priv = data->priv;
heap->type = ION_HEAP_TYPE_SECURE_DMA;
- cma_heap_has_outer_cache = data->has_outer_cache;
return heap;
}
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 7bcae01..f1868a8 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -67,10 +67,6 @@
* kernel space (un-cached).
* @umap_count: the total number of times this heap has been mapped in
* user space.
- * @iommu_iova: saved iova when mapping full heap at once.
- * @iommu_partition: partition used to map full heap.
- * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
- * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
* @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
*/
struct ion_cp_heap {
@@ -90,11 +86,6 @@
unsigned long kmap_cached_count;
unsigned long kmap_uncached_count;
unsigned long umap_count;
- unsigned long iommu_iova[MAX_DOMAINS];
- unsigned long iommu_partition[MAX_DOMAINS];
- void *reserved_vrange;
- int iommu_map_all;
- int iommu_2x_map_domain;
unsigned int has_outer_cache;
atomic_t protect_cnt;
void *cpu_addr;
@@ -289,8 +280,8 @@
unsigned long flags)
{
unsigned long offset;
- unsigned long secure_allocation = flags & ION_SECURE;
- unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS;
+ unsigned long secure_allocation = flags & ION_FLAG_SECURE;
+ unsigned long force_contig = flags & ION_FLAG_FORCE_CONTIGUOUS;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
@@ -361,29 +352,6 @@
return offset;
}
-static void iommu_unmap_all(unsigned long domain_num,
- struct ion_cp_heap *cp_heap)
-{
- unsigned long left_to_unmap = cp_heap->total_size;
- unsigned long page_size = SZ_64K;
-
- struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
- if (domain) {
- unsigned long temp_iova = cp_heap->iommu_iova[domain_num];
-
- while (left_to_unmap) {
- iommu_unmap(domain, temp_iova, page_size);
- temp_iova += page_size;
- left_to_unmap -= page_size;
- }
- if (domain_num == cp_heap->iommu_2x_map_domain)
- msm_iommu_unmap_extra(domain, temp_iova,
- cp_heap->total_size, SZ_64K);
- } else {
- pr_err("Unable to get IOMMU domain %lu\n", domain_num);
- }
-}
-
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size)
{
@@ -401,25 +369,6 @@
cp_heap->heap_protected == HEAP_NOT_PROTECTED)
ion_on_last_free(heap);
- /* Unmap everything if we previously mapped the whole heap at once. */
- if (!cp_heap->allocated_bytes) {
- unsigned int i;
- for (i = 0; i < MAX_DOMAINS; ++i) {
- if (cp_heap->iommu_iova[i]) {
- unsigned long vaddr_len = cp_heap->total_size;
-
- if (i == cp_heap->iommu_2x_map_domain)
- vaddr_len <<= 1;
- iommu_unmap_all(i, cp_heap);
-
- msm_free_iova_address(cp_heap->iommu_iova[i], i,
- cp_heap->iommu_partition[i],
- vaddr_len);
- }
- cp_heap->iommu_iova[i] = 0;
- cp_heap->iommu_partition[i] = 0;
- }
- }
mutex_unlock(&cp_heap->lock);
}
@@ -460,7 +409,7 @@
buf->want_delayed_unsecure = 0;
atomic_set(&buf->secure_cnt, 0);
mutex_init(&buf->lock);
- buf->is_secure = flags & ION_SECURE ? 1 : 0;
+ buf->is_secure = flags & ION_FLAG_SECURE ? 1 : 0;
buffer->priv_virt = buf;
return 0;
@@ -674,91 +623,6 @@
mutex_unlock(&cp_heap->lock);
}
-int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- unsigned int size_to_vmap, total_size;
- struct ion_cp_buffer *buf = buffer->priv_virt;
- int i, j;
- void *ptr = NULL;
- ion_phys_addr_t buff_phys = buffer->priv_phys;
-
- if (!vaddr) {
- /*
- * Split the vmalloc space into smaller regions in
- * order to clean and/or invalidate the cache.
- */
- size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
- total_size = buffer->size;
- for (i = 0; i < total_size; i += size_to_vmap) {
- size_to_vmap = min(size_to_vmap, total_size - i);
- for (j = 0; j < 10 && size_to_vmap; ++j) {
- ptr = ioremap(buff_phys, size_to_vmap);
- if (ptr) {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- buff_phys += size_to_vmap;
- break;
- } else {
- size_to_vmap >>= 1;
- }
- }
- if (!ptr) {
- pr_err("Couldn't io-remap the memory\n");
- return -EINVAL;
- }
- iounmap(ptr);
- }
- } else {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- }
-
- if (cp_heap->has_outer_cache) {
- unsigned long pstart = buf->buffer + offset;
- outer_cache_op(pstart, pstart + length);
- }
- return 0;
-}
-
static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map)
{
@@ -859,205 +723,6 @@
return ret_value;
}
-static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
- int partition, unsigned long prot)
-{
- unsigned long left_to_map = cp_heap->total_size;
- unsigned long page_size = SZ_64K;
- int ret_value = 0;
- unsigned long virt_addr_len = cp_heap->total_size;
- struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
-
- /* If we are mapping into the video domain we need to map twice the
- * size of the heap to account for prefetch issue in video core.
- */
- if (domain_num == cp_heap->iommu_2x_map_domain)
- virt_addr_len <<= 1;
-
- if (cp_heap->total_size & (SZ_64K-1)) {
- pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
- ret_value = -EINVAL;
- }
- if (cp_heap->base & (SZ_64K-1)) {
- pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
- ret_value = -EINVAL;
- }
- if (!ret_value && domain) {
- unsigned long temp_phys = cp_heap->base;
- unsigned long temp_iova;
-
- ret_value = msm_allocate_iova_address(domain_num, partition,
- virt_addr_len, SZ_64K,
- &temp_iova);
-
- if (ret_value) {
- pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
- __func__, domain_num, partition);
- goto out;
- }
- cp_heap->iommu_iova[domain_num] = temp_iova;
-
- while (left_to_map) {
- int ret = iommu_map(domain, temp_iova, temp_phys,
- page_size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p, error: %d\n",
- __func__, temp_iova, domain, ret);
- ret_value = -EAGAIN;
- goto free_iova;
- }
- temp_iova += page_size;
- temp_phys += page_size;
- left_to_map -= page_size;
- }
- if (domain_num == cp_heap->iommu_2x_map_domain)
- ret_value = msm_iommu_map_extra(domain, temp_iova,
- cp_heap->base,
- cp_heap->total_size,
- SZ_64K, prot);
- if (ret_value)
- goto free_iova;
- } else {
- pr_err("Unable to get IOMMU domain %lu\n", domain_num);
- ret_value = -ENOMEM;
- }
- goto out;
-
-free_iova:
- msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
- partition, virt_addr_len);
-out:
- return ret_value;
-}
-
-static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- struct iommu_domain *domain;
- int ret = 0;
- unsigned long extra;
- struct ion_cp_heap *cp_heap =
- container_of(buffer->heap, struct ion_cp_heap, heap);
- int prot = IOMMU_WRITE | IOMMU_READ;
- struct ion_cp_buffer *buf = buffer->priv_virt;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = buf->buffer;
- return 0;
- }
-
- if (cp_heap->iommu_iova[domain_num]) {
- /* Already mapped. */
- unsigned long offset = buf->buffer - cp_heap->base;
- data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
- return 0;
- } else if (cp_heap->iommu_map_all) {
- ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
- if (!ret) {
- unsigned long offset =
- buf->buffer - cp_heap->base;
- data->iova_addr =
- cp_heap->iommu_iova[domain_num] + offset;
- cp_heap->iommu_partition[domain_num] = partition_num;
- /*
- clear delayed map flag so that we don't interfere
- with this feature (we are already delaying).
- */
- data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
- return 0;
- } else {
- cp_heap->iommu_iova[domain_num] = 0;
- cp_heap->iommu_partition[domain_num] = 0;
- return ret;
- }
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
- struct ion_cp_heap *cp_heap =
- container_of(data->buffer->heap, struct ion_cp_heap, heap);
-
- if (!msm_use_iommu())
- return;
-
-
- domain_num = iommu_map_domain(data);
-
- /* If we are mapping everything we'll wait to unmap until everything
- is freed. */
- if (cp_heap->iommu_iova[domain_num])
- return;
-
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
static struct ion_heap_ops cp_heap_ops = {
.allocate = ion_cp_heap_allocate,
.free = ion_cp_heap_free,
@@ -1068,12 +733,9 @@
.unmap_kernel = ion_cp_heap_unmap_kernel,
.map_dma = ion_cp_heap_map_dma,
.unmap_dma = ion_cp_heap_unmap_dma,
- .cache_op = ion_cp_cache_ops,
.print_debug = ion_cp_print_debug,
.secure_heap = ion_cp_secure_heap,
.unsecure_heap = ion_cp_unsecure_heap,
- .map_iommu = ion_cp_heap_map_iommu,
- .unmap_iommu = ion_cp_heap_unmap_iommu,
.secure_buffer = ion_cp_secure_buffer,
.unsecure_buffer = ion_cp_unsecure_buffer,
};
@@ -1120,10 +782,6 @@
if (extra_data->release_region)
cp_heap->heap_release_region =
extra_data->release_region;
- cp_heap->iommu_map_all =
- extra_data->iommu_map_all;
- cp_heap->iommu_2x_map_domain =
- extra_data->iommu_2x_map_domain;
cp_heap->cma = extra_data->is_cma;
cp_heap->allow_non_secure_allocation =
extra_data->allow_nonsecure_alloc;
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 46fefb5..3d37541 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -17,14 +17,125 @@
#include <linux/err.h>
#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
#include "ion_priv.h"
-#include <linux/msm_ion.h>
+
+void *ion_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct scatterlist *sg;
+ int i, j;
+ void *vaddr;
+ pgprot_t pgprot;
+ struct sg_table *table = buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+
+ if (!pages)
+ return 0;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
+ struct page *page = sg_page(sg);
+ BUG_ON(i >= npages);
+ for (j = 0; j < npages_this_entry; j++) {
+ *(tmp++) = page++;
+ }
+ }
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ vfree(pages);
+
+ return vaddr;
+}
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ vunmap(buffer->vaddr);
+}
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table = buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg_dma_len(sg);
+
+ if (offset >= sg_dma_len(sg)) {
+ offset -= sg_dma_len(sg);
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg_dma_len(sg) - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->sg_table;
+ pgprot_t pgprot;
+ struct scatterlist *sg;
+ struct vm_struct *vm_struct;
+ int i, j, ret = 0;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
+ if (!vm_struct)
+ return -ENOMEM;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long len = sg_dma_len(sg);
+
+ for (j = 0; j < len / PAGE_SIZE; j++) {
+ struct page *sub_page = page + j;
+ struct page **pages = &sub_page;
+ ret = map_vm_area(vm_struct, pgprot, &pages);
+ if (ret)
+ goto end;
+ memset(vm_struct->addr, 0, PAGE_SIZE);
+ unmap_kernel_range((unsigned long)vm_struct->addr,
+ PAGE_SIZE);
+ }
+ }
+end:
+ free_vm_area(vm_struct);
+ return ret;
+}
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_heap *heap = NULL;
- switch ((int) heap_data->type) {
+ switch (heap_data->type) {
case ION_HEAP_TYPE_SYSTEM_CONTIG:
heap = ion_system_contig_heap_create(heap_data);
break;
@@ -34,21 +145,9 @@
case ION_HEAP_TYPE_CARVEOUT:
heap = ion_carveout_heap_create(heap_data);
break;
- case ION_HEAP_TYPE_IOMMU:
- heap = ion_iommu_heap_create(heap_data);
+ case ION_HEAP_TYPE_CHUNK:
+ heap = ion_chunk_heap_create(heap_data);
break;
- case ION_HEAP_TYPE_CP:
- heap = ion_cp_heap_create(heap_data);
- break;
-#ifdef CONFIG_CMA
- case ION_HEAP_TYPE_DMA:
- heap = ion_cma_heap_create(heap_data);
- break;
-
- case ION_HEAP_TYPE_SECURE_DMA:
- heap = ion_secure_cma_heap_create(heap_data);
- break;
-#endif
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap_data->type);
@@ -73,7 +172,7 @@
if (!heap)
return;
- switch ((int) heap->type) {
+ switch (heap->type) {
case ION_HEAP_TYPE_SYSTEM_CONTIG:
ion_system_contig_heap_destroy(heap);
break;
@@ -83,20 +182,9 @@
case ION_HEAP_TYPE_CARVEOUT:
ion_carveout_heap_destroy(heap);
break;
- case ION_HEAP_TYPE_IOMMU:
- ion_iommu_heap_destroy(heap);
+ case ION_HEAP_TYPE_CHUNK:
+ ion_chunk_heap_destroy(heap);
break;
- case ION_HEAP_TYPE_CP:
- ion_cp_heap_destroy(heap);
- break;
-#ifdef CONFIG_CMA
- case ION_HEAP_TYPE_DMA:
- ion_cma_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_SECURE_DMA:
- ion_secure_cma_heap_destroy(heap);
- break;
-#endif
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap->type);
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 512ebf3..bc9bddd 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -27,10 +27,10 @@
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>
+#include <trace/events/kmem.h>
struct ion_iommu_heap {
struct ion_heap heap;
- unsigned int has_outer_cache;
};
/*
@@ -84,9 +84,13 @@
} else {
gfp |= GFP_KERNEL;
}
+ trace_alloc_pages_iommu_start(gfp, orders[i]);
page = alloc_pages(gfp, orders[i]);
- if (!page)
+ trace_alloc_pages_iommu_end(gfp, orders[i]);
+ if (!page) {
+ trace_alloc_pages_iommu_fail(gfp, orders[i]);
continue;
+ }
info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
info->page = page;
@@ -112,7 +116,7 @@
int j;
void *ptr = NULL;
unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
- long size_remaining = PAGE_ALIGN(size);
+ unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
data = kmalloc(sizeof(*data), GFP_KERNEL);
@@ -315,157 +319,6 @@
return 0;
}
-int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- struct iommu_domain *domain;
- int ret = 0;
- unsigned long extra;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- BUG_ON(!msm_use_iommu());
-
- data->mapped_size = iova_length;
- extra = iova_length - buffer->size;
-
- /* Use the biggest alignment to allow bigger IOMMU mappings.
- * Use the first entry since the first entry will always be the
- * biggest entry. To take advantage of bigger mapping sizes both the
- * VA and PA addresses have to be aligned to the biggest size.
- */
- if (buffer->sg_table->sgl->length > align)
- align = buffer->sg_table->sgl->length;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr,
- buffer->sg_table->sgl,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- buffer->size);
-
-out:
-
- return ret;
-}
-
-void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- BUG_ON(!msm_use_iommu());
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
-static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
- struct ion_iommu_heap *iommu_heap =
- container_of(heap, struct ion_iommu_heap, heap);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- if (!vaddr)
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- else
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- if (!vaddr)
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- else
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- if (!vaddr) {
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- } else {
- dmac_flush_range(vaddr, vaddr + length);
- }
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- if (iommu_heap->has_outer_cache) {
- unsigned long pstart;
- unsigned int i;
- struct ion_iommu_priv_data *data = buffer->priv_virt;
- if (!data)
- return -ENOMEM;
-
- for (i = 0; i < data->nrpages; ++i) {
- pstart = page_to_phys(data->pages[i]);
- outer_cache_op(pstart, pstart + PAGE_SIZE);
- }
- }
- return 0;
-}
-
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
@@ -483,9 +336,6 @@
.map_user = ion_iommu_heap_map_user,
.map_kernel = ion_iommu_heap_map_kernel,
.unmap_kernel = ion_iommu_heap_unmap_kernel,
- .map_iommu = ion_iommu_heap_map_iommu,
- .unmap_iommu = ion_iommu_heap_unmap_iommu,
- .cache_op = ion_iommu_cache_ops,
.map_dma = ion_iommu_heap_map_dma,
.unmap_dma = ion_iommu_heap_unmap_dma,
};
@@ -500,7 +350,6 @@
iommu_heap->heap.ops = &iommu_heap_ops;
iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
- iommu_heap->has_outer_cache = heap_data->has_outer_cache;
return &iommu_heap->heap;
}
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/gpu/ion/ion_page_pool.c
new file mode 100644
index 0000000..e8b5489
--- /dev/null
+++ b/drivers/gpu/ion/ion_page_pool.c
@@ -0,0 +1,282 @@
+/*
+ * drivers/gpu/ion/ion_mem_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/shrinker.h>
+#include "ion_priv.h"
+
+/* #define DEBUG_PAGE_POOL_SHRINKER */
+
+static struct plist_head pools = PLIST_HEAD_INIT(pools);
+static struct shrinker shrinker;
+
+struct ion_page_pool_item {
+ struct page *page;
+ struct list_head list;
+};
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+ struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+ struct scatterlist sg;
+
+ if (!page)
+ return NULL;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
+ dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
+
+ return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+ struct page *page)
+{
+ __free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+ struct ion_page_pool_item *item;
+
+ item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ mutex_lock(&pool->mutex);
+ item->page = page;
+ if (PageHighMem(page)) {
+ list_add_tail(&item->list, &pool->high_items);
+ pool->high_count++;
+ } else {
+ list_add_tail(&item->list, &pool->low_items);
+ pool->low_count++;
+ }
+ mutex_unlock(&pool->mutex);
+ return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+ struct ion_page_pool_item *item;
+ struct page *page;
+
+ if (high) {
+ BUG_ON(!pool->high_count);
+ item = list_first_entry(&pool->high_items,
+ struct ion_page_pool_item, list);
+ pool->high_count--;
+ } else {
+ BUG_ON(!pool->low_count);
+ item = list_first_entry(&pool->low_items,
+ struct ion_page_pool_item, list);
+ pool->low_count--;
+ }
+
+ list_del(&item->list);
+ page = item->page;
+ kfree(item);
+ return page;
+}
+
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+ struct page *page = NULL;
+
+ BUG_ON(!pool);
+
+ mutex_lock(&pool->mutex);
+ if (pool->high_count)
+ page = ion_page_pool_remove(pool, true);
+ else if (pool->low_count)
+ page = ion_page_pool_remove(pool, false);
+ mutex_unlock(&pool->mutex);
+
+ if (!page)
+ page = ion_page_pool_alloc_pages(pool);
+
+ return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
+{
+ int ret;
+
+ ret = ion_page_pool_add(pool, page);
+ if (ret)
+ ion_page_pool_free_pages(pool, page);
+}
+
+#ifdef DEBUG_PAGE_POOL_SHRINKER
+static int debug_drop_pools_set(void *data, u64 val)
+{
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ if (!val)
+ return 0;
+
+ objs = shrinker.shrink(&shrinker, &sc);
+ sc.nr_to_scan = objs;
+
+ shrinker.shrink(&shrinker, &sc);
+ return 0;
+}
+
+static int debug_drop_pools_get(void *data, u64 *val)
+{
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ objs = shrinker.shrink(&shrinker, &sc);
+ *val = objs;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_drop_pools_fops, debug_drop_pools_get,
+ debug_drop_pools_set, "%llu\n");
+
+static int debug_grow_pools_set(void *data, u64 val)
+{
+ struct ion_page_pool *pool;
+ struct page *page;
+
+ plist_for_each_entry(pool, &pools, list) {
+ if (val != pool->list.prio)
+ continue;
+ page = ion_page_pool_alloc_pages(pool);
+ if (page)
+ ion_page_pool_add(pool, page);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_grow_pools_fops, debug_drop_pools_get,
+ debug_grow_pools_set, "%llu\n");
+#endif
+
+static int ion_page_pool_total(bool high)
+{
+ struct ion_page_pool *pool;
+ int total = 0;
+
+ plist_for_each_entry(pool, &pools, list) {
+ total += high ? (pool->high_count + pool->low_count) *
+ (1 << pool->order) :
+ pool->low_count * (1 << pool->order);
+ }
+ return total;
+}
+
+static int ion_page_pool_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_page_pool *pool;
+ int nr_freed = 0;
+ int i;
+	bool high = false;
+ int nr_to_scan = sc->nr_to_scan;
+
+ if (sc->gfp_mask & __GFP_HIGHMEM)
+ high = true;
+
+ if (nr_to_scan == 0)
+ return ion_page_pool_total(high);
+
+ plist_for_each_entry(pool, &pools, list) {
+ for (i = 0; i < nr_to_scan; i++) {
+ struct page *page;
+
+ mutex_lock(&pool->mutex);
+ if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
+ } else if (pool->low_count) {
+ page = ion_page_pool_remove(pool, false);
+ } else {
+ mutex_unlock(&pool->mutex);
+ break;
+ }
+ mutex_unlock(&pool->mutex);
+ ion_page_pool_free_pages(pool, page);
+ nr_freed += (1 << pool->order);
+ }
+ nr_to_scan -= i;
+ }
+
+ return ion_page_pool_total(high);
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+ struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+ GFP_KERNEL);
+ if (!pool)
+ return NULL;
+ pool->high_count = 0;
+ pool->low_count = 0;
+ INIT_LIST_HEAD(&pool->low_items);
+ INIT_LIST_HEAD(&pool->high_items);
+ pool->gfp_mask = gfp_mask;
+ pool->order = order;
+ mutex_init(&pool->mutex);
+ plist_node_init(&pool->list, order);
+ plist_add(&pool->list, &pools);
+
+ return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+ plist_del(&pool->list, &pools);
+ kfree(pool);
+}
+
+static int __init ion_page_pool_init(void)
+{
+ shrinker.shrink = ion_page_pool_shrink;
+ shrinker.seeks = DEFAULT_SEEKS;
+ shrinker.batch = 0;
+ register_shrinker(&shrinker);
+#ifdef DEBUG_PAGE_POOL_SHRINKER
+ debugfs_create_file("ion_pools_shrink", 0644, NULL, NULL,
+ &debug_drop_pools_fops);
+ debugfs_create_file("ion_pools_grow", 0644, NULL, NULL,
+ &debug_grow_pools_fops);
+#endif
+ return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+ unregister_shrinker(&shrinker);
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
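
For context, here is a minimal sketch of how a heap could consume the pool API implemented above (and declared in ion_priv.h later in this patch). The example_* names and the order value are illustrative only and are not part of the patch:

/* Hypothetical heap-side usage of the ion_page_pool API (sketch only). */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include "ion_priv.h"

static struct ion_page_pool *example_pool;

static int example_pool_setup(void)
{
	/* one pool of order-4 pages; flags mirror low_order_gfp_flags */
	example_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN, 4);
	return example_pool ? 0 : -ENOMEM;
}

static struct page *example_get_page(void)
{
	/* reuses a pooled page when available, otherwise allocates fresh */
	return ion_page_pool_alloc(example_pool);
}

static void example_put_page(struct page *page)
{
	/* returns the page to the pool; the shrinker frees it later */
	ion_page_pool_free(example_pool, page);
}

static void example_pool_teardown(void)
{
	ion_page_pool_destroy(example_pool);
}

Uncached heaps benefit most from this pattern, since pooled pages have already been synced for DMA by ion_page_pool_alloc_pages().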
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 28ef1a5..4b724df 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -18,49 +18,17 @@
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
+#include <linux/ion.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
-#include <linux/ion.h>
-#include <linux/iommu.h>
#include <linux/seq_file.h>
-enum {
- DI_PARTITION_NUM = 0,
- DI_DOMAIN_NUM = 1,
- DI_MAX,
-};
-
-/**
- * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
- * @iova_addr - iommu virtual address
- * @node - rb node to exist in the buffer's tree of iommu mappings
- * @domain_info - contains the partition number and domain number
- * domain_info[1] = domain number
- * domain_info[0] = partition number
- * @ref - for reference counting this mapping
- * @mapped_size - size of the iova space mapped
- * (may not be the same as the buffer size)
- * @flags - iommu domain/partition specific flags.
- *
- * Represents a mapping of one ion buffer to a particular iommu domain
- * and address range. There may exist other mappings of this buffer in
- * different domains or address ranges. All mappings will have the same
- * cacheability and security.
- */
-struct ion_iommu_map {
- unsigned long iova_addr;
- struct rb_node node;
- union {
- int domain_info[DI_MAX];
- uint64_t key;
- };
- struct ion_buffer *buffer;
- struct kref ref;
- int mapped_size;
- unsigned long flags;
-};
+#include "msm_ion_priv.h"
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
@@ -81,10 +49,22 @@
 * @vaddr: the kernel mapping if kmap_cnt is not zero
* @dmap_cnt: number of times the buffer is mapped for dma
* @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @dirty: bitmask representing which pages of this buffer have
+ * been dirtied by the cpu and need cache maintenance
+ * before dma
+ * @vmas: list of vma's mapping this buffer
+ * @handle_count: count of handles referencing this buffer
+ * @task_comm: taskcomm of last client to reference this buffer in a
+ * handle, used for debugging
+ * @pid: pid of last client to reference this buffer in a
+ * handle, used for debugging
*/
struct ion_buffer {
struct kref ref;
- struct rb_node node;
+ union {
+ struct rb_node node;
+ struct list_head list;
+ };
struct ion_device *dev;
struct ion_heap *heap;
unsigned long flags;
@@ -100,9 +80,10 @@
struct sg_table *sg_table;
unsigned long *dirty;
struct list_head vmas;
- unsigned int iommu_map_cnt;
- struct rb_root iommu_maps;
- int marked;
+ /* used to track orphaned buffers */
+ int handle_count;
+ char task_comm[TASK_COMM_LEN];
+ pid_t pid;
};
/**
@@ -133,17 +114,6 @@
int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma);
void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
- int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset,
- unsigned int length, unsigned int cmd);
- int (*map_iommu)(struct ion_buffer *buffer,
- struct ion_iommu_map *map_data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags);
- void (*unmap_iommu)(struct ion_iommu_map *data);
int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map);
int (*secure_heap)(struct ion_heap *heap, int version, void *data);
@@ -154,16 +124,28 @@
};
/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
* struct ion_heap - represents a heap in the system
* @node: rb node to put the heap on the device's tree of heaps
* @dev: back pointer to the ion_device
* @type: type of heap
* @ops: ops struct as above
+ * @flags: flags
* @id: id of heap, also indicates priority of this heap when
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
* @priv: private heap data
+ * @free_list: free list head if deferred free is used
+ * @lock: protects the free list
+ * @waitqueue: queue to wait on from deferred free thread
+ * @task: task struct of deferred free thread
+ * @debug_show: called when heap debug file is read to add any
+ * heap specific debug info to output
*
* Represents a pool of memory from which buffers can be made. In some
* systems the only heap is regular system memory allocated via vmalloc.
@@ -171,16 +153,30 @@
* that are allocated from a specially reserved heap.
*/
struct ion_heap {
- struct rb_node node;
+ struct plist_node node;
struct ion_device *dev;
enum ion_heap_type type;
struct ion_heap_ops *ops;
- int id;
+ unsigned long flags;
+ unsigned int id;
const char *name;
void *priv;
+ struct list_head free_list;
+ struct rt_mutex lock;
+ wait_queue_head_t waitqueue;
+ struct task_struct *task;
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};
/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer: buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
* ion_buffer_fault_user_mappings - fault in user mappings of this buffer
* @buffer: buffer
*
@@ -190,26 +186,6 @@
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
/**
- * struct mem_map_data - represents information about the memory map for a heap
- * @node: rb node used to store in the tree of mem_map_data
- * @addr: start address of memory region.
- * @addr: end address of memory region.
- * @size: size of memory region
- * @client_name: name of the client who owns this buffer.
- *
- */
-struct mem_map_data {
- struct rb_node node;
- ion_phys_addr_t addr;
- ion_phys_addr_t addr_end;
- unsigned long size;
- const char *client_name;
-};
-
-#define iommu_map_domain(__m) ((__m)->domain_info[1])
-#define iommu_map_partition(__m) ((__m)->domain_info[0])
-
-/**
* ion_device_create - allocates and returns an ion device
* @custom_ioctl: arch specific ioctl function if applicable
*
@@ -234,6 +210,17 @@
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+ struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+
+
+/**
* functions for creating and destroying the built in ion heaps.
* architectures can add their own custom architecture specific
* heaps as appropriate.
@@ -241,7 +228,6 @@
struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);
-
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);
@@ -251,15 +237,8 @@
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);
-struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
-void ion_iommu_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
-void ion_cp_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_reusable_heap_create(struct ion_platform_heap *);
-void ion_reusable_heap_destroy(struct ion_heap *);
-
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
/**
* kernel api to allocate/free from carveout -- used when carveout is
* used to back an architecture specific custom heap
@@ -269,88 +248,58 @@
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size);
-#ifdef CONFIG_CMA
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
-void ion_cma_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *);
-void ion_secure_cma_heap_destroy(struct ion_heap *);
-#endif
-
-struct ion_heap *msm_get_contiguous_heap(void);
/**
- * The carveout/cp heap returns physical addresses, since 0 may be a valid
+ * The carveout heap returns physical addresses, since 0 may be a valid
* physical address, this is used to indicate allocation failed
*/
#define ION_CARVEOUT_ALLOCATE_FAIL -1
-#define ION_CP_ALLOCATE_FAIL -1
+
/**
- * The reserved heap returns physical addresses, since 0 may be a valid
- * physical address, this is used to indicate allocation failed
- */
-#define ION_RESERVED_ALLOCATE_FAIL -1
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre-allocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance
+ * benefit on many systems */
/**
- * ion_map_fmem_buffer - map fmem allocated memory into the kernel
- * @buffer - buffer to map
- * @phys_base - physical base of the heap
- * @virt_base - virtual base of the heap
- * @flags - flags for the heap
+ * struct ion_page_pool - pagepool struct
+ * @high_count: number of highmem items in the pool
+ * @low_count: number of lowmem items in the pool
+ * @high_items: list of highmem items
+ * @low_items: list of lowmem items
+ * @mutex: lock protecting this struct and especially the count
+ * and item lists
+ * @alloc: function to be used to allocate pages when the pool
+ * is empty
+ * @free: function to be used to free pages back to the system
+ * when the shrinker fires
+ * @gfp_mask: gfp_mask to use for allocations
+ * @order: order of pages in the pool
+ * @list: plist node for list of pools
*
- * Map fmem allocated memory into the kernel address space. This
- * is designed to be used by other heaps that need fmem behavior.
- * The virtual range must be pre-allocated.
+ * Allows you to keep a pool of pre-allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
+ * have been invalidated from the cache, provides a significant performance
+ * benefit on many systems
*/
-void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
- void *virt_base, unsigned long flags);
+struct ion_page_pool {
+ int high_count;
+ int low_count;
+ struct list_head high_items;
+ struct list_head low_items;
+ struct mutex mutex;
+ void *(*alloc)(struct ion_page_pool *pool);
+ void (*free)(struct ion_page_pool *pool, struct page *page);
+ gfp_t gfp_mask;
+ unsigned int order;
+ struct plist_node list;
+};
-/**
- * ion_do_cache_op - do cache operations.
- *
- * @client - pointer to ION client.
- * @handle - pointer to buffer handle.
- * @uaddr - virtual address to operate on.
- * @offset - offset from physical address.
- * @len - Length of data to do cache operation on.
- * @cmd - Cache operation to perform:
- * ION_IOC_CLEAN_CACHES
- * ION_IOC_INV_CACHES
- * ION_IOC_CLEAN_INV_CACHES
- *
- * Returns 0 on success
- */
-int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
- void *uaddr, unsigned long offset, unsigned long len,
- unsigned int cmd);
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
-void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
- unsigned long *size);
-
-void ion_mem_map_show(struct ion_heap *heap);
-
-
-
-int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
- int version, void *data, int flags);
-
-int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle);
-
-int ion_heap_allow_secure_allocation(enum ion_heap_type type);
-
-int ion_heap_allow_heap_secure(enum ion_heap_type type);
-
-int ion_heap_allow_handle_secure(enum ion_heap_type type);
-
-/**
- * ion_create_chunked_sg_table - helper function to create sg table
- * with specified chunk size
- * @buffer_base: The starting address used for the sg dma address
- * @chunk_size: The size of each entry in the sg table
- * @total_size: The total size of the sg table (i.e. the sum of the
- * entries). This will be rounded up to the nearest
- * multiple of `chunk_size'
- */
-struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
- size_t chunk_size, size_t total_size);
#endif /* _ION_PRIV_H */
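
Putting the pieces of this header together, here is a hedged sketch of a heap that relies on the new generic helpers and opts into deferred freeing; the ops shown are illustrative and omit the mandatory allocate/free/map_dma/unmap_dma callbacks for brevity:

/* Hypothetical heap wiring for the generic helpers and deferred free
 * (sketch only; a real heap must also provide allocate/free/map_dma). */
#include "ion_priv.h"

static struct ion_heap_ops example_heap_ops = {
	.map_kernel	= ion_heap_map_kernel,
	.unmap_kernel	= ion_heap_unmap_kernel,
	.map_user	= ion_heap_map_user,
};

static void example_heap_init(struct ion_heap *heap)
{
	heap->ops = &example_heap_ops;
	/* buffers are then freed asynchronously by the ion core's
	 * deferred free thread rather than in the caller's context */
	heap->flags = ION_HEAP_FLAG_DEFER_FREE;
}

ion_system_heap_create() further down in this patch follows this same pattern.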
diff --git a/drivers/gpu/ion/ion_removed_heap.c b/drivers/gpu/ion/ion_removed_heap.c
new file mode 100644
index 0000000..84d8d37
--- /dev/null
+++ b/drivers/gpu/ion/ion_removed_heap.c
@@ -0,0 +1,353 @@
+/*
+ * drivers/gpu/ion/ion_removed_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/seq_file.h>
+#include "ion_priv.h"
+
+#include <asm/mach/map.h>
+#include <asm/cacheflush.h>
+#include <linux/msm_ion.h>
+
+struct ion_removed_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+ unsigned long allocated_bytes;
+ unsigned long total_size;
+ int (*request_region)(void *);
+ int (*release_region)(void *);
+ atomic_t map_count;
+ void *bus_id;
+};
+
+ion_phys_addr_t ion_removed_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
+{
+ struct ion_removed_heap *removed_heap =
+ container_of(heap, struct ion_removed_heap, heap);
+ unsigned long offset = gen_pool_alloc_aligned(removed_heap->pool,
+ size, ilog2(align));
+
+ if (!offset) {
+ if ((removed_heap->total_size -
+ removed_heap->allocated_bytes) >= size)
+ pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.",
+ __func__, heap->name,
+ removed_heap->total_size -
+ removed_heap->allocated_bytes, size);
+ return ION_CARVEOUT_ALLOCATE_FAIL;
+ }
+
+ removed_heap->allocated_bytes += size;
+ return offset;
+}
+
+void ion_removed_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
+{
+ struct ion_removed_heap *removed_heap =
+ container_of(heap, struct ion_removed_heap, heap);
+
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+ return;
+ gen_pool_free(removed_heap->pool, addr, size);
+ removed_heap->allocated_bytes -= size;
+}
+
+static int ion_removed_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ *addr = buffer->priv_phys;
+ *len = buffer->size;
+ return 0;
+}
+
+static int ion_removed_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ buffer->priv_phys = ion_removed_allocate(heap, size, align);
+ return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
+}
+
+static void ion_removed_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+
+ ion_removed_free(heap, buffer->priv_phys, buffer->size);
+ buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
+}
+
+struct sg_table *ion_removed_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct sg_table *table;
+ int ret;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err0;
+
+ table->sgl->length = buffer->size;
+ table->sgl->offset = 0;
+ table->sgl->dma_address = buffer->priv_phys;
+
+ return table;
+
+err0:
+ kfree(table);
+ return ERR_PTR(ret);
+}
+
+void ion_removed_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
+}
+
+static int ion_removed_request_region(struct ion_removed_heap *removed_heap)
+{
+ int ret_value = 0;
+ if (atomic_inc_return(&removed_heap->map_count) == 1) {
+ if (removed_heap->request_region) {
+ ret_value = removed_heap->request_region(
+ removed_heap->bus_id);
+ if (ret_value) {
+ pr_err("Unable to request SMI region");
+ atomic_dec(&removed_heap->map_count);
+ }
+ }
+ }
+ return ret_value;
+}
+
+static int ion_removed_release_region(struct ion_removed_heap *removed_heap)
+{
+ int ret_value = 0;
+ if (atomic_dec_and_test(&removed_heap->map_count)) {
+ if (removed_heap->release_region) {
+ ret_value = removed_heap->release_region(
+ removed_heap->bus_id);
+ if (ret_value)
+ pr_err("Unable to release SMI region");
+ }
+ }
+ return ret_value;
+}
+
+void *ion_removed_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_removed_heap *removed_heap =
+ container_of(heap, struct ion_removed_heap, heap);
+ void *ret_value;
+
+ if (ion_removed_request_region(removed_heap))
+ return NULL;
+
+ if (ION_IS_CACHED(buffer->flags))
+ ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
+ else
+ ret_value = ioremap(buffer->priv_phys, buffer->size);
+
+ if (!ret_value)
+ ion_removed_release_region(removed_heap);
+ return ret_value;
+}
+
+void ion_removed_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_removed_heap *removed_heap =
+ container_of(heap, struct ion_removed_heap, heap);
+
+ __arm_iounmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+
+ ion_removed_release_region(removed_heap);
+ return;
+}
+
+int ion_removed_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct ion_removed_heap *removed_heap =
+ container_of(heap, struct ion_removed_heap, heap);
+ int ret_value = 0;
+
+ if (ion_removed_request_region(removed_heap))
+ return -EINVAL;
+
+ if (!ION_IS_CACHED(buffer->flags))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ ret_value = remap_pfn_range(vma, vma->vm_start,
+ __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+
+ if (ret_value)
+ ion_removed_release_region(removed_heap);
+ return ret_value;
+}
+
+void ion_removed_heap_unmap_user(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_removed_heap *removed_heap =
+ container_of(heap, struct ion_removed_heap, heap);
+ ion_removed_release_region(removed_heap);
+}
+
+static int ion_removed_print_debug(struct ion_heap *heap, struct seq_file *s,
+ const struct rb_root *mem_map)
+{
+ struct ion_removed_heap *removed_heap =
+ container_of(heap, struct ion_removed_heap, heap);
+
+ seq_printf(s, "total bytes currently allocated: %lx\n",
+ removed_heap->allocated_bytes);
+ seq_printf(s, "total heap size: %lx\n", removed_heap->total_size);
+
+ if (mem_map) {
+ unsigned long base = removed_heap->base;
+ unsigned long size = removed_heap->total_size;
+ unsigned long end = base+size;
+ unsigned long last_end = base;
+ struct rb_node *n;
+
+ seq_printf(s, "\nMemory Map\n");
+ seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+ "client", "start address", "end address",
+ "size (hex)");
+
+ for (n = rb_first(mem_map); n; n = rb_next(n)) {
+ struct mem_map_data *data =
+ rb_entry(n, struct mem_map_data, node);
+ const char *client_name = "(null)";
+
+ if (last_end < data->addr) {
+ phys_addr_t da;
+
+ da = data->addr-1;
+ seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
+ "FREE", &last_end, &da,
+ data->addr-last_end,
+ data->addr-last_end);
+ }
+
+ if (data->client_name)
+ client_name = data->client_name;
+
+ seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
+ client_name, &data->addr,
+ &data->addr_end,
+ data->size, data->size);
+ last_end = data->addr_end+1;
+ }
+ if (last_end < end) {
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
+ last_end, end-1, end-last_end, end-last_end);
+ }
+ }
+ return 0;
+}
+
+static struct ion_heap_ops removed_heap_ops = {
+ .allocate = ion_removed_heap_allocate,
+ .free = ion_removed_heap_free,
+ .phys = ion_removed_heap_phys,
+ .map_user = ion_removed_heap_map_user,
+ .map_kernel = ion_removed_heap_map_kernel,
+ .unmap_user = ion_removed_heap_unmap_user,
+ .unmap_kernel = ion_removed_heap_unmap_kernel,
+ .map_dma = ion_removed_heap_map_dma,
+ .unmap_dma = ion_removed_heap_unmap_dma,
+ .print_debug = ion_removed_print_debug,
+};
+
+struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_removed_heap *removed_heap;
+ int ret;
+
+ removed_heap = kzalloc(sizeof(struct ion_removed_heap), GFP_KERNEL);
+ if (!removed_heap)
+ return ERR_PTR(-ENOMEM);
+
+ removed_heap->pool = gen_pool_create(12, -1);
+ if (!removed_heap->pool) {
+ kfree(removed_heap);
+ return ERR_PTR(-ENOMEM);
+ }
+ removed_heap->base = heap_data->base;
+ ret = gen_pool_add(removed_heap->pool, removed_heap->base,
+ heap_data->size, -1);
+ if (ret < 0) {
+ gen_pool_destroy(removed_heap->pool);
+ kfree(removed_heap);
+ return ERR_PTR(-EINVAL);
+ }
+ removed_heap->heap.ops = &removed_heap_ops;
+ removed_heap->heap.type = ION_HEAP_TYPE_REMOVED;
+ removed_heap->allocated_bytes = 0;
+ removed_heap->total_size = heap_data->size;
+
+ if (heap_data->extra_data) {
+ struct ion_co_heap_pdata *extra_data =
+ heap_data->extra_data;
+
+ if (extra_data->setup_region)
+ removed_heap->bus_id = extra_data->setup_region();
+ if (extra_data->request_region)
+ removed_heap->request_region =
+ extra_data->request_region;
+ if (extra_data->release_region)
+ removed_heap->release_region =
+ extra_data->release_region;
+ }
+ return &removed_heap->heap;
+}
+
+void ion_removed_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_removed_heap *removed_heap =
+ container_of(heap, struct ion_removed_heap, heap);
+
+ gen_pool_destroy(removed_heap->pool);
+ kfree(removed_heap);
+ removed_heap = NULL;
+}
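
As a rough illustration (not a definitive implementation), board code could describe such a heap through platform data roughly as below; the id, base, and size values and the ion_co_heap_pdata callbacks are placeholders, and real targets normally reach ion_removed_heap_create() through the msm_ion heap-creation path rather than calling it directly:

/* Hypothetical platform description of a removed heap (all values are
 * placeholders). */
#include <linux/ion.h>
#include <linux/msm_ion.h>

static struct ion_co_heap_pdata example_removed_pdata = {
	/* optional setup_region/request_region/release_region hooks */
};

static struct ion_platform_heap example_removed_heap = {
	.id		= 30,			/* placeholder heap id */
	.type		= ION_HEAP_TYPE_REMOVED,
	.name		= "example_removed",
	.base		= 0x80000000,		/* placeholder physical base */
	.size		= 0x01000000,		/* placeholder size: 16MB */
	.extra_data	= &example_removed_pdata,
};

static struct ion_heap *example_removed_create(void)
{
	return ion_removed_heap_create(&example_removed_heap);
}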
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index ceb30a4..4e9f55c 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -22,46 +22,129 @@
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/iommu.h>
#include <linux/seq_file.h>
-#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>
#include <linux/dma-mapping.h>
+#include <trace/events/kmem.h>
static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;
-static unsigned int system_heap_has_outer_cache;
-static unsigned int system_heap_contig_has_outer_cache;
+
+static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+ __GFP_NOWARN | __GFP_NORETRY |
+ __GFP_NO_KSWAPD) & ~__GFP_WAIT;
+static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+ __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+ int i;
+ for (i = 0; i < num_orders; i++)
+ if (order == orders[i])
+ return i;
+ BUG();
+ return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+ return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+ struct ion_heap heap;
+ struct ion_page_pool **pools;
+};
struct page_info {
struct page *page;
- unsigned long order;
+ unsigned int order;
struct list_head list;
};
-static struct page_info *alloc_largest_available(unsigned long size,
- bool split_pages)
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long order)
{
- static unsigned int orders[] = {8, 4, 0};
+ bool cached = ion_buffer_cached(buffer);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct page *page;
+
+ if (!cached) {
+ page = ion_page_pool_alloc(pool);
+ } else {
+ struct scatterlist sg;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (order > 4)
+ gfp_flags = high_order_gfp_flags;
+ trace_alloc_pages_sys_start(gfp_flags, order);
+ page = alloc_pages(gfp_flags, order);
+ trace_alloc_pages_sys_end(gfp_flags, order);
+ if (!page) {
+ trace_alloc_pages_sys_fail(gfp_flags, order);
+ return 0;
+ }
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, PAGE_SIZE << order, 0);
+ dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
+ }
+ if (!page)
+ return 0;
+
+ if (split_pages)
+ split_page(page, order);
+ return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+ int i;
+
+ if (!cached) {
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ ion_page_pool_free(pool, page);
+ } else if (split_pages) {
+ for (i = 0; i < (1 << order); i++)
+ __free_page(page + i);
+ } else {
+ __free_pages(page, order);
+ }
+}
+
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size,
+ unsigned int max_order)
+{
struct page *page;
struct page_info *info;
int i;
- for (i = 0; i < ARRAY_SIZE(orders); i++) {
- if (size < (1 << orders[i]) * PAGE_SIZE)
+ for (i = 0; i < num_orders; i++) {
+ if (size < order_to_size(orders[i]))
continue;
- page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN | __GFP_NORETRY, orders[i]);
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_buffer_page(heap, buffer, orders[i]);
if (!page)
continue;
- if (split_pages)
- split_page(page, orders[i]);
- info = kmap(page);
+
+		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+		if (!info) {
+			free_buffer_page(heap, buffer, page, orders[i]);
+			return NULL;
+		}
info->page = page;
info->order = orders[i];
return info;
@@ -74,23 +157,27 @@
unsigned long size, unsigned long align,
unsigned long flags)
{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
struct sg_table *table;
struct scatterlist *sg;
int ret;
struct list_head pages;
struct page_info *info, *tmp_info;
int i = 0;
- long size_remaining = PAGE_ALIGN(size);
+ unsigned long size_remaining = PAGE_ALIGN(size);
+ unsigned int max_order = orders[0];
bool split_pages = ion_buffer_fault_user_mappings(buffer);
-
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
- info = alloc_largest_available(size_remaining, split_pages);
+		info = alloc_largest_available(sys_heap, buffer,
+					       size_remaining, max_order);
if (!info)
goto err;
list_add_tail(&info->list, &pages);
size_remaining -= (1 << info->order) * PAGE_SIZE;
+ max_order = info->order;
i++;
}
@@ -110,7 +197,6 @@
sg = table->sgl;
list_for_each_entry_safe(info, tmp_info, &pages, list) {
struct page *page = info->page;
-
if (split_pages) {
for (i = 0; i < (1 << info->order); i++) {
sg_set_page(sg, page + i, PAGE_SIZE, 0);
@@ -122,12 +208,9 @@
sg = sg_next(sg);
}
list_del(&info->list);
- kunmap(page);
+ kfree(info);
}
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
-
buffer->priv_virt = table;
atomic_add(size, &system_heap_allocated);
return 0;
@@ -135,28 +218,34 @@
kfree(table);
err:
list_for_each_entry(info, &pages, list) {
- if (split_pages)
- for (i = 0; i < (1 << info->order); i++)
- __free_page(info->page + i);
- else
- __free_pages(info->page, info->order);
-
- kunmap(info->page);
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
}
return -ENOMEM;
}
void ion_system_heap_free(struct ion_buffer *buffer)
{
- int i;
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table = buffer->sg_table;
+ bool cached = ion_buffer_cached(buffer);
struct scatterlist *sg;
- struct sg_table *table = buffer->priv_virt;
+ LIST_HEAD(pages);
+ int i;
+
+	/* uncached pages come from the page pools, zero them before returning
+	   for security purposes (other allocations are zeroed at alloc time) */
+ if (!cached)
+ ion_heap_buffer_zero(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
- __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
- if (buffer->sg_table)
- sg_free_table(buffer->sg_table);
- kfree(buffer->sg_table);
+ free_buffer_page(sys_heap, buffer, sg_page(sg),
+ get_order(sg_dma_len(sg)));
+ sg_free_table(table);
+ kfree(table);
atomic_sub(buffer->size, &system_heap_allocated);
}
@@ -172,156 +261,6 @@
return;
}
-void *ion_system_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct scatterlist *sg;
- int i, j;
- void *vaddr;
- pgprot_t pgprot;
- struct sg_table *table = buffer->priv_virt;
- int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- struct page **pages = kzalloc(sizeof(struct page *) * npages,
- GFP_KERNEL);
- struct page **tmp = pages;
-
- if (buffer->flags & ION_FLAG_CACHED)
- pgprot = PAGE_KERNEL;
- else
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
- struct page *page = sg_page(sg);
- BUG_ON(i >= npages);
- for (j = 0; j < npages_this_entry; j++) {
- *(tmp++) = page++;
- }
- }
- vaddr = vmap(pages, npages, VM_MAP, pgprot);
- kfree(pages);
-
- return vaddr;
-}
-
-void ion_system_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- vunmap(buffer->vaddr);
-}
-
-void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
-int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct sg_table *table = buffer->priv_virt;
- unsigned long addr = vma->vm_start;
- unsigned long offset = vma->vm_pgoff;
- struct scatterlist *sg;
- int i;
-
- if (!ION_IS_CACHED(buffer->flags)) {
- pr_err("%s: cannot map system heap uncached\n", __func__);
- return -EINVAL;
- }
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- if (offset) {
- offset--;
- continue;
- }
- remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
- sg_dma_len(sg), vma->vm_page_prot);
- addr += sg_dma_len(sg);
- }
- return 0;
-}
-
-int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- if (!vaddr)
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- else
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- if (!vaddr)
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- else
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- if (!vaddr) {
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- } else {
- dmac_flush_range(vaddr, vaddr + length);
- }
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- if (system_heap_has_outer_cache) {
- unsigned long pstart;
- struct sg_table *table = buffer->priv_virt;
- struct scatterlist *sg;
- int i;
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- pstart = page_to_phys(page);
- /*
- * If page -> phys is returning NULL, something
- * has really gone wrong...
- */
- if (!pstart) {
- WARN(1, "Could not translate virtual address to physical address\n");
- return -EINVAL;
- }
- outer_cache_op(pstart, pstart + PAGE_SIZE);
- }
- }
- return 0;
-}
-
static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *unused)
{
@@ -331,111 +270,65 @@
return 0;
}
-int ion_system_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- unsigned long extra_iova_addr;
- struct sg_table *table = buffer->priv_virt;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- if (!ION_IS_CACHED(flags))
- return -EINVAL;
-
- if (!msm_use_iommu())
- return -EINVAL;
-
- data->mapped_size = iova_length;
- extra = iova_length - buffer->size;
-
- /* Use the biggest alignment to allow bigger IOMMU mappings.
- * Use the first entry since the first entry will always be the
- * biggest entry. To take advantage of bigger mapping sizes both the
- * VA and PA addresses have to be aligned to the biggest size.
- */
- if (table->sgl->length > align)
- align = table->sgl->length;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, table->sgl,
- buffer->size, prot);
-
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- extra_iova_addr = data->iova_addr + buffer->size;
- if (extra) {
- unsigned long phys_addr = sg_phys(table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-static struct ion_heap_ops vmalloc_ops = {
+static struct ion_heap_ops system_heap_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
.map_dma = ion_system_heap_map_dma,
.unmap_dma = ion_system_heap_unmap_dma,
- .map_kernel = ion_system_heap_map_kernel,
- .unmap_kernel = ion_system_heap_unmap_kernel,
- .map_user = ion_system_heap_map_user,
- .cache_op = ion_system_heap_cache_ops,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
.print_debug = ion_system_print_debug,
- .map_iommu = ion_system_heap_map_iommu,
- .unmap_iommu = ion_system_heap_unmap_iommu,
};
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
- struct ion_heap *heap;
+ struct ion_system_heap *heap;
+ int i;
- heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
- heap->ops = &vmalloc_ops;
- heap->type = ION_HEAP_TYPE_SYSTEM;
- system_heap_has_outer_cache = pheap->has_outer_cache;
- return heap;
+ heap->heap.ops = &system_heap_ops;
+ heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+ heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+ if (!heap->pools)
+ goto err_alloc_pools;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_pool;
+ heap->pools[i] = pool;
+ }
+ return &heap->heap;
+err_create_pool:
+ for (i = 0; i < num_orders; i++)
+ if (heap->pools[i])
+ ion_page_pool_destroy(heap->pools[i]);
+ kfree(heap->pools);
+err_alloc_pools:
+ kfree(heap);
+ return ERR_PTR(-ENOMEM);
}
void ion_system_heap_destroy(struct ion_heap *heap)
{
- kfree(heap);
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ ion_page_pool_destroy(sys_heap->pools[i]);
+ kfree(sys_heap->pools);
+ kfree(sys_heap);
}
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
@@ -508,46 +401,6 @@
}
}
-int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
- struct ion_buffer *buffer, void *vaddr,
- unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- if (system_heap_contig_has_outer_cache) {
- unsigned long pstart;
-
- pstart = virt_to_phys(buffer->priv_virt) + offset;
- if (!pstart) {
- WARN(1, "Could not do virt to phys translation on %p\n",
- buffer->priv_virt);
- return -EINVAL;
- }
-
- outer_cache_op(pstart, pstart + PAGE_SIZE);
- }
-
- return 0;
-}
-
static int ion_system_contig_print_debug(struct ion_heap *heap,
struct seq_file *s,
const struct rb_root *unused)
@@ -558,84 +411,6 @@
return 0;
}
-int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- struct scatterlist *sglist = 0;
- struct page *page = 0;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- if (!ION_IS_CACHED(flags))
- return -EINVAL;
-
- if (!msm_use_iommu()) {
- data->iova_addr = virt_to_phys(buffer->vaddr);
- return 0;
- }
-
- data->mapped_size = iova_length;
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
- page = virt_to_page(buffer->vaddr);
-
- sglist = vmalloc(sizeof(*sglist));
- if (!sglist)
- goto out1;
-
- sg_init_table(sglist, 1);
- sg_set_page(sglist, page, buffer->size, 0);
-
- ret = iommu_map_range(domain, data->iova_addr, sglist,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(sglist);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- vfree(sglist);
- return ret;
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-
-out1:
- vfree(sglist);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
@@ -654,13 +429,10 @@
.phys = ion_system_contig_heap_phys,
.map_dma = ion_system_contig_heap_map_dma,
.unmap_dma = ion_system_contig_heap_unmap_dma,
- .map_kernel = ion_system_contig_heap_map_kernel,
- .unmap_kernel = ion_system_contig_heap_unmap_kernel,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_system_contig_heap_map_user,
- .cache_op = ion_system_contig_heap_cache_ops,
.print_debug = ion_system_contig_print_debug,
- .map_iommu = ion_system_contig_heap_map_iommu,
- .unmap_iommu = ion_system_heap_unmap_iommu,
};
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
@@ -672,7 +444,6 @@
return ERR_PTR(-ENOMEM);
heap->ops = &kmalloc_ops;
heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
- system_heap_contig_has_outer_cache = pheap->has_outer_cache;
return heap;
}
diff --git a/drivers/gpu/ion/msm/Makefile b/drivers/gpu/ion/msm/Makefile
index 1893405..becdb02 100644
--- a/drivers/gpu/ion/msm/Makefile
+++ b/drivers/gpu/ion/msm/Makefile
@@ -1 +1 @@
-obj-y += msm_ion.o ion_cp_common.o
+obj-y += msm_ion.o ion_cp_common.o ion_iommu_map.o
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
index 7d54cfa..58eca24 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.c
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -15,6 +15,7 @@
#include <linux/memory_alloc.h>
#include <linux/types.h>
#include <mach/scm.h>
+#include <linux/highmem.h>
#include "../ion_priv.h"
#include "ion_cp_common.h"
@@ -157,6 +158,8 @@
request.chunks.chunk_list_size = nchunks;
request.chunks.chunk_size = chunk_size;
+ kmap_flush_unused();
+ kmap_atomic_flush_unused();
return scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
&request, sizeof(request), &resp, sizeof(resp));
diff --git a/drivers/gpu/ion/msm/ion_iommu_map.c b/drivers/gpu/ion/msm/ion_iommu_map.c
new file mode 100644
index 0000000..5ce03db
--- /dev/null
+++ b/drivers/gpu/ion/msm/ion_iommu_map.c
@@ -0,0 +1,538 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/iommu.h>
+#include <linux/ion.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <mach/iommu_domains.h>
+
+enum {
+ DI_PARTITION_NUM = 0,
+ DI_DOMAIN_NUM = 1,
+ DI_MAX,
+};
+
+#define iommu_map_domain(__m) ((__m)->domain_info[1])
+#define iommu_map_partition(__m) ((__m)->domain_info[0])
+
+/**
+ * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
+ * @iova_addr - iommu virtual address
+ * @node - rb node to exist in the buffer's tree of iommu mappings
+ * @domain_info - contains the partition number and domain number
+ * domain_info[1] = domain number
+ * domain_info[0] = partition number
+ * @ref - for reference counting this mapping
+ * @mapped_size - size of the iova space mapped
+ * (may not be the same as the buffer size)
+ * @flags - iommu domain/partition specific flags.
+ *
+ * Represents a mapping of one ion buffer to a particular iommu domain
+ * and address range. There may exist other mappings of this buffer in
+ * different domains or address ranges. All mappings will have the same
+ * cacheability and security.
+ */
+struct ion_iommu_map {
+ unsigned long iova_addr;
+ struct rb_node node;
+ union {
+ int domain_info[DI_MAX];
+ uint64_t key;
+ };
+ struct ion_iommu_meta *meta;
+ struct kref ref;
+ int mapped_size;
+ unsigned long flags;
+};
+
+
+struct ion_iommu_meta {
+ struct rb_node node;
+ struct ion_handle *handle;
+ struct rb_root iommu_maps;
+ struct kref ref;
+ struct sg_table *table;
+ unsigned long size;
+ struct mutex lock;
+};
+
+static struct rb_root iommu_root;
+DEFINE_MUTEX(msm_iommu_map_mutex);
+
+static void ion_iommu_meta_add(struct ion_iommu_meta *meta)
+{
+ struct rb_root *root = &iommu_root;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_meta *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_meta, node);
+
+ if (meta->handle < entry->handle) {
+ p = &(*p)->rb_left;
+ } else if (meta->handle > entry->handle) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: handle %p already exists\n", __func__,
+ entry->handle);
+ BUG();
+ }
+ }
+
+ rb_link_node(&meta->node, parent, p);
+ rb_insert_color(&meta->node, root);
+}
+
+
+static struct ion_iommu_meta *ion_iommu_meta_lookup(struct ion_handle *handle)
+{
+ struct rb_root *root = &iommu_root;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_meta *entry = NULL;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_meta, node);
+
+ if (handle < entry->handle)
+ p = &(*p)->rb_left;
+ else if (handle > entry->handle)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ return NULL;
+}
+
+
+
+static void ion_iommu_add(struct ion_iommu_meta *meta,
+ struct ion_iommu_map *iommu)
+{
+ struct rb_node **p = &meta->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (iommu->key < entry->key) {
+ p = &(*p)->rb_left;
+ } else if (iommu->key > entry->key) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: handle %p already has mapping for domain %d and partition %d\n",
+ __func__,
+ meta->handle,
+ iommu_map_domain(iommu),
+ iommu_map_partition(iommu));
+ BUG();
+ }
+ }
+
+ rb_link_node(&iommu->node, parent, p);
+ rb_insert_color(&iommu->node, &meta->iommu_maps);
+}
+
+
+static struct ion_iommu_map *ion_iommu_lookup(
+ struct ion_iommu_meta *meta,
+ unsigned int domain_no,
+ unsigned int partition_no)
+{
+ struct rb_node **p = &meta->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+ uint64_t key = domain_no;
+ key = key << 32 | partition_no;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (key < entry->key)
+ p = &(*p)->rb_left;
+ else if (key > entry->key)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ return NULL;
+}
+
+static int ion_iommu_map_iommu(struct ion_iommu_meta *meta,
+ struct ion_iommu_map *data,
+ unsigned int domain_num,
+ unsigned int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ struct iommu_domain *domain;
+ int ret = 0;
+ unsigned long extra, size;
+ struct sg_table *table;
+ int prot = IOMMU_WRITE | IOMMU_READ;
+
+
+ size = meta->size;
+ data->mapped_size = iova_length;
+ extra = iova_length - size;
+ table = meta->table;
+
+ /* Use the biggest alignment to allow bigger IOMMU mappings.
+ * Use the first entry since the first entry will always be the
+ * biggest entry. To take advantage of bigger mapping sizes both the
+ * VA and PA addresses have to be aligned to the biggest size.
+ */
+ if (sg_dma_len(table->sgl) > align)
+ align = sg_dma_len(table->sgl);
+
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
+
+ if (ret)
+ goto out;
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ ret = iommu_map_range(domain, data->iova_addr,
+ table->sgl,
+ size, prot);
+ if (ret) {
+ pr_err("%s: could not map %lx in domain %p\n",
+ __func__, data->iova_addr, domain);
+ goto out1;
+ }
+
+ if (extra) {
+ unsigned long extra_iova_addr = data->iova_addr + size;
+ unsigned long phys_addr = sg_phys(table->sgl);
+ ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+ extra, SZ_4K, prot);
+ if (ret)
+ goto out2;
+ }
+ return ret;
+
+out2:
+ iommu_unmap_range(domain, data->iova_addr, size);
+out1:
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ size);
+
+out:
+
+ return ret;
+}
+
+static void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+ unsigned int domain_num;
+ unsigned int partition_num;
+ struct iommu_domain *domain;
+
+ BUG_ON(!msm_use_iommu());
+
+ domain_num = iommu_map_domain(data);
+ partition_num = iommu_map_partition(data);
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+ return;
+ }
+
+ iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+
+ return;
+}
+
+
+
+static struct ion_iommu_map *__ion_iommu_map(struct ion_iommu_meta *meta,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long flags,
+ unsigned long *iova)
+{
+ struct ion_iommu_map *data;
+ int ret;
+
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ iommu_map_domain(data) = domain_num;
+ iommu_map_partition(data) = partition_num;
+
+ ret = ion_iommu_map_iommu(meta, data,
+ domain_num,
+ partition_num,
+ align,
+ iova_length,
+ flags);
+
+ if (ret)
+ goto out;
+
+ kref_init(&data->ref);
+ *iova = data->iova_addr;
+ data->meta = meta;
+
+ ion_iommu_add(meta, data);
+
+ return data;
+
+out:
+ kfree(data);
+ return ERR_PTR(ret);
+}
+
+static struct ion_iommu_meta *ion_iommu_meta_create(struct ion_handle *handle,
+ struct sg_table *table,
+ unsigned long size)
+{
+ struct ion_iommu_meta *meta;
+
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+
+ if (!meta)
+ return ERR_PTR(-ENOMEM);
+
+ meta->handle = handle;
+ meta->table = table;
+ meta->size = size;
+ kref_init(&meta->ref);
+ mutex_init(&meta->lock);
+ ion_iommu_meta_add(meta);
+
+ return meta;
+}
+
+static void ion_iommu_meta_destroy(struct kref *kref)
+{
+ struct ion_iommu_meta *meta = container_of(kref, struct ion_iommu_meta,
+ ref);
+
+
+ rb_erase(&meta->node, &iommu_root);
+ kfree(meta);
+}
+
+static void ion_iommu_meta_put(struct ion_iommu_meta *meta)
+{
+ /*
+ * Need to lock here to prevent race against map/unmap
+ */
+ mutex_lock(&msm_iommu_map_mutex);
+ kref_put(&meta->ref, ion_iommu_meta_destroy);
+ mutex_unlock(&msm_iommu_map_mutex);
+}
+
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags, unsigned long iommu_flags)
+{
+ struct ion_iommu_map *iommu_map;
+ struct ion_iommu_meta *iommu_meta = NULL;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret = 0;
+ int i;
+ unsigned long size = 0;
+
+ if (IS_ERR_OR_NULL(client)) {
+ pr_err("%s: client pointer is invalid\n", __func__);
+ return -EINVAL;
+ }
+ if (IS_ERR_OR_NULL(handle)) {
+ pr_err("%s: handle pointer is invalid\n", __func__);
+ return -EINVAL;
+ }
+
+ table = ion_sg_table(client, handle);
+
+ if (IS_ERR_OR_NULL(table))
+ return PTR_ERR(table);
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ size += sg_dma_len(sg);
+
+ if (!msm_use_iommu()) {
+ unsigned long pa = sg_dma_address(table->sgl);
+ if (pa == 0)
+ pa = sg_phys(table->sgl);
+ *iova = pa;
+ *buffer_size = size;
+ }
+ /*
+ * If clients don't want a custom iova length, just use whatever
+ * the buffer size is
+ */
+ if (!iova_length)
+ iova_length = size;
+
+ if (size > iova_length) {
+ pr_debug("%s: iova length %lx is not at least buffer size %lx\n",
+ __func__, iova_length, size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (size & ~PAGE_MASK) {
+ pr_debug("%s: buffer size %lx is not aligned to %lx", __func__,
+ size, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (iova_length & ~PAGE_MASK) {
+ pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
+ iova_length, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&msm_iommu_map_mutex);
+ iommu_meta = ion_iommu_meta_lookup(handle);
+
+ if (!iommu_meta)
+ iommu_meta = ion_iommu_meta_create(handle, table, size);
+ else
+ kref_get(&iommu_meta->ref);
+
+ mutex_unlock(&msm_iommu_map_mutex);
+
+ iommu_map = ion_iommu_lookup(iommu_meta, domain_num, partition_num);
+ if (!iommu_map) {
+ iommu_map = __ion_iommu_map(iommu_meta, domain_num,
+ partition_num, align, iova_length,
+ flags, iova);
+ if (!IS_ERR_OR_NULL(iommu_map)) {
+ iommu_map->flags = iommu_flags;
+ ret = 0;
+ } else {
+ ret = PTR_ERR(iommu_map);
+ goto out;
+ }
+ } else {
+ if (iommu_map->flags != iommu_flags) {
+ pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
+ __func__, handle,
+ iommu_map->flags, iommu_flags);
+ ret = -EINVAL;
+ goto out;
+ } else if (iommu_map->mapped_size != iova_length) {
+ pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
+ __func__, handle, iommu_map->mapped_size,
+ iova_length);
+ ret = -EINVAL;
+ goto out;
+ } else {
+ kref_get(&iommu_map->ref);
+ *iova = iommu_map->iova_addr;
+ }
+ }
+ *buffer_size = size;
+ return ret;
+
+out:
+
+ ion_iommu_meta_put(iommu_meta);
+ return ret;
+}
+EXPORT_SYMBOL(ion_map_iommu);
+
+
+static void ion_iommu_map_release(struct kref *kref)
+{
+ struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
+ ref);
+ struct ion_iommu_meta *meta = map->meta;
+
+ rb_erase(&map->node, &meta->iommu_maps);
+ ion_iommu_heap_unmap_iommu(map);
+ kfree(map);
+}
+
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num)
+{
+ struct ion_iommu_map *iommu_map;
+ struct ion_iommu_meta *meta;
+
+ if (IS_ERR_OR_NULL(client)) {
+ pr_err("%s: client pointer is invalid\n", __func__);
+ return;
+ }
+ if (IS_ERR_OR_NULL(handle)) {
+ pr_err("%s: handle pointer is invalid\n", __func__);
+ return;
+ }
+
+
+ mutex_lock(&msm_iommu_map_mutex);
+ meta = ion_iommu_meta_lookup(handle);
+ if (!meta) {
+ WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
+ domain_num, partition_num, handle);
+ mutex_lock(&msm_iommu_map_mutex);
+ goto out;
+
+ }
+ mutex_unlock(&msm_iommu_map_mutex);
+
+ mutex_lock(&meta->lock);
+ iommu_map = ion_iommu_lookup(meta, domain_num, partition_num);
+
+ if (!iommu_map) {
+ WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
+ domain_num, partition_num, handle);
+ mutex_unlock(&meta->lock);
+ goto out;
+ }
+
+ kref_put(&iommu_map->ref, ion_iommu_map_release);
+ mutex_unlock(&meta->lock);
+
+ ion_iommu_meta_put(meta);
+
+out:
+ return;
+}
+EXPORT_SYMBOL(ion_unmap_iommu);
+
+
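Editorial note (not part of the patch): ion_map_iommu()/ion_unmap_iommu() are reference counted per buffer and per (domain, partition) pair, so a repeated map with identical flags and iova length just takes another kref, while a mismatch is rejected. A minimal caller sketch, assuming hypothetical EXAMPLE_DOMAIN/EXAMPLE_PARTITION identifiers and a 4K alignment:

        static int example_map_and_use(struct ion_client *client,
                                       struct ion_handle *handle)
        {
                unsigned long iova = 0;
                unsigned long buffer_size = 0;
                int ret;

                /* iova_length of 0 means "use the buffer size". */
                ret = ion_map_iommu(client, handle, EXAMPLE_DOMAIN,
                                    EXAMPLE_PARTITION, SZ_4K, 0,
                                    &iova, &buffer_size, 0, 0);
                if (ret)
                        return ret;

                /* ... hand iova to the hardware block and run the job ... */

                /* Drops the mapping kref; the meta node goes when it hits zero. */
                ion_unmap_iommu(client, handle, EXAMPLE_DOMAIN, EXAMPLE_PARTITION);
                return 0;
        }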
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index 33e6fed..f43d276 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -26,8 +26,10 @@
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
+#include <linux/dma-mapping.h>
#include <mach/ion.h>
#include <mach/msm_memtypes.h>
+#include <asm/cacheflush.h>
#include "../ion_priv.h"
#include "ion_cp_common.h"
@@ -126,7 +128,7 @@
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
const char *name)
{
- return ion_client_create(idev, heap_mask, name);
+ return ion_client_create(idev, name);
}
EXPORT_SYMBOL(msm_ion_client_create);
@@ -177,6 +179,210 @@
}
EXPORT_SYMBOL(msm_ion_do_cache_op);
+static int ion_no_pages_cache_ops(struct ion_client *client,
+ struct ion_handle *handle,
+ void *vaddr,
+ unsigned int offset, unsigned int length,
+ unsigned int cmd)
+{
+ void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
+ unsigned int size_to_vmap, total_size;
+ int i, j, ret;
+ void *ptr = NULL;
+ ion_phys_addr_t buff_phys = 0;
+ ion_phys_addr_t buff_phys_start = 0;
+ size_t buf_length = 0;
+
+ ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
+ if (ret)
+ return -EINVAL;
+
+ buff_phys = buff_phys_start;
+
+ if (!vaddr) {
+ /*
+ * Split the vmalloc space into smaller regions in
+ * order to clean and/or invalidate the cache.
+ */
+ size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
+ total_size = buf_length;
+
+ for (i = 0; i < total_size; i += size_to_vmap) {
+ size_to_vmap = min(size_to_vmap, total_size - i);
+ for (j = 0; j < 10 && size_to_vmap; ++j) {
+ ptr = ioremap(buff_phys, size_to_vmap);
+ if (ptr) {
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+ buff_phys += size_to_vmap;
+ break;
+ } else {
+ size_to_vmap >>= 1;
+ }
+ }
+ if (!ptr) {
+ pr_err("Couldn't io-remap the memory\n");
+ return -EINVAL;
+ }
+ iounmap(ptr);
+ }
+ } else {
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(vaddr, vaddr + length);
+ outer_cache_op = outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(vaddr, vaddr + length);
+ outer_cache_op = outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(vaddr, vaddr + length);
+ outer_cache_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ outer_cache_op(buff_phys_start + offset,
+ buff_phys_start + offset + length);
+
+ return 0;
+}
+
+#ifdef CONFIG_OUTER_CACHE
+static void ion_pages_outer_cache_op(void (*op)(phys_addr_t, phys_addr_t),
+ struct sg_table *table)
+{
+ unsigned long pstart;
+ struct scatterlist *sg;
+ int i;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ pstart = page_to_phys(page);
+ /*
+ * If page -> phys is returning NULL, something
+ * has really gone wrong...
+ */
+ if (!pstart) {
+ WARN(1, "Could not translate virtual address to physical address\n");
+ return;
+ }
+ op(pstart, pstart + PAGE_SIZE);
+ }
+}
+#else
+static void ion_pages_outer_cache_op(void (*op)(phys_addr_t, phys_addr_t),
+ struct sg_table *table)
+{
+
+}
+#endif
+
+static int ion_pages_cache_ops(struct ion_client *client,
+ struct ion_handle *handle,
+ void *vaddr, unsigned int offset, unsigned int length,
+ unsigned int cmd)
+{
+ void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+ struct sg_table *table = NULL;
+
+ table = ion_sg_table(client, handle);
+ if (IS_ERR_OR_NULL(table))
+ return PTR_ERR(table);
+
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ if (!vaddr)
+ dma_sync_sg_for_device(NULL, table->sgl,
+ table->nents, DMA_TO_DEVICE);
+ else
+ dmac_clean_range(vaddr, vaddr + length);
+ outer_cache_op = outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ if (!vaddr)
+ dma_sync_sg_for_cpu(NULL, table->sgl,
+ table->nents, DMA_FROM_DEVICE);
+ else
+ dmac_inv_range(vaddr, vaddr + length);
+ outer_cache_op = outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ if (!vaddr) {
+ dma_sync_sg_for_device(NULL, table->sgl,
+ table->nents, DMA_TO_DEVICE);
+ dma_sync_sg_for_cpu(NULL, table->sgl,
+ table->nents, DMA_FROM_DEVICE);
+ } else {
+ dmac_flush_range(vaddr, vaddr + length);
+ }
+ outer_cache_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ion_pages_outer_cache_op(outer_cache_op, table);
+
+ return 0;
+}
+
+int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+ void *uaddr, unsigned long offset, unsigned long len,
+ unsigned int cmd)
+{
+ int ret = -EINVAL;
+ unsigned long flags;
+ struct sg_table *table;
+ struct page *page;
+
+ ret = ion_handle_get_flags(client, handle, &flags);
+ if (ret)
+ return -EINVAL;
+
+ if (!ION_IS_CACHED(flags))
+ return 0;
+
+ table = ion_sg_table(client, handle);
+
+ if (IS_ERR_OR_NULL(table))
+ return PTR_ERR(table);
+
+ page = sg_page(table->sgl);
+
+ if (page)
+ ret = ion_pages_cache_ops(client, handle, uaddr,
+ offset, len, cmd);
+ else
+ ret = ion_no_pages_cache_ops(client, handle, uaddr,
+ offset, len, cmd);
+
+ return ret;
+
+}
+
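Editorial note: ion_do_cache_op() dispatches to one of the two helpers above based on whether the buffer's scatterlist carries struct page entries; page-less (carveout-style) buffers take the ioremap path, and uncached buffers return 0 without doing any work. A minimal kernel-side sketch, assuming the handle refers to a cached buffer:

        static int example_sync_buffer(struct ion_client *client,
                                       struct ion_handle *handle,
                                       void *kvaddr, unsigned long len,
                                       unsigned int cmd)
        {
                /*
                 * cmd is one of ION_IOC_CLEAN_CACHES, ION_IOC_INV_CACHES or
                 * ION_IOC_CLEAN_INV_CACHES; kvaddr may be NULL, in which case
                 * the sg-list (or ioremap) path above is used instead.
                 */
                return ion_do_cache_op(client, handle, kvaddr, 0, len, cmd);
        }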
static ion_phys_addr_t msm_ion_get_base(unsigned long size, int memory_type,
unsigned int align)
{
@@ -750,6 +956,75 @@
return 0;
}
+static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_heap *heap = NULL;
+
+ switch ((int)heap_data->type) {
+ case ION_HEAP_TYPE_IOMMU:
+ heap = ion_iommu_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CP:
+ heap = ion_cp_heap_create(heap_data);
+ break;
+#ifdef CONFIG_CMA
+ case ION_HEAP_TYPE_DMA:
+ heap = ion_cma_heap_create(heap_data);
+ break;
+
+ case ION_HEAP_TYPE_SECURE_DMA:
+ heap = ion_secure_cma_heap_create(heap_data);
+ break;
+#endif
+ case ION_HEAP_TYPE_REMOVED:
+ heap = ion_removed_heap_create(heap_data);
+ break;
+
+ default:
+ heap = ion_heap_create(heap_data);
+ }
+
+ if (IS_ERR_OR_NULL(heap)) {
+ pr_err("%s: error creating heap %s type %d base %pa size %u\n",
+ __func__, heap_data->name, heap_data->type,
+ &heap_data->base, heap_data->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ heap->name = heap_data->name;
+ heap->id = heap_data->id;
+ heap->priv = heap_data->priv;
+ return heap;
+}
+
+static void msm_ion_heap_destroy(struct ion_heap *heap)
+{
+ if (!heap)
+ return;
+
+ switch ((int)heap->type) {
+ case ION_HEAP_TYPE_IOMMU:
+ ion_iommu_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CP:
+ ion_cp_heap_destroy(heap);
+ break;
+#ifdef CONFIG_CMA
+ case ION_HEAP_TYPE_DMA:
+ ion_cma_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_SECURE_DMA:
+ ion_secure_cma_heap_destroy(heap);
+ break;
+#endif
+ case ION_HEAP_TYPE_REMOVED:
+ ion_removed_heap_destroy(heap);
+ break;
+ default:
+ ion_heap_destroy(heap);
+ }
+}
+
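Editorial note: msm_ion_heap_create() only special-cases the MSM heap types; anything else still falls through to the stock ion_heap_create(). A sketch of a board-level entry that would be routed to ion_iommu_heap_create(), assuming the usual ION_IOMMU_HEAP_ID/ION_IOMMU_HEAP_NAME definitions from the MSM ION headers:

        static struct ion_platform_heap example_heaps[] = {
                {
                        .id     = ION_IOMMU_HEAP_ID,
                        .type   = ION_HEAP_TYPE_IOMMU,
                        .name   = ION_IOMMU_HEAP_NAME,
                },
        };

        static struct ion_platform_data example_ion_pdata = {
                .nr     = ARRAY_SIZE(example_heaps),
                .heaps  = example_heaps,
        };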
static int msm_ion_probe(struct platform_device *pdev)
{
struct ion_platform_data *pdata;
@@ -791,7 +1066,7 @@
msm_ion_allocate(heap_data);
heap_data->has_outer_cache = pdata->has_outer_cache;
- heaps[i] = ion_heap_create(heap_data);
+ heaps[i] = msm_ion_heap_create(heap_data);
if (IS_ERR_OR_NULL(heaps[i])) {
heaps[i] = 0;
continue;
@@ -829,7 +1104,7 @@
int i;
for (i = 0; i < num_heaps; i++)
- ion_heap_destroy(heaps[i]);
+ msm_ion_heap_destroy(heaps[i]);
ion_device_destroy(idev);
kfree(heaps);
diff --git a/drivers/gpu/ion/msm_ion_priv.h b/drivers/gpu/ion/msm_ion_priv.h
new file mode 100644
index 0000000..2de4e8a
--- /dev/null
+++ b/drivers/gpu/ion/msm_ion_priv.h
@@ -0,0 +1,114 @@
+/*
+ * drivers/gpu/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_ION_PRIV_H
+#define _MSM_ION_PRIV_H
+
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+
+/**
+ * struct mem_map_data - represents information about the memory map for a heap
+ * @node: rb node used to store in the tree of mem_map_data
+ * @addr: start address of memory region.
+ * @addr_end: end address of memory region.
+ * @size: size of memory region
+ * @client_name: name of the client who owns this buffer.
+ *
+ */
+struct mem_map_data {
+ struct rb_node node;
+ ion_phys_addr_t addr;
+ ion_phys_addr_t addr_end;
+ unsigned long size;
+ const char *client_name;
+};
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
+void ion_iommu_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
+void ion_cp_heap_destroy(struct ion_heap *);
+
+#ifdef CONFIG_CMA
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *);
+void ion_secure_cma_heap_destroy(struct ion_heap *);
+#endif
+
+struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *);
+void ion_removed_heap_destroy(struct ion_heap *);
+
+#define ION_CP_ALLOCATE_FAIL -1
+#define ION_RESERVED_ALLOCATE_FAIL -1
+
+/**
+ * ion_do_cache_op - do cache operations.
+ *
+ * @client - pointer to ION client.
+ * @handle - pointer to buffer handle.
+ * @uaddr - virtual address to operate on.
+ * @offset - offset from physical address.
+ * @len - Length of data to do cache operation on.
+ * @cmd - Cache operation to perform:
+ * ION_IOC_CLEAN_CACHES
+ * ION_IOC_INV_CACHES
+ * ION_IOC_CLEAN_INV_CACHES
+ *
+ * Returns 0 on success
+ */
+int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+ void *uaddr, unsigned long offset, unsigned long len,
+ unsigned int cmd);
+
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+ unsigned long *size);
+
+void ion_mem_map_show(struct ion_heap *heap);
+
+
+
+int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
+ int version, void *data, int flags);
+
+int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle);
+
+int ion_heap_allow_secure_allocation(enum ion_heap_type type);
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type);
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type);
+
+/**
+ * ion_create_chunked_sg_table - helper function to create sg table
+ * with specified chunk size
+ * @buffer_base: The starting address used for the sg dma address
+ * @chunk_size: The size of each entry in the sg table
+ * @total_size: The total size of the sg table (i.e. the sum of the
+ * entries). This will be rounded up to the nearest
+ * multiple of `chunk_size'
+ */
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+ size_t chunk_size, size_t total_size);
+#endif /* _MSM_ION_PRIV_H */
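Editorial note: as a usage illustration of the last helper, a heap backed by a physically contiguous carveout can describe a region as fixed-size chunks; the total size is rounded up to a multiple of the chunk size. The sizes below are illustrative only:

        static struct sg_table *example_chunked_table(phys_addr_t carveout_base)
        {
                /* Describe 1MB as 16 sg entries of 64KB each. */
                return ion_create_chunked_sg_table(carveout_base, SZ_64K, SZ_1M);
        }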
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index a2f0e60..5f435f3 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -179,6 +179,7 @@
#define A3XX_CP_MEQ_ADDR 0x1DA
#define A3XX_CP_MEQ_DATA 0x1DB
#define A3XX_CP_PERFCOUNTER_SELECT 0x445
+#define A3XX_CP_WFI_PEND_CTR 0x01F5
#define A3XX_CP_HW_FAULT 0x45C
#define A3XX_CP_AHB_FAULT 0x54D
#define A3XX_CP_PROTECT_CTRL 0x45E
@@ -396,6 +397,19 @@
#define A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x3058
#define A3XX_VBIF_OUT_AXI_AOOO_EN 0x305E
#define A3XX_VBIF_OUT_AXI_AOOO 0x305F
+#define A3XX_VBIF_PERF_CNT_EN 0x3070
+#define A3XX_VBIF_PERF_CNT_CLR 0x3071
+#define A3XX_VBIF_PERF_CNT_SEL 0x3072
+#define A3XX_VBIF_PERF_CNT0_LO 0x3073
+#define A3XX_VBIF_PERF_CNT0_HI 0x3074
+#define A3XX_VBIF_PERF_CNT1_LO 0x3075
+#define A3XX_VBIF_PERF_CNT1_HI 0x3076
+#define A3XX_VBIF_PERF_PWR_CNT0_LO 0x3077
+#define A3XX_VBIF_PERF_PWR_CNT0_HI 0x3078
+#define A3XX_VBIF_PERF_PWR_CNT1_LO 0x3079
+#define A3XX_VBIF_PERF_PWR_CNT1_HI 0x307a
+#define A3XX_VBIF_PERF_PWR_CNT2_LO 0x307b
+#define A3XX_VBIF_PERF_PWR_CNT2_HI 0x307c
/* Bit flags for RBBM_CTL */
#define RBBM_RBBM_CTL_RESET_PWR_CTR0 BIT(0)
@@ -670,11 +684,11 @@
#define A305C_RBBM_CLOCK_CTL_DEFAULT 0xAAAAAAAA
#define A320_RBBM_CLOCK_CTL_DEFAULT 0xBFFFFFFF
#define A330_RBBM_CLOCK_CTL_DEFAULT 0xBFFCFFFF
-#define A330v2_RBBM_CLOCK_CTL_DEFAULT 0xBFFCFFFF
+#define A330v2_RBBM_CLOCK_CTL_DEFAULT 0xAAAAAAAA
#define A305B_RBBM_CLOCK_CTL_DEFAULT 0xAAAAAAAA
#define A330_RBBM_GPR0_CTL_DEFAULT 0x00000000
-#define A330v2_RBBM_GPR0_CTL_DEFAULT 0x00000000
+#define A330v2_RBBM_GPR0_CTL_DEFAULT 0x05515455
/* COUNTABLE FOR SP PERFCOUNTER */
#define SP_FS_FULL_ALU_INSTRUCTIONS 0x0E
@@ -682,4 +696,20 @@
#define SP0_ICL1_MISSES 0x1A
#define SP_FS_CFLOW_INSTRUCTIONS 0x0C
+/* VBIF PERFCOUNTER ENA/CLR values */
+#define VBIF_PERF_CNT_0 BIT(0)
+#define VBIF_PERF_CNT_1 BIT(1)
+#define VBIF_PERF_PWR_CNT_0 BIT(2)
+#define VBIF_PERF_PWR_CNT_1 BIT(3)
+#define VBIF_PERF_PWR_CNT_2 BIT(4)
+
+/* VBIF PERFCOUNTER SEL values */
+#define VBIF_PERF_CNT_0_SEL 0
+#define VBIF_PERF_CNT_0_SEL_MASK 0x7f
+#define VBIF_PERF_CNT_1_SEL 8
+#define VBIF_PERF_CNT_1_SEL_MASK 0x7f00
+
+/* VBIF countables */
+#define VBIF_DDR_TOTAL_CYCLES 110
+
#endif
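Editorial note: the SEL register packs one 7-bit countable per counter, counter 0 in bits 6:0 and counter 1 in bits 14:8, which is what the mask/shift pairs above encode. A small worked example (not part of the patch) of placing countable 110, VBIF_DDR_TOTAL_CYCLES, into the counter-1 field:

        static unsigned int example_vbif_sel_for_cnt1(unsigned int sel)
        {
                sel &= ~VBIF_PERF_CNT_1_SEL_MASK;                     /* clear bits 14:8 */
                sel |= VBIF_DDR_TOTAL_CYCLES << VBIF_PERF_CNT_1_SEL;  /* 110 -> 0x6e00 */
                return sel;
        }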
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 2377397..60bab32 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -30,7 +30,6 @@
#include "kgsl_cffdump.h"
#include "kgsl_sharedmem.h"
#include "kgsl_iommu.h"
-#include "kgsl_trace.h"
#include "adreno.h"
#include "adreno_pm4types.h"
@@ -532,6 +531,7 @@
result = adreno_dev->gpudev->irq_handler(adreno_dev);
+ device->pwrctrl.irq_last = 1;
if (device->requested_state == KGSL_STATE_NONE) {
if (device->pwrctrl.nap_allowed == true) {
kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
@@ -607,41 +607,15 @@
return result;
}
-static void adreno_iommu_setstate(struct kgsl_device *device,
- unsigned int context_id,
- uint32_t flags)
+static unsigned int _adreno_iommu_setstate_v0(struct kgsl_device *device,
+ unsigned int *cmds_orig,
+ unsigned int pt_val,
+ int num_iommu_units, uint32_t flags)
{
- unsigned int pt_val, reg_pt_val;
- unsigned int link[230];
- unsigned int *cmds = &link[0];
- int sizedwords = 0;
+ unsigned int reg_pt_val;
+ unsigned int *cmds = cmds_orig;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- int num_iommu_units, i;
- struct kgsl_context *context;
- struct adreno_context *adreno_ctx = NULL;
-
- /*
- * If we're idle and we don't need to use the GPU to save context
- * state, use the CPU instead of the GPU to reprogram the
- * iommu for simplicity's sake.
- */
- if (!adreno_dev->drawctxt_active || device->ftbl->isidle(device))
- return kgsl_mmu_device_setstate(&device->mmu, flags);
-
- num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
-
- context = idr_find(&device->context_idr, context_id);
- if (context == NULL)
- return;
- adreno_ctx = context->devctxt;
-
- if (kgsl_mmu_enable_clk(&device->mmu,
- KGSL_IOMMU_CONTEXT_USER))
- return;
-
- cmds += __adreno_add_idle_indirect_cmds(cmds,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ int i;
if (cpu_is_msm8960())
cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
@@ -658,8 +632,6 @@
/* Acquire GPU-CPU sync Lock here */
cmds += kgsl_mmu_sync_lock(&device->mmu, cmds);
- pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
- device->mmu.hwpagetable);
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
/*
* We need to perform the following operations for all
@@ -736,25 +708,169 @@
cmds += adreno_add_idle_cmds(adreno_dev, cmds);
- sizedwords += (cmds - &link[0]);
- if (sizedwords) {
- /* invalidate all base pointers */
- *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
- *cmds++ = 0x7fff;
- sizedwords += 2;
- /* This returns the per context timestamp but we need to
- * use the global timestamp for iommu clock disablement */
- adreno_ringbuffer_issuecmds(device, adreno_ctx,
- KGSL_CMD_FLAGS_PMODE,
- &link[0], sizedwords);
- kgsl_mmu_disable_clk_on_ts(&device->mmu,
- adreno_dev->ringbuffer.timestamp[KGSL_MEMSTORE_GLOBAL], true);
+ return cmds - cmds_orig;
+}
+
+static unsigned int _adreno_iommu_setstate_v1(struct kgsl_device *device,
+ unsigned int *cmds_orig,
+ unsigned int pt_val,
+ int num_iommu_units, uint32_t flags)
+{
+ unsigned int reg_pt_val;
+ unsigned int *cmds = cmds_orig;
+ int i;
+ unsigned int ttbr0, tlbiall, tlbstatus, tlbsync, mmu_ctrl;
+
+ for (i = 0; i < num_iommu_units; i++) {
+ reg_pt_val = (pt_val + kgsl_mmu_get_pt_lsb(&device->mmu,
+ i, KGSL_IOMMU_CONTEXT_USER));
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ mmu_ctrl = kgsl_mmu_get_reg_ahbaddr(
+ &device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL) >> 2;
+
+ ttbr0 = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TTBR0) >> 2;
+
+ if (kgsl_mmu_hw_halt_supported(&device->mmu, i)) {
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+ /*
+ * glue commands together until next
+ * WAIT_FOR_ME
+ */
+ cmds += adreno_wait_reg_eq(cmds,
+ A3XX_CP_WFI_PEND_CTR, 1, 0xFFFFFFFF, 0xF);
+
+ /* set the iommu lock bit */
+ *cmds++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmds++ = mmu_ctrl;
+ /* AND to unmask the lock bit */
+ *cmds++ =
+ ~(KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT);
+ /* OR to set the IOMMU lock bit */
+ *cmds++ =
+ KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT;
+ /* wait for smmu to lock */
+ cmds += adreno_wait_reg_eq(cmds, mmu_ctrl,
+ KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE,
+ KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE, 0xF);
+ }
+ /* set ttbr0 */
+ *cmds++ = cp_type0_packet(ttbr0, 1);
+ *cmds++ = reg_pt_val;
+ if (kgsl_mmu_hw_halt_supported(&device->mmu, i)) {
+ /* unlock the IOMMU lock */
+ *cmds++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmds++ = mmu_ctrl;
+ /* AND to unmask the lock bit */
+ *cmds++ =
+ ~(KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT);
+ /* OR with 0 so lock bit is unset */
+ *cmds++ = 0;
+ /* release all commands with wait_for_me */
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
+ *cmds++ = 0;
+ }
+ }
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ tlbiall = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TLBIALL) >> 2;
+ *cmds++ = cp_type0_packet(tlbiall, 1);
+ *cmds++ = 1;
+
+ tlbsync = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TLBSYNC) >> 2;
+ *cmds++ = cp_type0_packet(tlbsync, 1);
+ *cmds++ = 0;
+
+ tlbstatus = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TLBSTATUS) >> 2;
+ cmds += adreno_wait_reg_eq(cmds, tlbstatus, 0,
+ KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE, 0xF);
+ }
}
+ return cmds - cmds_orig;
+}
+
+static void adreno_iommu_setstate(struct kgsl_device *device,
+ unsigned int context_id,
+ uint32_t flags)
+{
+ unsigned int pt_val;
+ unsigned int link[230];
+ unsigned int *cmds = &link[0];
+ int sizedwords = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int num_iommu_units;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_ctx = NULL;
+
+ if (!adreno_dev->drawctxt_active) {
+ kgsl_mmu_device_setstate(&device->mmu, flags);
+ return;
+ }
+ num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
+
+ context = idr_find(&device->context_idr, context_id);
+ if (context == NULL)
+ return;
+
+ kgsl_context_get(context);
+
+ adreno_ctx = context->devctxt;
+
+ if (kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER))
+ return;
+
+ pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
+ device->mmu.hwpagetable);
+
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ if (msm_soc_version_supports_iommu_v0())
+ cmds += _adreno_iommu_setstate_v0(device, cmds, pt_val,
+ num_iommu_units, flags);
+ else
+ cmds += _adreno_iommu_setstate_v1(device, cmds, pt_val,
+ num_iommu_units, flags);
+
+ sizedwords += (cmds - &link[0]);
+ if (sizedwords == 0) {
+ KGSL_DRV_ERR(device, "no commands generated\n");
+ BUG();
+ }
+ /* invalidate all base pointers */
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmds++ = 0x7fff;
+ sizedwords += 2;
if (sizedwords > (ARRAY_SIZE(link))) {
KGSL_DRV_ERR(device, "Temp command buffer overflow\n");
BUG();
}
+ /*
+ * This returns the per context timestamp but we need to
+ * use the global timestamp for iommu clock disablement
+ */
+ adreno_ringbuffer_issuecmds(device, adreno_ctx, KGSL_CMD_FLAGS_PMODE,
+ &link[0], sizedwords);
+ /* timestamp based clock gating is currently unstable on iommuv1 */
+ if (msm_soc_version_supports_iommu_v0()) {
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ kgsl_mmu_disable_clk_on_ts(&device->mmu,
+ rb->timestamp[KGSL_MEMSTORE_GLOBAL], true);
+ }
+
+ kgsl_context_put(context);
}
static void adreno_gpummu_setstate(struct kgsl_device *device,
@@ -1284,6 +1400,8 @@
data->physstart = reg_val[0];
data->physend = data->physstart + reg_val[1] - 1;
+ data->iommu_halt_enable = of_property_read_bool(node,
+ "qcom,iommu-enable-halt");
data->iommu_ctx_count = 0;
@@ -1772,6 +1890,8 @@
start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
size);
kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
+ /* Ensure above read is finished before next read */
+ rmb();
if (KGSL_CMD_IDENTIFIER == val1) {
if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
start_ptr = adreno_ringbuffer_dec_wrapped(
@@ -1809,6 +1929,8 @@
temp_rb_rptr, size);
kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
temp_rb_rptr);
+ /* Ensure above read is finished before next read */
+ rmb();
if (check && ((inc && val[i] == global_eop) ||
(!inc && (val[i] ==
@@ -1855,7 +1977,8 @@
}
if (status)
KGSL_FT_ERR(rb->device,
- "Failed to find the command sequence after eop timestamp\n");
+ "Failed to find the command sequence after eop timestamp %x\n",
+ global_eop);
return status;
}
@@ -1873,6 +1996,8 @@
while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+ /* Ensure above read is finished before next read */
+ rmb();
if (check && val[i] == ib1) {
/* decrement i, i.e i = (i - 1 + 2) % 2 */
@@ -1939,6 +2064,9 @@
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
eoptimestamp));
+ /* Ensure context id and global eop ts read complete */
+ rmb();
+
ft_data->rb_buffer = vmalloc(rb->buffer_desc.size);
if (!ft_data->rb_buffer) {
KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
@@ -1963,44 +2091,38 @@
/* find the start of bad command sequence in rb */
context = idr_find(&device->context_idr, ft_data->context_id);
- /* Look for the command stream that is right after the global eop */
-
- if (!context) {
- /*
- * If there is no context then fault tolerance does not need to
- * replay anything, just reset GPU and thats it
- */
- return;
- }
ft_data->ft_policy = adreno_dev->ft_policy;
if (!ft_data->ft_policy)
ft_data->ft_policy = KGSL_FT_DEFAULT_POLICY;
+ /* Look for the command stream that is right after the global eop */
ret = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
ft_data->global_eop + 1, false);
if (ret) {
ft_data->ft_policy |= KGSL_FT_TEMP_DISABLE;
return;
- } else
+ } else {
+ ft_data->start_of_replay_cmds = rb_rptr;
ft_data->ft_policy &= ~KGSL_FT_TEMP_DISABLE;
+ }
- ft_data->start_of_replay_cmds = rb_rptr;
-
- adreno_context = context->devctxt;
- if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
- if (ft_data->ib1) {
- ret = _find_hanging_ib_sequence(rb,
- &rb_rptr, ft_data->ib1);
- if (ret) {
- KGSL_FT_ERR(device,
- "Start not found for replay IB sequence\n");
- ret = 0;
- return;
+ if (context) {
+ adreno_context = context->devctxt;
+ if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
+ if (ft_data->ib1) {
+ ret = _find_hanging_ib_sequence(rb,
+ &rb_rptr, ft_data->ib1);
+ if (ret) {
+ KGSL_FT_ERR(device,
+ "Start not found for replay IB seq\n");
+ ret = 0;
+ return;
+ }
+ ft_data->start_of_replay_cmds = rb_rptr;
+ ft_data->replay_for_snapshot = rb_rptr;
}
- ft_data->start_of_replay_cmds = rb_rptr;
- ft_data->replay_for_snapshot = rb_rptr;
}
}
}
@@ -2016,6 +2138,8 @@
&curr_global_ts,
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
eoptimestamp));
+ /* Ensure above read is finished before long ib check */
+ rmb();
/* Mark long ib as handled */
adreno_dev->long_ib = 0;
@@ -2035,9 +2159,6 @@
_adreno_ft_restart_device(struct kgsl_device *device,
struct kgsl_context *context)
{
-
- struct adreno_context *adreno_context = context->devctxt;
-
/* restart device */
if (adreno_stop(device)) {
KGSL_FT_ERR(device, "Device stop failed\n");
@@ -2054,9 +2175,11 @@
return 1;
}
- if (context)
+ if (context) {
+ struct adreno_context *adreno_context = context->devctxt;
kgsl_mmu_setstate(&device->mmu, adreno_context->pagetable,
KGSL_MEMSTORE_GLOBAL);
+ }
/* If iommu is used then we need to make sure that the iommu clocks
* are on since there could be commands in pipeline that touch iommu */
@@ -2157,13 +2280,22 @@
struct adreno_context *adreno_context = NULL;
struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
unsigned int long_ib = 0;
+ static int no_context_ft;
context = idr_find(&device->context_idr, ft_data->context_id);
if (context == NULL) {
KGSL_FT_ERR(device, "Last context unknown id:%d\n",
ft_data->context_id);
- goto play_good_cmds;
+ if (no_context_ft) {
+ /*
+ * If fault tolerance ran without a context twice in a
+ * row then just reset the GPU
+ */
+ no_context_ft = 0;
+ goto play_good_cmds;
+ }
} else {
+ no_context_ft = 0;
adreno_context = context->devctxt;
adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
/*
@@ -2173,6 +2305,7 @@
context->wait_on_invalid_ts = false;
if (!(adreno_context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
+ ft_data->status = 1;
KGSL_FT_ERR(device, "Fault tolerance not supported\n");
goto play_good_cmds;
}
@@ -2207,6 +2340,7 @@
/* If long IB detected do not attempt replay of bad cmds */
if (long_ib) {
+ ft_data->status = 1;
_adreno_debug_ft_info(device, ft_data);
goto play_good_cmds;
}
@@ -2219,7 +2353,7 @@
}
/* Do not try the reply if hang is due to a pagefault */
- if (adreno_context->pagefault) {
+ if (adreno_context && adreno_context->pagefault) {
if ((ft_data->context_id == adreno_context->id) &&
(ft_data->global_eop == adreno_context->pagefault_ts)) {
ft_data->ft_policy &= ~KGSL_FT_REPLAY;
@@ -2284,7 +2418,7 @@
/* EOF not found in RB, discard till EOF in
next IB submission */
- if (i == ft_data->bad_rb_size) {
+ if (adreno_context && (i == ft_data->bad_rb_size)) {
adreno_context->flags |= CTXT_FLAGS_SKIP_EOF;
KGSL_FT_INFO(device,
"EOF not found in RB, skip next issueib till EOF\n");
@@ -2321,8 +2455,14 @@
ft_data->good_rb_buffer, ft_data->good_rb_size);
if (ret) {
- /* If we fail here we can try to invalidate another
- * context and try fault tolerance again */
+ /*
+ * If we fail here we can try to invalidate another
+ * context and try fault tolerance again, although
+ * we will only try ft with no context once to avoid
+ * going into a continuous loop of trying ft with no context
+ */
+ if (!context)
+ no_context_ft = 1;
ret = -EAGAIN;
KGSL_FT_ERR(device, "Playing good commands unsuccessful\n");
goto done;
@@ -2762,7 +2902,7 @@
if (device->state == KGSL_STATE_ACTIVE) {
/* Is the ring buffer empty? */
GSL_RB_GET_READPTR(rb, &rb->rptr);
- if (!device->active_cnt && (rb->rptr == rb->wptr)) {
+ if (rb->rptr == rb->wptr) {
/*
* Are there interrupts pending? If so then pretend we
* are not idle - this avoids the possibility that we go
@@ -2932,7 +3072,7 @@
if (!in_interrupt())
kgsl_pre_hwaccess(device);
- trace_kgsl_regwrite(device, offsetwords, value);
+ kgsl_trace_regwrite(device, offsetwords, value);
kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
@@ -3020,7 +3160,7 @@
if (context && device->state != KGSL_STATE_SLUMBER) {
adreno_ringbuffer_issuecmds(device, context->devctxt,
- KGSL_CMD_FLAGS_NONE, NULL, 0);
+ KGSL_CMD_FLAGS_GET_INT, NULL, 0);
}
}
@@ -3155,6 +3295,31 @@
"Fault tolerance no context found\n");
}
}
+ for (i = 0; i < ft_detect_regs_count; i++) {
+ if (curr_reg_val[i] != prev_reg_val[i]) {
+ fast_hang_detected = 0;
+
+ /* Check for long IB here */
+ if ((i >=
+ LONG_IB_DETECT_REG_INDEX_START)
+ &&
+ (i <=
+ LONG_IB_DETECT_REG_INDEX_END))
+ long_ib_detected = 0;
+ }
+ }
+
+ if (fast_hang_detected) {
+ KGSL_FT_ERR(device,
+ "Proc %s, ctxt_id %d ts %d triggered fault tolerance"
+ " on global ts %d\n",
+ curr_context ? curr_context->pid_name : "",
+ curr_context ? curr_context->id : 0,
+ (kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED) + 1),
+ curr_global_ts + 1);
+ return 1;
+ }
if (curr_context != NULL) {
@@ -3164,31 +3329,6 @@
curr_context->pid_name, curr_context->ib_gpu_time_used,
curr_global_ts+1);
- for (i = 0; i < ft_detect_regs_count; i++) {
- if (curr_reg_val[i] != prev_reg_val[i]) {
- fast_hang_detected = 0;
-
- /* Check for long IB here */
- if ((i >=
- LONG_IB_DETECT_REG_INDEX_START)
- &&
- (i <=
- LONG_IB_DETECT_REG_INDEX_END))
- long_ib_detected = 0;
- }
- }
-
- if (fast_hang_detected) {
- KGSL_FT_ERR(device,
- "Proc %s, ctxt_id %d ts %d triggered fault tolerance"
- " on global ts %d\n",
- curr_context->pid_name, curr_context->id
- , (kgsl_readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED)+1),
- curr_global_ts+1);
- return 1;
- }
-
if ((long_ib_detected) &&
(!(curr_context->flags &
CTXT_FLAGS_NO_FAULT_TOLERANCE))) {
@@ -3218,10 +3358,6 @@
}
}
}
- } else {
- KGSL_FT_ERR(device,
- "Last context unknown id:%d\n",
- curr_context_id);
}
} else {
/* GPU is moving forward */
@@ -3580,15 +3716,20 @@
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int cycles;
+ unsigned int cycles = 0;
- /* Get the busy cycles counted since the counter was last reset */
- /* Calling this function also resets and restarts the counter */
+ /*
+ * Get the busy cycles counted since the counter was last reset.
+ * If we're not currently active, there shouldn't have been
+ * any cycles since the last time this function was called.
+ */
+ if (device->state == KGSL_STATE_ACTIVE)
+ cycles = adreno_dev->gpudev->busy_cycles(adreno_dev);
- cycles = adreno_dev->gpudev->busy_cycles(adreno_dev);
-
- /* In order to calculate idle you have to have run the algorithm *
- * at least once to get a start time. */
+ /*
+ * In order to calculate idle you have to have run the algorithm
+ * at least once to get a start time.
+ */
if (pwr->time != 0) {
s64 tmp = ktime_to_us(ktime_get());
stats->total_time = tmp - pwr->time;
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 90d6027..fa892b9 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -34,6 +34,7 @@
#define KGSL_CMD_FLAGS_NONE 0x00000000
#define KGSL_CMD_FLAGS_PMODE 0x00000001
#define KGSL_CMD_FLAGS_INTERNAL_ISSUE 0x00000002
+#define KGSL_CMD_FLAGS_GET_INT 0x00000004
#define KGSL_CMD_FLAGS_EOF 0x00000100
/* Command identifiers */
@@ -506,4 +507,25 @@
return cmds - start;
}
+/*
+ * adreno_wait_reg_eq() - Add a CP_WAIT_REG_EQ command
+ * @cmds: Pointer to memory where commands are to be added
+ * @addr: Regiater address to poll for
+ * @val: Value to poll for
+ * @mask: The value against which register value is masked
+ * @interval: wait interval
+ */
+static inline int adreno_wait_reg_eq(unsigned int *cmds, unsigned int addr,
+ unsigned int val, unsigned int mask,
+ unsigned int interval)
+{
+ unsigned int *start = cmds;
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_EQ, 4);
+ *cmds++ = addr;
+ *cmds++ = val;
+ *cmds++ = mask;
+ *cmds++ = interval;
+ return cmds - start;
+}
+
#endif /*__ADRENO_H */
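Editorial note: the helper just emits the five-dword CP_WAIT_REG_EQ packet; callers pair it with a CP_WAIT_FOR_IDLE when the pipeline must be drained first, as the IOMMU setstate path does. A minimal sketch mirroring that sequence (the caller must have at least seven dwords of space):

        static unsigned int example_wait_for_wfi_done(unsigned int *cmds)
        {
                unsigned int *start = cmds;

                *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
                *cmds++ = 0;
                /* Stall the CP until the WFI pending counter reads back 1. */
                cmds += adreno_wait_reg_eq(cmds, A3XX_CP_WFI_PEND_CTR, 1,
                                           0xFFFFFFFF, 0xF);
                return cmds - start;    /* dwords written */
        }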
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 3f31e36..dd9bdc3 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1840,14 +1840,14 @@
static unsigned int a2xx_irq_pending(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = &adreno_dev->dev;
- unsigned int rbbm, cp, mh;
+ unsigned int status;
- adreno_regread(device, REG_RBBM_INT_CNTL, &rbbm);
- adreno_regread(device, REG_CP_INT_CNTL, &cp);
- adreno_regread(device, MH_INTERRUPT_MASK, &mh);
+ adreno_regread(device, REG_MASTER_INT_SIGNAL, &status);
- return ((rbbm & RBBM_INT_MASK) || (cp & CP_INT_MASK) ||
- (mh & kgsl_mmu_get_int_mask())) ? 1 : 0;
+ return (status &
+ (MASTER_INT_SIGNAL__MH_INT_STAT |
+ MASTER_INT_SIGNAL__CP_INT_STAT |
+ MASTER_INT_SIGNAL__RBBM_INT_STAT)) ? 1 : 0;
}
static int a2xx_rb_init(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 13c723a..a4b3121 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2752,6 +2752,58 @@
return;
}
+static void a3xx_perfcounter_enable_vbif(struct kgsl_device *device,
+ unsigned int counter,
+ unsigned int countable)
+{
+ unsigned int in, out, bit, sel;
+
+ if (counter > 1 || countable > 0x7f)
+ return;
+
+ adreno_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
+ adreno_regread(device, A3XX_VBIF_PERF_CNT_SEL, &sel);
+
+ if (counter == 0) {
+ bit = VBIF_PERF_CNT_0;
+ sel = (sel & ~VBIF_PERF_CNT_0_SEL_MASK) | countable;
+ } else {
+ bit = VBIF_PERF_CNT_1;
+ sel = (sel & ~VBIF_PERF_CNT_1_SEL_MASK)
+ | (countable << VBIF_PERF_CNT_1_SEL);
+ }
+
+ out = in | bit;
+
+ adreno_regwrite(device, A3XX_VBIF_PERF_CNT_SEL, sel);
+
+ adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, bit);
+ adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, 0);
+
+ adreno_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
+}
+
+static void a3xx_perfcounter_enable_vbif_pwr(struct kgsl_device *device,
+ unsigned int countable)
+{
+ unsigned int in, out, bit;
+
+ adreno_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
+ if (countable == 0)
+ bit = VBIF_PERF_PWR_CNT_0;
+ else if (countable == 1)
+ bit = VBIF_PERF_PWR_CNT_1;
+ else
+ bit = VBIF_PERF_PWR_CNT_2;
+
+ out = in | bit;
+
+ adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, bit);
+ adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, 0);
+
+ adreno_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
+}
+
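Editorial note: together with the new register pairs in a3xx_reg.h, these two helpers give a straightforward way to sample VBIF activity. A sketch of routing DDR total cycles onto counter 0 and reading the 64-bit value back; the read helper is illustrative, not an existing KGSL API:

        static u64 example_read_ddr_cycles(struct kgsl_device *device)
        {
                unsigned int lo = 0, hi = 0;

                a3xx_perfcounter_enable_vbif(device, 0, VBIF_DDR_TOTAL_CYCLES);

                adreno_regread(device, A3XX_VBIF_PERF_CNT0_LO, &lo);
                adreno_regread(device, A3XX_VBIF_PERF_CNT0_HI, &hi);

                return ((u64)hi << 32) | lo;
        }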
/*
* a3xx_perfcounter_enable - Configure a performance counter for a countable
* @adreno_dev - Adreno device to configure
@@ -2769,15 +2821,19 @@
unsigned int val = 0;
struct a3xx_perfcounter_register *reg;
- if (group > ARRAY_SIZE(a3xx_perfcounter_reglist))
+ if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist))
return;
- if (counter > a3xx_perfcounter_reglist[group].count)
+ if (counter >= a3xx_perfcounter_reglist[group].count)
return;
- /* Special case - power */
+ /* Special cases */
if (group == KGSL_PERFCOUNTER_GROUP_PWR)
return a3xx_perfcounter_enable_pwr(device, countable);
+ else if (group == KGSL_PERFCOUNTER_GROUP_VBIF)
+ return a3xx_perfcounter_enable_vbif(device, counter, countable);
+ else if (group == KGSL_PERFCOUNTER_GROUP_VBIF_PWR)
+ return a3xx_perfcounter_enable_vbif_pwr(device, countable);
reg = &(a3xx_perfcounter_reglist[group].regs[counter]);
@@ -2802,7 +2858,10 @@
unsigned int lo = 0, hi = 0;
unsigned int val;
- if (group > ARRAY_SIZE(a3xx_perfcounter_reglist))
+ if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist))
+ return 0;
+
+ if (counter >= a3xx_perfcounter_reglist[group].count)
return 0;
reg = &(a3xx_perfcounter_reglist[group].regs[counter]);
@@ -3265,6 +3324,16 @@
{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_1_LO, 0 },
};
+static struct adreno_perfcount_register a3xx_perfcounters_vbif[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_CNT0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_CNT1_LO },
+};
+static struct adreno_perfcount_register a3xx_perfcounters_vbif_pwr[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT1_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT2_LO },
+};
+
static struct adreno_perfcount_group a3xx_perfcounter_groups[] = {
{ a3xx_perfcounters_cp, ARRAY_SIZE(a3xx_perfcounters_cp) },
{ a3xx_perfcounters_rbbm, ARRAY_SIZE(a3xx_perfcounters_rbbm) },
@@ -3279,6 +3348,8 @@
{ a3xx_perfcounters_sp, ARRAY_SIZE(a3xx_perfcounters_sp) },
{ a3xx_perfcounters_rb, ARRAY_SIZE(a3xx_perfcounters_rb) },
{ a3xx_perfcounters_pwr, ARRAY_SIZE(a3xx_perfcounters_pwr) },
+ { a3xx_perfcounters_vbif, ARRAY_SIZE(a3xx_perfcounters_vbif) },
+ { a3xx_perfcounters_vbif_pwr, ARRAY_SIZE(a3xx_perfcounters_vbif_pwr) },
};
static struct adreno_perfcounters a3xx_perfcounters = {
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index ef599e9..980ff13 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -93,4 +93,7 @@
adreno_dev->ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY;
debugfs_create_u32("ft_pagefault_policy", 0644, device->d_debugfs,
&adreno_dev->ft_pf_policy);
+
+ debugfs_create_u32("active_cnt", 0444, device->d_debugfs,
+ &device->active_cnt);
}
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 5396196..2249907 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -69,6 +69,8 @@
{CP_SET_PROTECTED_MODE, "ST_PRT_M"},
{CP_SET_SHADER_BASES, "ST_SHD_B"},
{CP_WAIT_FOR_IDLE, "WAIT4IDL"},
+ {CP_WAIT_FOR_ME, "WAIT4ME"},
+ {CP_WAIT_REG_EQ, "WAITRGEQ"},
};
static const struct pm_id_name pm3_nop_values[] = {
@@ -854,7 +856,12 @@
(num_iommu_units && this_cmd ==
kgsl_mmu_get_reg_gpuaddr(&device->mmu, 0,
KGSL_IOMMU_CONTEXT_USER,
- KGSL_IOMMU_CTX_TTBR0))) {
+ KGSL_IOMMU_CTX_TTBR0)) ||
+ (num_iommu_units && this_cmd == cp_type0_packet(
+ kgsl_mmu_get_reg_ahbaddr(
+ &device->mmu, 0,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TTBR0), 1))) {
KGSL_LOG_DUMP(device, "Current pagetable: %x\t"
"pagetable base: %x\n",
kgsl_mmu_get_ptname_from_ptbase(&device->mmu,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index a4bb4fa..61ea916 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -18,7 +18,6 @@
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
-#include "kgsl_trace.h"
#include "adreno.h"
#include "adreno_pm4types.h"
@@ -544,13 +543,15 @@
/*
* if the context was not created with per context timestamp
* support, we must use the global timestamp since issueibcmds
- * will be returning that one.
+ * will be returning that one, or if this is an internal
+ * submission then use the global timestamp.
*/
- if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+ if ((context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) &&
+ !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
context_id = context->id;
- if ((context && context->flags & CTXT_FLAGS_USER_GENERATED_TS) &&
- (!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))) {
+ if ((context && (context->flags & CTXT_FLAGS_USER_GENERATED_TS)) &&
+ !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
if (timestamp_cmp(rb->timestamp[context_id],
timestamp) >= 0) {
KGSL_DRV_ERR(rb->device,
@@ -574,6 +575,11 @@
/* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
total_sizedwords += context ? 13 : 0;
+ if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
+ (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
+ KGSL_CMD_FLAGS_GET_INT)))
+ total_sizedwords += 2;
+
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
@@ -584,11 +590,9 @@
total_sizedwords += 3; /* sop timestamp */
total_sizedwords += 4; /* eop timestamp */
- if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS &&
- !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+ if (KGSL_MEMSTORE_GLOBAL != context_id)
total_sizedwords += 3; /* global timestamp without cache
* flush for non-zero context */
- }
if (adreno_is_a20x(adreno_dev))
total_sizedwords += 2; /* CACHE_FLUSH */
@@ -619,12 +623,12 @@
/* always increment the global timestamp. once. */
rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;
- /* Do not update context's timestamp for internal submissions */
- if (context && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
- if (context_id == KGSL_MEMSTORE_GLOBAL)
- rb->timestamp[context->id] =
- rb->timestamp[KGSL_MEMSTORE_GLOBAL];
- else if (context->flags & CTXT_FLAGS_USER_GENERATED_TS)
+ /*
+ * If the global timestamp is in use then this submission is not
+ * using a per context timestamp
+ */
+ if (context_id != KGSL_MEMSTORE_GLOBAL) {
+ if (context->flags & CTXT_FLAGS_USER_GENERATED_TS)
rb->timestamp[context_id] = timestamp;
else
rb->timestamp[context_id]++;
@@ -695,9 +699,7 @@
KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
- if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS
- && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
-
+ if (KGSL_MEMSTORE_GLOBAL != context_id) {
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
@@ -749,6 +751,19 @@
GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
}
+ /*
+ * If per context timestamps are enabled and any of the kgsl
+ * internal commands want an INT to be generated, trigger the INT
+ */
+ if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
+ (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
+ KGSL_CMD_FLAGS_GET_INT))) {
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_INTERRUPT, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ CP_INT_CNTL__RB_INT_MASK);
+ }
+
if (adreno_is_a3xx(adreno_dev)) {
/* Dummy set-constant to trigger context rollover */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
@@ -1107,8 +1122,10 @@
ret = 0;
done:
- trace_kgsl_issueibcmds(device, context->id, ibdesc, numibs,
- *timestamp, flags, ret, drawctxt->type);
+ device->pwrctrl.irq_last = 0;
+ kgsl_trace_issueibcmds(device, context ? context->id : 0, ibdesc,
+ numibs, *timestamp, flags, ret,
+ drawctxt ? drawctxt->type : 0);
kfree(link);
return ret;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 53ef392..5275267 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -53,6 +53,46 @@
static struct ion_client *kgsl_ion_client;
+/**
+ * kgsl_trace_issueibcmds() - Call trace_kgsl_issueibcmds by proxy
+ * @device: KGSL device
+ * @id: ID of the context submitting the command
+ * @ibdesc: Pointer to the list of IB descriptors
+ * @numibs: Number of IBs in the list
+ * @timestamp: Timestamp assigned to the command batch
+ * @flags: Flags sent by the user
+ * @result: Result of the submission attempt
+ * @type: Type of context issuing the command
+ *
+ * Wrap the issueibcmds ftrace hook into a function that can be called from the
+ * GPU specific modules.
+ */
+void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
+ struct kgsl_ibdesc *ibdesc, int numibs,
+ unsigned int timestamp, unsigned int flags,
+ int result, unsigned int type)
+{
+ trace_kgsl_issueibcmds(device, id, ibdesc, numibs,
+ timestamp, flags, result, type);
+}
+EXPORT_SYMBOL(kgsl_trace_issueibcmds);
+
+/**
+ * kgsl_trace_regwrite - call regwrite ftrace function by proxy
+ * @device: KGSL device
+ * @offset: dword offset of the register being written
+ * @value: Value of the register being written
+ *
+ * Wrap the regwrite ftrace hook into a function that can be called from the
+ * GPU specific modules.
+ */
+void kgsl_trace_regwrite(struct kgsl_device *device, unsigned int offset,
+ unsigned int value)
+{
+ trace_kgsl_regwrite(device, offset, value);
+}
+EXPORT_SYMBOL(kgsl_trace_regwrite);
+
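Editorial note: these proxies exist so that adreno.c and adreno_ringbuffer.c can drop their direct include of kgsl_trace.h (removed earlier in this patch) while still emitting the same tracepoints. A trivial call-site sketch from a GPU-specific module:

        static void example_log_regwrite(struct kgsl_device *device,
                                         unsigned int offsetwords,
                                         unsigned int value)
        {
                /* Same information as before, routed through the exported proxy. */
                kgsl_trace_regwrite(device, offsetwords, value);
        }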
int kgsl_memfree_hist_init(void)
{
void *base;
@@ -413,27 +453,6 @@
kfree(context);
}
-static void kgsl_check_idle_locked(struct kgsl_device *device)
-{
- if (device->pwrctrl.nap_allowed == true &&
- device->state == KGSL_STATE_ACTIVE &&
- device->requested_state == KGSL_STATE_NONE) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
- kgsl_pwrscale_idle(device);
- if (kgsl_pwrctrl_sleep(device) != 0)
- mod_timer(&device->idle_timer,
- jiffies +
- device->pwrctrl.interval_timeout);
- }
-}
-
-static void kgsl_check_idle(struct kgsl_device *device)
-{
- mutex_lock(&device->mutex);
- kgsl_check_idle_locked(device);
- mutex_unlock(&device->mutex);
-}
-
struct kgsl_device *kgsl_get_device(int dev_idx)
{
int i;
@@ -496,13 +515,12 @@
policy_saved = device->pwrscale.policy;
device->pwrscale.policy = NULL;
kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
- /* Make sure no user process is waiting for a timestamp *
- * before supending */
- if (device->active_cnt != 0) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->suspend_gate);
- mutex_lock(&device->mutex);
- }
+ /*
+ * Make sure no user process is waiting for a timestamp
+ * before suspending.
+ */
+ kgsl_active_count_wait(device);
+
/* Don't let the timer wake us during suspended sleep. */
del_timer_sync(&device->idle_timer);
switch (device->state) {
@@ -513,6 +531,8 @@
device->ftbl->idle(device);
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
+ /* make sure power is on to stop the device */
+ kgsl_pwrctrl_enable(device);
/* Get the completion ready to be waited upon. */
INIT_COMPLETION(device->hwaccess_gate);
device->ftbl->suspend_context(device);
@@ -632,9 +652,16 @@
device->pwrctrl.restore_slumber = false;
if (device->pwrscale.policy == NULL)
kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
- kgsl_pwrctrl_wake(device);
+ if (kgsl_pwrctrl_wake(device) != 0)
+ return;
+ /*
+ * We don't have a way to go directly from
+ * a deeper sleep state to NAP, which is
+ * the desired state here.
+ */
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ kgsl_pwrctrl_sleep(device);
mutex_unlock(&device->mutex);
- kgsl_check_idle(device);
KGSL_PWR_WARN(device, "late resume end\n");
}
EXPORT_SYMBOL(kgsl_late_resume_driver);
@@ -745,7 +772,7 @@
filep->private_data = NULL;
mutex_lock(&device->mutex);
- kgsl_check_suspended(device);
+ kgsl_active_count_get(device);
while (1) {
context = idr_get_next(&device->context_idr, &next);
@@ -767,10 +794,17 @@
device->open_count--;
if (device->open_count == 0) {
+ BUG_ON(device->active_cnt > 1);
result = device->ftbl->stop(device);
kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ /*
+ * active_cnt special case: we just stopped the device,
+ * so no need to use kgsl_active_count_put()
+ */
+ device->active_cnt--;
+ } else {
+ kgsl_active_count_put(device);
}
-
mutex_unlock(&device->mutex);
kfree(dev_priv);
@@ -816,9 +850,14 @@
filep->private_data = dev_priv;
mutex_lock(&device->mutex);
- kgsl_check_suspended(device);
if (device->open_count == 0) {
+ /*
+ * active_cnt special case: we are starting up for the first
+ * time, so use this sequence instead of the kgsl_pwrctrl_wake()
+ * which will be called by kgsl_active_count_get().
+ */
+ device->active_cnt++;
kgsl_sharedmem_set(&device->memstore, 0, 0,
device->memstore.size);
@@ -831,6 +870,7 @@
goto err_freedevpriv;
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ kgsl_active_count_put(device);
}
device->open_count++;
mutex_unlock(&device->mutex);
@@ -856,10 +896,15 @@
mutex_lock(&device->mutex);
device->open_count--;
if (device->open_count == 0) {
+ /* make sure power is on to stop the device */
+ kgsl_pwrctrl_enable(device);
result = device->ftbl->stop(device);
kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
}
err_freedevpriv:
+ /* only the first open takes an active count */
+ if (device->open_count == 0)
+ device->active_cnt--;
mutex_unlock(&device->mutex);
filep->private_data = NULL;
kfree(dev_priv);
@@ -1073,10 +1118,6 @@
struct kgsl_device *device = dev_priv->device;
unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
- /* Set the active count so that suspend doesn't do the wrong thing */
-
- device->active_cnt++;
-
trace_kgsl_waittimestamp_entry(device, context_id,
kgsl_readtimestamp(device, context,
KGSL_TIMESTAMP_RETIRED),
@@ -1090,9 +1131,6 @@
KGSL_TIMESTAMP_RETIRED),
result);
- /* Fire off any pending suspend operations that are in flight */
- kgsl_active_count_put(dev_priv->device);
-
return result;
}
@@ -1887,7 +1925,6 @@
trace_kgsl_mem_map(entry, param->fd);
- kgsl_check_idle(dev_priv->device);
return result;
error_unmap:
@@ -1907,7 +1944,6 @@
}
error:
kfree(entry);
- kgsl_check_idle(dev_priv->device);
return result;
}
@@ -2035,7 +2071,6 @@
entry->memtype = KGSL_MEM_ENTRY_KERNEL;
- kgsl_check_idle(dev_priv->device);
*ret_entry = entry;
return result;
err:
@@ -2370,7 +2405,7 @@
kgsl_ioctl_cff_user_event, 0),
KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
kgsl_ioctl_timestamp_event,
- KGSL_IOCTL_LOCK),
+ KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
kgsl_ioctl_device_setproperty,
KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
@@ -2462,14 +2497,19 @@
if (lock) {
mutex_lock(&dev_priv->device->mutex);
- if (use_hw)
- kgsl_check_suspended(dev_priv->device);
+ if (use_hw) {
+ ret = kgsl_active_count_get(dev_priv->device);
+ if (ret < 0)
+ goto unlock;
+ }
}
ret = func(dev_priv, cmd, uptr);
+unlock:
if (lock) {
- kgsl_check_idle_locked(dev_priv->device);
+ if (use_hw)
+ kgsl_active_count_put(dev_priv->device);
mutex_unlock(&dev_priv->device->mutex);
}
@@ -2600,12 +2640,18 @@
return ret;
}
+static inline bool
+mmap_range_valid(unsigned long addr, unsigned long len)
+{
+ return (addr + len) > addr && (addr + len) < TASK_SIZE;
+}
+
static unsigned long
kgsl_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- unsigned long ret = 0;
+ unsigned long ret = 0, orig_len = len;
unsigned long vma_offset = pgoff << PAGE_SHIFT;
struct kgsl_device_private *dev_priv = file->private_data;
struct kgsl_process_private *private = dev_priv->process_priv;
@@ -2650,10 +2696,26 @@
if (align)
len += 1 << align;
+
+ if (!mmap_range_valid(addr, len))
+ addr = 0;
do {
ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
- if (IS_ERR_VALUE(ret))
+ if (IS_ERR_VALUE(ret)) {
+ /*
+ * If we are really fragmented, there may not be room
+ * for the alignment padding, so try again without it.
+ */
+ if (!retry && (ret == (unsigned long)-ENOMEM)
+ && (align > PAGE_SHIFT)) {
+ align = PAGE_SHIFT;
+ addr = 0;
+ len = orig_len;
+ retry = 1;
+ continue;
+ }
break;
+ }
if (align)
ret = ALIGN(ret, (1 << align));
@@ -2675,13 +2737,13 @@
* the whole address space at least once by wrapping
* back around once.
*/
- if (!retry && (addr + len >= TASK_SIZE)) {
+ if (!retry && !mmap_range_valid(addr, len)) {
addr = 0;
retry = 1;
} else {
ret = -EBUSY;
}
- } while (addr + len < TASK_SIZE);
+ } while (mmap_range_valid(addr, len));
if (IS_ERR_VALUE(ret))
KGSL_MEM_INFO(device,
@@ -3032,11 +3094,7 @@
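Editorial note: mmap_range_valid() guards both the initial hint and the wraparound retry above: the (addr + len) > addr test catches unsigned overflow, and the TASK_SIZE test catches ranges that run off the end of user space. A few worked cases (written as if placed next to the helper in kgsl.c, and assuming the common 32-bit TASK_SIZE of 0xC0000000):

        static void example_mmap_range_checks(void)
        {
                /* Fits comfortably below TASK_SIZE: accepted. */
                WARN_ON(!mmap_range_valid(0x40000000UL, 0x00100000UL));
                /* Ends at 0xC0001000, past TASK_SIZE: rejected. */
                WARN_ON(mmap_range_valid(0xBFFFF000UL, 0x00002000UL));
                /* addr + len wraps to 0x1000: rejected by the overflow test. */
                WARN_ON(mmap_range_valid(0xFFFFF000UL, 0x00002000UL));
        }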
/* For a manual dump, make sure that the system is idle */
if (manual) {
- if (device->active_cnt != 0) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->suspend_gate);
- mutex_lock(&device->mutex);
- }
+ kgsl_active_count_wait(device);
if (device->state == KGSL_STATE_ACTIVE)
kgsl_idle(device);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index c568db5..abe9100 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -226,6 +226,14 @@
void kgsl_early_suspend_driver(struct early_suspend *h);
void kgsl_late_resume_driver(struct early_suspend *h);
+void kgsl_trace_regwrite(struct kgsl_device *device, unsigned int offset,
+ unsigned int value);
+
+void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
+ struct kgsl_ibdesc *ibdesc, int numibs,
+ unsigned int timestamp, unsigned int flags,
+ int result, unsigned int type);
+
#ifdef CONFIG_MSM_KGSL_DRM
extern int kgsl_drm_init(struct platform_device *dev);
extern void kgsl_drm_exit(void);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 0d11660..ac82820 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -454,23 +454,4 @@
kref_put(&context->refcount, kgsl_context_destroy);
}
-/**
- * kgsl_active_count_put - Decrease the device active count
- * @device: Pointer to a KGSL device
- *
- * Decrease the active count for the KGSL device and trigger the suspend_gate
- * completion if it hits zero
- */
-static inline void
-kgsl_active_count_put(struct kgsl_device *device)
-{
- if (device->active_cnt == 1)
- INIT_COMPLETION(device->suspend_gate);
-
- device->active_cnt--;
-
- if (device->active_cnt == 0)
- complete(&device->suspend_gate);
-}
-
#endif /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm/kgsl_drm.c b/drivers/gpu/msm/kgsl_drm.c
index 11d6ffa..007f89a 100644
--- a/drivers/gpu/msm/kgsl_drm.c
+++ b/drivers/gpu/msm/kgsl_drm.c
@@ -18,6 +18,7 @@
#include "drm.h"
#include <linux/msm_ion.h>
+#include <linux/genlock.h>
#include "kgsl.h"
#include "kgsl_device.h"
@@ -119,6 +120,8 @@
uint32_t gpuaddr;
} bufs[DRM_KGSL_GEM_MAX_BUFFERS];
+ struct genlock_handle *glock_handle[DRM_KGSL_GEM_MAX_BUFFERS];
+
int bound;
int lockpid;
/* Put these here to avoid allocing all the time */
@@ -154,6 +157,7 @@
kgsl_gem_alloc_memory(struct drm_gem_object *obj)
{
struct drm_kgsl_gem_object *priv = obj->driver_private;
+ struct kgsl_mmu *mmu;
struct sg_table *sg_table;
struct scatterlist *s;
int index;
@@ -165,7 +169,17 @@
return 0;
if (priv->pagetable == NULL) {
- priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+ /* Hard coded to use the A2X device for MSM7X27 and MSM8625;
+ * all other targets use the A3X device
+ */
+#if defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM8625)
+ mmu = &kgsl_get_device(KGSL_DEVICE_2D0)->mmu;
+#else
+ mmu = &kgsl_get_device(KGSL_DEVICE_3D0)->mmu;
+#endif
+
+ priv->pagetable = kgsl_mmu_getpagetable(mmu,
+ KGSL_MMU_GLOBAL_PT);
if (priv->pagetable == NULL) {
DRM_ERROR("Unable to get the GPU MMU pagetable\n");
@@ -259,8 +273,7 @@
priv->memdesc.sglen++;
}
- result = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
- GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ result = kgsl_mmu_map(priv->pagetable, &priv->memdesc);
if (result) {
DRM_ERROR(
"kgsl_mmu_map failed. result = %d\n", result);
@@ -293,6 +306,7 @@
kgsl_gem_free_memory(struct drm_gem_object *obj)
{
struct drm_kgsl_gem_object *priv = obj->driver_private;
+ int index;
if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
return;
@@ -311,6 +325,11 @@
memset(&priv->memdesc, 0, sizeof(priv->memdesc));
+ for (index = 0; index < priv->bufcount; index++) {
+ if (priv->glock_handle[index])
+ genlock_put_handle(priv->glock_handle[index]);
+ }
+
kgsl_mmu_putpagetable(priv->pagetable);
priv->pagetable = NULL;
@@ -552,6 +571,7 @@
struct scatterlist *s;
int ret, handle;
unsigned long size;
+ struct kgsl_mmu *mmu;
ion_handle = ion_import_dma_buf(kgsl_drm_ion_client, args->ion_fd);
if (IS_ERR_OR_NULL(ion_handle)) {
@@ -591,7 +611,13 @@
priv->type = DRM_KGSL_GEM_TYPE_KMEM;
list_add(&priv->list, &kgsl_mem_list);
- priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+#if defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM8625)
+ mmu = &kgsl_get_device(KGSL_DEVICE_2D0)->mmu;
+#else
+ mmu = &kgsl_get_device(KGSL_DEVICE_3D0)->mmu;
+#endif
+
+ priv->pagetable = kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT);
priv->memdesc.pagetable = priv->pagetable;
@@ -619,8 +645,7 @@
priv->memdesc.sglen++;
}
- ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
- GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc);
if (ret) {
DRM_ERROR("kgsl_mmu_map failed. ret = %d\n", ret);
ion_free(kgsl_drm_ion_client,
@@ -877,6 +902,68 @@
return ret;
}
+/* Get the genlock handles based on the GEM handle
+ */
+
+int
+kgsl_gem_get_glock_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_glockinfo *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int index;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ for (index = 0; index < priv->bufcount; index++) {
+ args->glockhandle[index] = genlock_get_fd_handle(
+ priv->glock_handle[index]);
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int
+kgsl_gem_set_glock_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_glockinfo *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int index;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ for (index = 0; index < priv->bufcount; index++) {
+ priv->glock_handle[index] = genlock_get_handle_fd(
+ args->glockhandle[index]);
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
int
kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -919,6 +1006,32 @@
}
int
+kgsl_gem_get_bufcount_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_bufcount *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ args->bufcount = priv->bufcount;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int
kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1375,9 +1488,15 @@
DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_ION_FD, kgsl_gem_get_ion_fd_ioctl, 0),
DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FROM_ION,
- kgsl_gem_create_from_ion_ioctl, 0),
+ kgsl_gem_create_from_ion_ioctl, 0),
DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
- kgsl_gem_set_bufcount_ioctl, 0),
+ kgsl_gem_set_bufcount_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFCOUNT,
+ kgsl_gem_get_bufcount_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_GLOCK_HANDLES_INFO,
+ kgsl_gem_set_glock_handles_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_GLOCK_HANDLES_INFO,
+ kgsl_gem_get_glock_handles_ioctl, 0),
DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
kgsl_gem_lock_handle_ioctl, 0),
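The new KGSL_GEM_SET/GET_GLOCK_HANDLES_INFO ioctls above follow one pattern: look up the object by handle, take the struct mutex, copy one genlock handle per allocated buffer (bounded by bufcount), then drop the reference and the lock. The following is a minimal userspace-style sketch of that pattern only, using pthreads and invented types (gem_object, MAX_BUFFERS) rather than the real DRM or genlock APIs:

#include <pthread.h>

#define MAX_BUFFERS 4   /* stands in for DRM_KGSL_GEM_MAX_BUFFERS */

struct gem_object {
	pthread_mutex_t lock;          /* stands in for dev->struct_mutex */
	int bufcount;                  /* number of valid entries below */
	int glock_handle[MAX_BUFFERS]; /* one handle per buffer */
};

/* Copy the per-buffer handles out under the lock, as the GET ioctl does. */
static void gem_get_glock_handles(struct gem_object *obj, int *out, int n)
{
	int i;

	pthread_mutex_lock(&obj->lock);
	for (i = 0; i < obj->bufcount && i < n; i++)
		out[i] = obj->glock_handle[i];
	pthread_mutex_unlock(&obj->lock);
}

/* Store new handles under the same lock, as the SET ioctl does. */
static void gem_set_glock_handles(struct gem_object *obj, const int *in, int n)
{
	int i;

	pthread_mutex_lock(&obj->lock);
	for (i = 0; i < obj->bufcount && i < n; i++)
		obj->glock_handle[i] = in[i];
	pthread_mutex_unlock(&obj->lock);
}

int main(void)
{
	struct gem_object obj = { .lock = PTHREAD_MUTEX_INITIALIZER,
				  .bufcount = 2 };
	int in[MAX_BUFFERS] = { 10, 11 };
	int out[MAX_BUFFERS] = { 0 };

	gem_set_glock_handles(&obj, in, MAX_BUFFERS);
	gem_get_glock_handles(&obj, out, MAX_BUFFERS);
	return out[0] == 10 ? 0 : 1;
}

Copying under the lock means a reader never observes a half-updated handle array while another thread is replacing it.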
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index 9e9c0da..d872783 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -51,6 +51,7 @@
void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
void *owner)
{
+ int ret;
struct kgsl_event *event;
unsigned int cur_ts;
struct kgsl_context *context = NULL;
@@ -82,6 +83,16 @@
if (event == NULL)
return -ENOMEM;
+ /*
+ * Increase the active count on the device to avoid going into power
+ * saving modes while events are pending
+ */
+ ret = kgsl_active_count_get_light(device);
+ if (ret < 0) {
+ kfree(event);
+ return ret;
+ }
+
event->context = context;
event->timestamp = ts;
event->priv = priv;
@@ -112,13 +123,6 @@
} else
_add_event_to_list(&device->events, event);
- /*
- * Increase the active count on the device to avoid going into power
- * saving modes while events are pending
- */
-
- device->active_cnt++;
-
queue_work(device->work_queue, &device->ts_expired_ws);
return 0;
}
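The kgsl_events.c change above moves the active-count acquisition ahead of the point where the event becomes visible on a list, so the only cleanup needed on failure is a kfree(). Below is a small standalone sketch of that "acquire everything that can fail before publishing" ordering; refcount_get() and list_publish() are local stubs standing in for kgsl_active_count_get_light() and the driver's list handling.

#include <stdlib.h>

struct event { struct event *next; void *priv; };

static struct event *head;

/* Stub: always succeeds; stands in for kgsl_active_count_get_light(). */
static int refcount_get(void)
{
	return 0;
}

static void list_publish(struct event *ev)
{
	ev->next = head;
	head = ev;
}

static int add_event(void *priv)
{
	struct event *ev = calloc(1, sizeof(*ev));
	int ret;

	if (!ev)
		return -1;

	/*
	 * Take the reference the event logically owns *before* the event
	 * is published; on failure only the local allocation is undone.
	 */
	ret = refcount_get();
	if (ret < 0) {
		free(ev);
		return ret;
	}

	ev->priv = priv;
	list_publish(ev);	/* from here on, teardown drops the reference */
	return 0;
}

int main(void)
{
	return add_event((void *)0) ? 1 : 0;
}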
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 5cc0dff..8d071d1 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -759,7 +759,9 @@
.mmu_disable_clk_on_ts = NULL,
.mmu_get_pt_lsb = NULL,
.mmu_get_reg_gpuaddr = NULL,
+ .mmu_get_reg_ahbaddr = NULL,
.mmu_get_num_iommu_units = kgsl_gpummu_get_num_iommu_units,
+ .mmu_hw_halt_supported = NULL,
};
struct kgsl_mmu_pt_ops gpummu_pt_ops = {
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 739fcff..15f35c9 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -37,31 +37,51 @@
static struct kgsl_iommu_register_list kgsl_iommuv0_reg[KGSL_IOMMU_REG_MAX] = {
- { 0, 0, 0 }, /* GLOBAL_BASE */
- { 0x10, 0x0003FFFF, 14 }, /* TTBR0 */
- { 0x14, 0x0003FFFF, 14 }, /* TTBR1 */
- { 0x20, 0, 0 }, /* FSR */
- { 0x800, 0, 0 }, /* TLBIALL */
- { 0x820, 0, 0 }, /* RESUME */
- { 0x03C, 0, 0 }, /* TLBLKCR */
- { 0x818, 0, 0 }, /* V2PUR */
- { 0x2C, 0, 0 }, /* FSYNR0 */
- { 0x2C, 0, 0 }, /* FSYNR0 */
+ { 0, 0 }, /* GLOBAL_BASE */
+ { 0x10, 1 }, /* TTBR0 */
+ { 0x14, 1 }, /* TTBR1 */
+ { 0x20, 1 }, /* FSR */
+ { 0x800, 1 }, /* TLBIALL */
+ { 0x820, 1 }, /* RESUME */
+ { 0x03C, 1 }, /* TLBLKCR */
+ { 0x818, 1 }, /* V2PUR */
+ { 0x2C, 1 }, /* FSYNR0 */
+ { 0x2C, 1 }, /* FSYNR0 */
+ { 0, 0 }, /* TLBSYNC, not in v0 */
+ { 0, 0 }, /* TLBSTATUS, not in v0 */
+ { 0, 0 } /* IMPLDEF_MICRO_MMU_CRTL, not in v0 */
};
static struct kgsl_iommu_register_list kgsl_iommuv1_reg[KGSL_IOMMU_REG_MAX] = {
- { 0, 0, 0 }, /* GLOBAL_BASE */
- { 0x20, 0x00FFFFFF, 14 }, /* TTBR0 */
- { 0x28, 0x00FFFFFF, 14 }, /* TTBR1 */
- { 0x58, 0, 0 }, /* FSR */
- { 0x618, 0, 0 }, /* TLBIALL */
- { 0x008, 0, 0 }, /* RESUME */
- { 0, 0, 0 }, /* TLBLKCR */
- { 0, 0, 0 }, /* V2PUR */
- { 0x68, 0, 0 }, /* FSYNR0 */
- { 0x6C, 0, 0 } /* FSYNR1 */
+ { 0, 0 }, /* GLOBAL_BASE */
+ { 0x20, 1 }, /* TTBR0 */
+ { 0x28, 1 }, /* TTBR1 */
+ { 0x58, 1 }, /* FSR */
+ { 0x618, 1 }, /* TLBIALL */
+ { 0x008, 1 }, /* RESUME */
+ { 0, 0 }, /* TLBLKCR not in V1 */
+ { 0, 0 }, /* V2PUR not in V1 */
+ { 0x68, 0 }, /* FSYNR0 */
+ { 0x6C, 0 }, /* FSYNR1 */
+ { 0x7F0, 1 }, /* TLBSYNC */
+ { 0x7F4, 1 }, /* TLBSTATUS */
+ { 0x2000, 0 } /* IMPLDEF_MICRO_MMU_CRTL */
};
+static struct iommu_access_ops *iommu_access_ops;
+
+static void _iommu_lock(void)
+{
+ if (iommu_access_ops && iommu_access_ops->iommu_lock_acquire)
+ iommu_access_ops->iommu_lock_acquire();
+}
+
+static void _iommu_unlock(void)
+{
+ if (iommu_access_ops && iommu_access_ops->iommu_lock_release)
+ iommu_access_ops->iommu_lock_release();
+}
+
struct remote_iommu_petersons_spinlock kgsl_iommu_sync_lock_vars;
static int get_iommu_unit(struct device *dev, struct kgsl_mmu **mmu_out,
@@ -582,18 +602,13 @@
struct kgsl_pagetable *pt,
unsigned int pt_base)
{
- struct kgsl_iommu *iommu = mmu->priv;
struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
unsigned int domain_ptbase = iommu_pt ?
iommu_get_pt_base_addr(iommu_pt->domain) : 0;
/* Only compare the valid address bits of the pt_base */
- domain_ptbase &=
- (iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
- iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
+ domain_ptbase &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
- pt_base &=
- (iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
- iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
+ pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
return domain_ptbase && pt_base &&
(domain_ptbase == pt_base);
@@ -649,15 +664,18 @@
domain_num = msm_register_domain(&kgsl_layout);
if (domain_num >= 0) {
iommu_pt->domain = msm_get_iommu_domain(domain_num);
- iommu_set_fault_handler(iommu_pt->domain,
- kgsl_iommu_fault_handler, NULL);
- } else {
- KGSL_CORE_ERR("Failed to create iommu domain\n");
- kfree(iommu_pt);
- return NULL;
+
+ if (iommu_pt->domain) {
+ iommu_set_fault_handler(iommu_pt->domain,
+ kgsl_iommu_fault_handler, NULL);
+
+ return iommu_pt;
+ }
}
- return iommu_pt;
+ KGSL_CORE_ERR("Failed to create iommu domain\n");
+ kfree(iommu_pt);
+ return NULL;
}
/*
@@ -889,11 +907,14 @@
if (iommu->sync_lock_initialized)
return status;
- /* Get the physical address of the Lock variables */
- lock_phy_addr = (msm_iommu_lock_initialize()
+ iommu_access_ops = get_iommu_access_ops_v0();
+
+ if (iommu_access_ops && iommu_access_ops->iommu_lock_initialize)
+ lock_phy_addr = (iommu_access_ops->iommu_lock_initialize()
- MSM_SHARED_RAM_BASE + msm_shared_ram_phys);
if (!lock_phy_addr) {
+ iommu_access_ops = NULL;
KGSL_DRV_ERR(mmu->device,
"GPU CPU sync lock is not supported by kernel\n");
return -ENXIO;
@@ -911,8 +932,10 @@
iommu->sync_lock_desc.physaddr,
iommu->sync_lock_desc.size);
- if (status)
+ if (status) {
+ iommu_access_ops = NULL;
return status;
+ }
/* Flag Sync Lock is Initialized */
iommu->sync_lock_initialized = 1;
@@ -1092,6 +1115,9 @@
iommu_unit->reg_map.size);
if (ret)
goto err;
+
+ iommu_unit->iommu_halt_enable = data.iommu_halt_enable;
+ iommu_unit->ahb_base = data.physstart - mmu->device->reg_phys;
}
iommu->unit_count = pdata_dev->iommu_count;
return ret;
@@ -1118,11 +1144,9 @@
static unsigned int kgsl_iommu_get_pt_base_addr(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pt)
{
- struct kgsl_iommu *iommu = mmu->priv;
struct kgsl_iommu_pt *iommu_pt = pt->priv;
return iommu_get_pt_base_addr(iommu_pt->domain) &
- (iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
- iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
+ KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
}
/*
@@ -1241,6 +1265,30 @@
}
+/*
+ * kgsl_iommu_get_reg_ahbaddr - Returns the ahb address of the register
+ * @mmu - Pointer to mmu structure
+ * @iommu_unit - The iommu unit for which base address is requested
+ * @ctx_id - The context ID of the IOMMU ctx
+ * @reg - The register for which address is required
+ *
+ * Return - The address of the register, which can be used in a type0 packet
+ */
+static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
+ int iommu_unit, int ctx_id,
+ enum kgsl_iommu_reg_map reg)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (iommu->iommu_reg_list[reg].ctx_reg)
+ return iommu->iommu_units[iommu_unit].ahb_base +
+ iommu->iommu_reg_list[reg].reg_offset +
+ (ctx_id << KGSL_IOMMU_CTX_SHIFT) + iommu->ctx_offset;
+ else
+ return iommu->iommu_units[iommu_unit].ahb_base +
+ iommu->iommu_reg_list[reg].reg_offset;
+}
+
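kgsl_iommu_get_reg_ahbaddr() above encodes the new register-descriptor layout: a per-context register lives at unit base + register offset + (ctx_id << context shift) + version-specific context offset, while a global register is simply base + offset. A self-contained sketch of the same arithmetic follows; the shift and the sample offsets are made up for illustration (the real values come from the kgsl_iommuv0/v1 tables and KGSL_IOMMU_CTX_SHIFT).

#include <stdio.h>

#define CTX_SHIFT 12		/* assumed context stride for the example */

struct reg_desc {
	unsigned int reg_offset;
	int ctx_reg;		/* 1: per-context register, 0: global */
};

static unsigned int reg_ahbaddr(unsigned int ahb_base, unsigned int ctx_offset,
				unsigned int ctx_id, const struct reg_desc *r)
{
	if (r->ctx_reg)
		return ahb_base + r->reg_offset +
		       (ctx_id << CTX_SHIFT) + ctx_offset;
	return ahb_base + r->reg_offset;
}

int main(void)
{
	/* Hypothetical unit base and a v1-style TTBR0 descriptor. */
	struct reg_desc ttbr0 = { 0x20, 1 };

	printf("ctx1 TTBR0 at 0x%x\n",
	       reg_ahbaddr(0x40000, 0x8000, 1, &ttbr0));
	return 0;
}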
static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
/*
@@ -1265,13 +1313,15 @@
status = kgsl_set_register_map(mmu);
if (status)
goto done;
- status = kgsl_iommu_init_sync_lock(mmu);
- if (status)
- goto done;
- /* We presently do not support per-process for IOMMU-v1 */
+ /*
+ * IOMMU-v1 requires hardware halt support to do in-stream
+ * pagetable switching. This check assumes that if there are
+ * multiple units, they will be matching hardware.
+ */
mmu->pt_per_process = KGSL_MMU_USE_PER_PROCESS_PT &&
- msm_soc_version_supports_iommu_v0();
+ (msm_soc_version_supports_iommu_v0() ||
+ iommu->iommu_units[0].iommu_halt_enable);
/*
* For IOMMU per-process pagetables, the allocatable range
@@ -1294,6 +1344,9 @@
mmu->use_cpu_map = false;
}
+ status = kgsl_iommu_init_sync_lock(mmu);
+ if (status)
+ goto done;
iommu->iommu_reg_list = kgsl_iommuv0_reg;
iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V0;
@@ -1535,7 +1588,7 @@
* changing pagetables we can use this lsb value of the pagetable w/o
* having to read it again
*/
- msm_iommu_lock();
+ _iommu_lock();
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
@@ -1547,7 +1600,7 @@
}
}
kgsl_iommu_lock_rb_in_tlb(mmu);
- msm_iommu_unlock();
+ _iommu_unlock();
/* For complete CFF */
kgsl_cffdump_setmem(mmu->setstate_memory.gpuaddr +
@@ -1667,12 +1720,12 @@
for (j = 0; j < iommu_unit->dev_count; j++) {
if (iommu_unit->dev[j].fault) {
kgsl_iommu_enable_clk(mmu, j);
- msm_iommu_lock();
+ _iommu_lock();
KGSL_IOMMU_SET_CTX_REG(iommu,
iommu_unit,
iommu_unit->dev[j].ctx_id,
RESUME, 1);
- msm_iommu_unlock();
+ _iommu_unlock();
iommu_unit->dev[j].fault = 0;
}
}
@@ -1732,9 +1785,7 @@
KGSL_IOMMU_CONTEXT_USER,
TTBR0);
kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
- return pt_base &
- (iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
- iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
+ return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
}
/*
@@ -1764,15 +1815,14 @@
return;
}
/* Mask off the lsb of the pt base address since lsb will not change */
- pt_base &= (iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
- iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
+ pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
/* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
if (msm_soc_version_supports_iommu_v0())
kgsl_idle(mmu->device);
/* Acquire GPU-CPU sync Lock here */
- msm_iommu_lock();
+ _iommu_lock();
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
if (!msm_soc_version_supports_iommu_v0())
@@ -1795,15 +1845,42 @@
}
/* Flush tlb */
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ unsigned long wait_for_flush;
for (i = 0; i < iommu->unit_count; i++) {
KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]),
KGSL_IOMMU_CONTEXT_USER, TLBIALL, 1);
mb();
+ /*
+ * Wait for the flush to complete by polling the flush
+ * status bit of the TLBSTATUS register for no more than
+ * 2 seconds. After that, give up; at that point the SMMU
+ * h/w may be stuck and will eventually cause the GPU to
+ * hang or bring the system down.
+ */
+ if (!msm_soc_version_supports_iommu_v0()) {
+ wait_for_flush = jiffies +
+ msecs_to_jiffies(2000);
+ KGSL_IOMMU_SET_CTX_REG(iommu,
+ (&iommu->iommu_units[i]),
+ KGSL_IOMMU_CONTEXT_USER, TLBSYNC, 0);
+ while (KGSL_IOMMU_GET_CTX_REG(iommu,
+ (&iommu->iommu_units[i]),
+ KGSL_IOMMU_CONTEXT_USER, TLBSTATUS) &
+ (KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE)) {
+ if (time_after(jiffies,
+ wait_for_flush)) {
+ KGSL_DRV_ERR(mmu->device,
+ "Wait limit reached for IOMMU tlb flush\n");
+ break;
+ }
+ cpu_relax();
+ }
+ }
}
}
/* Release GPU-CPU sync Lock here */
- msm_iommu_unlock();
+ _iommu_unlock();
/* Disable smmu clock */
kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
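The TLB-flush path above issues a TLBSYNC and then spins on the TLBSTATUS SACTIVE bit, giving up after two seconds rather than hanging forever if the SMMU is stuck. Here is a userspace sketch of the same bounded-poll idea, using clock_gettime() for the deadline and a hypothetical hw_flush_active() predicate in place of the register read:

#include <stdio.h>
#include <time.h>

/* Hypothetical status check standing in for reading TLBSTATUS.SACTIVE. */
static int hw_flush_active(void)
{
	static int countdown = 1000;
	return --countdown > 0;
}

/* Poll until the flush completes or the timeout expires; 0 on success. */
static int wait_for_flush(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (hw_flush_active()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
				  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms > timeout_ms) {
			fprintf(stderr, "wait limit reached for flush\n");
			return -1;
		}
		/* cpu_relax() equivalent: back off briefly between polls. */
		nanosleep(&(struct timespec){ .tv_nsec = 1000 }, NULL);
	}
	return 0;
}

int main(void)
{
	return wait_for_flush(2000) ? 1 : 0;
}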
@@ -1816,8 +1893,7 @@
* @ctx_id - The context ID of the IOMMU ctx
* @reg - The register for which address is required
*
- * Return - The number of iommu units which is also the number of register
- * mapped descriptor arrays which the out parameter will have
+ * Return - The GPU address of the register, which can be used in a type3 packet
*/
static unsigned int kgsl_iommu_get_reg_gpuaddr(struct kgsl_mmu *mmu,
int iommu_unit, int ctx_id, int reg)
@@ -1826,10 +1902,25 @@
if (KGSL_IOMMU_GLOBAL_BASE == reg)
return iommu->iommu_units[iommu_unit].reg_map.gpuaddr;
- else
+
+ if (iommu->iommu_reg_list[reg].ctx_reg)
return iommu->iommu_units[iommu_unit].reg_map.gpuaddr +
iommu->iommu_reg_list[reg].reg_offset +
(ctx_id << KGSL_IOMMU_CTX_SHIFT) + iommu->ctx_offset;
+ else
+ return iommu->iommu_units[iommu_unit].reg_map.gpuaddr +
+ iommu->iommu_reg_list[reg].reg_offset;
+}
+/*
+ * kgsl_iommu_hw_halt_supported - Returns whether IOMMU halt command is
+ * supported
+ * @mmu - Pointer to mmu structure
+ * @iommu_unit - The iommu unit for which the property is requested
+ */
+static int kgsl_iommu_hw_halt_supported(struct kgsl_mmu *mmu, int iommu_unit)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ return iommu->iommu_units[iommu_unit].iommu_halt_enable;
}
static int kgsl_iommu_get_num_iommu_units(struct kgsl_mmu *mmu)
@@ -1851,9 +1942,11 @@
.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
.mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
.mmu_get_reg_gpuaddr = kgsl_iommu_get_reg_gpuaddr,
+ .mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr,
.mmu_get_num_iommu_units = kgsl_iommu_get_num_iommu_units,
.mmu_pt_equal = kgsl_iommu_pt_equal,
.mmu_get_pt_base_addr = kgsl_iommu_get_pt_base_addr,
+ .mmu_hw_halt_supported = kgsl_iommu_hw_halt_supported,
/* These callbacks will be set on some chipsets */
.mmu_setup_pt = NULL,
.mmu_cleanup_pt = NULL,
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index c09bc4b..b1b83c0 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -46,6 +46,16 @@
#define KGSL_IOMMU_V1_FSYNR0_WNR_MASK 0x00000001
#define KGSL_IOMMU_V1_FSYNR0_WNR_SHIFT 4
+/* TTBR0 register fields */
+#define KGSL_IOMMU_CTX_TTBR0_ADDR_MASK 0xFFFFC000
+
+/* TLBSTATUS register fields */
+#define KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE BIT(0)
+
+/* IMPLDEF_MICRO_MMU_CTRL register fields */
+#define KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT BIT(2)
+#define KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE BIT(3)
+
enum kgsl_iommu_reg_map {
KGSL_IOMMU_GLOBAL_BASE = 0,
KGSL_IOMMU_CTX_TTBR0,
@@ -57,15 +67,31 @@
KGSL_IOMMU_CTX_V2PUR,
KGSL_IOMMU_CTX_FSYNR0,
KGSL_IOMMU_CTX_FSYNR1,
+ KGSL_IOMMU_CTX_TLBSYNC,
+ KGSL_IOMMU_CTX_TLBSTATUS,
+ KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL,
KGSL_IOMMU_REG_MAX
};
struct kgsl_iommu_register_list {
unsigned int reg_offset;
- unsigned int reg_mask;
- unsigned int reg_shift;
+ int ctx_reg;
};
+#ifdef CONFIG_MSM_IOMMU
+extern struct iommu_access_ops iommu_access_ops_v0;
+
+static inline struct iommu_access_ops *get_iommu_access_ops_v0(void)
+{
+ return &iommu_access_ops_v0;
+}
+#else
+static inline struct iommu_access_ops *get_iommu_access_ops_v0(void)
+{
+ return NULL;
+}
+#endif
+
/*
* Max number of iommu units that the gpu core can have
* On APQ8064, KGSL can control a maximum of 2 IOMMU units.
@@ -91,10 +117,8 @@
iommu->ctx_offset)
/* Gets the lsb value of pagetable */
-#define KGSL_IOMMMU_PT_LSB(iommu, pt_val) \
- (pt_val & \
- ~(iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask << \
- iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift))
+#define KGSL_IOMMMU_PT_LSB(iommu, pt_val) \
+ (pt_val & ~(KGSL_IOMMU_CTX_TTBR0_ADDR_MASK))
/* offset at which a nop command is placed in setstate_memory */
#define KGSL_IOMMU_SETSTATE_NOP_OFFSET 1024
@@ -130,11 +154,18 @@
* @dev_count: Number of IOMMU contexts that are valid in the previous field
* @reg_map: Memory descriptor which holds the mapped address of this IOMMU
* units register range
+ * @ahb_base: The base address at which this IOMMU unit's registers can be
+ * accessed over the AHB bus
+ * @iommu_halt_enable: Valid only on IOMMU-v1, when set indicates that the iommu
+ * unit supports halting of the IOMMU, which can be enabled while programming
+ * the IOMMU registers for synchronization
*/
struct kgsl_iommu_unit {
struct kgsl_iommu_device dev[KGSL_IOMMU_MAX_DEVS_PER_UNIT];
unsigned int dev_count;
struct kgsl_memdesc reg_map;
+ unsigned int ahb_base;
+ int iommu_halt_enable;
};
/*
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 4e95373..6e41707 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -610,6 +610,7 @@
* kgsl_pwrctrl_irq() is called
*/
}
+EXPORT_SYMBOL(kgsl_mh_start);
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index d7d9516..ef5b0f4 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -14,7 +14,7 @@
#define __KGSL_MMU_H
#include <mach/iommu.h>
-
+#include "kgsl_iommu.h"
/*
* These defines control the address range for allocations that
* are mapped into all pagetables.
@@ -150,6 +150,9 @@
enum kgsl_iommu_context_id ctx_id);
unsigned int (*mmu_get_reg_gpuaddr)(struct kgsl_mmu *mmu,
int iommu_unit_num, int ctx_id, int reg);
+ unsigned int (*mmu_get_reg_ahbaddr)(struct kgsl_mmu *mmu,
+ int iommu_unit_num, int ctx_id,
+ enum kgsl_iommu_reg_map reg);
int (*mmu_get_num_iommu_units)(struct kgsl_mmu *mmu);
int (*mmu_pt_equal) (struct kgsl_mmu *mmu,
struct kgsl_pagetable *pt,
@@ -165,6 +168,7 @@
(struct kgsl_mmu *mmu, unsigned int *cmds);
unsigned int (*mmu_sync_unlock)
(struct kgsl_mmu *mmu, unsigned int *cmds);
+ int (*mmu_hw_halt_supported)(struct kgsl_mmu *mmu, int iommu_unit_num);
};
struct kgsl_mmu_pt_ops {
@@ -337,6 +341,29 @@
return 0;
}
+/*
+ * kgsl_mmu_get_reg_ahbaddr() - Calls the mmu specific function pointer to
+ * return the address that the GPU can use to access a register
+ * @mmu: Pointer to the device mmu
+ * @iommu_unit_num: There can be multiple iommu units used for graphics.
+ * This parameter is an index to the iommu unit being used
+ * @ctx_id: The context id within the iommu unit
+ * @reg: Register whose address is to be returned
+ *
+ * Returns the AHB address of reg, or 0 if not supported
+ */
+static inline unsigned int kgsl_mmu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
+ int iommu_unit_num,
+ int ctx_id,
+ enum kgsl_iommu_reg_map reg)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_reg_ahbaddr)
+ return mmu->mmu_ops->mmu_get_reg_ahbaddr(mmu, iommu_unit_num,
+ ctx_id, reg);
+ else
+ return 0;
+}
+
static inline int kgsl_mmu_get_num_iommu_units(struct kgsl_mmu *mmu)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_num_iommu_units)
@@ -346,6 +373,22 @@
}
/*
+ * kgsl_mmu_hw_halt_supported() - Runtime check for iommu hw halt
+ * @mmu: the mmu
+ * @iommu_unit_num: index of the iommu unit to check
+ *
+ * Returns non-zero if the iommu supports hw halt,
+ * 0 if not.
+ */
+static inline int kgsl_mmu_hw_halt_supported(struct kgsl_mmu *mmu,
+ int iommu_unit_num)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_hw_halt_supported)
+ return mmu->mmu_ops->mmu_hw_halt_supported(mmu, iommu_unit_num);
+ else
+ return 0;
+}
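Both new kgsl_mmu.h helpers above follow the existing dispatch convention: check that the ops table and the specific callback are non-NULL, call through if so, and otherwise fall back to a safe default (0), so GPUMMU targets that leave the callback NULL keep working. A minimal sketch of that optional-callback pattern with invented mmu/mmu_ops types:

#include <stdio.h>

struct mmu;

struct mmu_ops {
	/* Optional: left NULL on hardware without halt support. */
	int (*hw_halt_supported)(struct mmu *mmu, int unit);
};

struct mmu {
	const struct mmu_ops *ops;
};

static int iommu_halt_supported(struct mmu *mmu, int unit)
{
	(void)mmu; (void)unit;
	return 1;
}

static const struct mmu_ops gpummu_ops = { NULL };
static const struct mmu_ops iommu_ops  = { iommu_halt_supported };

/* Wrapper: call through only if the callback exists, else a safe default. */
static int mmu_hw_halt_supported(struct mmu *mmu, int unit)
{
	if (mmu->ops && mmu->ops->hw_halt_supported)
		return mmu->ops->hw_halt_supported(mmu, unit);
	return 0;
}

int main(void)
{
	struct mmu gpummu = { &gpummu_ops };
	struct mmu iommu  = { &iommu_ops };

	printf("gpummu halt: %d, iommu halt: %d\n",
	       mmu_hw_halt_supported(&gpummu, 0),
	       mmu_hw_halt_supported(&iommu, 0));
	return 0;
}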
+
+/*
* kgsl_mmu_is_perprocess() - Runtime check for per-process
* pagetables.
* @mmu: the mmu
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 2f8d93e..b124257 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -18,6 +18,7 @@
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <linux/ktime.h>
+#include <linux/delay.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
@@ -33,6 +34,16 @@
#define UPDATE_BUSY_VAL 1000000
#define UPDATE_BUSY 50
+/*
+ * Expected delay for post-interrupt processing on A3xx.
+ * The delay may be longer; gradually increase the delay
+ * to compensate. If the GPU isn't done by the max delay,
+ * it's working on something other than just the final
+ * command sequence, so stop waiting for it to be idle.
+ */
+#define INIT_UDELAY 200
+#define MAX_UDELAY 2000
+
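INIT_UDELAY and MAX_UDELAY above implement an exponential backoff: start with a short wait after the interrupt, double it each time the GPU is still busy, and stop retrying once the cap is reached. Below is a standalone sketch of that backoff loop, with usleep() and a hypothetical gpu_try_sleep() standing in for kgsl_pwrctrl_sleep():

#include <stdio.h>
#include <unistd.h>

#define INIT_UDELAY 200
#define MAX_UDELAY  2000

/* Hypothetical: returns 0 once the GPU is idle enough to power down. */
static int gpu_try_sleep(void)
{
	static int busy_polls = 3;
	return busy_polls-- > 0 ? -1 : 0;
}

int main(void)
{
	int delay = INIT_UDELAY;

	while (delay < MAX_UDELAY) {
		if (gpu_try_sleep() == 0) {
			puts("GPU idle, clocks off");
			return 0;
		}
		/* Still busy: wait a bit longer before the next attempt. */
		usleep(delay);
		delay *= 2;
	}
	puts("GPU stayed busy past the max delay, stop waiting");
	return 0;
}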
struct clk_pair {
const char *name;
uint map;
@@ -59,6 +70,10 @@
.name = "mem_iface_clk",
.map = KGSL_CLK_MEM_IFACE,
},
+ {
+ .name = "alt_mem_iface_clk",
+ .map = KGSL_CLK_ALT_MEM_IFACE,
+ },
};
/* Update the elapsed time at a particular clock level
@@ -1055,8 +1070,18 @@
pwr->power_flags = 0;
}
+/**
+ * kgsl_idle_check() - Work function for GPU interrupts and idle timeouts.
+ * @device: The device
+ *
+ * This function is called for work that is queued by the interrupt
+ * handler or the idle timer. It attempts to transition to a clocks
+ * off state if the active_cnt is 0 and the hardware is idle.
+ */
void kgsl_idle_check(struct work_struct *work)
{
+ int delay = INIT_UDELAY;
+ int requested_state;
struct kgsl_device *device = container_of(work, struct kgsl_device,
idle_check_ws);
WARN_ON(device == NULL);
@@ -1064,21 +1089,52 @@
return;
mutex_lock(&device->mutex);
- if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
- kgsl_pwrscale_idle(device);
- if (kgsl_pwrctrl_sleep(device) != 0) {
+ kgsl_pwrscale_idle(device);
+
+ if (device->state == KGSL_STATE_ACTIVE
+ || device->state == KGSL_STATE_NAP) {
+ /*
+ * If no user is explicitly trying to use the GPU
+ * (active_cnt is zero), then loop with increasing delay,
+ * waiting for the GPU to become idle.
+ */
+ while (!device->active_cnt && delay < MAX_UDELAY) {
+ requested_state = device->requested_state;
+ if (!kgsl_pwrctrl_sleep(device))
+ break;
+ /*
+ * If no new commands have been issued since the
+ * last interrupt, stay in this loop waiting for
+ * the GPU to become idle.
+ */
+ if (!device->pwrctrl.irq_last)
+ break;
+ kgsl_pwrctrl_request_state(device, requested_state);
+ mutex_unlock(&device->mutex);
+ udelay(delay);
+ delay *= 2;
+ mutex_lock(&device->mutex);
+ }
+
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ if (device->state == KGSL_STATE_ACTIVE) {
mod_timer(&device->idle_timer,
jiffies +
device->pwrctrl.interval_timeout);
- /* If the GPU has been too busy to sleep, make sure *
- * that is acurately reflected in the % busy numbers. */
+ /*
+ * If the GPU has been too busy to sleep, make sure
+ * that is accurately reflected in the % busy numbers.
+ */
device->pwrctrl.clk_stats.no_nap_cnt++;
if (device->pwrctrl.clk_stats.no_nap_cnt >
UPDATE_BUSY) {
kgsl_pwrctrl_busy_time(device, true);
device->pwrctrl.clk_stats.no_nap_cnt = 0;
}
+ } else {
+ device->pwrctrl.irq_last = 0;
}
} else if (device->state & (KGSL_STATE_HUNG |
KGSL_STATE_DUMP_AND_FT)) {
@@ -1087,6 +1143,7 @@
mutex_unlock(&device->mutex);
}
+EXPORT_SYMBOL(kgsl_idle_check);
void kgsl_timer(unsigned long data)
{
@@ -1104,54 +1161,26 @@
}
}
+
+/**
+ * kgsl_pre_hwaccess - Enforce preconditions for touching registers
+ * @device: The device
+ *
+ * This function ensures that the correct lock is held and that the GPU
+ * clock is on immediately before a register is read or written. Note
+ * that this function does not check active_cnt because the registers
+ * must be accessed during device start and stop, when the active_cnt
+ * may legitimately be 0.
+ */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
+ /* In order to touch a register you must hold the device mutex... */
BUG_ON(!mutex_is_locked(&device->mutex));
- switch (device->state) {
- case KGSL_STATE_ACTIVE:
- return;
- case KGSL_STATE_NAP:
- case KGSL_STATE_SLEEP:
- case KGSL_STATE_SLUMBER:
- kgsl_pwrctrl_wake(device);
- break;
- case KGSL_STATE_SUSPEND:
- kgsl_check_suspended(device);
- break;
- case KGSL_STATE_INIT:
- case KGSL_STATE_HUNG:
- case KGSL_STATE_DUMP_AND_FT:
- if (test_bit(KGSL_PWRFLAGS_CLK_ON,
- &device->pwrctrl.power_flags))
- break;
- else
- KGSL_PWR_ERR(device,
- "hw access while clocks off from state %d\n",
- device->state);
- break;
- default:
- KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
- device->state);
- break;
- }
+ /* and have the clock on! */
+ BUG_ON(!test_bit(KGSL_PWRFLAGS_CLK_ON, &device->pwrctrl.power_flags));
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
-void kgsl_check_suspended(struct kgsl_device *device)
-{
- if (device->requested_state == KGSL_STATE_SUSPEND ||
- device->state == KGSL_STATE_SUSPEND) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->hwaccess_gate);
- mutex_lock(&device->mutex);
- } else if (device->state == KGSL_STATE_DUMP_AND_FT) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->ft_gate);
- mutex_lock(&device->mutex);
- } else if (device->state == KGSL_STATE_SLUMBER)
- kgsl_pwrctrl_wake(device);
-}
-
static int
_nap(struct kgsl_device *device)
{
@@ -1230,6 +1259,8 @@
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
del_timer_sync(&device->idle_timer);
+ /* make sure power is on to stop the device */
+ kgsl_pwrctrl_enable(device);
device->ftbl->suspend_context(device);
device->ftbl->stop(device);
_sleep_accounting(device);
@@ -1278,9 +1309,9 @@
/******************************************************************/
/* Caller must hold the device mutex. */
-void kgsl_pwrctrl_wake(struct kgsl_device *device)
+int kgsl_pwrctrl_wake(struct kgsl_device *device)
{
- int status;
+ int status = 0;
unsigned int context_id;
unsigned int state = device->state;
unsigned int ts_processed = 0xdeaddead;
@@ -1329,8 +1360,10 @@
KGSL_PWR_WARN(device, "unhandled state %s\n",
kgsl_pwrstate_to_str(device->state));
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ status = -EINVAL;
break;
}
+ return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
@@ -1396,3 +1429,124 @@
}
EXPORT_SYMBOL(kgsl_pwrstate_to_str);
+
+/**
+ * kgsl_active_count_get() - Increase the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Increase the active count for the KGSL device and turn on
+ * clocks if this is the first reference. Code paths that need
+ * to touch the hardware or wait for the hardware to complete
+ * an operation must hold an active count reference until they
+ * are finished. An error code will be returned if waking the
+ * device fails. The device mutex must be held while *calling
+ * this function.
+ */
+int kgsl_active_count_get(struct kgsl_device *device)
+{
+ int ret = 0;
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (device->active_cnt == 0) {
+ if (device->requested_state == KGSL_STATE_SUSPEND ||
+ device->state == KGSL_STATE_SUSPEND) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->hwaccess_gate);
+ mutex_lock(&device->mutex);
+ } else if (device->state == KGSL_STATE_DUMP_AND_FT) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->ft_gate);
+ mutex_lock(&device->mutex);
+ }
+ ret = kgsl_pwrctrl_wake(device);
+ }
+ if (ret == 0)
+ device->active_cnt++;
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_active_count_get);
+
+/**
+ * kgsl_active_count_get_light() - Increase the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Increase the active count for the KGSL device WITHOUT
+ * turning on the clocks. Currently this is only used for creating
+ * kgsl_events. The device mutex must be held while calling this function.
+ */
+int kgsl_active_count_get_light(struct kgsl_device *device)
+{
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (device->state != KGSL_STATE_ACTIVE) {
+ dev_WARN_ONCE(device->dev, 1, "device in unexpected state %s\n",
+ kgsl_pwrstate_to_str(device->state));
+ return -EINVAL;
+ }
+
+ if (device->active_cnt == 0) {
+ dev_WARN_ONCE(device->dev, 1, "active count is 0!\n");
+ return -EINVAL;
+ }
+
+ device->active_cnt++;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_active_count_get_light);
+
+/**
+ * kgsl_active_count_put() - Decrease the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Decrease the active count for the KGSL device and turn off
+ * clocks if there are no remaining references. This function will
+ * transition the device to NAP if there are no other pending state
+ * changes. It also completes the suspend gate. The device mutex must
+ * be held while calling this function.
+ */
+void kgsl_active_count_put(struct kgsl_device *device)
+{
+ BUG_ON(!mutex_is_locked(&device->mutex));
+ BUG_ON(device->active_cnt == 0);
+
+ kgsl_pwrscale_idle(device);
+ if (device->active_cnt > 1) {
+ device->active_cnt--;
+ return;
+ }
+
+ INIT_COMPLETION(device->suspend_gate);
+
+ if (device->pwrctrl.nap_allowed == true &&
+ (device->state == KGSL_STATE_ACTIVE &&
+ device->requested_state == KGSL_STATE_NONE)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ if (kgsl_pwrctrl_sleep(device) && device->pwrctrl.irq_last) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ queue_work(device->work_queue, &device->idle_check_ws);
+ }
+ }
+ device->active_cnt--;
+
+ if (device->active_cnt == 0)
+ complete(&device->suspend_gate);
+}
+EXPORT_SYMBOL(kgsl_active_count_put);
+
+/**
+ * kgsl_active_count_wait() - Wait for activity to finish.
+ * @device: Pointer to a KGSL device
+ *
+ * Block until all active_cnt users put() their reference.
+ */
+void kgsl_active_count_wait(struct kgsl_device *device)
+{
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (device->active_cnt != 0) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->suspend_gate);
+ mutex_lock(&device->mutex);
+ }
+}
+EXPORT_SYMBOL(kgsl_active_count_wait);
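The kgsl_active_count_* helpers above replace open-coded active_cnt manipulation with a get/put pair plus a wait used on the suspend path. The sketch below is a generic userspace model of that contract (mutex plus condition variable instead of the kernel's completion), not the driver implementation; in the driver the device mutex is held around each call and get() may also wake the hardware.

#include <pthread.h>
#include <stdio.h>

struct active_count {
	pthread_mutex_t lock;
	pthread_cond_t  idle;	/* signalled when count drops to zero */
	int count;
};

static void ac_get(struct active_count *ac)
{
	pthread_mutex_lock(&ac->lock);
	if (ac->count++ == 0)
		puts("first user: power up");
	pthread_mutex_unlock(&ac->lock);
}

static void ac_put(struct active_count *ac)
{
	pthread_mutex_lock(&ac->lock);
	if (--ac->count == 0) {
		puts("last user: allow power down");
		pthread_cond_broadcast(&ac->idle);
	}
	pthread_mutex_unlock(&ac->lock);
}

static void ac_wait(struct active_count *ac)
{
	pthread_mutex_lock(&ac->lock);
	while (ac->count != 0)
		pthread_cond_wait(&ac->idle, &ac->lock);
	pthread_mutex_unlock(&ac->lock);
}

int main(void)
{
	struct active_count ac = { PTHREAD_MUTEX_INITIALIZER,
				   PTHREAD_COND_INITIALIZER, 0 };

	ac_get(&ac);	/* e.g. before submitting work / touching registers */
	ac_put(&ac);	/* once the hardware is done */
	ac_wait(&ac);	/* e.g. on the suspend path */
	return 0;
}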
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index ced52e1..b3e8702 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -23,7 +23,7 @@
#define KGSL_PWRLEVEL_NOMINAL 1
#define KGSL_PWRLEVEL_LAST_OFFSET 2
-#define KGSL_MAX_CLKS 5
+#define KGSL_MAX_CLKS 6
struct platform_device;
@@ -91,6 +91,7 @@
struct pm_qos_request pm_qos_req_dma;
unsigned int pm_qos_latency;
unsigned int step_mul;
+ unsigned int irq_last;
};
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
@@ -99,9 +100,8 @@
void kgsl_timer(unsigned long data);
void kgsl_idle_check(struct work_struct *work);
void kgsl_pre_hwaccess(struct kgsl_device *device);
-void kgsl_check_suspended(struct kgsl_device *device);
int kgsl_pwrctrl_sleep(struct kgsl_device *device);
-void kgsl_pwrctrl_wake(struct kgsl_device *device);
+int kgsl_pwrctrl_wake(struct kgsl_device *device);
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
unsigned int level);
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device);
@@ -115,4 +115,10 @@
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state);
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state);
+
+int kgsl_active_count_get(struct kgsl_device *device);
+int kgsl_active_count_get_light(struct kgsl_device *device);
+void kgsl_active_count_put(struct kgsl_device *device);
+void kgsl_active_count_wait(struct kgsl_device *device);
+
#endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 02ada38..afef62e 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -240,6 +240,7 @@
device->pwrscale.policy->busy(device,
&device->pwrscale);
}
+EXPORT_SYMBOL(kgsl_pwrscale_busy);
void kgsl_pwrscale_idle(struct kgsl_device *device)
{
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index 9b2ac70..5d5d5b1 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -31,6 +31,7 @@
unsigned int no_switch_cnt;
unsigned int skip_cnt;
struct kgsl_power_stats bin;
+ unsigned int idle_dcvs;
};
spinlock_t tz_lock;
@@ -47,24 +48,32 @@
#define SKIP_COUNTER 500
#define TZ_RESET_ID 0x3
#define TZ_UPDATE_ID 0x4
+#define TZ_INIT_ID 0x6
-#ifdef CONFIG_MSM_SCM
/* Trap into the TrustZone, and call funcs there. */
-static int __secure_tz_entry(u32 cmd, u32 val, u32 id)
+static int __secure_tz_entry2(u32 cmd, u32 val1, u32 val2)
{
int ret;
spin_lock(&tz_lock);
+ /* sync memory before sending the commands to tz */
__iowmb();
- ret = scm_call_atomic2(SCM_SVC_IO, cmd, val, id);
+ ret = scm_call_atomic2(SCM_SVC_IO, cmd, val1, val2);
spin_unlock(&tz_lock);
return ret;
}
-#else
-static int __secure_tz_entry(u32 cmd, u32 val, u32 id)
+
+static int __secure_tz_entry3(u32 cmd, u32 val1, u32 val2,
+ u32 val3)
{
- return 0;
+ int ret;
+ spin_lock(&tz_lock);
+ /* sync memory before sending the commands to tz */
+ __iowmb();
+ ret = scm_call_atomic3(SCM_SVC_IO, cmd, val1, val2,
+ val3);
+ spin_unlock(&tz_lock);
+ return ret;
}
-#endif /* CONFIG_MSM_SCM */
static ssize_t tz_governor_show(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale,
@@ -172,11 +181,21 @@
*/
if (priv->bin.busy_time > CEILING) {
val = -1;
- } else {
+ } else if (priv->idle_dcvs) {
idle = priv->bin.total_time - priv->bin.busy_time;
idle = (idle > 0) ? idle : 0;
- val = __secure_tz_entry(TZ_UPDATE_ID, idle, device->id);
+ val = __secure_tz_entry2(TZ_UPDATE_ID, idle, device->id);
+ } else {
+ if (pwr->step_mul > 1)
+ val = __secure_tz_entry3(TZ_UPDATE_ID,
+ (pwr->active_pwrlevel + 1)/2,
+ priv->bin.total_time, priv->bin.busy_time);
+ else
+ val = __secure_tz_entry3(TZ_UPDATE_ID,
+ pwr->active_pwrlevel,
+ priv->bin.total_time, priv->bin.busy_time);
}
+
priv->bin.total_time = 0;
priv->bin.busy_time = 0;
@@ -201,7 +220,7 @@
{
struct tz_priv *priv = pwrscale->priv;
- __secure_tz_entry(TZ_RESET_ID, 0, device->id);
+ __secure_tz_entry2(TZ_RESET_ID, 0, 0);
priv->no_switch_cnt = 0;
priv->bin.total_time = 0;
priv->bin.busy_time = 0;
@@ -210,16 +229,32 @@
#ifdef CONFIG_MSM_SCM
static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
+ int i = 0, j = 1, ret = 0;
struct tz_priv *priv;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ unsigned int tz_pwrlevels[KGSL_MAX_PWRLEVELS + 1];
priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
if (pwrscale->priv == NULL)
return -ENOMEM;
-
+ priv->idle_dcvs = 0;
priv->governor = TZ_GOVERNOR_ONDEMAND;
spin_lock_init(&tz_lock);
kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group);
-
+ for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
+ if (i == 0)
+ tz_pwrlevels[j] = pwr->pwrlevels[i].gpu_freq;
+ else if (pwr->pwrlevels[i].gpu_freq !=
+ pwr->pwrlevels[i - 1].gpu_freq) {
+ j++;
+ tz_pwrlevels[j] = pwr->pwrlevels[i].gpu_freq;
+ }
+ }
+ tz_pwrlevels[0] = j;
+ ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
+ sizeof(tz_pwrlevels), NULL, 0);
+ if (ret)
+ priv->idle_dcvs = 1;
return 0;
}
#else
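tz_init() above packs the GPU power levels into tz_pwrlevels[], collapsing adjacent duplicate frequencies and storing the number of distinct levels in element 0 before handing the table to TZ. A standalone sketch of that packing step over an invented frequency array:

#include <stdio.h>

#define MAX_LEVELS 16

int main(void)
{
	/* Hypothetical power-level table; adjacent duplicates are common
	 * when several bus levels share one GPU frequency. */
	unsigned int freqs[] = { 450000000, 450000000, 320000000,
				 200000000, 200000000, 128000000 };
	unsigned int table[MAX_LEVELS + 1];
	int i, j = 0, n = sizeof(freqs) / sizeof(freqs[0]);

	for (i = 0; i < n; i++) {
		if (i == 0 || freqs[i] != freqs[i - 1])
			table[++j] = freqs[i];	/* entries start at index 1 */
	}
	table[0] = j;				/* element 0 holds the count */

	printf("%u distinct levels:", table[0]);
	for (i = 1; i <= j; i++)
		printf(" %u", table[i]);
	printf("\n");
	return 0;
}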
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 595f78f..62db513 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -607,16 +607,22 @@
while (len > 0) {
struct page *page;
- unsigned int gfp_mask = GFP_KERNEL | __GFP_HIGHMEM |
- __GFP_NOWARN | __GFP_NORETRY;
+ unsigned int gfp_mask = __GFP_HIGHMEM;
int j;
/* don't waste space at the end of the allocation*/
if (len < page_size)
page_size = PAGE_SIZE;
+ /*
+ * Don't do some of the more aggressive memory recovery
+ * techniques for large order allocations
+ */
if (page_size != PAGE_SIZE)
- gfp_mask |= __GFP_COMP;
+ gfp_mask |= __GFP_COMP | __GFP_NORETRY |
+ __GFP_NO_KSWAPD | __GFP_NOWARN;
+ else
+ gfp_mask |= GFP_KERNEL | __GFP_NORETRY;
page = alloc_pages(gfp_mask, get_order(page_size));
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 296de11..abcebfb 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -106,7 +106,12 @@
{
struct kgsl_snapshot_linux_context *header = _ctxtptr;
struct kgsl_context *context = ptr;
- struct kgsl_device *device = context->dev_priv->device;
+ struct kgsl_device *device;
+
+ if (context)
+ device = context->dev_priv->device;
+ else
+ device = (struct kgsl_device *)data;
header->id = id;
@@ -141,6 +146,9 @@
idr_for_each(&device->context_idr, snapshot_context_count, &ctxtcount);
+ /* Increment ctxtcount for the global memstore */
+ ctxtcount++;
+
size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context);
/* Make sure there is enough room for the data */
@@ -186,8 +194,10 @@
header->ctxtcount = ctxtcount;
- /* append information for each context */
_ctxtptr = snapshot + sizeof(*header);
+ /* append information for the global context */
+ snapshot_context_info(KGSL_MEMSTORE_GLOBAL, NULL, device);
+ /* append information for each context */
idr_for_each(&device->context_idr, snapshot_context_info, NULL);
/* Return the size of the data segment */
@@ -315,6 +325,7 @@
return 0;
}
+EXPORT_SYMBOL(kgsl_snapshot_have_object);
/* kgsl_snapshot_get_object - Mark a GPU buffer to be frozen
* @device - the device that is being snapshotted
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 0e3e046..813305a 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -69,6 +69,7 @@
struct kgsl_fence_event_priv {
struct kgsl_context *context;
+ unsigned int timestamp;
};
/**
@@ -85,7 +86,7 @@
void *priv, u32 context_id, u32 timestamp)
{
struct kgsl_fence_event_priv *ev = priv;
- kgsl_sync_timeline_signal(ev->context->timeline, timestamp);
+ kgsl_sync_timeline_signal(ev->context->timeline, ev->timestamp);
kgsl_context_put(ev->context);
kfree(ev);
}
@@ -125,6 +126,7 @@
if (event == NULL)
return -ENOMEM;
event->context = context;
+ event->timestamp = timestamp;
kgsl_context_get(context);
pt = kgsl_sync_pt_create(context->timeline, timestamp);
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index a07959b..49265fc 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -17,7 +17,6 @@
#include "kgsl.h"
#include "kgsl_cffdump.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_trace.h"
#include "z180.h"
#include "z180_reg.h"
@@ -485,7 +484,7 @@
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
error:
- trace_kgsl_issueibcmds(device, context->id, ibdesc, numibs,
+ kgsl_trace_issueibcmds(device, context->id, ibdesc, numibs,
*timestamp, ctrl, result, 0);
return (int)result;
diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h
index 268aac3..1be0870 100644
--- a/drivers/gpu/msm/z180.h
+++ b/drivers/gpu/msm/z180.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,7 +29,7 @@
#define Z180_DEFAULT_PWRSCALE_POLICY NULL
/* Wait a maximum of 10 seconds when trying to idle the core */
-#define Z180_IDLE_TIMEOUT (10 * 1000)
+#define Z180_IDLE_TIMEOUT (20 * 1000)
struct z180_ringbuffer {
unsigned int prevctx;
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index 1458bc5..b3b5643 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -638,20 +638,21 @@
{
struct qpnp_vadc_linear_graph vbatt_param;
int rc = 0;
+ int64_t low_thr = 0, high_thr = 0;
rc = qpnp_get_vadc_gain_and_offset(&vbatt_param, CALIB_ABSOLUTE);
if (rc < 0)
return rc;
- *low_threshold = (((param->low_thr/3) - QPNP_ADC_625_UV) *
+ low_thr = (((param->low_thr/3) - QPNP_ADC_625_UV) *
vbatt_param.dy);
- do_div(*low_threshold, QPNP_ADC_625_UV);
- *low_threshold += vbatt_param.adc_gnd;
+ do_div(low_thr, QPNP_ADC_625_UV);
+ *low_threshold = low_thr + vbatt_param.adc_gnd;
- *high_threshold = (((param->high_thr/3) - QPNP_ADC_625_UV) *
+ high_thr = (((param->high_thr/3) - QPNP_ADC_625_UV) *
vbatt_param.dy);
- do_div(*high_threshold, QPNP_ADC_625_UV);
- *high_threshold += vbatt_param.adc_gnd;
+ do_div(high_thr, QPNP_ADC_625_UV);
+ *high_threshold = high_thr + vbatt_param.adc_gnd;
pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
param->low_thr);
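The qpnp-adc-common change above keeps the scaled threshold in a local int64_t so the multiply and the do_div() never operate on the caller's 32-bit storage, and only the final narrowed value is written back. A standalone sketch of that pattern, using plain 64-bit division in place of do_div() and illustrative calibration values:

#include <stdint.h>
#include <stdio.h>

#define ADC_625_UV 625000	/* 625 mV in uV; illustrative constant */

/* Convert a threshold in uV into raw ADC code space.  dy and adc_gnd are
 * hypothetical calibration values. */
static int32_t uv_to_code(int32_t thr_uv, int64_t dy, int32_t adc_gnd)
{
	int64_t code;

	/* Do the scaling in 64 bits to avoid overflowing the 32-bit result. */
	code = ((int64_t)(thr_uv / 3) - ADC_625_UV) * dy;
	code /= ADC_625_UV;

	return (int32_t)(code + adc_gnd);
}

int main(void)
{
	printf("low threshold code: %d\n", uv_to_code(3400000, 3200, 512));
	return 0;
}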
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index 1fd4fee..66811bf 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -115,7 +115,7 @@
#define QPNP_ADC_GAIN_NV 17857
#define QPNP_OFFSET_CALIBRATION_SHORT_CADC_LEADS_IDEAL 0
#define QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR 10000000
-#define QPNP_IADC_NANO_VOLTS_FACTOR 1000000000
+#define QPNP_IADC_NANO_VOLTS_FACTOR 1000000
#define QPNP_IADC_CALIB_SECONDS 300000
#define QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT 15625
#define QPNP_IADC_DIE_TEMP_CALIB_OFFSET 5000
@@ -125,18 +125,17 @@
#define QPNP_BIT_SHIFT_8 8
#define QPNP_RSENSE_MSB_SIGN_CHECK 0x80
#define QPNP_ADC_COMPLETION_TIMEOUT HZ
-#define QPNP_IADC_ERR_CHK_RATELIMIT 3
struct qpnp_iadc_drv {
struct qpnp_adc_drv *adc;
int32_t rsense;
+ bool external_rsense;
struct device *iadc_hwmon;
bool iadc_initialized;
int64_t die_temp_calib_offset;
struct delayed_work iadc_work;
struct mutex iadc_vadc_lock;
bool iadc_mode_sel;
- uint32_t iadc_err_cnt;
struct sensor_device_attribute sens_attr[0];
};
@@ -254,7 +253,7 @@
return rc;
}
- pr_err("EOC not set with status:%x, dig:%x, ch:%x, mode:%x, en:%x\n",
+ pr_debug("EOC not set with status:%x, dig:%x, ch:%x, mode:%x, en:%x\n",
status1, dig, chan, mode, en);
rc = qpnp_iadc_enable(false);
@@ -347,6 +346,8 @@
return rc;
}
+ INIT_COMPLETION(iadc->adc->adc_rslt_completion);
+
rc = qpnp_iadc_enable(true);
if (rc)
return rc;
@@ -495,16 +496,12 @@
int rc = 0;
rc = qpnp_iadc_calibrate_for_trim();
- if (rc) {
- pr_err("periodic IADC calibration failed\n");
- iadc->iadc_err_cnt++;
- }
-
- if (iadc->iadc_err_cnt < QPNP_IADC_ERR_CHK_RATELIMIT)
+ if (rc)
+ pr_debug("periodic IADC calibration failed\n");
+ else
schedule_delayed_work(&iadc->iadc_work,
round_jiffies_relative(msecs_to_jiffies
(QPNP_IADC_CALIB_SECONDS)));
-
return;
}
@@ -547,6 +544,9 @@
if (!iadc || !iadc->iadc_initialized)
return -EPROBE_DEFER;
+ if (iadc->external_rsense)
+ *rsense = iadc->rsense;
+
rc = qpnp_iadc_read_reg(QPNP_IADC_NOMINAL_RSENSE, &rslt_rsense);
if (rc < 0) {
pr_err("qpnp adc rsense read failed with %d\n", rc);
@@ -575,15 +575,21 @@
{
struct qpnp_iadc_drv *iadc = qpnp_iadc;
struct qpnp_vadc_result result_pmic_therm;
+ int64_t die_temp_offset;
int rc = 0;
rc = qpnp_vadc_read(DIE_TEMP, &result_pmic_therm);
if (rc < 0)
return rc;
- if (((uint64_t) (result_pmic_therm.physical -
- iadc->die_temp_calib_offset))
- > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
+ die_temp_offset = result_pmic_therm.physical -
+ iadc->die_temp_calib_offset;
+ if (die_temp_offset < 0)
+ die_temp_offset = -die_temp_offset;
+
+ if (die_temp_offset > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
+ iadc->die_temp_calib_offset =
+ result_pmic_therm.physical;
rc = qpnp_iadc_calibrate_for_trim();
if (rc)
pr_err("periodic IADC calibration failed\n");
@@ -597,6 +603,7 @@
{
struct qpnp_iadc_drv *iadc = qpnp_iadc;
int32_t rc, rsense_n_ohms, sign = 0, num, mode_sel = 0;
+ int32_t rsense_u_ohms = 0;
int64_t result_current;
uint16_t raw_data;
@@ -622,7 +629,7 @@
rc = qpnp_iadc_get_rsense(&rsense_n_ohms);
pr_debug("current raw:0%x and rsense:%d\n",
raw_data, rsense_n_ohms);
-
+ rsense_u_ohms = rsense_n_ohms/1000;
num = raw_data - iadc->adc->calib.offset_raw;
if (num < 0) {
sign = 1;
@@ -633,7 +640,7 @@
(iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
result_current = result->result_uv;
result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
- do_div(result_current, rsense_n_ohms);
+ do_div(result_current, rsense_u_ohms);
if (sign) {
result->result_uv = -result->result_uv;
@@ -825,9 +832,11 @@
rc = of_property_read_u32(node, "qcom,rsense",
&iadc->rsense);
- if (rc) {
- pr_err("Invalid rsens reference property\n");
- goto fail;
+ if (rc)
+ pr_debug("Defaulting to internal rsense\n");
+ else {
+ pr_debug("Use external rsense\n");
+ iadc->external_rsense = true;
}
rc = devm_request_irq(&spmi->dev, iadc->adc->adc_irq_eoc,
@@ -857,7 +866,6 @@
mutex_init(&iadc->iadc_vadc_lock);
INIT_DELAYED_WORK(&iadc->iadc_work, qpnp_iadc_work);
- iadc->iadc_err_cnt = 0;
iadc->iadc_initialized = true;
rc = qpnp_iadc_calibrate_for_trim();
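The current-reading change above rescales rsense from nano-ohms to micro-ohms and multiplies the measured voltage by 10^6 instead of 10^9; the unit bookkeeping still works out, since a voltage in uV times 10^6 divided by a resistance in uOhm yields a current in uA. A standalone sketch of that calculation, with plain 64-bit division in place of do_div():

#include <stdint.h>
#include <stdio.h>

#define VOLTS_FACTOR 1000000LL	/* 10^6, matches the new NANO_VOLTS_FACTOR */

/* Current in uA from a voltage in uV across a sense resistor in uOhm. */
static int64_t sense_current_ua(int64_t result_uv, uint32_t rsense_u_ohms)
{
	int64_t current = result_uv * VOLTS_FACTOR;

	current /= rsense_u_ohms;
	return current;
}

int main(void)
{
	/* 1500 uV across a 10000 uOhm (10 mOhm) shunt -> 150000 uA. */
	printf("%lld uA\n", (long long)sense_current_ua(1500, 10000));
	return 0;
}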
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index e268541..d296a47 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -90,6 +90,7 @@
#define QPNP_VADC_CONV_TIME_MIN 2000
#define QPNP_VADC_CONV_TIME_MAX 2100
#define QPNP_ADC_COMPLETION_TIMEOUT HZ
+#define QPNP_VADC_ERR_COUNT 5
struct qpnp_vadc_drv {
struct qpnp_adc_drv *adc;
@@ -434,7 +435,7 @@
{
struct qpnp_vadc_drv *vadc = qpnp_vadc;
struct qpnp_adc_amux_properties conv;
- int rc, calib_read_1, calib_read_2;
+ int rc, calib_read_1, calib_read_2, count = 0;
u8 status1 = 0;
conv.amux_channel = REF_125V;
@@ -456,6 +457,11 @@
status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
usleep_range(QPNP_VADC_CONV_TIME_MIN,
QPNP_VADC_CONV_TIME_MAX);
+ count++;
+ if (count > QPNP_VADC_ERR_COUNT) {
+ rc = -ENODEV;
+ goto calib_fail;
+ }
}
rc = qpnp_vadc_read_conversion_result(&calib_read_1);
@@ -476,6 +482,7 @@
}
status1 = 0;
+ count = 0;
while (status1 != QPNP_VADC_STATUS1_EOC) {
rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
if (rc < 0)
@@ -483,6 +490,11 @@
status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
usleep_range(QPNP_VADC_CONV_TIME_MIN,
QPNP_VADC_CONV_TIME_MAX);
+ count++;
+ if (count > QPNP_VADC_ERR_COUNT) {
+ rc = -ENODEV;
+ goto calib_fail;
+ }
}
rc = qpnp_vadc_read_conversion_result(&calib_read_2);
@@ -516,6 +528,7 @@
}
status1 = 0;
+ count = 0;
while (status1 != QPNP_VADC_STATUS1_EOC) {
rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
if (rc < 0)
@@ -523,6 +536,11 @@
status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
usleep_range(QPNP_VADC_CONV_TIME_MIN,
QPNP_VADC_CONV_TIME_MAX);
+ count++;
+ if (count > QPNP_VADC_ERR_COUNT) {
+ rc = -ENODEV;
+ goto calib_fail;
+ }
}
rc = qpnp_vadc_read_conversion_result(&calib_read_1);
@@ -543,6 +561,7 @@
}
status1 = 0;
+ count = 0;
while (status1 != QPNP_VADC_STATUS1_EOC) {
rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
if (rc < 0)
@@ -550,6 +569,11 @@
status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
usleep_range(QPNP_VADC_CONV_TIME_MIN,
QPNP_VADC_CONV_TIME_MAX);
+ count++;
+ if (count > QPNP_VADC_ERR_COUNT) {
+ rc = -ENODEV;
+ goto calib_fail;
+ }
}
rc = qpnp_vadc_read_conversion_result(&calib_read_2);
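Each calibration wait above now bumps a counter on every poll of STATUS1 and bails out with -ENODEV after QPNP_VADC_ERR_COUNT attempts instead of spinning forever on an unresponsive ADC. Below is a standalone sketch of the retry-count variant of a bounded wait (the earlier IOMMU example bounded by elapsed time instead); conversion_done() is a stub standing in for the STATUS1 read:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define ERR_COUNT     5
#define POLL_DELAY_US 2000

/* Hypothetical end-of-conversion check standing in for reading STATUS1. */
static int conversion_done(void)
{
	static int polls;
	return ++polls >= 3;
}

static int wait_for_eoc(void)
{
	int count = 0;

	while (!conversion_done()) {
		usleep(POLL_DELAY_US);
		if (++count > ERR_COUNT)
			return -ENODEV;	/* ADC never responded */
	}
	return 0;
}

int main(void)
{
	printf("wait_for_eoc: %d\n", wait_for_eoc());
	return 0;
}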
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 0ea230a..29b269a 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -128,6 +128,7 @@
#define MXT_SPT_DIGITIZER_T43 43
#define MXT_SPT_MESSAGECOUNT_T44 44
#define MXT_SPT_CTECONFIG_T46 46
+#define MXT_SPT_EXTRANOISESUPCTRLS_T58 58
#define MXT_SPT_TIMER_T61 61
/* MXT_GEN_COMMAND_T6 field */
@@ -396,6 +397,7 @@
atomic_t st_enabled;
atomic_t st_pending_irqs;
struct completion st_completion;
+ struct completion st_powerdown;
#endif
};
@@ -432,6 +434,7 @@
case MXT_SPT_USERDATA_T38:
case MXT_SPT_DIGITIZER_T43:
case MXT_SPT_CTECONFIG_T46:
+ case MXT_SPT_EXTRANOISESUPCTRLS_T58:
case MXT_SPT_TIMER_T61:
case MXT_PROCI_ADAPTIVETHRESHOLD_T55:
return true;
@@ -469,6 +472,7 @@
case MXT_SPT_USERDATA_T38:
case MXT_SPT_DIGITIZER_T43:
case MXT_SPT_CTECONFIG_T46:
+ case MXT_SPT_EXTRANOISESUPCTRLS_T58:
case MXT_SPT_TIMER_T61:
case MXT_PROCI_ADAPTIVETHRESHOLD_T55:
return true;
@@ -998,8 +1002,8 @@
static irqreturn_t mxt_filter_interrupt(struct mxt_data *data)
{
if (atomic_read(&data->st_enabled)) {
- atomic_cmpxchg(&data->st_pending_irqs, 0, 1);
- complete(&data->st_completion);
+ if (atomic_cmpxchg(&data->st_pending_irqs, 0, 1) == 0)
+ complete(&data->st_completion);
return IRQ_HANDLED;
}
return IRQ_NONE;
@@ -1993,6 +1997,7 @@
atomic_set(&data->st_enabled, 0);
complete(&data->st_completion);
mxt_interrupt(data->client->irq, data);
+ complete(&data->st_powerdown);
break;
case 1:
if (atomic_read(&data->st_enabled)) {
@@ -2005,6 +2010,8 @@
err = -EIO;
break;
}
+ INIT_COMPLETION(data->st_completion);
+ INIT_COMPLETION(data->st_powerdown);
atomic_set(&data->st_pending_irqs, 0);
atomic_set(&data->st_enabled, 1);
break;
@@ -2032,7 +2039,7 @@
return err;
if (atomic_cmpxchg(&data->st_pending_irqs, 1, 0) != 1)
- return -EBADF;
+ return -EINVAL;
return scnprintf(buf, PAGE_SIZE, "%u", 1);
}
@@ -2061,9 +2068,25 @@
.attrs = mxt_attrs,
};
+
+#if defined(CONFIG_SECURE_TOUCH)
+static void mxt_secure_touch_stop(struct mxt_data *data)
+{
+ if (atomic_read(&data->st_enabled)) {
+ complete(&data->st_completion);
+ wait_for_completion_interruptible(&data->st_powerdown);
+ }
+}
+#else
+static void mxt_secure_touch_stop(struct mxt_data *data)
+{
+}
+#endif
+
static int mxt_start(struct mxt_data *data)
{
int error;
+ mxt_secure_touch_stop(data);
/* restore the old power state values and reenable touch */
error = __mxt_write_reg(data->client, data->t7_start_addr,
@@ -2081,6 +2104,7 @@
{
int error;
u8 t7_data[T7_DATA_SIZE] = {0};
+ mxt_secure_touch_stop(data);
error = __mxt_write_reg(data->client, data->t7_start_addr,
T7_DATA_SIZE, t7_data);
@@ -2462,6 +2486,7 @@
/* calibrate */
if (data->pdata->need_calibration) {
+ mxt_secure_touch_stop(data);
error = mxt_write_object(data, MXT_GEN_COMMAND_T6,
MXT_COMMAND_CALIBRATE, 1);
if (error < 0)
@@ -2797,12 +2822,13 @@
#endif
#if defined(CONFIG_SECURE_TOUCH)
-static void __devinit secure_touch_init(struct mxt_data *data)
+static void __devinit mxt_secure_touch_init(struct mxt_data *data)
{
init_completion(&data->st_completion);
+ init_completion(&data->st_powerdown);
}
#else
-static void __devinit secure_touch_init(struct mxt_data *data)
+static void __devinit mxt_secure_touch_init(struct mxt_data *data)
{
}
#endif
@@ -2999,7 +3025,7 @@
mxt_debugfs_init(data);
- secure_touch_init(data);
+ mxt_secure_touch_init(data);
return 0;
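The secure-touch fix above only completes st_completion when atomic_cmpxchg() actually wins the 0 -> 1 transition, so a burst of interrupts cannot signal the completion more times than the reader consumes it; st_powerdown then lets mxt_secure_touch_stop() wait until the secure session has fully wound down. A standalone C11 sketch of the "signal only on the 0 -> 1 transition" part, with a stub in place of complete():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending_irqs;

/* Stub standing in for complete(&data->st_completion). */
static void signal_reader(void)
{
	puts("completion signalled");
}

/* Interrupt-side filter: signal the reader only when this caller is the
 * one flipping pending_irqs from 0 to 1. */
static void filter_interrupt(void)
{
	int expected = 0;

	if (atomic_compare_exchange_strong(&pending_irqs, &expected, 1))
		signal_reader();
	/* else: an IRQ is already pending, nothing more to signal */
}

int main(void)
{
	filter_interrupt();	/* signals */
	filter_interrupt();	/* pending already set, stays quiet */
	return 0;
}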
diff --git a/drivers/input/touchscreen/synaptics_fw_update.c b/drivers/input/touchscreen/synaptics_fw_update.c
index 5d66241..986c062 100644
--- a/drivers/input/touchscreen/synaptics_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_fw_update.c
@@ -30,7 +30,6 @@
#define DEBUG_FW_UPDATE
#define SHOW_PROGRESS
-#define FW_IMAGE_NAME "PR1063486-s7301_00000000.img"
#define MAX_FIRMWARE_ID_LEN 10
#define FORCE_UPDATE false
#define INSIDE_FIRMWARE_UPDATE
@@ -585,7 +584,8 @@
deviceFirmwareID = extract_uint(firmware_id);
/* .img firmware id */
- strptr = strstr(FW_IMAGE_NAME, "PR");
+ strptr = strnstr(fwu->rmi4_data->fw_image_name, "PR",
+ sizeof(fwu->rmi4_data->fw_image_name));
if (!strptr) {
dev_err(&i2c_client->dev,
"No valid PR number (PRxxxxxxx)" \
@@ -1219,19 +1219,28 @@
pr_notice("%s: Start of reflash process\n", __func__);
+ if (!fwu->rmi4_data->fw_image_name) {
+ retval = 0;
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "Firmware image name not given, skipping update\n");
+ goto exit;
+ }
+
if (fwu->ext_data_source)
fw_image = fwu->ext_data_source;
else {
dev_dbg(&fwu->rmi4_data->i2c_client->dev,
"%s: Requesting firmware image %s\n",
- __func__, FW_IMAGE_NAME);
+ __func__, fwu->rmi4_data->fw_image_name);
- retval = request_firmware(&fw_entry, FW_IMAGE_NAME,
+ retval = request_firmware(&fw_entry,
+ fwu->rmi4_data->fw_image_name,
&fwu->rmi4_data->i2c_client->dev);
if (retval != 0) {
dev_err(&fwu->rmi4_data->i2c_client->dev,
"%s: Firmware image %s not available\n",
- __func__, FW_IMAGE_NAME);
+ __func__,
+ fwu->rmi4_data->fw_image_name);
retval = -EINVAL;
goto exit;
}
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.c b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
index e1b3884..4e2b1a4 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.c
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
@@ -34,6 +34,9 @@
#define DRIVER_NAME "synaptics_rmi4_i2c"
#define INPUT_PHYS_NAME "synaptics_rmi4_i2c/input0"
+
+#define RESET_DELAY 100
+
#define TYPE_B_PROTOCOL
#define NO_0D_WHILE_2D
@@ -68,6 +71,16 @@
#define NO_SLEEP_OFF (0 << 2)
#define NO_SLEEP_ON (1 << 2)
+enum device_status {
+ STATUS_NO_ERROR = 0x00,
+ STATUS_RESET_OCCURED = 0x01,
+ STATUS_INVALID_CONFIG = 0x02,
+ STATUS_DEVICE_FAILURE = 0x03,
+ STATUS_CONFIG_CRC_FAILURE = 0x04,
+ STATUS_FIRMWARE_CRC_FAILURE = 0x05,
+ STATUS_CRC_IN_PROGRESS = 0x06
+};
+
#define RMI4_VTG_MIN_UV 2700000
#define RMI4_VTG_MAX_UV 3300000
#define RMI4_ACTIVE_LOAD_UA 15000
@@ -79,7 +92,6 @@
#define RMI4_I2C_LPM_LOAD_UA 10
#define RMI4_GPIO_SLEEP_LOW_US 10000
-#define RMI4_GPIO_WAIT_HIGH_MS 25
static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
unsigned short addr, unsigned char *data,
@@ -1006,6 +1018,13 @@
rmi4_pdata->panel_y = temp_val;
}
+ rc = of_property_read_string(np, "synaptics,fw-image-name",
+ &rmi4_pdata->fw_image_name);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read fw image name\n");
+ return rc;
+ }
+
/* reset, irq gpio info */
rmi4_pdata->reset_gpio = of_get_named_gpio_flags(np,
"synaptics,reset-gpio", 0, &rmi4_pdata->reset_flags);
@@ -1475,6 +1494,16 @@
if (retval < 0)
return retval;
+ while (status.status_code == STATUS_CRC_IN_PROGRESS) {
+ msleep(1);
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_data_base_addr,
+ status.data,
+ sizeof(status.data));
+ if (retval < 0)
+ return retval;
+ }
+
if (status.flash_prog == 1) {
pr_notice("%s: In flash prog mode, status = 0x%02x\n",
__func__,
@@ -1638,7 +1667,7 @@
return retval;
}
- msleep(100);
+ msleep(RESET_DELAY);
return retval;
};
@@ -2030,6 +2059,8 @@
rmi4_data->flip_x = rmi4_data->board->x_flip;
rmi4_data->flip_y = rmi4_data->board->y_flip;
+ rmi4_data->fw_image_name = rmi4_data->board->fw_image_name;
+
rmi4_data->input_dev->name = DRIVER_NAME;
rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
rmi4_data->input_dev->id.bustype = BUS_I2C;
@@ -2101,7 +2132,7 @@
gpio_set_value(platform_data->reset_gpio, 0);
usleep(RMI4_GPIO_SLEEP_LOW_US);
gpio_set_value(platform_data->reset_gpio, 1);
- msleep(RMI4_GPIO_WAIT_HIGH_MS);
+ msleep(RESET_DELAY);
} else
synaptics_rmi4_reset_command(rmi4_data);
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.h b/drivers/input/touchscreen/synaptics_i2c_rmi4.h
index 9356937..16b1f8f 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.h
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.h
@@ -198,6 +198,7 @@
#ifdef CONFIG_HAS_EARLYSUSPEND
struct early_suspend early_suspend;
#endif
+ const char *fw_image_name;
unsigned char current_page;
unsigned char button_0d_enabled;
unsigned char full_pm_cycle;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 330c850..aa69475 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -16,7 +16,7 @@
# MSM IOMMU support
config MSM_IOMMU
bool "MSM IOMMU Support"
- depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064 || ARCH_MSM8974 || ARCH_MPQ8092 || ARCH_MSM8610 || ARCH_MSM8226 || ARCH_MSMZINC
+ depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064 || ARCH_MSM8974 || ARCH_MPQ8092 || ARCH_MSM8610 || ARCH_MSM8226 || ARCH_APQ8084
select IOMMU_API
help
Support for the IOMMUs found on certain Qualcomm SOCs.
@@ -49,7 +49,6 @@
config IOMMU_PGTABLES_L2
bool "Allow SMMU page tables in the L2 cache (Experimental)"
depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
- default y
help
Improves TLB miss latency at the expense of potential L2 pollution.
However, with large multimedia buffers, the TLB should mostly contain
diff --git a/drivers/iommu/msm_iommu-v0.c b/drivers/iommu/msm_iommu-v0.c
index c0a4720..b1960c6 100644
--- a/drivers/iommu/msm_iommu-v0.c
+++ b/drivers/iommu/msm_iommu-v0.c
@@ -55,6 +55,9 @@
.name = "msm_iommu_sec_bus",
};
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+ unsigned int len);
+
static inline void clean_pte(unsigned long *start, unsigned long *end,
int redirect)
{
@@ -167,6 +170,11 @@
/* No need to do anything. IOMMUv0 is always on. */
}
+static void *_iommu_lock_initialize(void)
+{
+ return msm_iommu_lock_initialize();
+}
+
static void _iommu_lock_acquire(void)
{
msm_iommu_lock();
@@ -182,6 +190,7 @@
.iommu_power_off = __disable_regulators,
.iommu_clk_on = __enable_clocks,
.iommu_clk_off = __disable_clocks,
+ .iommu_lock_initialize = _iommu_lock_initialize,
.iommu_lock_acquire = _iommu_lock_acquire,
.iommu_lock_release = _iommu_lock_release,
};
@@ -953,6 +962,7 @@
int prot)
{
unsigned int pa;
+ unsigned int start_va = va;
unsigned int offset = 0;
unsigned long *fl_table;
unsigned long *fl_pte;
@@ -1026,12 +1036,6 @@
chunk_offset = 0;
sg = sg_next(sg);
pa = get_phys_addr(sg);
- if (pa == 0) {
- pr_debug("No dma address for sg %p\n",
- sg);
- ret = -EINVAL;
- goto fail;
- }
}
continue;
}
@@ -1085,12 +1089,6 @@
chunk_offset = 0;
sg = sg_next(sg);
pa = get_phys_addr(sg);
- if (pa == 0) {
- pr_debug("No dma address for sg %p\n",
- sg);
- ret = -EINVAL;
- goto fail;
- }
}
}
@@ -1103,6 +1101,8 @@
__flush_iotlb(domain);
fail:
mutex_unlock(&msm_iommu_lock);
+ if (ret && offset > 0)
+ msm_iommu_unmap_range(domain, start_va, offset);
return ret;
}
diff --git a/drivers/iommu/msm_iommu_dev-v0.c b/drivers/iommu/msm_iommu_dev-v0.c
index 549800f..7ae0b21 100644
--- a/drivers/iommu/msm_iommu_dev-v0.c
+++ b/drivers/iommu/msm_iommu_dev-v0.c
@@ -414,6 +414,7 @@
pmon_info->iommu.ops = &iommu_access_ops_v0;
pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v0();
pmon_info->iommu.iommu_name = drvdata->name;
+ pmon_info->iommu.always_on = 1;
ret = msm_iommu_pm_iommu_register(pmon_info);
if (ret) {
pr_err("%s iommu register fail\n",
diff --git a/drivers/iommu/msm_iommu_pagetable.c b/drivers/iommu/msm_iommu_pagetable.c
index b62bb76..9614692 100644
--- a/drivers/iommu/msm_iommu_pagetable.c
+++ b/drivers/iommu/msm_iommu_pagetable.c
@@ -431,6 +431,7 @@
struct scatterlist *sg, unsigned int len, int prot)
{
phys_addr_t pa;
+ unsigned int start_va = va;
unsigned int offset = 0;
unsigned long *fl_pte;
unsigned long fl_offset;
@@ -495,12 +496,6 @@
chunk_offset = 0;
sg = sg_next(sg);
pa = get_phys_addr(sg);
- if (pa == 0) {
- pr_debug("No dma address for sg %p\n",
- sg);
- ret = -EINVAL;
- goto fail;
- }
}
continue;
}
@@ -553,12 +548,6 @@
chunk_offset = 0;
sg = sg_next(sg);
pa = get_phys_addr(sg);
- if (pa == 0) {
- pr_debug("No dma address for sg %p\n",
- sg);
- ret = -EINVAL;
- goto fail;
- }
}
}
@@ -569,6 +558,9 @@
}
fail:
+ if (ret && offset > 0)
+ msm_iommu_pagetable_unmap_range(pt, start_va, offset);
+
return ret;
}
diff --git a/drivers/iommu/msm_iommu_perfmon.c b/drivers/iommu/msm_iommu_perfmon.c
index fee8a4a..a11d794 100644
--- a/drivers/iommu/msm_iommu_perfmon.c
+++ b/drivers/iommu/msm_iommu_perfmon.c
@@ -90,6 +90,19 @@
return pos;
}
+static int iommu_pm_event_class_supported(struct iommu_pmon *pmon,
+ int event_class)
+{
+ unsigned int nevent_cls = pmon->nevent_cls_supported;
+ unsigned int i;
+
+ for (i = 0; i < nevent_cls; ++i) {
+ if (event_class == pmon->event_cls_supported[i])
+ return event_class;
+ }
+ return MSM_IOMMU_PMU_NO_EVENT_CLASS;
+}
+
static const char *iommu_pm_find_event_class_name(int event_class)
{
size_t array_len;
@@ -113,7 +126,8 @@
return event_class_name;
}
-static int iommu_pm_find_event_class(const char *event_class_name)
+static int iommu_pm_find_event_class(struct iommu_pmon *pmon,
+ const char *event_class_name)
{
size_t array_len;
struct event_class *ptr;
@@ -134,6 +148,7 @@
}
out:
+ event_class = iommu_pm_event_class_supported(pmon, event_class);
return event_class;
}
@@ -389,11 +404,11 @@
rv = kstrtol(buf, 10, &value);
if (!rv) {
counter->current_event_class =
- iommu_pm_find_event_class(
+ iommu_pm_find_event_class(pmon,
iommu_pm_find_event_class_name(value));
} else {
counter->current_event_class =
- iommu_pm_find_event_class(buf);
+ iommu_pm_find_event_class(pmon, buf);
} }
if (current_event_class != counter->current_event_class)
@@ -488,14 +503,17 @@
rv = kstrtoul(buf, 10, &cmd);
if (!rv && (cmd < 2)) {
if (pmon->enabled == 1 && cmd == 0) {
- if (pmon->iommu_attach_count > 0)
+ if (pmon->iommu.always_on ||
+ pmon->iommu_attach_count > 0)
iommu_pm_off(pmon);
} else if (pmon->enabled == 0 && cmd == 1) {
/* We can only turn on perf. monitoring if
- * iommu is attached. Delay turning on perf.
- * monitoring until we are attached.
+ * iommu is attached (if not always on).
+ * Delay turning on perf. monitoring until
+ * we are attached.
*/
- if (pmon->iommu_attach_count > 0)
+ if (pmon->iommu.always_on ||
+ pmon->iommu_attach_count > 0)
iommu_pm_on(pmon);
else
pmon->enabled = 1;
@@ -788,9 +806,9 @@
++pmon->iommu_attach_count;
if (pmon->iommu_attach_count == 1) {
/* If perf. mon was enabled before we attached we do
- * the actual after we attach.
+ * the actual enabling after we attach.
*/
- if (pmon->enabled)
+ if (pmon->enabled && !pmon->iommu.always_on)
iommu_pm_on(pmon);
}
mutex_unlock(&pmon->lock);
@@ -805,9 +823,9 @@
mutex_lock(&pmon->lock);
if (pmon->iommu_attach_count == 1) {
/* If perf. mon is still enabled we have to disable
- * before we do the detach.
+ * before we do the detach if iommu is not always on.
*/
- if (pmon->enabled)
+ if (pmon->enabled && !pmon->iommu.always_on)
iommu_pm_off(pmon);
}
BUG_ON(pmon->iommu_attach_count == 0);
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index e88e574..3667296 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -159,6 +159,27 @@
#define PWM_LUT_MAX_SIZE 63
#define RGB_LED_DISABLE 0x00
+#define MPP_MAX_LEVEL LED_FULL
+#define LED_MPP_MODE_CTRL(base) (base + 0x40)
+#define LED_MPP_VIN_CTRL(base) (base + 0x41)
+#define LED_MPP_EN_CTRL(base) (base + 0x46)
+#define LED_MPP_SINK_CTRL(base) (base + 0x4C)
+
+#define LED_MPP_CURRENT_DEFAULT 10
+#define LED_MPP_SOURCE_SEL_DEFAULT LED_MPP_MODE_ENABLE
+
+#define LED_MPP_SINK_MASK 0x07
+#define LED_MPP_MODE_MASK 0x7F
+#define LED_MPP_EN_MASK 0x80
+
+#define LED_MPP_MODE_SINK (0x06 << 4)
+#define LED_MPP_MODE_ENABLE 0x01
+#define LED_MPP_MODE_OUTPUT 0x10
+#define LED_MPP_MODE_DISABLE 0x00
+#define LED_MPP_EN_ENABLE 0x80
+#define LED_MPP_EN_DISABLE 0x00
+
+#define MPP_SOURCE_DTEST1 0x08
/**
* enum qpnp_leds - QPNP supported led ids
* @QPNP_ID_WLED - White led backlight
@@ -170,6 +191,7 @@
QPNP_ID_RGB_RED,
QPNP_ID_RGB_GREEN,
QPNP_ID_RGB_BLUE,
+ QPNP_ID_LED_MPP,
QPNP_ID_MAX,
};
@@ -240,6 +262,11 @@
static u8 rgb_pwm_debug_regs[] = {
0x45, 0x46, 0x47,
};
+
+static u8 mpp_debug_regs[] = {
+ 0x40, 0x41, 0x42, 0x45, 0x46, 0x4c,
+};
+
/**
* wled_config_data - wled configuration data
* @num_strings - number of wled strings supported
@@ -264,6 +291,16 @@
};
/**
+ * mpp_config_data - mpp configuration data
+ * @current_setting - current setting, 5 mA to 40 mA in 5 mA increments
+ * @source_sel - MPP source selection
+ * @mode_ctrl - MPP mode control value (sink/output)
+ */
+struct mpp_config_data {
+ u8 current_setting;
+ u8 source_sel;
+ u8 mode_ctrl;
+};
+
+/**
* flash_config_data - flash configuration data
* @current_prgm - current to be programmed, scaled by max level
* @clamp_curr - clamp current to use
@@ -336,6 +373,7 @@
struct wled_config_data *wled_cfg;
struct flash_config_data *flash_cfg;
struct rgb_config_data *rgb_cfg;
+ struct mpp_config_data *mpp_cfg;
int max_current;
bool default_on;
int turn_off_delay_ms;
@@ -458,6 +496,68 @@
return 0;
}
+static int qpnp_mpp_set(struct qpnp_led_data *led)
+{
+ int rc, val;
+
+ if (led->cdev.brightness) {
+ val = (led->cdev.brightness * LED_MPP_SINK_MASK) / LED_FULL;
+ rc = qpnp_led_masked_write(led,
+ LED_MPP_SINK_CTRL(led->base),
+ LED_MPP_SINK_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable reg\n");
+ return rc;
+ }
+
+ val = led->mpp_cfg->source_sel | led->mpp_cfg->mode_ctrl;
+
+ rc = qpnp_led_masked_write(led,
+ LED_MPP_MODE_CTRL(led->base), LED_MPP_MODE_MASK,
+ val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led mode reg\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ LED_MPP_EN_CTRL(led->base), LED_MPP_EN_MASK,
+ LED_MPP_EN_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable " \
+ "reg\n");
+ return rc;
+ }
+ } else {
+ rc = qpnp_led_masked_write(led,
+ LED_MPP_MODE_CTRL(led->base),
+ LED_MPP_MODE_MASK,
+ LED_MPP_MODE_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led mode reg\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ LED_MPP_EN_CTRL(led->base),
+ LED_MPP_EN_MASK,
+ LED_MPP_EN_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable reg\n");
+ return rc;
+ }
+ }
+
+ qpnp_dump_regs(led, mpp_debug_regs, ARRAY_SIZE(mpp_debug_regs));
+
+ return 0;
+}
+
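For reference, the sink-control value written above is (brightness * LED_MPP_SINK_MASK) / LED_FULL, so the 0-255 brightness range collapses onto the 3-bit sink select; with this integer scaling, brightness values 1 through 36 all land on the lowest level. A small stand-alone check of that arithmetic (plain user-space C, the sample brightness values are illustrative only):

#include <stdio.h>

#define LED_FULL          255	/* MPP_MAX_LEVEL above resolves to this */
#define LED_MPP_SINK_MASK 0x07

int main(void)
{
	int levels[] = { 0, 1, 36, 37, 128, 255 };
	unsigned int i;

	for (i = 0; i < sizeof(levels) / sizeof(levels[0]); i++) {
		/* same integer arithmetic as qpnp_mpp_set() */
		int sink = (levels[i] * LED_MPP_SINK_MASK) / LED_FULL;

		printf("brightness %3d -> sink select %d\n", levels[i], sink);
	}
	return 0;
}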
static int qpnp_flash_set(struct qpnp_led_data *led)
{
int rc;
@@ -750,6 +850,12 @@
dev_err(&led->spmi_dev->dev,
"RGB set brightness failed (%d)\n", rc);
break;
+ case QPNP_ID_LED_MPP:
+ rc = qpnp_mpp_set(led);
+ if (rc < 0)
+ dev_err(&led->spmi_dev->dev,
+ "MPP set brightness failed (%d)\n", rc);
+ break;
default:
dev_err(&led->spmi_dev->dev, "Invalid LED(%d)\n", led->id);
break;
@@ -772,6 +878,9 @@
case QPNP_ID_RGB_BLUE:
led->cdev.max_brightness = RGB_MAX_LEVEL;
break;
+ case QPNP_ID_LED_MPP:
+ led->cdev.max_brightness = MPP_MAX_LEVEL;
+ break;
default:
dev_err(&led->spmi_dev->dev, "Invalid LED(%d)\n", led->id);
return -EINVAL;
@@ -1211,6 +1320,8 @@
dev_err(&led->spmi_dev->dev,
"RGB initialize failed(%d)\n", rc);
break;
+ case QPNP_ID_LED_MPP:
+ break;
default:
dev_err(&led->spmi_dev->dev, "Invalid LED(%d)\n", led->id);
return -EINVAL;
@@ -1433,7 +1544,7 @@
rc = of_property_read_u32(node, "qcom,pwm-channel", &val);
if (!rc)
- led->rgb_cfg->pwm_channel = (u8) val;
+ led->rgb_cfg->pwm_channel = val;
else
return rc;
@@ -1495,22 +1606,22 @@
rc = of_property_read_u32(node, "qcom,start-idx", &val);
if (!rc) {
- led->rgb_cfg->lut_params.start_idx = (u8) val;
- led->rgb_cfg->duty_cycles->start_idx = (u8) val;
+ led->rgb_cfg->lut_params.start_idx = val;
+ led->rgb_cfg->duty_cycles->start_idx = val;
} else
return rc;
led->rgb_cfg->lut_params.lut_pause_hi = 0;
rc = of_property_read_u32(node, "qcom,pause-hi", &val);
if (!rc)
- led->rgb_cfg->lut_params.lut_pause_hi = (u8) val;
+ led->rgb_cfg->lut_params.lut_pause_hi = val;
else if (rc != -EINVAL)
return rc;
led->rgb_cfg->lut_params.lut_pause_lo = 0;
rc = of_property_read_u32(node, "qcom,pause-lo", &val);
if (!rc)
- led->rgb_cfg->lut_params.lut_pause_lo = (u8) val;
+ led->rgb_cfg->lut_params.lut_pause_lo = val;
else if (rc != -EINVAL)
return rc;
@@ -1518,14 +1629,14 @@
QPNP_LUT_RAMP_STEP_DEFAULT;
rc = of_property_read_u32(node, "qcom,ramp-step-ms", &val);
if (!rc)
- led->rgb_cfg->lut_params.ramp_step_ms = (u8) val;
+ led->rgb_cfg->lut_params.ramp_step_ms = val;
else if (rc != -EINVAL)
return rc;
led->rgb_cfg->lut_params.flags = QPNP_LED_PWM_FLAGS;
rc = of_property_read_u32(node, "qcom,lut-flags", &val);
if (!rc)
- led->rgb_cfg->lut_params.flags = (u8) val;
+ led->rgb_cfg->lut_params.flags = val;
else if (rc != -EINVAL)
return rc;
@@ -1536,6 +1647,43 @@
return 0;
}
+static int __devinit qpnp_get_config_mpp(struct qpnp_led_data *led,
+ struct device_node *node)
+{
+ int rc;
+ u32 val;
+
+ led->mpp_cfg = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct mpp_config_data), GFP_KERNEL);
+ if (!led->mpp_cfg) {
+ dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ led->mpp_cfg->current_setting = LED_MPP_CURRENT_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,current-setting", &val);
+ if (!rc)
+ led->mpp_cfg->current_setting = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->mpp_cfg->source_sel = LED_MPP_SOURCE_SEL_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,source-sel", &val);
+ if (!rc)
+ led->mpp_cfg->source_sel = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->mpp_cfg->mode_ctrl = LED_MPP_MODE_SINK;
+ rc = of_property_read_u32(node, "qcom,mode-ctrl", &val);
+ if (!rc)
+ led->mpp_cfg->mode_ctrl = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ return 0;
+}
+
static int __devinit qpnp_leds_probe(struct spmi_device *spmi)
{
struct qpnp_led_data *led, *led_array;
@@ -1638,6 +1786,13 @@
"Unable to read rgb config data\n");
goto fail_id_check;
}
+ } else if (strncmp(led_label, "mpp", sizeof("mpp")) == 0) {
+ rc = qpnp_get_config_mpp(led, temp);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read mpp config data\n");
+ goto fail_id_check;
+ }
} else {
dev_err(&led->spmi_dev->dev, "No LED matching label\n");
rc = -EINVAL;
diff --git a/drivers/media/dvb/dvb-core/demux.h b/drivers/media/dvb/dvb-core/demux.h
index 1c15a41..89500f9 100644
--- a/drivers/media/dvb/dvb-core/demux.h
+++ b/drivers/media/dvb/dvb-core/demux.h
@@ -66,13 +66,16 @@
DMX_OK = 0, /* Received Ok */
DMX_OK_PES_END, /* Received OK, data reached end of PES packet */
DMX_OK_PCR, /* Received OK, data with new PCR/STC pair */
+ DMX_OK_EOS, /* Received OK, reached End-of-Stream (EOS) */
+ DMX_OK_MARKER, /* Received OK, reached a data Marker */
DMX_LENGTH_ERROR, /* Incorrect length */
DMX_OVERRUN_ERROR, /* Receiver ring buffer overrun */
DMX_CRC_ERROR, /* Incorrect CRC */
DMX_FRAME_ERROR, /* Frame alignment error */
DMX_FIFO_ERROR, /* Receiver FIFO overrun */
DMX_MISSED_ERROR, /* Receiver missed packet */
- DMX_OK_DECODER_BUF /* Received OK, new ES data in decoder buffer */
+ DMX_OK_DECODER_BUF, /* Received OK, new ES data in decoder buffer */
+ DMX_OK_IDX /* Received OK, new index event */
} ;
@@ -87,7 +90,7 @@
enum dmx_success status;
/*
- * data_length may be 0 in case of DMX_OK_PES_END
+ * data_length may be 0 in case of DMX_OK_PES_END or DMX_OK_EOS
* and in non-DMX_OK_XXX events. In DMX_OK_PES_END,
* data_length is for data coming after the end of PES.
*/
@@ -124,7 +127,14 @@
u32 cont_err_counter;
u32 ts_packets_num;
u32 ts_dropped_bytes;
+ u64 stc;
} buf;
+
+ struct {
+ u64 id;
+ } marker;
+
+ struct dmx_index_event_info idx_event;
};
};
@@ -216,8 +226,10 @@
struct timespec timeout);
int (*start_filtering) (struct dmx_ts_feed* feed);
int (*stop_filtering) (struct dmx_ts_feed* feed);
- int (*set_indexing_params) (struct dmx_ts_feed *feed,
- struct dmx_indexing_video_params *params);
+ int (*set_video_codec) (struct dmx_ts_feed *feed,
+ enum dmx_video_codec video_codec);
+ int (*set_idx_params) (struct dmx_ts_feed *feed,
+ struct dmx_indexing_params *idx_params);
int (*get_decoder_buff_status)(
struct dmx_ts_feed *feed,
struct dmx_buffer_status *dmx_buffer_status);
@@ -232,6 +244,9 @@
enum dmx_tsp_format_t tsp_format);
int (*set_secure_mode)(struct dmx_ts_feed *feed,
struct dmx_secure_mode *sec_mode);
+ int (*oob_command) (struct dmx_ts_feed *feed,
+ struct dmx_oob_command *cmd);
+
};
/*--------------------------------------------------------------------------*/
@@ -280,6 +295,8 @@
u32 bytes_num);
int (*set_secure_mode)(struct dmx_section_feed *feed,
struct dmx_secure_mode *sec_mode);
+ int (*oob_command) (struct dmx_section_feed *feed,
+ struct dmx_oob_command *cmd);
};
/*--------------------------------------------------------------------------*/
@@ -413,6 +430,8 @@
int (*unmap_buffer) (struct dmx_demux *demux,
void *priv_handle);
+
+ int (*get_tsp_size) (struct dmx_demux *demux);
};
#endif /* #ifndef __DEMUX_H */
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index dce37e5..5e7a09e 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -485,23 +485,56 @@
return NULL;
}
-static int dvr_input_thread_entry(void *arg)
+static void dvb_dvr_oob_cmd(struct dmxdev *dmxdev, struct dmx_oob_command *cmd)
{
- struct dmxdev *dmxdev = arg;
- struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
- int ret;
- size_t todo;
- int bytes_written;
- size_t split;
+ int i;
+ struct dmxdev_filter *filter;
+ struct dmxdev_feed *feed;
- while (1) {
+ for (i = 0; i < dmxdev->filternum; i++) {
+ filter = &dmxdev->filter[i];
+ if (!filter || filter->state != DMXDEV_STATE_GO)
+ continue;
+
+ switch (filter->type) {
+ case DMXDEV_TYPE_SEC:
+ filter->feed.sec.feed->oob_command(
+ filter->feed.sec.feed, cmd);
+ break;
+ case DMXDEV_TYPE_PES:
+ feed = list_first_entry(&filter->feed.ts,
+ struct dmxdev_feed, next);
+ feed->ts->oob_command(feed->ts, cmd);
+ break;
+ case DMXDEV_TYPE_NONE:
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int dvb_dvr_feed_cmd(struct dmxdev *dmxdev, struct dvr_command *dvr_cmd)
+{
+ int ret = 0;
+ size_t todo;
+ int bytes_written = 0;
+ size_t split;
+ size_t tsp_size;
+ struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
+ todo = dvr_cmd->cmd.data_feed_count;
+
+ if (dmxdev->demux->get_tsp_size)
+ tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
+ else
+ tsp_size = 188;
+
+ while (todo >= tsp_size) {
/* wait for input */
ret = wait_event_interruptible(
src->queue,
- (!src->data) ||
- (dvb_ringbuffer_avail(src) > 188) ||
- (src->error != 0) ||
- dmxdev->dvr_in_exit);
+ (dvb_ringbuffer_avail(src) >= tsp_size) || (!src->data)
+ || (dmxdev->dvr_in_exit) || (src->error));
if (ret < 0)
break;
@@ -510,23 +543,21 @@
if (!src->data || dmxdev->exit || dmxdev->dvr_in_exit) {
spin_unlock(&dmxdev->dvr_in_lock);
+ ret = -ENODEV;
break;
}
if (src->error) {
spin_unlock(&dmxdev->dvr_in_lock);
wake_up_all(&src->queue);
+ ret = -EINVAL;
break;
}
dmxdev->dvr_processing_input = 1;
- ret = dvb_ringbuffer_avail(src);
- todo = ret;
-
- split = (src->pread + ret > src->size) ?
- src->size - src->pread :
- 0;
+ split = (src->pread + todo > src->size) ?
+ src->size - src->pread : 0;
/*
* In DVR PULL mode, write might block.
@@ -537,54 +568,128 @@
*/
if (split > 0) {
spin_unlock(&dmxdev->dvr_in_lock);
- bytes_written = dmxdev->demux->write(dmxdev->demux,
+ ret = dmxdev->demux->write(dmxdev->demux,
src->data + src->pread,
split);
- if (bytes_written < 0) {
+ if (ret < 0) {
printk(KERN_ERR "dmxdev: dvr write error %d\n",
- bytes_written);
+ ret);
continue;
}
- if (dmxdev->dvr_in_exit)
+ if (dmxdev->dvr_in_exit) {
+ ret = -ENODEV;
break;
+ }
spin_lock(&dmxdev->dvr_in_lock);
- todo -= bytes_written;
- DVB_RINGBUFFER_SKIP(src, bytes_written);
- if (bytes_written < split) {
+ todo -= ret;
+ bytes_written += ret;
+ DVB_RINGBUFFER_SKIP(src, ret);
+ if (ret < split) {
dmxdev->dvr_processing_input = 0;
spin_unlock(&dmxdev->dvr_in_lock);
wake_up_all(&src->queue);
continue;
}
-
}
spin_unlock(&dmxdev->dvr_in_lock);
- bytes_written = dmxdev->demux->write(dmxdev->demux,
- src->data + src->pread, todo);
+ ret = dmxdev->demux->write(dmxdev->demux,
+ src->data + src->pread, todo);
- if (bytes_written < 0) {
+ if (ret < 0) {
printk(KERN_ERR "dmxdev: dvr write error %d\n",
- bytes_written);
+ ret);
continue;
}
- if (dmxdev->dvr_in_exit)
+ if (dmxdev->dvr_in_exit) {
+ ret = -ENODEV;
break;
+ }
spin_lock(&dmxdev->dvr_in_lock);
- DVB_RINGBUFFER_SKIP(src, bytes_written);
+ todo -= ret;
+ bytes_written += ret;
+ DVB_RINGBUFFER_SKIP(src, ret);
dmxdev->dvr_processing_input = 0;
spin_unlock(&dmxdev->dvr_in_lock);
wake_up_all(&src->queue);
}
+ if (ret < 0)
+ return ret;
+
+ return bytes_written;
+}
+
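dvb_dvr_feed_cmd() above writes the queued bytes to the demux in at most two chunks when the data wraps past the end of the DVR input ring: 'split' covers the tail of the ring and the remainder starts again at offset 0. A stand-alone illustration of that arithmetic (the buffer sizes below are made-up example values):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t size  = 16 * 188;	/* ring buffer size */
	size_t pread = 14 * 188;	/* current read pointer */
	size_t todo  = 5 * 188;		/* bytes queued by a FEED command */

	/* same expression as in dvb_dvr_feed_cmd() */
	size_t split = (pread + todo > size) ? size - pread : 0;

	if (split) {
		printf("first write: %zu bytes (tail of the ring)\n", split);
		printf("second write: %zu bytes (from offset 0)\n",
		       todo - split);
	} else {
		printf("single write: %zu bytes\n", todo);
	}
	return 0;
}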
+static int dvr_input_thread_entry(void *arg)
+{
+ struct dmxdev *dmxdev = arg;
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ struct dvr_command dvr_cmd;
+ int leftover = 0;
+ int ret;
+
+ while (1) {
+ /* wait for input */
+ ret = wait_event_interruptible(
+ cmdbuf->queue,
+ (!cmdbuf->data) ||
+ (dvb_ringbuffer_avail(cmdbuf) >= sizeof(dvr_cmd)) ||
+ (dmxdev->dvr_in_exit));
+
+ if (ret < 0)
+ break;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ if (!cmdbuf->data || dmxdev->exit || dmxdev->dvr_in_exit) {
+ spin_unlock(&dmxdev->dvr_in_lock);
+ break;
+ }
+
+ dvb_ringbuffer_read(cmdbuf, (u8 *)&dvr_cmd, sizeof(dvr_cmd));
+
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ if (dvr_cmd.type == DVR_DATA_FEED_CMD) {
+ dvr_cmd.cmd.data_feed_count += leftover;
+
+ ret = dvb_dvr_feed_cmd(dmxdev, &dvr_cmd);
+ if (ret < 0) {
+ printk(KERN_ERR
+ "%s: DVR data feed failed, ret=%d\n",
+ __func__, ret);
+ continue;
+ }
+
+ leftover = dvr_cmd.cmd.data_feed_count - ret;
+ } else {
+ /*
+ * For EOS, try to process leftover data in the input
+ * buffer.
+ */
+ if (dvr_cmd.cmd.oobcmd.type == DMX_OOB_CMD_EOS) {
+ struct dvr_command feed_cmd;
+
+ feed_cmd.type = DVR_DATA_FEED_CMD;
+ feed_cmd.cmd.data_feed_count =
+ dvb_ringbuffer_avail(
+ &dmxdev->dvr_input_buffer);
+
+ dvb_dvr_feed_cmd(dmxdev, &feed_cmd);
+ }
+
+ dvb_dvr_oob_cmd(dmxdev, &dvr_cmd.cmd.oobcmd);
+ }
+ }
+
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
schedule();
@@ -675,6 +780,15 @@
dmxdev->demux->dvr_input.priv_handle = NULL;
dmxdev->demux->dvr_input.ringbuff = &dmxdev->dvr_input_buffer;
+ mem = vmalloc(DVR_CMDS_BUFFER_SIZE);
+ if (!mem) {
+ vfree(dmxdev->dvr_input_buffer.data);
+ dmxdev->dvr_input_buffer.data = NULL;
+ mutex_unlock(&dmxdev->mutex);
+ return -ENOMEM;
+ }
+ dvb_ringbuffer_init(&dmxdev->dvr_cmd_buffer, mem,
+ DVR_CMDS_BUFFER_SIZE);
dvbdev->writers--;
dmxdev->dvr_input_thread =
@@ -684,6 +798,10 @@
"dvr_input");
if (IS_ERR(dmxdev->dvr_input_thread)) {
+ vfree(dmxdev->dvr_input_buffer.data);
+ vfree(dmxdev->dvr_cmd_buffer.data);
+ dmxdev->dvr_input_buffer.data = NULL;
+ dmxdev->dvr_cmd_buffer.data = NULL;
mutex_unlock(&dmxdev->mutex);
return -ENOMEM;
}
@@ -725,7 +843,8 @@
int i;
dmxdev->dvr_in_exit = 1;
- wake_up_all(&dmxdev->dvr_input_buffer.queue);
+
+ wake_up_all(&dmxdev->dvr_cmd_buffer.queue);
/*
* There might be dmx filters reading now from DVR
@@ -776,6 +895,15 @@
dmxdev->demux->dvr_input.priv_handle);
dmxdev->demux->dvr_input.priv_handle = NULL;
}
+
+ if (dmxdev->dvr_cmd_buffer.data) {
+ void *mem = dmxdev->dvr_cmd_buffer.data;
+ mb();
+ spin_lock_irq(&dmxdev->dvr_in_lock);
+ dmxdev->dvr_cmd_buffer.data = NULL;
+ spin_unlock_irq(&dmxdev->dvr_in_lock);
+ vfree(mem);
+ }
}
/* TODO */
dvbdev->users--;
@@ -850,12 +978,57 @@
return ret;
}
+static void dvb_dvr_queue_data_feed(struct dmxdev *dmxdev, size_t count)
+{
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ struct dvr_command *dvr_cmd;
+ int last_dvr_cmd;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ /* Peek at the last DVR command queued, try to coalesce FEED commands */
+ if (dvb_ringbuffer_avail(cmdbuf) >= sizeof(*dvr_cmd)) {
+ last_dvr_cmd = cmdbuf->pwrite - sizeof(*dvr_cmd);
+ if (last_dvr_cmd < 0)
+ last_dvr_cmd += cmdbuf->size;
+
+ dvr_cmd = (struct dvr_command *)&cmdbuf->data[last_dvr_cmd];
+ if (dvr_cmd->type == DVR_DATA_FEED_CMD) {
+ dvr_cmd->cmd.data_feed_count += count;
+ spin_unlock(&dmxdev->dvr_in_lock);
+ return;
+ }
+ }
+
+ /*
+ * We assume the command buffer is large enough that overflow should not
+ * happen. An overflow of the command buffer means data previously written
+ * to the input buffer is orphaned - it has no matching FEED command.
+ * Issue a warning if this ever happens.
+ * Orphaned data might still be processed if EOS is issued.
+ */
+ if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd)) {
+ printk(KERN_ERR "%s: DVR command buffer overflow\n", __func__);
+ spin_unlock(&dmxdev->dvr_in_lock);
+ return;
+ }
+
+ dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
+ dvr_cmd->type = DVR_DATA_FEED_CMD;
+ dvr_cmd->cmd.data_feed_count = count;
+ DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ wake_up_all(&cmdbuf->queue);
+}
+
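dvb_dvr_queue_data_feed() above coalesces consecutive FEED commands: if the input thread has not yet consumed the last queued command, only its byte count grows, so the command ring fills far more slowly than the data ring. A minimal user-space model of that behaviour (the array-backed queue below is a stand-in for the real ring buffer, not kernel code):

#include <stdio.h>
#include <stddef.h>

enum cmd_type { DATA_FEED, OOB };

struct cmd {
	enum cmd_type type;
	size_t count;
};

static struct cmd queue[8];
static int n;

static void queue_data_feed(size_t count)
{
	/* peek at the last queued command, as dvb_dvr_queue_data_feed() does */
	if (n > 0 && queue[n - 1].type == DATA_FEED) {
		queue[n - 1].count += count;	/* coalesce */
		return;
	}
	if (n == (int)(sizeof(queue) / sizeof(queue[0])))
		return;				/* models the overflow warning path */
	queue[n].type = DATA_FEED;
	queue[n].count = count;
	n++;
}

int main(void)
{
	queue_data_feed(10 * 188);
	queue_data_feed(20 * 188);	/* merged into the first command */
	printf("commands queued: %d, first count: %zu bytes\n",
	       n, queue[0].count);	/* 1 command, 5640 bytes */
	return 0;
}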
static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
int ret;
size_t todo;
ssize_t free_space;
@@ -864,7 +1037,7 @@
return -EOPNOTSUPP;
if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
- (!src->data))
+ (!src->data) || (!cmdbuf->data))
return -EINVAL;
if ((file->f_flags & O_NONBLOCK) &&
@@ -874,10 +1047,9 @@
ret = 0;
for (todo = count; todo > 0; todo -= ret) {
ret = wait_event_interruptible(src->queue,
- (!src->data) ||
- (dvb_ringbuffer_free(src)) ||
- (src->error != 0) ||
- (dmxdev->dvr_in_exit));
+ (dvb_ringbuffer_free(src)) ||
+ (!src->data) || (!cmdbuf->data) ||
+ (src->error != 0) || (dmxdev->dvr_in_exit));
if (ret < 0)
return ret;
@@ -885,7 +1057,7 @@
if (mutex_lock_interruptible(&dmxdev->mutex))
return -ERESTARTSYS;
- if (!src->data) {
+ if ((!src->data) || (!cmdbuf->data)) {
mutex_unlock(&dmxdev->mutex);
return 0;
}
@@ -917,8 +1089,9 @@
buf += ret;
+ dvb_dvr_queue_data_feed(dmxdev, ret);
+
mutex_unlock(&dmxdev->mutex);
- wake_up_all(&src->queue);
}
return (count - todo) ? (count - todo) : ret;
@@ -968,6 +1141,34 @@
return res;
}
+/*
+ * dvb_dvr_push_oob_cmd
+ *
+ * Note: this function assumes dmxdev->mutex is held, so the command buffer
+ * cannot be released during its operation.
+ */
+static int dvb_dvr_push_oob_cmd(struct dmxdev *dmxdev, unsigned int f_flags,
+ struct dmx_oob_command *cmd)
+{
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ struct dvr_command *dvr_cmd;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY ||
+ dmxdev->source < DMX_SOURCE_DVR0)
+ return -EPERM;
+
+ if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd))
+ return -ENOMEM;
+
+ dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
+ dvr_cmd->type = DVR_OOB_CMD;
+ dvr_cmd->cmd.oobcmd = *cmd;
+ DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
+ wake_up_all(&cmdbuf->queue);
+
+ return 0;
+}
+
static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
unsigned int f_flags,
unsigned long size)
@@ -1245,9 +1446,19 @@
return 0;
}
+/*
+ * dvb_dvr_feed_data - Notify new data in DVR input buffer
+ *
+ * @dmxdev - demux device instance
+ * @f_flags - demux device file flag (access mode)
+ * @bytes_count - how many bytes were written to the input buffer
+ *
+ * Note: this function assumes dmxdev->mutex is held, so the buffer cannot
+ * be released during its operation.
+ */
static int dvb_dvr_feed_data(struct dmxdev *dmxdev,
- unsigned int f_flags,
- u32 bytes_count)
+ unsigned int f_flags,
+ u32 bytes_count)
{
ssize_t free_space;
struct dvb_ringbuffer *buffer = &dmxdev->dvr_input_buffer;
@@ -1263,10 +1474,9 @@
if (bytes_count > free_space)
return -EINVAL;
- buffer->pwrite =
- (buffer->pwrite + bytes_count) % buffer->size;
+ DVB_RINGBUFFER_PUSH(buffer, bytes_count);
- wake_up_all(&buffer->queue);
+ dvb_dvr_queue_data_feed(dmxdev, bytes_count);
return 0;
}
@@ -1504,8 +1714,44 @@
return 0;
}
-static int dvb_dmxdev_ts_fullness_callback(
- struct dmx_ts_feed *filter,
+static int dvb_dmxdev_set_indexing_params(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_indexing_params *idx_params)
+{
+ int found_pid;
+ struct dmxdev_feed *feed;
+ struct dmxdev_feed *ts_feed = NULL;
+
+ if (!idx_params ||
+ (dmxdevfilter->state < DMXDEV_STATE_SET) ||
+ (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+ ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
+ (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
+ return -EINVAL;
+
+ if (idx_params->enable && !idx_params->types)
+ return -EINVAL;
+
+ found_pid = 0;
+ list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
+ if (feed->pid == idx_params->pid) {
+ found_pid = 1;
+ ts_feed = feed;
+ ts_feed->idx_params = *idx_params;
+ if ((dmxdevfilter->state == DMXDEV_STATE_GO) &&
+ ts_feed->ts->set_idx_params)
+ ts_feed->ts->set_idx_params(
+ ts_feed->ts, idx_params);
+ break;
+ }
+ }
+
+ if (!found_pid)
+ return -EINVAL;
+
+ return 0;
+}
+
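For context, dvb_dmxdev_set_indexing_params() above is reached from the new DMX_SET_INDEXING_PARAMS ioctl. A hedged user-space sketch of how a recording filter might request index events; it assumes the dmx.h UAPI that accompanies this patch set (struct dmx_indexing_params with pid/enable/types and the DMX_IDX_* flags) and a filter already set up with DMX_OUT_TSDEMUX_TAP or DMX_OUT_TS_TAP for the video PID:

/* Sketch only - assumes the dmx.h UAPI shipped with this patch set. */
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

static int enable_video_indexing(int demux_fd, __u16 video_pid)
{
	struct dmx_indexing_params idx = {
		.pid    = video_pid,
		.enable = 1,
		/* report PUSI and MPEG-2 I-frame boundaries */
		.types  = DMX_IDX_PUSI | DMX_IDX_MPEG_I_FRAME_START |
			  DMX_IDX_MPEG_I_FRAME_END,
	};

	return ioctl(demux_fd, DMX_SET_INDEXING_PARAMS, &idx);
}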
+static int dvb_dmxdev_ts_fullness_callback(struct dmx_ts_feed *filter,
int required_space)
{
struct dmxdev_filter *dmxdevfilter = filter->priv;
@@ -1825,8 +2071,10 @@
wake_up_all(&dmxdevfilter->buffer.queue);
return 0;
}
+
spin_lock(&dmxdevfilter->dev->lock);
- if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+ if (dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
@@ -1896,13 +2144,15 @@
int ret;
spin_lock(&dmxdevfilter->dev->lock);
- if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER ||
+ dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
- if (dmxdevfilter->params.pes.output == DMX_OUT_TAP
- || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {
+ if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
buffer = &dmxdevfilter->buffer;
events = &dmxdevfilter->events;
} else {
@@ -1916,11 +2166,6 @@
return 0;
}
- if (dmxdevfilter->state != DMXDEV_STATE_GO) {
- spin_unlock(&dmxdevfilter->dev->lock);
- return 0;
- }
-
if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
if ((success == DMX_OK) &&
(!events->current_event_data_size)) {
@@ -2010,7 +2255,8 @@
spin_lock(&dmxdevfilter->dev->lock);
- if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+ if (dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
@@ -2023,6 +2269,17 @@
spin_unlock(&dmxdevfilter->dev->lock);
wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (dmx_data_ready->status == DMX_OK_EOS) {
+ event.type = DMX_EVENT_EOS;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (dmx_data_ready->status == DMX_OK_MARKER) {
+ event.type = DMX_EVENT_MARKER;
+ event.params.marker.id = dmx_data_ready->marker.id;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
} else {
spin_unlock(&dmxdevfilter->dev->lock);
}
@@ -2076,7 +2333,8 @@
spin_lock(&dmxdevfilter->dev->lock);
- if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+ if (dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
@@ -2089,6 +2347,27 @@
events = &dmxdevfilter->dev->dvr_output_events;
}
+ if (dmx_data_ready->status == DMX_OK_EOS) {
+ dmxdevfilter->eos_state = 1;
+ dprintk("dmxdev: DMX_OK_EOS - entering EOS state\n");
+ event.type = DMX_EVENT_EOS;
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_MARKER) {
+ dprintk("dmxdev: DMX_OK_MARKER - id=%llu\n",
+ dmx_data_ready->marker.id);
+ event.type = DMX_EVENT_MARKER;
+ event.params.marker.id = dmx_data_ready->marker.id;
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ return 0;
+ }
+
if (dmx_data_ready->status == DMX_OK_PCR) {
dprintk("dmxdev: event callback DMX_OK_PCR\n");
event.type = DMX_EVENT_NEW_PCR;
@@ -2106,6 +2385,17 @@
return 0;
}
+ if (dmx_data_ready->status == DMX_OK_IDX) {
+ dprintk("dmxdev: event callback DMX_OK_IDX\n");
+ event.type = DMX_EVENT_NEW_INDEX_ENTRY;
+ event.params.index = dmx_data_ready->idx_event;
+
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
if (dmx_data_ready->status == DMX_OK_DECODER_BUF) {
event.type = DMX_EVENT_NEW_ES_DATA;
event.params.es_data.buf_handle = dmx_data_ready->buf.handle;
@@ -2116,6 +2406,7 @@
event.params.es_data.pts = dmx_data_ready->buf.pts;
event.params.es_data.dts_valid = dmx_data_ready->buf.dts_exists;
event.params.es_data.dts = dmx_data_ready->buf.dts;
+ event.params.es_data.stc = dmx_data_ready->buf.stc;
event.params.es_data.transport_error_indicator_counter =
dmx_data_ready->buf.tei_counter;
event.params.es_data.continuity_error_counter =
@@ -2130,8 +2421,18 @@
return 0;
}
- if ((dmxdevfilter->params.pes.output == DMX_OUT_DECODER) ||
- (buffer->error)) {
+ if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
+ if (DMX_OVERRUN_ERROR == dmx_data_ready->status) {
+ dprintk("dmxdev: buffer overflow\n");
+ event.type = DMX_EVENT_BUFFER_OVERFLOW;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ }
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (buffer->error) {
spin_unlock(&dmxdevfilter->dev->lock);
wake_up_all(&buffer->queue);
return 0;
@@ -2453,15 +2754,13 @@
if (tsfeed->set_secure_mode)
tsfeed->set_secure_mode(tsfeed, &feed->sec_mode);
- /* Support indexing for video PES */
if ((para->pes_type == DMX_PES_VIDEO0) ||
(para->pes_type == DMX_PES_VIDEO1) ||
(para->pes_type == DMX_PES_VIDEO2) ||
(para->pes_type == DMX_PES_VIDEO3)) {
-
- if (tsfeed->set_indexing_params) {
- ret = tsfeed->set_indexing_params(tsfeed,
- ¶->video_params);
+ if (tsfeed->set_video_codec) {
+ ret = tsfeed->set_video_codec(tsfeed,
+ para->video_codec);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux,
@@ -2471,6 +2770,12 @@
}
}
+ if ((filter->params.pes.output == DMX_OUT_TS_TAP) ||
+ (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP))
+ if (tsfeed->set_idx_params)
+ tsfeed->set_idx_params(
+ tsfeed, &feed->idx_params);
+
ret = tsfeed->start_filtering(tsfeed);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
@@ -2505,6 +2810,8 @@
spin_unlock_irq(&filter->dev->lock);
}
+ filter->eos_state = 0;
+
spin_lock_irq(&filter->dev->lock);
dvb_dmxdev_flush_output(&filter->buffer, &filter->events);
spin_unlock_irq(&filter->dev->lock);
@@ -2535,7 +2842,7 @@
secfeed,
dvb_dmxdev_section_callback);
if (ret < 0) {
- printk("DVB (%s): could not alloc feed\n",
+ printk(KERN_ERR "DVB (%s): could not alloc feed\n",
__func__);
return ret;
}
@@ -2556,7 +2863,7 @@
ret = (*secfeed)->set(*secfeed, para->pid, 32768,
(para->flags & DMX_CHECK_CRC) ? 1 : 0);
if (ret < 0) {
- printk("DVB (%s): could not set feed\n",
+ printk(KERN_ERR "DVB (%s): could not set feed\n",
__func__);
dvb_dmxdev_feed_restart(filter);
return ret;
@@ -2779,6 +3086,7 @@
feed->pid = pid;
feed->sec_mode.is_secured = 0;
+ feed->idx_params.enable = 0;
list_add(&feed->next, &filter->feed.ts);
if (filter->state >= DMXDEV_STATE_GO)
@@ -2903,23 +3211,6 @@
if (params->pes_type > DMX_PES_OTHER || params->pes_type < 0)
return -EINVAL;
- if (params->flags & DMX_ENABLE_INDEXING) {
- if (!(dmxdev->capabilities & DMXDEV_CAP_INDEXING))
- return -EINVAL;
-
- /* can do indexing only on video PES */
- if ((params->pes_type != DMX_PES_VIDEO0) &&
- (params->pes_type != DMX_PES_VIDEO1) &&
- (params->pes_type != DMX_PES_VIDEO2) &&
- (params->pes_type != DMX_PES_VIDEO3))
- return -EINVAL;
-
- /* can do indexing only when recording */
- if ((params->output != DMX_OUT_TS_TAP) &&
- (params->output != DMX_OUT_TSDEMUX_TAP))
- return -EINVAL;
- }
-
dmxdevfilter->type = DMXDEV_TYPE_PES;
memcpy(&dmxdevfilter->params, params,
sizeof(struct dmx_pes_filter_params));
@@ -3033,6 +3324,12 @@
if (mutex_lock_interruptible(&dmxdevfilter->mutex))
return -ERESTARTSYS;
+ if (dmxdevfilter->eos_state &&
+ dvb_ringbuffer_empty(&dmxdevfilter->buffer)) {
+ mutex_unlock(&dmxdevfilter->mutex);
+ return 0;
+ }
+
if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
else
@@ -3319,6 +3616,15 @@
mutex_unlock(&dmxdevfilter->mutex);
break;
+ case DMX_SET_INDEXING_PARAMS:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_indexing_params(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -3494,6 +3800,10 @@
ret = dvb_dvr_get_event(dmxdev, file->f_flags, parg);
break;
+ case DMX_PUSH_OOB_COMMAND:
+ ret = dvb_dvr_push_oob_cmd(dmxdev, file->f_flags, parg);
+ break;
+
default:
ret = -EINVAL;
break;
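A companion user-space sketch for the new DMX_PUSH_OOB_COMMAND ioctl handled above: once the last chunk of a played-back stream has been written to the DVR device, pushing an EOS command flushes any leftover input data and makes active filters report DMX_EVENT_EOS. Again this assumes the dmx.h UAPI shipped with this patch set:

/* Sketch only - assumes the dmx.h UAPI shipped with this patch set. */
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

static int signal_end_of_stream(int dvr_fd)
{
	struct dmx_oob_command cmd = {
		.type = DMX_OOB_CMD_EOS,
	};

	/* dvr_fd must be open for writing (DVR input/playback mode) */
	return ioctl(dvr_fd, DMX_PUSH_OOB_COMMAND, &cmd);
}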
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index 1443de5..2ed99ae 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -59,6 +59,7 @@
struct dmxdev_feed {
u16 pid;
struct dmx_secure_mode sec_mode;
+ struct dmx_indexing_params idx_params;
struct dmx_ts_feed *ts;
struct list_head next;
};
@@ -144,6 +145,9 @@
enum dmx_tsp_format_t dmx_tsp_format;
u32 rec_chunk_size;
+ /* End-of-stream indication has been received */
+ int eos_state;
+
/* only for sections */
struct timer_list timer;
int todo;
@@ -186,6 +190,8 @@
struct dvb_ringbuffer dvr_input_buffer;
enum dmx_buffer_mode dvr_input_buffer_mode;
struct task_struct *dvr_input_thread;
+ /* DVR commands (data feed / OOB command) queue */
+ struct dvb_ringbuffer dvr_cmd_buffer;
#define DVR_BUFFER_SIZE (10*188*1024)
@@ -194,6 +200,21 @@
spinlock_t dvr_in_lock;
};
+enum dvr_cmd {
+ DVR_DATA_FEED_CMD,
+ DVR_OOB_CMD
+};
+
+struct dvr_command {
+ enum dvr_cmd type;
+ union {
+ struct dmx_oob_command oobcmd;
+ size_t data_feed_count;
+ } cmd;
+};
+
+#define DVR_CMDS_BUFFER_SIZE (sizeof(struct dvr_command)*500)
+
int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *);
void dvb_dmxdev_release(struct dmxdev *dmxdev);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 0fef315..3f3d222 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -65,6 +65,91 @@
printk(x); \
} while (0)
+static const struct dvb_dmx_video_patterns mpeg2_seq_hdr = {
+ {0x00, 0x00, 0x01, 0xB3},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_MPEG_SEQ_HEADER
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_gop = {
+ {0x00, 0x00, 0x01, 0xB8},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_MPEG_GOP
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_iframe = {
+ {0x00, 0x00, 0x01, 0x00, 0x00, 0x08},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+ 6,
+ DMX_IDX_MPEG_I_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_pframe = {
+ {0x00, 0x00, 0x01, 0x00, 0x00, 0x10},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+ 6,
+ DMX_IDX_MPEG_P_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_bframe = {
+ {0x00, 0x00, 0x01, 0x00, 0x00, 0x18},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+ 6,
+ DMX_IDX_MPEG_B_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns h264_sps = {
+ {0x00, 0x00, 0x01, 0x07},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_SPS
+};
+
+static const struct dvb_dmx_video_patterns h264_pps = {
+ {0x00, 0x00, 0x01, 0x08},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_PPS
+};
+
+static const struct dvb_dmx_video_patterns h264_idr = {
+ {0x00, 0x00, 0x01, 0x05, 0x80},
+ {0xFF, 0xFF, 0xFF, 0x1F, 0x80},
+ 5,
+ DMX_IDX_H264_IDR_START
+};
+
+static const struct dvb_dmx_video_patterns h264_non_idr = {
+ {0x00, 0x00, 0x01, 0x01, 0x80},
+ {0xFF, 0xFF, 0xFF, 0x1F, 0x80},
+ 5,
+ DMX_IDX_H264_NON_IDR_START
+};
+
+static const struct dvb_dmx_video_patterns vc1_seq_hdr = {
+ {0x00, 0x00, 0x01, 0x0F},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_VC1_SEQ_HEADER
+};
+
+static const struct dvb_dmx_video_patterns vc1_entry_point = {
+ {0x00, 0x00, 0x01, 0x0E},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_VC1_ENTRY_POINT
+};
+
+static const struct dvb_dmx_video_patterns vc1_frame = {
+ {0x00, 0x00, 0x01, 0x0D},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_VC1_FRAME_START
+};
+
+
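Each table above describes a start code as a (pattern, mask, size) triple; a byte sequence matches when it equals the pattern under the mask. A stand-alone check showing how the h264_idr entry classifies an example IDR NAL (the ES bytes are illustrative, not taken from the patch):

#include <stdio.h>
#include <stddef.h>

/* Stand-alone demo of the masked compare the demux applies to ES payloads. */
static int masked_match(const unsigned char *data,
			const unsigned char *pattern,
			const unsigned char *mask, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if ((data[i] & mask[i]) != (pattern[i] & mask[i]))
			return 0;
	return 1;
}

int main(void)
{
	/* same values as the h264_idr entry above */
	const unsigned char pattern[] = {0x00, 0x00, 0x01, 0x05, 0x80};
	const unsigned char mask[]    = {0xFF, 0xFF, 0xFF, 0x1F, 0x80};
	/* example ES bytes: start code, NAL header 0x65 (IDR slice), data */
	const unsigned char es[]      = {0x00, 0x00, 0x01, 0x65, 0x88};

	printf("IDR start %s\n",
	       masked_match(es, pattern, mask, sizeof(pattern)) ?
	       "matched" : "not matched");
	return 0;
}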
/******************************************************************************
* static inlined helper functions
******************************************************************************/
@@ -122,6 +207,236 @@
* Software filter functions
******************************************************************************/
+/*
+ * Check if two patterns are identical, taking mask into consideration.
+ * @pattern1: the first byte pattern to compare.
+ * @pattern2: the second byte pattern to compare.
+ * @mask: the bit mask to use.
+ * @pattern_size: the length of both patterns and the mask, in bytes.
+ *
+ * Return: 1 if patterns match, 0 otherwise.
+ */
+static inline int dvb_dmx_patterns_match(const u8 *pattern1, const u8 *pattern2,
+ const u8 *mask, size_t pattern_size)
+{
+ int i;
+
+ /*
+ * Assumption: it is OK to access pattern1, pattern2 and mask.
+ * This function performs no sanity checks to keep things fast.
+ */
+
+ for (i = 0; i < pattern_size; i++)
+ if ((pattern1[i] & mask[i]) != (pattern2[i] & mask[i]))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * dvb_dmx_video_pattern_search -
+ * search for framing patterns in a given buffer.
+ *
+ * Optimized version: first search for a common substring, e.g. 0x00 0x00 0x01.
+ * If this string is found, go over all the given patterns (all must start
+ * with this string) and search for their ending in the buffer.
+ *
+ * Assumption: the patterns we look for do not spread over more than two
+ * buffers.
+ *
+ * @patterns: the full patterns information to look for.
+ * @patterns_num: the number of patterns to look for.
+ * @buf: the buffer to search.
+ * @buf_size: the size of the buffer to search; the entire buffer is searched.
+ * @prefix_size_masks: a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started at the last buffer.
+ * Updated in this function for use in the next lookup.
+ * @results: lookup results (offset, type, used_prefix_size) per found pattern,
+ * up to DVB_DMX_MAX_FOUND_PATTERNS.
+ *
+ * Return:
+ * Number of patterns found (up to DVB_DMX_MAX_FOUND_PATTERNS).
+ * 0 if pattern was not found.
+ * error value on failure.
+ */
+int dvb_dmx_video_pattern_search(
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+ int patterns_num,
+ const u8 *buf,
+ size_t buf_size,
+ struct dvb_dmx_video_prefix_size_masks *prefix_size_masks,
+ struct dvb_dmx_video_patterns_results *results)
+{
+ int i, j;
+ unsigned int current_size;
+ u32 prefix;
+ int found = 0;
+ int start_offset = 0;
+ /* the starting common substring to look for */
+ u8 string[] = {0x00, 0x00, 0x01};
+ /* the mask for the starting string */
+ u8 string_mask[] = {0xFF, 0xFF, 0xFF};
+ /* the size of the starting string (in bytes) */
+ size_t string_size = 3;
+
+ if ((patterns == NULL) || (patterns_num <= 0) || (buf == NULL))
+ return -EINVAL;
+
+ memset(results, 0, sizeof(struct dvb_dmx_video_patterns_results));
+
+ /*
+ * handle prefix - disregard string, simply check all patterns,
+ * looking for a matching suffix at the very beginning of the buffer.
+ */
+ for (j = 0; (j < patterns_num) && !found; j++) {
+ prefix = prefix_size_masks->size_mask[j];
+ current_size = 32;
+ while (prefix) {
+ if (prefix & (0x1 << (current_size - 1))) {
+ /*
+ * check that we don't look further
+ * than buf_size boundary
+ */
+ if ((int)(patterns[j]->size - current_size) >
+ buf_size)
+ break;
+
+ if (dvb_dmx_patterns_match(
+ (patterns[j]->pattern + current_size),
+ buf, (patterns[j]->mask + current_size),
+ (patterns[j]->size - current_size))) {
+
+ /*
+ * pattern found using prefix at the
+ * very beginning of the buffer, so
+ * offset is 0, but we already zeroed
+ * everything in the beginning of the
+ * function. that's why the next line
+ * is commented.
+ */
+ /* results->info[found].offset = 0; */
+ results->info[found].type =
+ patterns[j]->type;
+ results->info[found].used_prefix_size =
+ current_size;
+ found++;
+ /*
+ * save offset to start looking from
+ * in the buffer, to avoid reusing the
+ * data of a pattern we already found.
+ */
+ start_offset = (patterns[j]->size -
+ current_size);
+
+ if (found >= DVB_DMX_MAX_FOUND_PATTERNS)
+ goto next_prefix_lookup;
+ /*
+ * we don't want to search for the same
+ * pattern with several possible prefix
+ * sizes if we have already found it,
+ * so we break from the inner loop.
+ * since we incremented 'found', we
+ * will not search for additional
+ * patterns using a prefix - that would
+ * imply ambiguous patterns where one
+ * pattern can be included in another.
+ * the for loop will exit.
+ */
+ break;
+ }
+ }
+ prefix &= ~(0x1 << (current_size - 1));
+ current_size--;
+ }
+ }
+
+ /*
+ * Search buffer for entire pattern, starting with the string.
+ * Note the outer for loop does not execute if buf_size is
+ * smaller than string_size (the cast to int is required, since
+ * size_t is unsigned).
+ */
+ for (i = start_offset; i < (int)(buf_size - string_size + 1); i++) {
+ if (dvb_dmx_patterns_match(string, (buf + i), string_mask,
+ string_size)) {
+ /* now search for patterns: */
+ for (j = 0; j < patterns_num; j++) {
+ /* avoid overflow to next buffer */
+ if ((i + patterns[j]->size) > buf_size)
+ continue;
+
+ if (dvb_dmx_patterns_match(
+ (patterns[j]->pattern + string_size),
+ (buf + i + string_size),
+ (patterns[j]->mask + string_size),
+ (patterns[j]->size - string_size))) {
+
+ results->info[found].offset = i;
+ results->info[found].type =
+ patterns[j]->type;
+ /*
+ * save offset to start next prefix
+ * lookup, to avoid reusing the data
+ * of any pattern we already found.
+ */
+ if ((i + patterns[j]->size) >
+ start_offset)
+ start_offset = (i +
+ patterns[j]->size);
+ /*
+ * did not use a prefix to find this
+ * pattern, but we zeroed everything
+ * in the beginning of the function.
+ * So no need to zero used_prefix_size
+ * for results->info[found]
+ */
+
+ found++;
+ if (found >= DVB_DMX_MAX_FOUND_PATTERNS)
+ goto next_prefix_lookup;
+ /*
+ * theoretically we don't have to break
+ * here, but we don't want to search
+ * for the other matching patterns at
+ * the very same place in the
+ * buffer. That would mean the
+ * (pattern & mask) combinations are
+ * not unique. So we break from inner
+ * loop and move on to the next place
+ * in the buffer.
+ */
+ break;
+ }
+ }
+ }
+ }
+
+next_prefix_lookup:
+ /* check for possible prefix sizes for the next buffer */
+ for (j = 0; j < patterns_num; j++) {
+ prefix_size_masks->size_mask[j] = 0;
+ for (i = 1; i < patterns[j]->size; i++) {
+ /*
+ * avoid looking outside of the buffer
+ * or reusing previously used data.
+ */
+ if (i > (buf_size - start_offset))
+ break;
+
+ if (dvb_dmx_patterns_match(patterns[j]->pattern,
+ (buf + buf_size - i),
+ patterns[j]->mask, i)) {
+ prefix_size_masks->size_mask[j] |=
+ (1 << (i - 1));
+ }
+ }
+ }
+
+ return found;
+}
+EXPORT_SYMBOL(dvb_dmx_video_pattern_search);
+
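To see how prefix_size_masks carries state across buffers: if a buffer ends with 0x00 0x00 0x01, the next_prefix_lookup pass records a possible 3-byte prefix for each pattern, and the following call can complete e.g. the MPEG-2 sequence header from its remaining byte with used_prefix_size = 3 and offset 0. A stand-alone sketch of that suffix scan, reusing the same masked compare as the previous example (the trailing buffer bytes are hypothetical):

#include <stdio.h>
#include <stddef.h>

static int masked_match(const unsigned char *a, const unsigned char *b,
			const unsigned char *mask, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if ((a[i] & mask[i]) != (b[i] & mask[i]))
			return 0;
	return 1;
}

int main(void)
{
	/* mpeg2_seq_hdr from the tables above */
	const unsigned char pattern[] = {0x00, 0x00, 0x01, 0xB3};
	const unsigned char mask[]    = {0xFF, 0xFF, 0xFF, 0xFF};
	/* hypothetical buffer that ends mid start-code */
	const unsigned char buf[]     = {0x12, 0x34, 0x00, 0x00, 0x01};
	size_t buf_size = sizeof(buf);
	unsigned int size_mask = 0;
	size_t i;

	/* same idea as the next_prefix_lookup pass: do the first i bytes of
	 * the pattern match the last i bytes of the buffer? */
	for (i = 1; i < sizeof(pattern); i++)
		if (masked_match(pattern, buf + buf_size - i, mask, i))
			size_mask |= 1u << (i - 1);

	/* bit 2 set -> a 3-byte prefix may continue in the next buffer */
	printf("prefix size mask: 0x%x\n", size_mask);
	return 0;
}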
static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
const u8 *buf)
{
@@ -376,6 +691,10 @@
else
ccok = ((feed->cc + 1) & 0x0f) == cc;
+ /* discard TS packets holding sections with TEI bit set */
+ if (buf[1] & 0x80)
+ return -EINVAL;
+
feed->first_cc = 0;
feed->cc = cc;
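The section filter above and dvb_dmx_index() further below key off raw TS header bits: TEI is buf[1] & 0x80, PUSI is buf[1] & 0x40, the adaptation-field flag is buf[3] & 0x20 and RAI is buf[5] & 0x40 when an adaptation field is present. A stand-alone decode of those fields from one example packet header (the header bytes are illustrative):

#include <stdio.h>

int main(void)
{
	/* sync, PUSI set, PID 0x0100, AF + payload, AF len 7, RAI set */
	const unsigned char ts[] = {0x47, 0x41, 0x00, 0x30, 0x07, 0x40};

	int tei  = ts[1] & 0x80;			/* transport error */
	int pusi = ts[1] & 0x40;			/* payload unit start */
	int pid  = ((ts[1] & 0x1F) << 8) | ts[2];	/* 13-bit PID */
	int af   = ts[3] & 0x20;			/* adaptation field */
	int rai  = (af && ts[4] > 0) ? ts[5] & 0x40 : 0; /* random access */

	printf("tei=%d pusi=%d pid=0x%04x af=%d rai=%d\n",
	       !!tei, !!pusi, pid, !!af, !!rai);
	return 0;
}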
@@ -430,6 +749,374 @@
return 0;
}
+static int dvb_demux_save_idx_event(struct dvb_demux_feed *feed,
+ struct dmx_index_event_info *idx_event,
+ int traverse_from_tail)
+{
+ struct dmx_index_entry *idx_entry;
+ struct dmx_index_entry *curr_entry;
+ struct list_head *pos;
+
+ /* get entry from free list */
+ if (list_empty(&feed->rec_info->idx_info.free_list)) {
+ printk(KERN_ERR "%s: index free list is empty\n", __func__);
+ return -ENOMEM;
+ }
+
+ idx_entry = list_first_entry(&feed->rec_info->idx_info.free_list,
+ struct dmx_index_entry, next);
+ list_del(&idx_entry->next);
+
+ idx_entry->event = *idx_event;
+
+ pos = &feed->rec_info->idx_info.ready_list;
+ if (traverse_from_tail) {
+ list_for_each_entry_reverse(curr_entry,
+ &feed->rec_info->idx_info.ready_list, next) {
+ if (curr_entry->event.match_tsp_num <=
+ idx_event->match_tsp_num) {
+ pos = &curr_entry->next;
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry(curr_entry,
+ &feed->rec_info->idx_info.ready_list, next) {
+ if (curr_entry->event.match_tsp_num >
+ idx_event->match_tsp_num) {
+ pos = &curr_entry->next;
+ break;
+ }
+ }
+ }
+
+ if (traverse_from_tail)
+ list_add(&idx_entry->next, pos);
+ else
+ list_add_tail(&idx_entry->next, pos);
+
+ return 0;
+}
+
+int dvb_demux_push_idx_event(struct dvb_demux_feed *feed,
+ struct dmx_index_event_info *idx_event)
+{
+ int ret;
+
+ spin_lock(&feed->demux->lock);
+ ret = dvb_demux_save_idx_event(feed, idx_event, 1);
+ spin_unlock(&feed->demux->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(dvb_demux_push_idx_event);
+
+static inline void dvb_dmx_notify_indexing(struct dvb_demux_feed *feed)
+{
+ struct dmx_data_ready dmx_data_ready;
+ struct dmx_index_entry *curr_entry;
+ struct list_head *n, *pos;
+
+ dmx_data_ready.status = DMX_OK_IDX;
+
+ list_for_each_safe(pos, n, &feed->rec_info->idx_info.ready_list) {
+ curr_entry = list_entry(pos, struct dmx_index_entry, next);
+
+ if ((feed->rec_info->idx_info.min_pattern_tsp_num == (u64)-1) ||
+ (curr_entry->event.match_tsp_num <=
+ feed->rec_info->idx_info.min_pattern_tsp_num)) {
+ dmx_data_ready.idx_event = curr_entry->event;
+ feed->data_ready_cb.ts(&feed->feed.ts, &dmx_data_ready);
+ list_del(&curr_entry->next);
+ list_add_tail(&curr_entry->next,
+ &feed->rec_info->idx_info.free_list);
+ }
+ }
+}
+
+void dvb_dmx_notify_idx_events(struct dvb_demux_feed *feed)
+{
+ spin_lock(&feed->demux->lock);
+ dvb_dmx_notify_indexing(feed);
+ spin_unlock(&feed->demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_notify_idx_events);
+
+static void dvb_dmx_process_pattern_result(struct dvb_demux_feed *feed,
+ struct dvb_dmx_video_patterns_results *patterns, int pattern,
+ u64 curr_stc, u64 prev_stc,
+ u64 curr_match_tsp, u64 prev_match_tsp,
+ u64 curr_pusi_tsp, u64 prev_pusi_tsp)
+{
+ int mpeg_frame_start;
+ int h264_frame_start;
+ int vc1_frame_start;
+ int seq_start;
+ u64 frame_end_in_seq;
+ struct dmx_index_event_info idx_event;
+
+ idx_event.pid = feed->pid;
+ if (patterns->info[pattern].used_prefix_size) {
+ idx_event.match_tsp_num = prev_match_tsp;
+ idx_event.last_pusi_tsp_num = prev_pusi_tsp;
+ idx_event.stc = prev_stc;
+ } else {
+ idx_event.match_tsp_num = curr_match_tsp;
+ idx_event.last_pusi_tsp_num = curr_pusi_tsp;
+ idx_event.stc = curr_stc;
+ }
+
+ /* notify on frame-end if needed */
+ if (feed->prev_frame_valid) {
+ if (feed->prev_frame_type & DMX_IDX_MPEG_I_FRAME_START) {
+ idx_event.type = DMX_IDX_MPEG_I_FRAME_END;
+ frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_MPEG_P_FRAME_START) {
+ idx_event.type = DMX_IDX_MPEG_P_FRAME_END;
+ frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_MPEG_B_FRAME_START) {
+ idx_event.type = DMX_IDX_MPEG_B_FRAME_END;
+ frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_H264_IDR_START) {
+ idx_event.type = DMX_IDX_H264_IDR_END;
+ frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_H264_NON_IDR_START) {
+ idx_event.type = DMX_IDX_H264_NON_IDR_END;
+ frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+ } else {
+ idx_event.type = DMX_IDX_VC1_FRAME_END;
+ frame_end_in_seq = DMX_IDX_VC1_FIRST_SEQ_FRAME_END;
+ }
+
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+
+ if (feed->first_frame_in_seq_notified &&
+ feed->idx_params.types & frame_end_in_seq) {
+ idx_event.type = frame_end_in_seq;
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ feed->first_frame_in_seq_notified = 0;
+ }
+ }
+
+ seq_start = patterns->info[pattern].type &
+ (DMX_IDX_MPEG_SEQ_HEADER | DMX_IDX_H264_SPS |
+ DMX_IDX_VC1_SEQ_HEADER);
+
+ /* did we find start of sequence/SPS? */
+ if (seq_start) {
+ feed->first_frame_in_seq = 1;
+ feed->first_frame_in_seq_notified = 0;
+ feed->prev_frame_valid = 0;
+ idx_event.type = patterns->info[pattern].type;
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ return;
+ }
+
+ mpeg_frame_start = patterns->info[pattern].type &
+ (DMX_IDX_MPEG_I_FRAME_START |
+ DMX_IDX_MPEG_P_FRAME_START |
+ DMX_IDX_MPEG_B_FRAME_START);
+
+ h264_frame_start = patterns->info[pattern].type &
+ (DMX_IDX_H264_IDR_START | DMX_IDX_H264_NON_IDR_START);
+
+ vc1_frame_start = patterns->info[pattern].type &
+ DMX_IDX_VC1_FRAME_START;
+
+ if (!mpeg_frame_start && !h264_frame_start && !vc1_frame_start) {
+ /* neither sequence nor frame, notify on the entry if needed */
+ idx_event.type = patterns->info[pattern].type;
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ feed->prev_frame_valid = 0;
+ return;
+ }
+
+ /* notify on first frame in sequence/sps if needed */
+ if (feed->first_frame_in_seq) {
+ feed->first_frame_in_seq = 0;
+ feed->first_frame_in_seq_notified = 1;
+ if (mpeg_frame_start)
+ idx_event.type = DMX_IDX_MPEG_FIRST_SEQ_FRAME_START;
+ else if (h264_frame_start)
+ idx_event.type = DMX_IDX_H264_FIRST_SPS_FRAME_START;
+ else
+ idx_event.type = DMX_IDX_VC1_FIRST_SEQ_FRAME_START;
+
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ }
+
+ /* notify on frame start if needed */
+ idx_event.type = patterns->info[pattern].type;
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+
+ feed->prev_frame_valid = 1;
+ feed->prev_frame_type = patterns->info[pattern].type;
+}
+
+void dvb_dmx_process_idx_pattern(struct dvb_demux_feed *feed,
+ struct dvb_dmx_video_patterns_results *patterns, int pattern,
+ u64 curr_stc, u64 prev_stc,
+ u64 curr_match_tsp, u64 prev_match_tsp,
+ u64 curr_pusi_tsp, u64 prev_pusi_tsp)
+{
+ spin_lock(&feed->demux->lock);
+ dvb_dmx_process_pattern_result(feed,
+ patterns, pattern,
+ curr_stc, prev_stc,
+ curr_match_tsp, prev_match_tsp,
+ curr_pusi_tsp, prev_pusi_tsp);
+ spin_unlock(&feed->demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_process_idx_pattern);
+
+static void dvb_dmx_index(struct dvb_demux_feed *feed,
+ const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
+{
+ int i;
+ int p;
+ u64 stc;
+ int found_patterns;
+ int count = payload(buf);
+ u64 min_pattern_tsp_num;
+ struct dvb_demux_feed *tmp_feed;
+ struct dvb_demux *demux = feed->demux;
+ struct dmx_index_event_info idx_event;
+ struct dvb_dmx_video_patterns_results patterns;
+
+ if (feed->demux->convert_ts)
+ feed->demux->convert_ts(feed, timestamp, &stc);
+ else
+ stc = 0;
+
+ idx_event.pid = feed->pid;
+ idx_event.stc = stc;
+ idx_event.match_tsp_num = feed->rec_info->ts_output_count;
+
+ /* PUSI ? */
+ if (buf[1] & 0x40) {
+ feed->curr_pusi_tsp_num = feed->rec_info->ts_output_count;
+ if (feed->idx_params.types & DMX_IDX_PUSI) {
+ idx_event.type = DMX_IDX_PUSI;
+ idx_event.last_pusi_tsp_num =
+ feed->curr_pusi_tsp_num;
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ }
+ }
+
+ /*
+ * if we have not yet encountered a TS packet with PUSI indication,
+ * we cannot report index entries yet, as we need to provide
+ * the TS packet number with PUSI indication preceding the TS
+ * packet pointed to by the reported index entry.
+ */
+ if (feed->curr_pusi_tsp_num == (u64)-1) {
+ dvb_dmx_notify_indexing(feed);
+ return;
+ }
+
+ if ((feed->idx_params.types & DMX_IDX_RAI) && /* index RAI? */
+ (buf[3] & 0x20) && /* adaptation field exists? */
+ (buf[4] > 0) && /* adaptation field len > 0 ? */
+ (buf[5] & 0x40)) { /* RAI is set? */
+ idx_event.type = DMX_IDX_RAI;
+ idx_event.last_pusi_tsp_num =
+ feed->curr_pusi_tsp_num;
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ }
+
+ /*
+ * if no pattern search is required, or the TS packet has no payload,
+ * pattern search is not executed.
+ */
+ if (!feed->pattern_num || !count) {
+ dvb_dmx_notify_indexing(feed);
+ return;
+ }
+
+ p = 188 - count; /* payload start */
+
+ found_patterns =
+ dvb_dmx_video_pattern_search(feed->patterns,
+ feed->pattern_num, &buf[p], count,
+ &feed->prefix_size, &patterns);
+
+ for (i = 0; i < found_patterns; i++)
+ dvb_dmx_process_pattern_result(feed, &patterns, i,
+ stc, feed->prev_stc,
+ feed->rec_info->ts_output_count, feed->prev_tsp_num,
+ feed->curr_pusi_tsp_num, feed->prev_pusi_tsp_num);
+
+ feed->prev_tsp_num = feed->rec_info->ts_output_count;
+ feed->prev_pusi_tsp_num = feed->curr_pusi_tsp_num;
+ feed->prev_stc = stc;
+ feed->last_pattern_tsp_num = feed->rec_info->ts_output_count;
+
+ /*
+ * it is possible to have a TS packet that has a prefix of
+ * a video pattern but the video pattern is not identified yet
+ * until we get the next TS packet of that PID. When we get
+ * the next TS packet of that PID, pattern-search would
+ * detect that we have a new index entry that starts in the
+ * previous TS packet.
+ * In order to notify the user on index entries with match_tsp_num
+ * in ascending order, index events with match_tsp_num up to
+ * the last_pattern_tsp_num are notified now to the user,
+ * the rest cannot be notified yet, as we might hit the above
+ * scenario and end up notifying events out of ascending
+ * order of match_tsp_num.
+ */
+ if (feed->rec_info->idx_info.pattern_search_feeds_num == 1) {
+ /*
+ * optimization for case we have only one PID
+ * with video pattern search, in this case
+ * min_pattern_tsp_num is simply updated to the new
+ * TS packet number of the PID with pattern search.
+ */
+ feed->rec_info->idx_info.min_pattern_tsp_num =
+ feed->last_pattern_tsp_num;
+ dvb_dmx_notify_indexing(feed);
+ return;
+ }
+
+ /*
+ * if we have more than one PID with pattern search,
+ * min_pattern_tsp_num needs to be updated now based on
+ * last_pattern_tsp_num of all PIDs with pattern search.
+ */
+ min_pattern_tsp_num = (u64)-1;
+ i = feed->rec_info->idx_info.pattern_search_feeds_num;
+ list_for_each_entry(tmp_feed, &demux->feed_list, list_head) {
+ if ((tmp_feed->state != DMX_STATE_GO) ||
+ (tmp_feed->type != DMX_TYPE_TS) ||
+ (tmp_feed->feed.ts.buffer.ringbuff !=
+ feed->feed.ts.buffer.ringbuff))
+ continue;
+
+ if ((tmp_feed->last_pattern_tsp_num != (u64)-1) &&
+ ((min_pattern_tsp_num == (u64)-1) ||
+ (tmp_feed->last_pattern_tsp_num <
+ min_pattern_tsp_num)))
+ min_pattern_tsp_num = tmp_feed->last_pattern_tsp_num;
+
+ if (tmp_feed->pattern_num) {
+ i--;
+ if (i == 0)
+ break;
+ }
+ }
+
+ feed->rec_info->idx_info.min_pattern_tsp_num = min_pattern_tsp_num;
+
+ /* notify all index entries up to min_pattern_tsp_num */
+ dvb_dmx_notify_indexing(feed);
+}
+
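For a concrete (hypothetical) illustration of the ordering rule implemented above: if feeds A and B share one recording buffer and A's last_pattern_tsp_num is 120 while B's is 95, min_pattern_tsp_num becomes 95, so only index events with match_tsp_num <= 95 are released to the user now; events between 96 and 120 are held until B's pattern search catches up, which preserves the ascending match_tsp_num order.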
static inline void dvb_dmx_swfilter_output_packet(
struct dvb_demux_feed *feed,
const u8 *buf,
@@ -452,6 +1139,11 @@
if (feed->tsp_out_format == DMX_TSP_FORMAT_192_TAIL)
feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
0, &feed->feed.ts, DMX_OK);
+
+ if (feed->idx_params.enable)
+ dvb_dmx_index(feed, buf, timestamp);
+
+ feed->rec_info->ts_output_count++;
}
static inline void dvb_dmx_configure_decoder_fullness(
@@ -629,7 +1321,7 @@
((f)->feed.ts.is_filtering) && \
(((f)->ts_type & (TS_PACKET | TS_DEMUX)) == TS_PACKET))
-void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
+static void dvb_dmx_swfilter_one_packet(struct dvb_demux *demux, const u8 *buf,
const u8 timestamp[TIMESTAMP_LEN])
{
struct dvb_demux_feed *feed;
@@ -709,6 +1401,14 @@
dvb_dmx_swfilter_output_packet(feed, buf, timestamp);
}
}
+
+void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
+{
+ spin_lock(&demux->lock);
+ dvb_dmx_swfilter_one_packet(demux, buf, timestamp);
+ spin_unlock(&demux->lock);
+}
EXPORT_SYMBOL(dvb_dmx_swfilter_packet);
void dvb_dmx_swfilter_section_packets(struct dvb_demux *demux, const u8 *buf,
@@ -773,7 +1473,7 @@
while (count--) {
if (buf[0] == 0x47)
- dvb_dmx_swfilter_packet(demux, buf, timestamp);
+ dvb_dmx_swfilter_one_packet(demux, buf, timestamp);
buf += 188;
}
@@ -853,10 +1553,11 @@
if (pktsize == 192 &&
leadingbytes &&
demux->tsbuf[leadingbytes] == 0x47) /* double check */
- dvb_dmx_swfilter_packet(demux,
+ dvb_dmx_swfilter_one_packet(demux,
demux->tsbuf + TIMESTAMP_LEN, timestamp);
else if (demux->tsbuf[0] == 0x47) /* double check */
- dvb_dmx_swfilter_packet(demux, demux->tsbuf, timestamp);
+ dvb_dmx_swfilter_one_packet(demux,
+ demux->tsbuf, timestamp);
demux->tsbufp = 0;
p += j;
}
@@ -885,13 +1586,13 @@
q = &buf[p+leadingbytes];
memcpy(timestamp, &buf[p], TIMESTAMP_LEN);
} else {
- memcpy(timestamp, &buf[188], TIMESTAMP_LEN);
+ memcpy(timestamp, &buf[p+188], TIMESTAMP_LEN);
}
} else {
memset(timestamp, 0, TIMESTAMP_LEN);
}
- dvb_dmx_swfilter_packet(demux, q, timestamp);
+ dvb_dmx_swfilter_one_packet(demux, q, timestamp);
p += pktsize;
}
@@ -986,6 +1687,249 @@
return &demux->feed[i];
}
+const struct dvb_dmx_video_patterns *dvb_dmx_get_pattern(u64 dmx_idx_pattern)
+{
+ switch (dmx_idx_pattern) {
+ case DMX_IDX_MPEG_SEQ_HEADER:
+ return &mpeg2_seq_hdr;
+
+ case DMX_IDX_MPEG_GOP:
+ return &mpeg2_gop;
+
+ case DMX_IDX_MPEG_I_FRAME_START:
+ return &mpeg2_iframe;
+
+ case DMX_IDX_MPEG_P_FRAME_START:
+ return &mpeg2_pframe;
+
+ case DMX_IDX_MPEG_B_FRAME_START:
+ return &mpeg2_bframe;
+
+ case DMX_IDX_H264_SPS:
+ return &h264_sps;
+
+ case DMX_IDX_H264_PPS:
+ return &h264_pps;
+
+ case DMX_IDX_H264_IDR_START:
+ return &h264_idr;
+
+ case DMX_IDX_H264_NON_IDR_START:
+ return &h264_non_idr;
+
+ case DMX_IDX_VC1_SEQ_HEADER:
+ return &vc1_seq_hdr;
+
+ case DMX_IDX_VC1_ENTRY_POINT:
+ return &vc1_entry_point;
+
+ case DMX_IDX_VC1_FRAME_START:
+ return &vc1_frame;
+
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(dvb_dmx_get_pattern);
+
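As a rough usage sketch (not part of this patch), an external caller such as a hardware demux plug-in could combine the exported dvb_dmx_get_pattern() with dvb_dmx_video_pattern_search(); the payload buffer and the index types chosen below are hypothetical:

static void example_find_video_patterns(const u8 *payload, size_t len)
{
	const struct dvb_dmx_video_patterns
		*patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
	struct dvb_dmx_video_prefix_size_masks prefix;
	struct dvb_dmx_video_patterns_results results;
	int found, i;

	/* start with a clean prefix state; it is carried between calls */
	memset(&prefix, 0, sizeof(prefix));

	patterns[0] = dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER);
	patterns[1] = dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START);

	found = dvb_dmx_video_pattern_search(patterns, 2, payload, len,
			&prefix, &results);

	for (i = 0; i < found; i++)
		pr_debug("type 0x%llx at offset %u (prefix %u)\n",
			(unsigned long long)results.info[i].type,
			results.info[i].offset,
			results.info[i].used_prefix_size);
}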
+static void dvb_dmx_init_idx_state(struct dvb_demux_feed *feed)
+{
+ feed->prev_tsp_num = (u64)-1;
+ feed->curr_pusi_tsp_num = (u64)-1;
+ feed->prev_pusi_tsp_num = (u64)-1;
+ feed->prev_frame_valid = 0;
+ feed->first_frame_in_seq = 0;
+ feed->first_frame_in_seq_notified = 0;
+ feed->last_pattern_tsp_num = (u64)-1;
+ feed->pattern_num = 0;
+ memset(&feed->prefix_size, 0,
+ sizeof(struct dvb_dmx_video_prefix_size_masks));
+
+ if (feed->idx_params.types &
+ (DMX_IDX_MPEG_SEQ_HEADER |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_MPEG_GOP)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP);
+ feed->pattern_num++;
+ }
+
+ /* MPEG2 I-frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_MPEG_I_FRAME_START | DMX_IDX_MPEG_I_FRAME_END |
+ DMX_IDX_MPEG_P_FRAME_END | DMX_IDX_MPEG_B_FRAME_END |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ /* MPEG2 P-frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_MPEG_P_FRAME_START | DMX_IDX_MPEG_P_FRAME_END |
+ DMX_IDX_MPEG_I_FRAME_END | DMX_IDX_MPEG_B_FRAME_END |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ /* MPEG2 B-frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_MPEG_B_FRAME_START | DMX_IDX_MPEG_B_FRAME_END |
+ DMX_IDX_MPEG_I_FRAME_END | DMX_IDX_MPEG_P_FRAME_END |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_SPS |
+ DMX_IDX_H264_FIRST_SPS_FRAME_START |
+ DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_PPS)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
+ feed->pattern_num++;
+ }
+
+ /* H264 IDR */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_IDR_START | DMX_IDX_H264_IDR_END |
+ DMX_IDX_H264_NON_IDR_END |
+		DMX_IDX_H264_FIRST_SPS_FRAME_START |
+		DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START);
+ feed->pattern_num++;
+ }
+
+ /* H264 non-IDR */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_NON_IDR_START | DMX_IDX_H264_NON_IDR_END |
+ DMX_IDX_H264_IDR_END |
+		DMX_IDX_H264_FIRST_SPS_FRAME_START |
+		DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_VC1_SEQ_HEADER |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_VC1_ENTRY_POINT)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT);
+ feed->pattern_num++;
+ }
+
+ /* VC1 frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_VC1_FRAME_START | DMX_IDX_VC1_FRAME_END |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ if (feed->pattern_num)
+ feed->rec_info->idx_info.pattern_search_feeds_num++;
+}
+
+static struct dvb_demux_rec_info *dvb_dmx_alloc_rec_info(
+ struct dmx_ts_feed *ts_feed)
+{
+ int i;
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ struct dvb_demux_rec_info *rec_info;
+ struct dvb_demux_feed *tmp_feed;
+
+	/* check if this feed shares a recording buffer with other active feeds */
+ list_for_each_entry(tmp_feed, &demux->feed_list, list_head) {
+ if ((tmp_feed->state == DMX_STATE_GO) &&
+ (tmp_feed->type == DMX_TYPE_TS) &&
+ (tmp_feed != feed) &&
+ (tmp_feed->feed.ts.buffer.ringbuff ==
+ ts_feed->buffer.ringbuff)) {
+ /* indexing information is shared between the feeds */
+ tmp_feed->rec_info->ref_count++;
+ return tmp_feed->rec_info;
+ }
+ }
+
+ /* Need to allocate a new indexing info */
+ for (i = 0; i < demux->feednum; i++)
+ if (!demux->rec_info_pool[i].ref_count)
+ break;
+
+ if (i == demux->feednum)
+ return NULL;
+
+ rec_info = &demux->rec_info_pool[i];
+ rec_info->ref_count++;
+ INIT_LIST_HEAD(&rec_info->idx_info.free_list);
+ INIT_LIST_HEAD(&rec_info->idx_info.ready_list);
+
+ for (i = 0; i < DMX_IDX_EVENT_QUEUE_SIZE; i++)
+ list_add(&rec_info->idx_info.events[i].next,
+ &rec_info->idx_info.free_list);
+
+ rec_info->ts_output_count = 0;
+ rec_info->idx_info.min_pattern_tsp_num = (u64)-1;
+ rec_info->idx_info.pattern_search_feeds_num = 0;
+
+ return rec_info;
+}
+
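For example, when two TS feeds record into the same ring buffer, the first call to dvb_dmx_alloc_rec_info() takes a free entry from rec_info_pool, and the second simply finds the first feed in the feed list and bumps ref_count, so both PIDs index against a single shared ts_output_count.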
+static void dvb_dmx_free_rec_info(struct dmx_ts_feed *ts_feed)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+
+ if (!feed->rec_info || !feed->rec_info->ref_count) {
+ printk(KERN_ERR "%s: invalid idx info state\n", __func__);
+ return;
+ }
+
+ feed->rec_info->ref_count--;
+}
+
static int dvb_demux_feed_find(struct dvb_demux_feed *feed)
{
struct dvb_demux_feed *entry;
@@ -1101,7 +2045,22 @@
feed->first_cc = 1;
+ if ((feed->ts_type & TS_PACKET) &&
+ !(feed->ts_type & TS_PAYLOAD_ONLY)) {
+ feed->rec_info = dvb_dmx_alloc_rec_info(ts_feed);
+ if (!feed->rec_info) {
+ mutex_unlock(&demux->mutex);
+ return -ENOMEM;
+ }
+ dvb_dmx_init_idx_state(feed);
+ } else {
+ feed->pattern_num = 0;
+ feed->rec_info = NULL;
+ }
+
if ((ret = demux->start_feed(feed)) < 0) {
+ dvb_dmx_free_rec_info(ts_feed);
+ feed->rec_info = NULL;
mutex_unlock(&demux->mutex);
return ret;
}
@@ -1139,6 +2098,14 @@
ts_feed->is_filtering = 0;
feed->state = DMX_STATE_ALLOCATED;
spin_unlock_irq(&demux->lock);
+
+ if (feed->rec_info) {
+ if (feed->pattern_num)
+ feed->rec_info->idx_info.pattern_search_feeds_num--;
+ dvb_dmx_free_rec_info(ts_feed);
+ feed->rec_info = NULL;
+ }
+
mutex_unlock(&demux->mutex);
return ret;
@@ -1238,18 +2205,132 @@
return ret;
}
-static int dmx_ts_set_indexing_params(
+static int dmx_ts_set_video_codec(
struct dmx_ts_feed *ts_feed,
- struct dmx_indexing_video_params *params)
+ enum dmx_video_codec video_codec)
{
struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
- memcpy(&feed->indexing_params, params,
- sizeof(struct dmx_indexing_video_params));
+ feed->video_codec = video_codec;
return 0;
}
+static int dmx_ts_set_idx_params(struct dmx_ts_feed *ts_feed,
+ struct dmx_indexing_params *idx_params)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *dvbdmx = feed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if ((feed->state == DMX_STATE_GO) &&
+ !feed->rec_info) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+ feed->idx_params = *idx_params;
+
+ if (feed->state == DMX_STATE_GO) {
+ spin_lock_irq(&dvbdmx->lock);
+ if (feed->pattern_num)
+ feed->rec_info->idx_info.pattern_search_feeds_num--;
+ dvb_dmx_init_idx_state(feed);
+ spin_unlock_irq(&dvbdmx->lock);
+ }
+
+ mutex_unlock(&dvbdmx->mutex);
+
+ return 0;
+}
+
+static int dvbdmx_ts_feed_oob_cmd(struct dmx_ts_feed *ts_feed,
+ struct dmx_oob_command *cmd)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dmx_data_ready data;
+ struct dvb_demux *dvbdmx = feed->demux;
+ int ret;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (feed->state != DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ /* Decoder feeds are handled by plug-in */
+ if (feed->ts_type & TS_DECODER) {
+ if (feed->demux->oob_command)
+ ret = feed->demux->oob_command(feed, cmd);
+ else
+ ret = 0;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+ }
+
+ data.data_length = 0;
+
+ switch (cmd->type) {
+ case DMX_OOB_CMD_EOS:
+ if (feed->ts_type & TS_PAYLOAD_ONLY) {
+ if (feed->secure_mode.is_secured) {
+ /* Secure feeds are handled by plug-in */
+ if (feed->demux->oob_command)
+ ret = feed->demux->oob_command(feed,
+ cmd);
+ else
+ ret = 0;
+ break;
+ }
+
+ /* Close last PES on non-secure feeds */
+ if (feed->pusi_seen) {
+ data.status = DMX_OK_PES_END;
+ data.pes_end.start_gap = 0;
+ data.pes_end.actual_length =
+ feed->peslen;
+ data.pes_end.disc_indicator_set = 0;
+ data.pes_end.pes_length_mismatch = 0;
+ data.pes_end.stc = 0;
+ data.pes_end.tei_counter =
+ feed->pes_tei_counter;
+ data.pes_end.cont_err_counter =
+ feed->pes_cont_err_counter;
+ data.pes_end.ts_packets_num =
+ feed->pes_ts_packets_num;
+
+ feed->peslen = 0;
+ feed->pes_tei_counter = 0;
+ feed->pes_ts_packets_num = 0;
+ feed->pes_cont_err_counter = 0;
+
+ ret = feed->data_ready_cb.ts(&feed->feed.ts,
+ &data);
+ if (ret)
+ break;
+ }
+ }
+ data.status = DMX_OK_EOS;
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ break;
+
+ case DMX_OOB_CMD_MARKER:
+ data.status = DMX_OK_MARKER;
+ data.marker.id = cmd->params.marker.id;
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+}
+
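For illustration only (the caller and its context are hypothetical), a kernel-side demux client could signal end-of-stream on a running TS feed through the new out-of-band hook like this:

static int example_signal_eos(struct dmx_ts_feed *ts_feed)
{
	struct dmx_oob_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = DMX_OOB_CMD_EOS;

	/* for TS_DECODER or secure feeds this is forwarded to the plug-in */
	return ts_feed->oob_command(ts_feed, &cmd);
}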
static int dmx_ts_set_tsp_out_format(
struct dmx_ts_feed *ts_feed,
enum dmx_tsp_format_t tsp_format)
@@ -1295,8 +2376,7 @@
feed->secure_mode.is_secured = 0;
feed->buffer = NULL;
feed->tsp_out_format = DMX_TSP_FORMAT_188;
- memset(&feed->indexing_params, 0,
- sizeof(struct dmx_indexing_video_params));
+ feed->idx_params.enable = 0;
/* default behaviour - pass first PES data even if it is
* partial PES data from previous PES that we didn't receive its header.
@@ -1312,13 +2392,15 @@
(*ts_feed)->start_filtering = dmx_ts_feed_start_filtering;
(*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering;
(*ts_feed)->set = dmx_ts_feed_set;
- (*ts_feed)->set_indexing_params = dmx_ts_set_indexing_params;
+ (*ts_feed)->set_video_codec = dmx_ts_set_video_codec;
+ (*ts_feed)->set_idx_params = dmx_ts_set_idx_params;
(*ts_feed)->set_tsp_out_format = dmx_ts_set_tsp_out_format;
(*ts_feed)->get_decoder_buff_status = dmx_ts_feed_decoder_buff_status;
(*ts_feed)->reuse_decoder_buffer = dmx_ts_feed_reuse_decoder_buffer;
(*ts_feed)->data_ready_cb = dmx_ts_feed_data_ready_cb;
(*ts_feed)->notify_data_read = NULL;
(*ts_feed)->set_secure_mode = dmx_ts_set_secure_mode;
+ (*ts_feed)->oob_command = dvbdmx_ts_feed_oob_cmd;
if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
feed->state = DMX_STATE_FREE;
@@ -1598,6 +2680,55 @@
return 0;
}
+static int dvbdmx_section_feed_oob_cmd(struct dmx_section_feed *section_feed,
+ struct dmx_oob_command *cmd)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)section_feed;
+ struct dvb_demux *dvbdmx = feed->demux;
+ struct dmx_data_ready data;
+ int ret;
+
+ data.data_length = 0;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (feed->state != DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ /* Secure section feeds are handled by the plug-in */
+ if (feed->secure_mode.is_secured) {
+ if (feed->demux->oob_command)
+ ret = feed->demux->oob_command(feed, cmd);
+ else
+ ret = 0;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+ }
+
+ switch (cmd->type) {
+ case DMX_OOB_CMD_EOS:
+ data.status = DMX_OK_EOS;
+ ret = feed->data_ready_cb.sec(&feed->filter->filter, &data);
+ break;
+
+ case DMX_OOB_CMD_MARKER:
+ data.status = DMX_OK_MARKER;
+ data.marker.id = cmd->params.marker.id;
+ ret = feed->data_ready_cb.sec(&feed->filter->filter, &data);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+}
+
static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
struct dmx_section_feed **feed,
dmx_section_cb callback)
@@ -1637,6 +2768,7 @@
(*feed)->data_ready_cb = dmx_section_feed_data_ready_cb;
(*feed)->notify_data_read = NULL;
(*feed)->set_secure_mode = dmx_section_set_secure_mode;
+ (*feed)->oob_command = dvbdmx_section_feed_oob_cmd;
mutex_unlock(&dvbdmx->mutex);
return 0;
@@ -1814,6 +2946,18 @@
return 0;
}
+static int dvbdmx_get_tsp_size(struct dmx_demux *demux)
+{
+ int tsp_size;
+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
+
+ mutex_lock(&dvbdemux->mutex);
+ tsp_size = dvbdemux->ts_packet_size;
+ mutex_unlock(&dvbdemux->mutex);
+
+ return tsp_size;
+}
+
static int dvbdmx_set_tsp_format(
struct dmx_demux *demux,
enum dmx_tsp_format_t tsp_format)
@@ -1863,6 +3007,16 @@
return -ENOMEM;
}
+ dvbdemux->rec_info_pool = vmalloc(dvbdemux->feednum *
+ sizeof(struct dvb_demux_rec_info));
+ if (!dvbdemux->rec_info_pool) {
+ vfree(dvbdemux->feed);
+ vfree(dvbdemux->filter);
+ dvbdemux->feed = NULL;
+ dvbdemux->filter = NULL;
+ return -ENOMEM;
+ }
+
dvbdemux->total_process_time = 0;
dvbdemux->total_crc_time = 0;
snprintf(dvbdemux->alias,
@@ -1891,9 +3045,12 @@
dvbdemux->filter[i].state = DMX_STATE_FREE;
dvbdemux->filter[i].index = i;
}
+
for (i = 0; i < dvbdemux->feednum; i++) {
dvbdemux->feed[i].state = DMX_STATE_FREE;
dvbdemux->feed[i].index = i;
+
+ dvbdemux->rec_info_pool[i].ref_count = 0;
}
dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
@@ -1944,6 +3101,7 @@
dmx->get_pes_pids = dvbdmx_get_pes_pids;
dmx->set_tsp_format = dvbdmx_set_tsp_format;
+ dmx->get_tsp_size = dvbdmx_get_tsp_size;
mutex_init(&dvbdemux->mutex);
spin_lock_init(&dvbdemux->lock);
@@ -1962,6 +3120,7 @@
vfree(dvbdemux->cnt_storage);
vfree(dvbdemux->filter);
vfree(dvbdemux->feed);
+ vfree(dvbdemux->rec_info_pool);
}
EXPORT_SYMBOL(dvb_dmx_release);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index f3dc4b8..879aad2 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -69,6 +69,88 @@
#define DMX_FEED_ENTRY(pos) list_entry(pos, struct dvb_demux_feed, list_head)
+
+struct dmx_index_entry {
+ struct dmx_index_event_info event;
+ struct list_head next;
+};
+
+#define DMX_IDX_EVENT_QUEUE_SIZE 100
+struct dvb_demux_rec_info {
+ /* Reference counter for number of feeds using this information */
+ int ref_count;
+
+ /* Counter for number of TS packets output to recording buffer */
+ u64 ts_output_count;
+
+ /* Indexing information */
+ struct {
+ /*
+ * Minimum TS packet number encountered in recording filter
+ * among all feeds that search for video patterns
+ */
+ u64 min_pattern_tsp_num;
+
+ /* Number of feeds with video pattern search request */
+ u8 pattern_search_feeds_num;
+
+ /* Index entries pool */
+ struct dmx_index_entry events[DMX_IDX_EVENT_QUEUE_SIZE];
+
+ /* List of free entries that can be used for new index events */
+ struct list_head free_list;
+
+ /* List holding ready index entries not notified to user yet */
+ struct list_head ready_list;
+ } idx_info;
+};
+
+#define DVB_DMX_MAX_PATTERN_LEN 6
+struct dvb_dmx_video_patterns {
+ /* the byte pattern to look for */
+ u8 pattern[DVB_DMX_MAX_PATTERN_LEN];
+
+ /* the byte mask to use (same length as pattern) */
+ u8 mask[DVB_DMX_MAX_PATTERN_LEN];
+
+ /* the length of the pattern, in bytes */
+ size_t size;
+
+ /* the type of the pattern. One of DMX_IDX_* definitions */
+ u64 type;
+};
+
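For illustration, a table entry of this type for the MPEG-2 sequence_header start code (00 00 01 B3) could be declared as below; the actual tables returned by dvb_dmx_get_pattern() live in dvb_demux.c and may differ in detail:

static const struct dvb_dmx_video_patterns example_mpeg2_seq_hdr = {
	.pattern = {0x00, 0x00, 0x01, 0xB3},
	.mask    = {0xFF, 0xFF, 0xFF, 0xFF},
	.size    = 4,
	.type    = DMX_IDX_MPEG_SEQ_HEADER,
};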
+#define DVB_DMX_MAX_FOUND_PATTERNS 20
+#define DVB_DMX_MAX_SEARCH_PATTERN_NUM 20
+struct dvb_dmx_video_prefix_size_masks {
+ /*
+ * a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started in the previous TS packet.
+ * Updated by dvb_dmx_video_pattern_search for use in the next lookup.
+ */
+ u32 size_mask[DVB_DMX_MAX_FOUND_PATTERNS];
+};
+
+struct dvb_dmx_video_patterns_results {
+ struct {
+ /*
+ * The offset in the buffer where the pattern was found.
+ * If a pattern is found using a prefix (i.e. started on the
+ * previous buffer), offset is zero.
+ */
+ u32 offset;
+
+ /*
+ * The type of the pattern found.
+ * One of DMX_IDX_* definitions.
+ */
+ u64 type;
+
+ /* The prefix size that was used to find this pattern */
+ u32 used_prefix_size;
+ } info[DVB_DMX_MAX_FOUND_PATTERNS];
+};
+
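As an example of the prefix mechanism: if a TS payload ends with the bytes 00 00 and the next payload of the same PID starts with 01 B3, the sequence header is only detectable on the second call; dvb_dmx_video_pattern_search() then reports it with offset 0 and used_prefix_size 2, meaning the match actually began two bytes before the current buffer.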
struct dvb_demux_feed {
union {
struct dmx_ts_feed ts;
@@ -105,6 +187,21 @@
int first_cc;
int pusi_seen; /* prevents feeding of garbage from previous section */
+ struct dvb_demux_rec_info *rec_info;
+ u64 prev_tsp_num;
+ u64 prev_stc;
+ u64 curr_pusi_tsp_num;
+ u64 prev_pusi_tsp_num;
+ int prev_frame_valid;
+ u64 prev_frame_type;
+ int first_frame_in_seq;
+ int first_frame_in_seq_notified;
+ u64 last_pattern_tsp_num;
+ int pattern_num;
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
+ struct dvb_dmx_video_prefix_size_masks prefix_size;
+
u32 peslen;
u32 pes_tei_counter;
u32 pes_cont_err_counter;
@@ -113,7 +210,8 @@
struct list_head list_head;
unsigned int index; /* a unique index for each feed (can be used as hardware pid filter index) */
- struct dmx_indexing_video_params indexing_params;
+ enum dmx_video_codec video_codec;
+ struct dmx_indexing_params idx_params;
};
struct dvb_demux {
@@ -139,6 +237,11 @@
const u8 *buf, size_t len);
void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst,
const u8 *src, size_t len);
+ int (*oob_command)(struct dvb_demux_feed *feed,
+ struct dmx_oob_command *cmd);
+ void (*convert_ts)(struct dvb_demux_feed *feed,
+ const u8 timestamp[TIMESTAMP_LEN],
+ u64 *timestampIn27Mhz);
int users;
#define MAX_DVB_DEMUX_USERS 10
@@ -176,6 +279,8 @@
dmx_section_fullness sec;
} buffer_ctrl;
+ struct dvb_demux_rec_info *rec_info_pool;
+
/*
* the following is used for debugfs exposing info
* about dvb demux performance.
@@ -202,6 +307,22 @@
enum dmx_tsp_format_t tsp_format);
void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
const u8 timestamp[TIMESTAMP_LEN]);
+const struct dvb_dmx_video_patterns *dvb_dmx_get_pattern(u64 dmx_idx_pattern);
+int dvb_dmx_video_pattern_search(
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+ int patterns_num,
+ const u8 *buf, size_t buf_size,
+ struct dvb_dmx_video_prefix_size_masks *prefix_size_masks,
+ struct dvb_dmx_video_patterns_results *results);
+int dvb_demux_push_idx_event(struct dvb_demux_feed *feed,
+ struct dmx_index_event_info *idx_event);
+void dvb_dmx_process_idx_pattern(struct dvb_demux_feed *feed,
+ struct dvb_dmx_video_patterns_results *patterns, int pattern,
+ u64 curr_stc, u64 prev_stc,
+ u64 curr_match_tsp, u64 prev_match_tsp,
+ u64 curr_pusi_tsp, u64 prev_pusi_tsp);
+void dvb_dmx_notify_idx_events(struct dvb_demux_feed *feed);
/**
* dvb_dmx_is_video_feed - Returns whether the PES feed
diff --git a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_core.c b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_core.c
index 84f7307..9370fc9 100644
--- a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_core.c
+++ b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -142,7 +142,7 @@
buf_p = msm_gemini_hw_pingpong_active_buffer(&we_pingpong_buf);
if (buf_p) {
buf_p->framedone_len = msm_gemini_hw_encode_output_size();
- GMN_DBG("%s:%d] framedone_len %d\n", __func__, __LINE__,
+ pr_debug("%s:%d] framedone_len %d\n", __func__, __LINE__,
buf_p->framedone_len);
}
diff --git a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw.c b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw.c
index 0cbb101..79c533e 100644
--- a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw.c
+++ b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010,2013 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -297,6 +297,8 @@
struct msm_gemini_hw_cmd *hw_cmd_p;
+ pr_debug("%s:%d] pingpong index %d", __func__, __LINE__,
+ pingpong_index);
if (pingpong_index == 0) {
hw_cmd_p = &hw_cmd_we_ping_update[0];
@@ -486,40 +488,38 @@
return is_copy_to_user;
}
-void msm_gemini_hw_region_dump(int size)
+#ifdef MSM_GMN_DBG_DUMP
+void msm_gemini_io_dump(int size)
{
- uint32_t *p;
- uint8_t *p8;
-
- if (size > gemini_region_size)
- GMN_PR_ERR("%s:%d] wrong region dump size\n",
- __func__, __LINE__);
-
- p = (uint32_t *) gemini_region_base;
- while (size >= 16) {
- GMN_DBG("0x%08X] %08X %08X %08X %08X\n",
- gemini_region_size - size,
- readl(p), readl(p+1), readl(p+2), readl(p+3));
- p += 4;
- size -= 16;
- }
-
- if (size > 0) {
- uint32_t d;
- GMN_DBG("0x%08X] ", gemini_region_size - size);
- while (size >= 4) {
- GMN_DBG("%08X ", readl(p++));
- size -= 4;
+ char line_str[128], *p_str;
+ void __iomem *addr = gemini_region_base;
+ int i;
+ u32 *p = (u32 *) addr;
+ u32 data;
+ pr_info("%s: %p %d reg_size %d\n", __func__, addr, size,
+ gemini_region_size);
+ line_str[0] = '\0';
+ p_str = line_str;
+ for (i = 0; i < size/4; i++) {
+ if (i % 4 == 0) {
+ snprintf(p_str, 12, "%08x: ", (u32) p);
+ p_str += 10;
}
-
- d = readl(p);
- p8 = (uint8_t *) &d;
- while (size) {
- GMN_DBG("%02X", *p8++);
- size--;
+ data = readl_relaxed(p++);
+ snprintf(p_str, 12, "%08x ", data);
+ p_str += 9;
+ if ((i + 1) % 4 == 0) {
+ pr_info("%s\n", line_str);
+ line_str[0] = '\0';
+ p_str = line_str;
}
-
- GMN_DBG("\n");
}
+ if (line_str[0] != '\0')
+ pr_info("%s\n", line_str);
}
+#else
+void msm_gemini_io_dump(int size)
+{
+}
+#endif
diff --git a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw.h b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw.h
index 1c8de19..aa6c4aa1 100644
--- a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw.h
+++ b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -95,7 +95,7 @@
int msm_gemini_hw_wait(struct msm_gemini_hw_cmd *hw_cmd_p, int m_us);
void msm_gemini_hw_delay(struct msm_gemini_hw_cmd *hw_cmd_p, int m_us);
int msm_gemini_hw_exec_cmds(struct msm_gemini_hw_cmd *hw_cmd_p, int m_cmds);
-void msm_gemini_hw_region_dump(int size);
+void msm_gemini_io_dump(int size);
#define MSM_GEMINI_PIPELINE_CLK_128MHZ 128 /* 8MP 128MHz */
#define MSM_GEMINI_PIPELINE_CLK_140MHZ 140 /* 9MP 140MHz */
diff --git a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw_reg.h b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw_reg.h
index ea13d68..2fe6038 100644
--- a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw_reg.h
+++ b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_hw_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010, 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -171,6 +171,6 @@
#define HWIO_JPEG_IRQ_STATUS_RMSK 0xffffffff
#define HWIO_JPEG_STATUS_ENCODE_OUTPUT_SIZE_ADDR (GEMINI_REG_BASE + 0x00000034)
-#define HWIO_JPEG_STATUS_ENCODE_OUTPUT_SIZE_RMSK 0xffffff
+#define HWIO_JPEG_STATUS_ENCODE_OUTPUT_SIZE_RMSK 0xffffffff
#endif /* MSM_GEMINI_HW_REG_H */
diff --git a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_sync.c b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_sync.c
index ed2222a..50c7284 100644
--- a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_sync.c
+++ b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_sync.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,8 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <media/msm_gemini.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
#include "msm_gemini_sync.h"
#include "msm_gemini_core.h"
#include "msm_gemini_platform.h"
@@ -23,6 +25,9 @@
static int release_buf;
+/* max output fragment size; multiple of the 4K page size (0x7ff000 = 8 MB - 4 KB) */
+static const int g_max_out_size = 0x7ff000;
+
/*************** queue helper ****************/
inline void msm_gemini_q_init(char const *name, struct msm_gemini_q *q_p)
{
@@ -180,7 +185,7 @@
{
int rc = 0;
- GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ pr_debug("%s:%d] buf_in %p", __func__, __LINE__, buf_in);
if (buf_in) {
buf_in->vbuf.framedone_len = buf_in->framedone_len;
@@ -266,19 +271,88 @@
/*************** output queue ****************/
+int msm_gemini_get_out_buffer(struct msm_gemini_device *pgmn_dev,
+ struct msm_gemini_hw_buf *p_outbuf)
+{
+ int buf_size = 0;
+ int bytes_remaining = 0;
+ if (pgmn_dev->out_offset >= pgmn_dev->out_buf.y_len) {
+ GMN_PR_ERR("%s:%d] no more buffers", __func__, __LINE__);
+ return -EINVAL;
+ }
+ bytes_remaining = pgmn_dev->out_buf.y_len - pgmn_dev->out_offset;
+ buf_size = min(bytes_remaining, pgmn_dev->max_out_size);
+
+ pgmn_dev->out_frag_cnt++;
+ pr_debug("%s:%d] buf_size[%d] %d", __func__, __LINE__,
+ pgmn_dev->out_frag_cnt, buf_size);
+ p_outbuf->y_len = buf_size;
+ p_outbuf->y_buffer_addr = pgmn_dev->out_buf.y_buffer_addr +
+ pgmn_dev->out_offset;
+ pgmn_dev->out_offset += buf_size;
+ return 0;
+}
+
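As a worked example (buffer size hypothetical): with a 20 MB output buffer and max_out_size of 0x7ff000 bytes (8 MB - 4 KB), the first two calls return fragments of 0x7ff000 bytes at offsets 0 and 0x7ff000, the third call returns the remaining ~4 MB and leaves out_frag_cnt at 3, and a fourth call fails with -EINVAL.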
+int msm_gemini_outmode_single_we_pingpong_irq(
+ struct msm_gemini_device *pgmn_dev,
+ struct msm_gemini_core_buf *buf_in)
+{
+ int rc = 0;
+ struct msm_gemini_core_buf out_buf;
+ int frame_done = buf_in &&
+ buf_in->vbuf.type == MSM_GEMINI_EVT_FRAMEDONE;
+ pr_debug("%s:%d] framedone %d", __func__, __LINE__, frame_done);
+ if (!pgmn_dev->out_buf_set) {
+ pr_err("%s:%d] output buffer not set",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (frame_done) {
+ /* send the buffer back */
+ pgmn_dev->out_buf.vbuf.framedone_len = buf_in->framedone_len;
+ pgmn_dev->out_buf.vbuf.type = MSM_GEMINI_EVT_FRAMEDONE;
+ rc = msm_gemini_q_in_buf(&pgmn_dev->output_rtn_q,
+ &pgmn_dev->out_buf);
+ if (rc) {
+ pr_err("%s:%d] cannot queue the output buffer",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ rc = msm_gemini_q_wakeup(&pgmn_dev->output_rtn_q);
+		/*
+		 * reset the output buffer since the ownership is
+		 * transferred to the rtn queue
+		 */
+ if (!rc)
+ pgmn_dev->out_buf_set = 0;
+ } else {
+ /* configure ping/pong */
+ rc = msm_gemini_get_out_buffer(pgmn_dev, &out_buf);
+ if (rc)
+ msm_gemini_core_we_buf_reset(&out_buf);
+ else
+ msm_gemini_core_we_buf_update(&out_buf);
+ }
+ return rc;
+}
+
int msm_gemini_we_pingpong_irq(struct msm_gemini_device *pgmn_dev,
struct msm_gemini_core_buf *buf_in)
{
int rc = 0;
struct msm_gemini_core_buf *buf_out;
- GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ pr_debug("%s:%d] Enter mode %d", __func__, __LINE__,
+ pgmn_dev->out_mode);
+
+ if (pgmn_dev->out_mode == MSM_GMN_OUTMODE_SINGLE)
+ return msm_gemini_outmode_single_we_pingpong_irq(pgmn_dev,
+ buf_in);
+
if (buf_in) {
- GMN_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+ pr_debug("%s:%d] 0x%08x %d\n", __func__, __LINE__,
(int) buf_in->y_buffer_addr, buf_in->y_len);
rc = msm_gemini_q_in_buf(&pgmn_dev->output_rtn_q, buf_in);
} else {
- GMN_DBG("%s:%d] no output return buffer\n", __func__,
+ pr_debug("%s:%d] no output return buffer\n", __func__,
__LINE__);
rc = -1;
return rc;
@@ -291,7 +365,7 @@
kfree(buf_out);
} else {
msm_gemini_core_we_buf_reset(buf_in);
- GMN_DBG("%s:%d] no output buffer\n", __func__, __LINE__);
+ pr_debug("%s:%d] no output buffer\n", __func__, __LINE__);
rc = -2;
}
@@ -339,6 +413,43 @@
return 0;
}
+int msm_gemini_set_output_buf(struct msm_gemini_device *pgmn_dev,
+ void __user *arg)
+{
+ struct msm_gemini_buf buf_cmd;
+
+ if (pgmn_dev->out_buf_set) {
+		pr_err("%s:%d] output buffer already provided",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf_cmd, arg, sizeof(struct msm_gemini_buf))) {
+ pr_err("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ GMN_DBG("%s:%d] output addr 0x%08x len %d", __func__, __LINE__,
+ (int) buf_cmd.vaddr,
+ buf_cmd.y_len);
+
+ pgmn_dev->out_buf.y_buffer_addr = msm_gemini_platform_v2p(
+ buf_cmd.fd,
+ buf_cmd.y_len,
+ &pgmn_dev->out_buf.file,
+ &pgmn_dev->out_buf.handle);
+ if (!pgmn_dev->out_buf.y_buffer_addr) {
+ pr_err("%s:%d] cannot map the output address",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ pgmn_dev->out_buf.y_len = buf_cmd.y_len;
+ pgmn_dev->out_buf.vbuf = buf_cmd;
+ pgmn_dev->out_buf_set = 1;
+
+ return 0;
+}
+
int msm_gemini_output_buf_enqueue(struct msm_gemini_device *pgmn_dev,
void __user *arg)
{
@@ -456,6 +567,7 @@
struct msm_gemini_core_buf *buf_p;
struct msm_gemini_buf buf_cmd;
int rc = 0;
+ struct msm_bus_scale_pdata *p_bus_scale_data = NULL;
if (copy_from_user(&buf_cmd, arg, sizeof(struct msm_gemini_buf))) {
GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
@@ -484,9 +596,9 @@
return rc;
}
} else {
- buf_p->y_buffer_addr = msm_gemini_platform_v2p(buf_cmd.fd,
- buf_cmd.y_len + buf_cmd.cbcr_len, &buf_p->file,
- &buf_p->handle) + buf_cmd.offset + buf_cmd.y_off;
+ buf_p->y_buffer_addr = msm_gemini_platform_v2p(buf_cmd.fd,
+ buf_cmd.y_len + buf_cmd.cbcr_len, &buf_p->file,
+ &buf_p->handle) + buf_cmd.offset + buf_cmd.y_off;
}
buf_p->y_len = buf_cmd.y_len;
@@ -504,6 +616,30 @@
return -1;
}
buf_p->vbuf = buf_cmd;
+ buf_p->vbuf.type = MSM_GEMINI_EVT_RESET;
+
+ /* Set bus vectors */
+ p_bus_scale_data = (struct msm_bus_scale_pdata *)
+ pgmn_dev->pdev->dev.platform_data;
+ if (pgmn_dev->bus_perf_client &&
+ (MSM_GMN_OUTMODE_SINGLE == pgmn_dev->out_mode)) {
+ int rc;
+ struct msm_bus_paths *path = &(p_bus_scale_data->usecase[1]);
+ GMN_DBG("%s:%d] Update bus bandwidth", __func__, __LINE__);
+ if (pgmn_dev->op_mode & MSM_GEMINI_MODE_OFFLINE_ENCODE) {
+ path->vectors[0].ab = (buf_p->y_len + buf_p->cbcr_len) *
+ 15 * 2;
+ path->vectors[0].ib = path->vectors[0].ab;
+ path->vectors[1].ab = 0;
+ path->vectors[1].ib = 0;
+ }
+ rc = msm_bus_scale_client_update_request(
+ pgmn_dev->bus_perf_client, 1);
+ if (rc < 0) {
+ GMN_PR_ERR("%s:%d] update_request fails %d",
+ __func__, __LINE__, rc);
+ }
+ }
msm_gemini_q_in(&pgmn_dev->input_buf_q, buf_p);
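The ab vote above scales with the input frame size; assuming the constants stand for roughly 15 frames per second and a factor of 2 for read plus write traffic (an interpretation, not stated in the code), an 8 MP NV12 frame of about 12 MB (y_len + cbcr_len) yields ab = ib of approximately 12 MB * 15 * 2, i.e. about 360 MB/s.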
@@ -545,6 +681,9 @@
int __msm_gemini_open(struct msm_gemini_device *pgmn_dev)
{
int rc;
+ struct msm_bus_scale_pdata *p_bus_scale_data =
+ (struct msm_bus_scale_pdata *)pgmn_dev->pdev->dev.
+ platform_data;
mutex_lock(&pgmn_dev->lock);
if (pgmn_dev->open_count) {
@@ -576,7 +715,23 @@
msm_gemini_q_cleanup(&pgmn_dev->input_rtn_q);
msm_gemini_q_cleanup(&pgmn_dev->input_buf_q);
msm_gemini_core_init();
+ pgmn_dev->out_mode = MSM_GMN_OUTMODE_FRAGMENTED;
+ pgmn_dev->out_buf_set = 0;
+ pgmn_dev->out_offset = 0;
+ pgmn_dev->max_out_size = g_max_out_size;
+ pgmn_dev->out_frag_cnt = 0;
+ pgmn_dev->bus_perf_client = 0;
+ if (p_bus_scale_data) {
+ GMN_DBG("%s:%d] register bus client", __func__, __LINE__);
+ pgmn_dev->bus_perf_client =
+ msm_bus_scale_register_client(p_bus_scale_data);
+ if (!pgmn_dev->bus_perf_client) {
+ GMN_PR_ERR("%s:%d] bus client register failed",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ }
GMN_DBG("%s:%d] success\n", __func__, __LINE__);
return rc;
}
@@ -593,13 +748,23 @@
pgmn_dev->open_count--;
mutex_unlock(&pgmn_dev->lock);
- msm_gemini_core_release(release_buf);
+ if (pgmn_dev->out_mode == MSM_GMN_OUTMODE_FRAGMENTED) {
+ msm_gemini_core_release(release_buf);
+ } else if (pgmn_dev->out_buf_set) {
+ msm_gemini_platform_p2v(pgmn_dev->out_buf.file,
+ &pgmn_dev->out_buf.handle);
+ }
msm_gemini_q_cleanup(&pgmn_dev->evt_q);
msm_gemini_q_cleanup(&pgmn_dev->output_rtn_q);
msm_gemini_outbuf_q_cleanup(&pgmn_dev->output_buf_q);
msm_gemini_q_cleanup(&pgmn_dev->input_rtn_q);
msm_gemini_outbuf_q_cleanup(&pgmn_dev->input_buf_q);
+ if (pgmn_dev->bus_perf_client) {
+ msm_bus_scale_unregister_client(pgmn_dev->bus_perf_client);
+ pgmn_dev->bus_perf_client = 0;
+ }
+
if (pgmn_dev->open_count)
GMN_PR_ERR(KERN_ERR "%s: multiple opens\n", __func__);
@@ -699,29 +864,63 @@
}
}
- for (i = 0; i < 2; i++) {
- buf_out_free[i] = msm_gemini_q_out(&pgmn_dev->output_buf_q);
+ if (pgmn_dev->out_mode == MSM_GMN_OUTMODE_FRAGMENTED) {
+ for (i = 0; i < 2; i++) {
+ buf_out_free[i] =
+ msm_gemini_q_out(&pgmn_dev->output_buf_q);
- if (buf_out_free[i]) {
- msm_gemini_core_we_buf_update(buf_out_free[i]);
- } else if (i == 1) {
- /* set the pong to same address as ping */
- buf_out_free[0]->y_len >>= 1;
- buf_out_free[0]->y_buffer_addr +=
- buf_out_free[0]->y_len;
- msm_gemini_core_we_buf_update(buf_out_free[0]);
- /* since ping and pong are same buf release only once*/
- release_buf = 0;
- } else {
- GMN_DBG("%s:%d] no output buffer\n",
- __func__, __LINE__);
- break;
+ if (buf_out_free[i]) {
+ msm_gemini_core_we_buf_update(buf_out_free[i]);
+ } else if (i == 1) {
+ /* set the pong to same address as ping */
+ buf_out_free[0]->y_len >>= 1;
+ buf_out_free[0]->y_buffer_addr +=
+ buf_out_free[0]->y_len;
+ msm_gemini_core_we_buf_update(buf_out_free[0]);
+ /*
+ * since ping and pong are same buf
+ * release only once
+ */
+ release_buf = 0;
+ } else {
+ GMN_DBG("%s:%d] no output buffer\n",
+ __func__, __LINE__);
+ break;
+ }
}
+ for (i = 0; i < 2; i++)
+ kfree(buf_out_free[i]);
+ } else {
+ struct msm_gemini_core_buf out_buf;
+ /*
+ * Since the same buffer is fragmented, p2v need not be
+ * called for all the buffers
+ */
+ release_buf = 0;
+ if (!pgmn_dev->out_buf_set) {
+ GMN_PR_ERR("%s:%d] output buffer not set",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ /* configure ping */
+ rc = msm_gemini_get_out_buffer(pgmn_dev, &out_buf);
+ if (rc) {
+ GMN_PR_ERR("%s:%d] no output buffer for ping",
+ __func__, __LINE__);
+ return rc;
+ }
+ msm_gemini_core_we_buf_update(&out_buf);
+ /* configure pong */
+ rc = msm_gemini_get_out_buffer(pgmn_dev, &out_buf);
+ if (rc) {
+ GMN_DBG("%s:%d] no output buffer for pong",
+ __func__, __LINE__);
+ /* fall through to configure same buffer */
+ }
+ msm_gemini_core_we_buf_update(&out_buf);
+ msm_gemini_io_dump(0x150);
}
- for (i = 0; i < 2; i++)
- kfree(buf_out_free[i]);
-
rc = msm_gemini_ioctl_hw_cmds(pgmn_dev, arg);
GMN_DBG("%s:%d]\n", __func__, __LINE__);
return rc;
@@ -746,12 +945,22 @@
return rc;
}
-int msm_gemini_ioctl_test_dump_region(struct msm_gemini_device *pgmn_dev,
- unsigned long arg)
+int msm_gemini_ioctl_set_outmode(struct msm_gemini_device *pgmn_dev,
+ void * __user arg)
{
- GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
- msm_gemini_hw_region_dump(arg);
- return 0;
+ int rc = 0;
+ enum msm_gmn_out_mode mode;
+
+ if (copy_from_user(&mode, arg, sizeof(mode))) {
+ GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ GMN_DBG("%s:%d] mode %d", __func__, __LINE__, mode);
+
+ if ((mode == MSM_GMN_OUTMODE_FRAGMENTED)
+ || (mode == MSM_GMN_OUTMODE_SINGLE))
+ pgmn_dev->out_mode = mode;
+ return rc;
}
long __msm_gemini_ioctl(struct msm_gemini_device *pgmn_dev,
@@ -790,8 +999,12 @@
break;
case MSM_GMN_IOCTL_OUTPUT_BUF_ENQUEUE:
- rc = msm_gemini_output_buf_enqueue(pgmn_dev,
- (void __user *) arg);
+ if (pgmn_dev->out_mode == MSM_GMN_OUTMODE_FRAGMENTED)
+ rc = msm_gemini_output_buf_enqueue(pgmn_dev,
+ (void __user *) arg);
+ else
+ rc = msm_gemini_set_output_buf(pgmn_dev,
+ (void __user *) arg);
break;
case MSM_GMN_IOCTL_OUTPUT_GET:
@@ -818,8 +1031,8 @@
rc = msm_gemini_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
break;
- case MSM_GMN_IOCTL_TEST_DUMP_REGION:
- rc = msm_gemini_ioctl_test_dump_region(pgmn_dev, arg);
+ case MSM_GMN_IOCTL_SET_MODE:
+ rc = msm_gemini_ioctl_set_outmode(pgmn_dev, (void __user *)arg);
break;
default:
diff --git a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_sync.h b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_sync.h
index d1a43e1..88e9615 100644
--- a/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_sync.h
+++ b/drivers/media/platform/msm/camera_v1/gemini/msm_gemini_sync.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010,2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -74,6 +74,16 @@
struct msm_gemini_q input_buf_q;
struct v4l2_subdev subdev;
+ enum msm_gmn_out_mode out_mode;
+
+ /* single out mode parameters */
+ struct msm_gemini_hw_buf out_buf;
+ int out_offset;
+ int out_buf_set;
+ int max_out_size;
+ int out_frag_cnt;
+
+ uint32_t bus_perf_client;
};
int __msm_gemini_open(struct msm_gemini_device *pgmn_dev);
diff --git a/drivers/media/platform/msm/camera_v2/camera/camera.c b/drivers/media/platform/msm/camera_v2/camera/camera.c
index 32aa4ef..4579cee 100644
--- a/drivers/media/platform/msm/camera_v2/camera/camera.c
+++ b/drivers/media/platform/msm/camera_v2/camera/camera.c
@@ -38,6 +38,7 @@
struct camera_v4l2_private {
struct v4l2_fh fh;
unsigned int stream_id;
+ unsigned int is_vb2_valid; /*0 if no vb2 buffers on stream, else 1*/
struct vb2_queue vb2_q;
};
@@ -194,9 +195,18 @@
static int camera_v4l2_reqbufs(struct file *filep, void *fh,
struct v4l2_requestbuffers *req)
{
+ int ret;
+ struct msm_session *session;
struct camera_v4l2_private *sp = fh_to_private(fh);
-
- return vb2_reqbufs(&sp->vb2_q, req);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&session->lock);
+ ret = vb2_reqbufs(&sp->vb2_q, req);
+ mutex_unlock(&session->lock);
+ return ret;
}
static int camera_v4l2_querybuf(struct file *filep, void *fh,
@@ -208,17 +218,35 @@
static int camera_v4l2_qbuf(struct file *filep, void *fh,
struct v4l2_buffer *pb)
{
+ int ret;
+ struct msm_session *session;
struct camera_v4l2_private *sp = fh_to_private(fh);
-
- return vb2_qbuf(&sp->vb2_q, pb);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&session->lock);
+ ret = vb2_qbuf(&sp->vb2_q, pb);
+ mutex_unlock(&session->lock);
+ return ret;
}
static int camera_v4l2_dqbuf(struct file *filep, void *fh,
struct v4l2_buffer *pb)
{
+ int ret;
+ struct msm_session *session;
struct camera_v4l2_private *sp = fh_to_private(fh);
-
- return vb2_dqbuf(&sp->vb2_q, pb, filep->f_flags & O_NONBLOCK);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&session->lock);
+ ret = vb2_dqbuf(&sp->vb2_q, pb, filep->f_flags & O_NONBLOCK);
+ mutex_unlock(&session->lock);
+ return ret;
}
static int camera_v4l2_streamon(struct file *filep, void *fh,
@@ -314,6 +342,7 @@
rc = camera_check_event_status(&event);
if (rc < 0)
goto set_fmt_fail;
+ sp->is_vb2_valid = 1;
}
return rc;
@@ -550,8 +579,8 @@
{
int rc = 0;
struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
-
- rc = vb2_poll(&sp->vb2_q, filep, wait);
+ if (sp->is_vb2_valid == 1)
+ rc = vb2_poll(&sp->vb2_q, filep, wait);
poll_wait(filep, &sp->fh.wait, wait);
if (v4l2_event_pending(&sp->fh))
@@ -566,13 +595,18 @@
struct v4l2_event event;
struct msm_video_device *pvdev = video_drvdata(filep);
struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
-
BUG_ON(!pvdev);
atomic_sub_return(1, &pvdev->opened);
if (atomic_read(&pvdev->opened) == 0) {
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);
+
+		/* Do not wait, imaging server may have crashed */
+ msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+
camera_pack_event(filep, MSM_CAMERA_DEL_SESSION, 0, -1, &event);
/* Donot wait, imaging server may have crashed */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index 22e8400..8ea1a00 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -281,16 +281,21 @@
list_for_each_entry(temp_buf_info,
&bufq->share_head, share_list) {
if (!temp_buf_info->buf_used[id]) {
- *buf_info = temp_buf_info;
temp_buf_info->buf_used[id] = 1;
temp_buf_info->buf_get_count++;
if (temp_buf_info->buf_get_count ==
bufq->buf_client_count)
list_del_init(
&temp_buf_info->share_list);
+ if (temp_buf_info->buf_reuse_flag) {
+ kfree(temp_buf_info);
+ } else {
+ *buf_info = temp_buf_info;
+ rc = 0;
+ }
spin_unlock_irqrestore(
&bufq->bufq_lock, flags);
- return 0;
+ return rc;
}
}
}
@@ -322,21 +327,31 @@
}
if (!(*buf_info)) {
- spin_unlock_irqrestore(&bufq->bufq_lock, flags);
- return rc;
- }
-
- (*buf_info)->state = MSM_ISP_BUFFER_STATE_DEQUEUED;
- if (bufq->buf_type == ISP_SHARE_BUF) {
- memset((*buf_info)->buf_used, 0,
- sizeof(uint8_t) * bufq->buf_client_count);
- (*buf_info)->buf_used[id] = 1;
- (*buf_info)->buf_get_count = 1;
- (*buf_info)->buf_put_count = 0;
- list_add_tail(&(*buf_info)->share_list, &bufq->share_head);
+ if (bufq->buf_type == ISP_SHARE_BUF) {
+ temp_buf_info = kzalloc(
+ sizeof(struct msm_isp_buffer), GFP_ATOMIC);
+ temp_buf_info->buf_reuse_flag = 1;
+ temp_buf_info->buf_used[id] = 1;
+ temp_buf_info->buf_get_count = 1;
+ list_add_tail(&temp_buf_info->share_list,
+ &bufq->share_head);
+ }
+ } else {
+ (*buf_info)->state = MSM_ISP_BUFFER_STATE_DEQUEUED;
+ if (bufq->buf_type == ISP_SHARE_BUF) {
+ memset((*buf_info)->buf_used, 0,
+ sizeof(uint8_t) * bufq->buf_client_count);
+ (*buf_info)->buf_used[id] = 1;
+ (*buf_info)->buf_get_count = 1;
+ (*buf_info)->buf_put_count = 0;
+ (*buf_info)->buf_reuse_flag = 0;
+ list_add_tail(&(*buf_info)->share_list,
+ &bufq->share_head);
+ }
+ rc = 0;
}
spin_unlock_irqrestore(&bufq->bufq_lock, flags);
- return 0;
+ return rc;
}
static int msm_isp_put_buf(struct msm_isp_buf_mgr *buf_mgr,
@@ -648,22 +663,35 @@
}
}
-static int msm_isp_attach_ctx(struct msm_isp_buf_mgr *buf_mgr,
- struct device *iommu_ctx)
+static void msm_isp_register_ctx(struct msm_isp_buf_mgr *buf_mgr,
+ struct device **iommu_ctx, int num_iommu_ctx)
{
- int rc;
- rc = iommu_attach_device(buf_mgr->iommu_domain, iommu_ctx);
- if (rc) {
- pr_err("%s: Iommu attach error\n", __func__);
- return -EINVAL;
+ int i;
+ buf_mgr->num_iommu_ctx = num_iommu_ctx;
+ for (i = 0; i < num_iommu_ctx; i++)
+ buf_mgr->iommu_ctx[i] = iommu_ctx[i];
+}
+
+static int msm_isp_attach_ctx(struct msm_isp_buf_mgr *buf_mgr)
+{
+ int rc, i;
+ for (i = 0; i < buf_mgr->num_iommu_ctx; i++) {
+ rc = iommu_attach_device(buf_mgr->iommu_domain,
+ buf_mgr->iommu_ctx[i]);
+ if (rc) {
+ pr_err("%s: Iommu attach error\n", __func__);
+ return -EINVAL;
+ }
}
return 0;
}
-static void msm_isp_detach_ctx(struct msm_isp_buf_mgr *buf_mgr,
- struct device *iommu_ctx)
+static void msm_isp_detach_ctx(struct msm_isp_buf_mgr *buf_mgr)
{
- iommu_detach_device(buf_mgr->iommu_domain, iommu_ctx);
+ int i;
+ for (i = 0; i < buf_mgr->num_iommu_ctx; i++)
+ iommu_detach_device(buf_mgr->iommu_domain,
+ buf_mgr->iommu_ctx[i]);
}
static int msm_isp_init_isp_buf_mgr(
@@ -680,6 +708,7 @@
}
CDBG("%s: E\n", __func__);
+ msm_isp_attach_ctx(buf_mgr);
buf_mgr->num_buf_q = num_buf_q;
buf_mgr->bufq =
kzalloc(sizeof(struct msm_isp_bufq) * num_buf_q,
@@ -704,6 +733,7 @@
ion_client_destroy(buf_mgr->client);
kfree(buf_mgr->bufq);
buf_mgr->num_buf_q = 0;
+ msm_isp_detach_ctx(buf_mgr);
return 0;
}
@@ -740,8 +770,7 @@
.flush_buf = msm_isp_flush_buf,
.buf_done = msm_isp_buf_done,
.buf_divert = msm_isp_buf_divert,
- .attach_ctx = msm_isp_attach_ctx,
- .detach_ctx = msm_isp_detach_ctx,
+ .register_ctx = msm_isp_register_ctx,
.buf_mgr_init = msm_isp_init_isp_buf_mgr,
.buf_mgr_deinit = msm_isp_deinit_isp_buf_mgr,
};
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
index d4e7c88..fda1a57 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
@@ -65,6 +65,7 @@
uint8_t buf_used[ISP_SHARE_BUF_CLIENT];
uint8_t buf_get_count;
uint8_t buf_put_count;
+ uint8_t buf_reuse_flag;
};
struct msm_isp_bufq {
@@ -111,10 +112,8 @@
int (*buf_divert) (struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, uint32_t buf_index,
struct timeval *tv, uint32_t frame_id);
- int (*attach_ctx) (struct msm_isp_buf_mgr *buf_mgr,
- struct device *iommu_ctx);
- void (*detach_ctx) (struct msm_isp_buf_mgr *buf_mgr,
- struct device *iommu_ctx);
+ void (*register_ctx) (struct msm_isp_buf_mgr *buf_mgr,
+ struct device **iommu_ctx, int num_iommu_ctx);
int (*buf_mgr_init) (struct msm_isp_buf_mgr *buf_mgr,
const char *ctx_name, uint16_t num_buf_q);
int (*buf_mgr_deinit) (struct msm_isp_buf_mgr *buf_mgr);
@@ -136,6 +135,9 @@
/*IOMMU specific*/
int iommu_domain_num;
struct iommu_domain *iommu_domain;
+
+ int num_iommu_ctx;
+ struct device *iommu_ctx[2];
};
int msm_isp_create_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 447c752..b31b3f1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -138,6 +138,8 @@
kfree(vfe_dev);
return -EINVAL;
}
+ vfe_dev->buf_mgr->ops->register_ctx(vfe_dev->buf_mgr,
+ &vfe_dev->iommu_ctx[0], vfe_dev->hw_info->num_iommu_ctx);
vfe_dev->vfe_open_cnt = 0;
end:
return rc;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index ad8aa82..7bc2b7d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -33,7 +33,7 @@
#define MAX_NUM_COMPOSITE_MASK 4
#define MAX_NUM_STATS_COMP_MASK 2
#define MAX_INIT_FRAME_DROP 31
-#define ISP_SUB(a) ((a > 0) ? a-1 : 0)
+#define ISP_Q2 (1 << 2)
#define VFE_PING_FLAG 0xFFFFFFFF
#define VFE_PONG_FLAG 0x0
@@ -148,7 +148,8 @@
struct msm_vfe_stats_stream *stream_info);
void (*clear_framedrop) (struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info);
- void (*cfg_comp_mask) (struct vfe_device *vfe_dev);
+ void (*cfg_comp_mask) (struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
void (*cfg_wm_irq_mask) (struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info);
void (*clear_wm_irq_mask) (struct vfe_device *vfe_dev,
@@ -183,6 +184,7 @@
struct msm_vfe_hardware_info {
int num_iommu_ctx;
+ int vfe_clk_idx;
struct msm_vfe_ops vfe_ops;
struct msm_vfe_axi_hardware_info *axi_hw_info;
struct msm_vfe_stats_hardware_info *stats_hw_info;
@@ -206,6 +208,7 @@
PAUSE,
START_PENDING,
STOP_PENDING,
+ STARTING,
STOPPING,
PAUSE_PENDING,
};
@@ -246,9 +249,16 @@
uint32_t burst_frame_count;/*number of sof before burst stop*/
uint8_t framedrop_update;
+ /*Bandwidth calculation info*/
+ uint32_t max_width;
+	/* Format plane-size factor in Q2 fixed point, e.g. NV12 = 1.5 (stored as 6) */
+ uint32_t format_factor;
+ uint32_t bandwidth;
+
/*Run time update variables*/
uint32_t runtime_init_frame_drop;
uint32_t runtime_burst_frame_count;/*number of sof before burst stop*/
+ uint32_t runtime_num_burst_capture;
uint8_t runtime_framedrop_update;
};
@@ -263,6 +273,8 @@
uint8_t pix_stream_count;
uint8_t raw_stream_count;
enum msm_vfe_inputmux input_mux;
+ uint32_t width;
+ long pixel_clock;
};
enum msm_wm_ub_cfg_type {
@@ -283,6 +295,7 @@
composite_info[MAX_NUM_COMPOSITE_MASK];
uint8_t num_used_composite_mask;
uint32_t stream_update;
+ enum msm_isp_camif_update_state pipeline_update;
struct msm_vfe_src_info src_info[VFE_SRC_MAX];
uint16_t stream_handle_cnt;
unsigned long event_mask;
@@ -301,6 +314,7 @@
STATS_ACTIVE,
STATS_START_PENDING,
STATS_STOP_PENDING,
+ STATS_STARTING,
STATS_STOPPING,
};
@@ -308,9 +322,11 @@
uint32_t session_id;
uint32_t stream_id;
uint32_t stream_handle;
+ uint32_t composite_flag;
enum msm_isp_stats_type stats_type;
enum msm_vfe_stats_state state;
uint32_t framedrop_pattern;
+ uint32_t framedrop_period;
uint32_t irq_subsample_pattern;
uint32_t buffer_offset;
@@ -320,11 +336,10 @@
struct msm_vfe_stats_shared_data {
struct msm_vfe_stats_stream stream_info[MSM_ISP_STATS_MAX];
- enum msm_vfe_stats_pipeline_policy stats_pipeline_policy;
- uint32_t comp_framedrop_pattern;
- uint32_t comp_irq_subsample_pattern;
uint8_t num_active_stream;
+ atomic_t stats_comp_mask;
uint16_t stream_handle_cnt;
+ atomic_t stats_update;
};
struct msm_vfe_tasklet_queue_cmd {
@@ -369,6 +384,7 @@
struct completion reset_complete;
struct completion halt_complete;
struct completion stream_config_complete;
+ struct completion stats_config_complete;
struct mutex realtime_mutex;
struct mutex core_mutex;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index b981653..a251f0a 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -24,7 +24,7 @@
#define VFE32_BURST_LEN 3
#define VFE32_UB_SIZE 1024
-#define VFE32_EQUAL_SLICE_UB 117
+#define VFE32_EQUAL_SLICE_UB 204
#define VFE32_WM_BASE(idx) (0x4C + 0x18 * idx)
#define VFE32_RDI_BASE(idx) (idx ? 0x734 + 0x4 * (idx - 1) : 0x06FC)
#define VFE32_XBAR_BASE(idx) (0x40 + 0x4 * (idx / 4))
@@ -39,42 +39,7 @@
(VFE32_STATS_BASE(idx) + 0x4 * \
(~(ping_pong >> (idx + VFE32_STATS_PING_PONG_OFFSET)) & 0x1))
-/*Temporary use fixed bus vectors in VFE */
-static struct msm_bus_vectors msm_vfe32_init_vectors[] = {
- {
- .src = MSM_BUS_MASTER_VFE,
- .dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 0,
- .ib = 0,
- },
-};
-
-static struct msm_bus_vectors msm_vfe32_preview_vectors[] = {
- {
- .src = MSM_BUS_MASTER_VFE,
- .dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 1027648000,
- .ib = 1105920000,
- },
-};
-
-static struct msm_bus_paths msm_vfe32_bus_client_config[] = {
- {
- ARRAY_SIZE(msm_vfe32_init_vectors),
- msm_vfe32_init_vectors,
- },
- {
- ARRAY_SIZE(msm_vfe32_preview_vectors),
- msm_vfe32_preview_vectors,
- },
-};
-
-static struct msm_bus_scale_pdata msm_vfe32_bus_client_pdata = {
- msm_vfe32_bus_client_config,
- ARRAY_SIZE(msm_vfe32_bus_client_config),
- .name = "msm_camera_vfe",
-};
-
+#define VFE32_CLK_IDX 0
static struct msm_cam_clk_info msm_vfe32_clk_info[] = {
{"vfe_clk", 266667000},
{"vfe_pclk", -1},
@@ -84,15 +49,11 @@
static int msm_vfe32_init_hardware(struct vfe_device *vfe_dev)
{
int rc = -1;
-
- vfe_dev->bus_perf_client =
- msm_bus_scale_register_client(&msm_vfe32_bus_client_pdata);
- if (!vfe_dev->bus_perf_client) {
- pr_err("%s: Registration Failed!\n", __func__);
- vfe_dev->bus_perf_client = 0;
+ rc = msm_isp_init_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+ if (rc < 0) {
+ pr_err("%s: Bandwidth registration Failed!\n", __func__);
goto bus_scale_register_failed;
}
- msm_bus_scale_client_update_request(vfe_dev->bus_perf_client, 1);
if (vfe_dev->fs_vfe) {
rc = regulator_enable(vfe_dev->fs_vfe);
@@ -131,8 +92,7 @@
clk_enable_failed:
regulator_disable(vfe_dev->fs_vfe);
fs_failed:
- msm_bus_scale_client_update_request(vfe_dev->bus_perf_client, 0);
- msm_bus_scale_unregister_client(vfe_dev->bus_perf_client);
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
bus_scale_register_failed:
return rc;
}
@@ -145,8 +105,7 @@
msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe32_clk_info,
vfe_dev->vfe_clk, ARRAY_SIZE(msm_vfe32_clk_info), 0);
regulator_disable(vfe_dev->fs_vfe);
- msm_bus_scale_client_update_request(vfe_dev->bus_perf_client, 0);
- msm_bus_scale_unregister_client(vfe_dev->bus_perf_client);
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
}
static void msm_vfe32_init_hardware_reg(struct vfe_device *vfe_dev)
@@ -155,7 +114,7 @@
msm_camera_io_w(0x07FFFFFF, vfe_dev->vfe_base + 0xC);
/* BUS_CFG */
msm_camera_io_w(0x00000001, vfe_dev->vfe_base + 0x3C);
- msm_camera_io_w(0x00000025, vfe_dev->vfe_base + 0x1C);
+ msm_camera_io_w(0x01000025, vfe_dev->vfe_base + 0x1C);
msm_camera_io_w_mb(0x1DFFFFFF, vfe_dev->vfe_base + 0x20);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
msm_camera_io_w_mb(0x1FFFFFFF, vfe_dev->vfe_base + 0x28);
@@ -345,6 +304,8 @@
if (vfe_dev->axi_data.stream_update)
msm_isp_axi_stream_update(vfe_dev);
+ if (atomic_read(&vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
msm_isp_update_framedrop_reg(vfe_dev);
msm_isp_update_error_frame_count(vfe_dev);
@@ -633,7 +594,7 @@
stream_cfg_cmd->plane_cfg[plane_idx].
output_stride) << 16 |
(stream_cfg_cmd->plane_cfg[plane_idx].
- output_height - 1) << 4 | VFE32_BURST_LEN >> 2;
+ output_height - 1) << 4 | VFE32_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
} else {
msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
@@ -643,7 +604,7 @@
stream_cfg_cmd->plane_cfg[plane_idx].
output_width) << 16 |
(stream_cfg_cmd->plane_cfg[plane_idx].
- output_height - 1) << 4 | VFE32_BURST_LEN >> 2;
+ output_height - 1) << 4 | VFE32_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
}
return;
@@ -808,7 +769,8 @@
}
}
-static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev)
+static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
{
return;
}
@@ -986,7 +948,7 @@
}
struct msm_vfe_axi_hardware_info msm_vfe32_axi_hw_info = {
- .num_wm = 7,
+ .num_wm = 4,
.num_comp_mask = 3,
.num_rdi = 3,
.num_rdi_master = 3,
@@ -1021,6 +983,7 @@
struct msm_vfe_hardware_info vfe32_hw_info = {
.num_iommu_ctx = 2,
+ .vfe_clk_idx = VFE32_CLK_IDX,
.vfe_ops = {
.irq_ops = {
.read_irq_status = msm_vfe32_read_irq_status,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index a786750..5a17635 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -77,42 +77,7 @@
#define VFE40_BUS_BDG_QOS_CFG_6 0x000002DC
#define VFE40_BUS_BDG_QOS_CFG_7 0x000002E0
-/*Temporary use fixed bus vectors in VFE */
-static struct msm_bus_vectors msm_vfe40_init_vectors[] = {
- {
- .src = MSM_BUS_MASTER_VFE,
- .dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 0,
- .ib = 0,
- },
-};
-
-static struct msm_bus_vectors msm_vfe40_preview_vectors[] = {
- {
- .src = MSM_BUS_MASTER_VFE,
- .dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 2027648000U,
- .ib = 2805920000U,
- },
-};
-
-static struct msm_bus_paths msm_vfe40_bus_client_config[] = {
- {
- ARRAY_SIZE(msm_vfe40_init_vectors),
- msm_vfe40_init_vectors,
- },
- {
- ARRAY_SIZE(msm_vfe40_preview_vectors),
- msm_vfe40_preview_vectors,
- },
-};
-
-static struct msm_bus_scale_pdata msm_vfe40_bus_client_pdata = {
- msm_vfe40_bus_client_config,
- ARRAY_SIZE(msm_vfe40_bus_client_config),
- .name = "msm_camera_vfe",
-};
-
+#define VFE40_CLK_IDX 1
static struct msm_cam_clk_info msm_vfe40_clk_info[] = {
{"camss_top_ahb_clk", -1},
{"vfe_clk_src", 266670000},
@@ -223,16 +188,11 @@
static int msm_vfe40_init_hardware(struct vfe_device *vfe_dev)
{
int rc = -1;
-
- vfe_dev->bus_perf_client =
- msm_bus_scale_register_client(&msm_vfe40_bus_client_pdata);
- if (!vfe_dev->bus_perf_client) {
- pr_err("%s: Registration Failed!\n", __func__);
- vfe_dev->bus_perf_client = 0;
+ rc = msm_isp_init_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+ if (rc < 0) {
+ pr_err("%s: Bandwidth registration Failed!\n", __func__);
goto bus_scale_register_failed;
}
- msm_bus_scale_client_update_request(
- vfe_dev->bus_perf_client, 1);
if (vfe_dev->fs_vfe) {
rc = regulator_enable(vfe_dev->fs_vfe);
@@ -280,8 +240,7 @@
clk_enable_failed:
regulator_disable(vfe_dev->fs_vfe);
fs_failed:
- msm_bus_scale_client_update_request(vfe_dev->bus_perf_client, 0);
- msm_bus_scale_unregister_client(vfe_dev->bus_perf_client);
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
bus_scale_register_failed:
return rc;
}
@@ -295,8 +254,7 @@
msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe40_clk_info,
vfe_dev->vfe_clk, ARRAY_SIZE(msm_vfe40_clk_info), 0);
regulator_disable(vfe_dev->fs_vfe);
- msm_bus_scale_client_update_request(vfe_dev->bus_perf_client, 0);
- msm_bus_scale_unregister_client(vfe_dev->bus_perf_client);
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
}
static void msm_vfe40_init_hardware_reg(struct vfe_device *vfe_dev)
@@ -308,7 +266,7 @@
msm_camera_io_w(0xC001FF7F, vfe_dev->vfe_base + 0x974);
/* BUS_CFG */
msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50);
- msm_camera_io_w(0x800000F3, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w(0xE00000F3, vfe_dev->vfe_base + 0x28);
msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x2C);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x34);
@@ -508,6 +466,8 @@
if (vfe_dev->axi_data.stream_update)
msm_isp_axi_stream_update(vfe_dev);
+ if (atomic_read(&vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
msm_isp_update_framedrop_reg(vfe_dev);
msm_isp_update_error_frame_count(vfe_dev);
@@ -1045,12 +1005,16 @@
}
}
-static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev)
+static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
{
- if (vfe_dev->stats_data.stats_pipeline_policy == STATS_COMP_ALL)
- msm_camera_io_w(0x00FF0000, vfe_dev->vfe_base + 0x44);
+ uint32_t comp_mask;
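+ /* The stats composite mask occupies the upper 16 bits of register 0x44 */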
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x44) >> 16;
+ if (enable)
+ comp_mask |= stats_mask;
else
- msm_camera_io_w(0x00000000, vfe_dev->vfe_base + 0x44);
+ comp_mask &= ~stats_mask;
+ msm_camera_io_w(comp_mask << 16, vfe_dev->vfe_base + 0x44);
}
static void msm_vfe40_stats_cfg_wm_irq_mask(
@@ -1081,9 +1045,10 @@
uint32_t stats_base = VFE40_STATS_BASE(stats_idx);
/*WR_ADDR_CFG*/
- msm_camera_io_w(0x7C, vfe_dev->vfe_base + stats_base + 0x8);
+ msm_camera_io_w(stream_info->framedrop_period << 2,
+ vfe_dev->vfe_base + stats_base + 0x8);
/*WR_IRQ_FRAMEDROP_PATTERN*/
- msm_camera_io_w(0xFFFFFFFF,
+ msm_camera_io_w(stream_info->framedrop_pattern,
vfe_dev->vfe_base + stats_base + 0x10);
/*WR_IRQ_SUBSAMPLE_PATTERN*/
msm_camera_io_w(0xFFFFFFFF,
@@ -1231,11 +1196,9 @@
goto vfe_no_resource;
}
- if (vfe_dev->pdev->id == 0)
- vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe0");
- else if (vfe_dev->pdev->id == 1)
- vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe1");
- if (!vfe_dev->iommu_ctx[0]) {
+ vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe0");
+ vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe1");
+ if (!vfe_dev->iommu_ctx[0] || !vfe_dev->iommu_ctx[1]) {
pr_err("%s: cannot get iommu_ctx\n", __func__);
rc = -ENODEV;
goto vfe_no_resource;
@@ -1287,7 +1250,8 @@
};
struct msm_vfe_hardware_info vfe40_hw_info = {
- .num_iommu_ctx = 1,
+ .num_iommu_ctx = 2,
+ .vfe_clk_idx = VFE40_CLK_IDX,
.vfe_ops = {
.irq_ops = {
.read_irq_status = msm_vfe40_read_irq_status,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index f1bfd68..728e172 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -18,6 +18,8 @@
((src < RDI_INTF_0) ? VFE_PIX_0 : \
(VFE_RAW_0 + src - RDI_INTF_0))
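+/* A stream handle encodes its stream_info[] index in the low byte */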
+#define HANDLE_TO_IDX(handle) (handle & 0xFF)
+
int msm_isp_axi_create_stream(
struct msm_vfe_axi_shared_data *axi_data,
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
@@ -67,7 +69,7 @@
int rc = -1, i;
struct msm_vfe_axi_stream *stream_info =
&axi_data->stream_info[
- (stream_cfg_cmd->axi_stream_handle & 0xFF)];
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
switch (stream_cfg_cmd->output_format) {
case V4L2_PIX_FMT_SBGGR8:
@@ -95,12 +97,14 @@
case V4L2_PIX_FMT_QGRBG12:
case V4L2_PIX_FMT_QRGGB12:
stream_info->num_planes = 1;
+ stream_info->format_factor = ISP_Q2;
break;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
stream_info->num_planes = 2;
+ stream_info->format_factor = 1.5 * ISP_Q2;
break;
/*TD: Add more image format*/
default:
@@ -131,9 +135,12 @@
return rc;
}
- for (i = 0; i < stream_info->num_planes; i++)
+ for (i = 0; i < stream_info->num_planes; i++) {
stream_info->plane_offset[i] =
stream_cfg_cmd->plane_cfg[i].plane_addr_offset;
+ stream_info->max_width = max(stream_info->max_width,
+ stream_cfg_cmd->plane_cfg[i].output_width);
+ }
stream_info->stream_src = stream_cfg_cmd->stream_src;
stream_info->frame_based = stream_cfg_cmd->frame_base;
@@ -209,7 +216,7 @@
int i, j;
struct msm_vfe_axi_stream *stream_info =
&axi_data->stream_info[
- (stream_cfg_cmd->axi_stream_handle & 0xFF)];
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
for (i = 0; i < stream_info->num_planes; i++) {
for (j = 0; j < axi_data->hw_info->num_wm; j++) {
@@ -245,7 +252,7 @@
uint8_t comp_mask = 0;
struct msm_vfe_axi_stream *stream_info =
&axi_data->stream_info[
- (stream_cfg_cmd->axi_stream_handle & 0xFF)];
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
for (i = 0; i < stream_info->num_planes; i++)
comp_mask |= 1 << stream_info->wm[i];
@@ -285,7 +292,7 @@
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
stream_info = &axi_data->stream_info[
- (stream_cfg_cmd->stream_handle[i] & 0xFF)];
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
if (stream_info->state != valid_state) {
pr_err("%s: Invalid stream state\n", __func__);
rc = -EINVAL;
@@ -344,6 +351,8 @@
stream_info->runtime_init_frame_drop = stream_info->init_frame_drop;
stream_info->runtime_burst_frame_count =
stream_info->burst_frame_count;
+ stream_info->runtime_num_burst_capture =
+ stream_info->num_burst_capture;
stream_info->runtime_framedrop_update = stream_info->framedrop_update;
vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(vfe_dev, stream_info);
}
@@ -380,38 +389,13 @@
msm_isp_send_event(vfe_dev, ISP_EVENT_SOF, &sof_event);
}
-uint32_t msm_isp_get_framedrop_period(
- enum msm_vfe_frame_skip_pattern frame_skip_pattern)
-{
- switch (frame_skip_pattern) {
- case NO_SKIP:
- case EVERY_2FRAME:
- case EVERY_3FRAME:
- case EVERY_4FRAME:
- case EVERY_5FRAME:
- case EVERY_6FRAME:
- case EVERY_7FRAME:
- case EVERY_8FRAME:
- return frame_skip_pattern + 1;
- case EVERY_16FRAME:
- return 16;
- break;
- case EVERY_32FRAME:
- return 32;
- break;
- default:
- return 1;
- }
- return 1;
-}
-
void msm_isp_calculate_framedrop(
struct msm_vfe_axi_shared_data *axi_data,
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
{
struct msm_vfe_axi_stream *stream_info =
&axi_data->stream_info[
- (stream_cfg_cmd->axi_stream_handle & 0xFF)];
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
uint32_t framedrop_period = msm_isp_get_framedrop_period(
stream_cfg_cmd->frame_skip_pattern);
@@ -443,6 +427,23 @@
}
}
+void msm_isp_calculate_bandwidth(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
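+ /* Pixel streams: scale the camif pixel clock by the ratio of the widest
+ * output plane to the camif line width, then apply the Q2 format factor.
+ * RDI streams pass through at the interface pixel clock. */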
+ if (stream_info->stream_src < RDI_INTF_0) {
+ stream_info->bandwidth =
+ (axi_data->src_info[VFE_PIX_0].pixel_clock /
+ axi_data->src_info[VFE_PIX_0].width) *
+ stream_info->max_width;
+ stream_info->bandwidth = stream_info->bandwidth *
+ stream_info->format_factor / ISP_Q2;
+ } else {
+ int rdi = SRC_TO_INTF(stream_info->stream_src);
+ stream_info->bandwidth = axi_data->src_info[rdi].pixel_clock;
+ }
+}
+
int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0, i;
@@ -461,13 +462,12 @@
if (rc) {
pr_err("%s: Request validation failed\n", __func__);
msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
- (stream_cfg_cmd->axi_stream_handle & 0xFF));
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
return rc;
}
- stream_info =
- &vfe_dev->axi_data.
- stream_info[(stream_cfg_cmd->axi_stream_handle & 0xFF)];
+ stream_info = &vfe_dev->axi_data.
+ stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
msm_isp_axi_reserve_wm(&vfe_dev->axi_data, stream_cfg_cmd);
if (stream_cfg_cmd->stream_src == CAMIF_RAW ||
@@ -504,7 +504,7 @@
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
struct msm_vfe_axi_stream *stream_info =
&axi_data->stream_info[
- (stream_release_cmd->stream_handle & 0xFF)];
+ HANDLE_TO_IDX(stream_release_cmd->stream_handle)];
struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
if (stream_info->state == AVALIABLE) {
@@ -538,79 +538,77 @@
msm_isp_axi_free_wm(axi_data, stream_info);
msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
- (stream_release_cmd->stream_handle & 0xFF));
+ HANDLE_TO_IDX(stream_release_cmd->stream_handle));
return rc;
}
-void msm_isp_axi_stream_enable_cfg(
+static void msm_isp_axi_stream_enable_cfg(
struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info,
- uint32_t *wm_reload_mask)
+ struct msm_vfe_axi_stream *stream_info)
{
int i;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
if (stream_info->state == INACTIVE)
return;
for (i = 0; i < stream_info->num_planes; i++) {
- /*TD: Frame base command*/
if (stream_info->state == START_PENDING)
vfe_dev->hw_info->vfe_ops.axi_ops.
enable_wm(vfe_dev, stream_info->wm[i], 1);
else
vfe_dev->hw_info->vfe_ops.axi_ops.
enable_wm(vfe_dev, stream_info->wm[i], 0);
-
- *wm_reload_mask |= (1 << stream_info->wm[i]);
}
- if (stream_info->state == START_PENDING) {
+ if (stream_info->state == START_PENDING)
axi_data->num_active_stream++;
- stream_info->state = ACTIVE;
- } else {
+ else
axi_data->num_active_stream--;
- stream_info->state = INACTIVE;
- }
}
void msm_isp_axi_stream_update(struct vfe_device *vfe_dev)
{
int i;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t wm_reload_mask = 0x0;
for (i = 0; i < MAX_NUM_STREAM; i++) {
if (axi_data->stream_info[i].state == START_PENDING ||
axi_data->stream_info[i].state ==
STOP_PENDING) {
msm_isp_axi_stream_enable_cfg(
- vfe_dev, &axi_data->stream_info[i],
- &wm_reload_mask);
- if (axi_data->stream_info[i].state == STOP_PENDING)
- axi_data->stream_info[i].state = STOPPING;
+ vfe_dev, &axi_data->stream_info[i]);
+ axi_data->stream_info[i].state =
+ axi_data->stream_info[i].state ==
+ START_PENDING ? STARTING : STOPPING;
+ } else if (axi_data->stream_info[i].state == STARTING ||
+ axi_data->stream_info[i].state == STOPPING) {
+ axi_data->stream_info[i].state =
+ axi_data->stream_info[i].state == STARTING ?
+ ACTIVE : INACTIVE;
}
}
- /*Reload AXI*/
- vfe_dev->hw_info->vfe_ops.axi_ops.
- reload_wm(vfe_dev, wm_reload_mask);
- if (vfe_dev->axi_data.stream_update) {
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev);
- ISP_DBG("%s: send update complete\n", __func__);
- vfe_dev->axi_data.stream_update = 0;
- complete(&vfe_dev->stream_config_complete);
+
+ if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ enable_module(vfe_dev, 0xFF, 0);
+ vfe_dev->axi_data.pipeline_update = NO_UPDATE;
}
+
+ vfe_dev->axi_data.stream_update--;
+ if (vfe_dev->axi_data.stream_update == 0)
+ complete(&vfe_dev->stream_config_complete);
}
static void msm_isp_cfg_pong_address(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
int i;
- struct msm_isp_buffer *buf = stream_info->buf[1];
+ struct msm_isp_buffer *buf = stream_info->buf[0];
for (i = 0; i < stream_info->num_planes; i++)
vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
vfe_dev, stream_info->wm[i],
- VFE_PING_FLAG, buf->mapped_info[i].paddr +
+ VFE_PONG_FLAG, buf->mapped_info[i].paddr +
stream_info->plane_offset[i]);
- stream_info->buf[0] = buf;
+ stream_info->buf[1] = buf;
}
static void msm_isp_get_done_buf(struct vfe_device *vfe_dev,
@@ -636,7 +634,7 @@
struct msm_isp_buffer *buf = NULL;
uint32_t pingpong_bit = 0;
uint32_t bufq_handle = stream_info->bufq_handle;
- uint32_t stream_idx = stream_info->stream_handle & 0xFF;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
vfe_dev->pdev->id, bufq_handle, &buf);
@@ -673,7 +671,7 @@
{
int rc;
struct msm_isp_event_data buf_event;
- uint32_t stream_idx = stream_info->stream_handle & 0xFF;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
uint32_t frame_id = vfe_dev->axi_data.
src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
@@ -724,7 +722,7 @@
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
stream_info =
&axi_data->stream_info[
- (stream_cfg_cmd->stream_handle[i] & 0xFF)];
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
if (stream_info->stream_src < RDI_INTF_0)
pix_stream_cnt++;
if (stream_info->stream_src == PIX_ENCODER ||
@@ -793,16 +791,195 @@
ISP_DBG("%s\n", line_str);
}
-int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
+/*Factor in Q2 format*/
+#define ISP_DEFAULT_FORMAT_FACTOR 6
+#define ISP_BUS_UTILIZATION_FACTOR 6
+static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
{
- int rc = 0, i;
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
+ int i, rc = 0;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t total_pix_bandwidth = 0, total_rdi_bandwidth = 0;
+ uint32_t num_pix_streams = 0;
+ uint64_t total_bandwidth = 0;
+
+ for (i = 0; i < MAX_NUM_STREAM; i++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state == ACTIVE ||
+ stream_info->state == START_PENDING) {
+ if (stream_info->stream_src < RDI_INTF_0) {
+ total_pix_bandwidth += stream_info->bandwidth;
+ num_pix_streams++;
+ } else {
+ total_rdi_bandwidth += stream_info->bandwidth;
+ }
+ }
+ }
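+ /* Keep the average bandwidth of all but one pixel stream, then add a
+ * worst-case term based on the pixel clock and default format factor */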
+ if (num_pix_streams > 0)
+ total_pix_bandwidth = total_pix_bandwidth /
+ num_pix_streams * (num_pix_streams - 1) +
+ axi_data->src_info[VFE_PIX_0].pixel_clock *
+ ISP_DEFAULT_FORMAT_FACTOR / ISP_Q2;
+ total_bandwidth = total_pix_bandwidth + total_rdi_bandwidth;
+
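+ /* ab = aggregate bandwidth; ib = ab scaled by the bus utilization factor (Q2) */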
+ rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+ total_bandwidth, total_bandwidth *
+ ISP_BUS_UTILIZATION_FACTOR / ISP_Q2);
+ if (rc < 0)
+ pr_err("%s: update failed\n", __func__);
+
+ return rc;
+}
+
+static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int rc;
+ unsigned long flags;
+ spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+ init_completion(&vfe_dev->stream_config_complete);
+ vfe_dev->axi_data.pipeline_update = camif_update;
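+ /* Each update takes two reg-update cycles:
+ * *_PENDING -> STARTING/STOPPING -> ACTIVE/INACTIVE */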
+ vfe_dev->axi_data.stream_update = 2;
+ spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
+ rc = wait_for_completion_interruptible_timeout(
+ &vfe_dev->stream_config_complete,
+ msecs_to_jiffies(500));
+ if (rc == 0) {
+ pr_err("%s: wait timeout\n", __func__);
+ rc = -1;
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
+static int msm_isp_init_stream_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int rc = 0;
+ /*Set address for both PING & PONG register */
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PING_FLAG);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n",
+ __func__);
+ return rc;
+ }
+
+ /* For burst stream of one capture, only one buffer
+ * is allocated. Duplicate ping buffer address to pong
+ * buffer to ensure hardware write to a valid address
+ */
+ if (stream_info->stream_type == BURST_STREAM &&
+ stream_info->runtime_num_burst_capture <= 1) {
+ msm_isp_cfg_pong_address(vfe_dev, stream_info);
+ } else {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PONG_FLAG);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n",
+ __func__);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+static void msm_isp_get_stream_wm_mask(
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t *wm_reload_mask)
+{
+ int i;
+ for (i = 0; i < stream_info->num_planes; i++)
+ *wm_reload_mask |= (1 << stream_info->wm[i]);
+}
+
+static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int i, rc = 0;
+ uint8_t src_state, wait_for_complete = 0;
uint32_t wm_reload_mask = 0x0;
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint8_t src_state;
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ src_state = axi_data->src_info[
+ SRC_TO_INTF(stream_info->stream_src)].active;
+
+ msm_isp_calculate_bandwidth(axi_data, stream_info);
+ msm_isp_reset_framedrop(vfe_dev, stream_info);
+ msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+ rc = msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+ if (rc < 0) {
+ pr_err("%s: No buffer for stream%d\n", __func__,
+ HANDLE_TO_IDX(
+ stream_cfg_cmd->stream_handle[i]));
+ return rc;
+ }
+
+ stream_info->state = START_PENDING;
+ if (src_state) {
+ wait_for_complete = 1;
+ } else {
+ if (vfe_dev->dump_reg)
+ msm_camera_io_dump_2(vfe_dev->vfe_base, 0x900);
+
+ /*Configure AXI start bits to start immediately*/
+ msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
+ stream_info->state = ACTIVE;
+ }
+ }
+ msm_isp_update_stream_bandwidth(vfe_dev);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev);
+
+ if (camif_update == ENABLE_CAMIF)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, camif_update);
+
+ if (wait_for_complete)
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
+
+ return rc;
+}
+
+static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int i, rc = 0;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_info->state = STOP_PENDING;
+ }
+
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
+ if (rc < 0) {
+ pr_err("%s: wait for config done failed\n", __func__);
+ return rc;
+ }
+ msm_isp_update_stream_bandwidth(vfe_dev);
+ if (camif_update == DISABLE_CAMIF)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF);
+ return rc;
+}
+
+
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
enum msm_isp_camif_update_state camif_update;
- uint8_t wait_for_complete = 0;
+
rc = msm_isp_axi_check_stream_state(vfe_dev, stream_cfg_cmd);
if (rc < 0) {
pr_err("%s: Invalid stream state\n", __func__);
@@ -813,104 +990,18 @@
/*Configure UB*/
vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev);
}
-
camif_update =
msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
- if (camif_update == DISABLE_CAMIF)
- vfe_dev->hw_info->vfe_ops.core_ops.
- update_camif_state(vfe_dev, DISABLE_CAMIF);
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ rc = msm_isp_start_axi_stream(
+ vfe_dev, stream_cfg_cmd, camif_update);
+ else
+ rc = msm_isp_stop_axi_stream(
+ vfe_dev, stream_cfg_cmd, camif_update);
- /*
- * Stream start either immediately or at reg update
- * Depends on whether the stream src is active
- * If source is on, start and stop have to be done during reg update
- * If source is off, start can happen immediately or during reg update
- * stop has to be done immediately.
- */
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- stream_info =
- &axi_data->stream_info[
- (stream_cfg_cmd->stream_handle[i] & 0xFF)];
-
- if (stream_info->stream_src < RDI_INTF_0)
- src_state = axi_data->src_info[0].active;
- else
- src_state = axi_data->src_info[
- (stream_info->stream_src - RDI_INTF_0)].active;
-
- stream_info->state = (stream_cfg_cmd->cmd == START_STREAM) ?
- START_PENDING : STOP_PENDING;
-
- if (stream_cfg_cmd->cmd == START_STREAM) {
- /*Configure framedrop*/
- msm_isp_reset_framedrop(vfe_dev, stream_info);
-
- /*Set address for both PING & PONG register */
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PONG_FLAG);
- if (rc < 0) {
- pr_err("%s: No buffer for start stream\n",
- __func__);
- return rc;
- }
- /* For burst stream of one capture, only one buffer
- * is allocated. Duplicate ping buffer address to pong
- * buffer to ensure hardware write to a valid address
- */
- if (stream_info->stream_type == BURST_STREAM &&
- stream_info->num_burst_capture <= 1) {
- msm_isp_cfg_pong_address(vfe_dev, stream_info);
- } else {
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PING_FLAG);
- }
- }
- if (src_state && camif_update != DISABLE_CAMIF) {
- /*On the fly stream start/stop */
- wait_for_complete = 1;
- } else {
- if (vfe_dev->dump_reg &&
- stream_cfg_cmd->cmd == START_STREAM)
- msm_camera_io_dump_2(vfe_dev->vfe_base, 0x900);
- /*Configure AXI start bits to start immediately*/
- msm_isp_axi_stream_enable_cfg(
- vfe_dev, stream_info, &wm_reload_mask);
- }
- }
- if (!wait_for_complete) {
- /*Reload AXI*/
- if (stream_cfg_cmd->cmd == START_STREAM)
- vfe_dev->hw_info->vfe_ops.axi_ops.
- reload_wm(vfe_dev, wm_reload_mask);
-
- vfe_dev->hw_info->vfe_ops.core_ops.
- reg_update(vfe_dev);
-
- if (camif_update == ENABLE_CAMIF)
- vfe_dev->hw_info->vfe_ops.core_ops.
- update_camif_state(vfe_dev, camif_update);
- } else {
- unsigned long flags;
- spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
- init_completion(&vfe_dev->stream_config_complete);
- axi_data->stream_update = 1;
- spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
- /*Reload AXI*/
- if (stream_cfg_cmd->cmd == START_STREAM)
- vfe_dev->hw_info->vfe_ops.axi_ops.
- reload_wm(vfe_dev, wm_reload_mask);
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev);
- rc = wait_for_completion_interruptible_timeout(
- &vfe_dev->stream_config_complete,
- msecs_to_jiffies(500));
- if (rc == 0) {
- pr_err("%s: wait timeout\n", __func__);
- rc = -1;
- } else {
- rc = 0;
- }
- }
+ if (rc < 0)
+ pr_err("%s: start/stop stream failed\n", __func__);
return rc;
}
@@ -921,7 +1012,7 @@
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
stream_info = &axi_data->stream_info[
- (update_cmd->stream_handle & 0xFF)];
+ HANDLE_TO_IDX(update_cmd->stream_handle)];
if (stream_info->state != ACTIVE && stream_info->state != INACTIVE) {
pr_err("%s: Invalid stream state\n", __func__);
return -EINVAL;
@@ -984,18 +1075,25 @@
pr_err("%s: Invalid handle for composite irq\n",
__func__);
} else {
- stream_idx = comp_info->stream_handle & 0xFF;
+ stream_idx =
+ HANDLE_TO_IDX(comp_info->stream_handle);
stream_info =
&axi_data->stream_info[stream_idx];
ISP_DBG("%s: stream%d frame id: 0x%x\n",
__func__,
stream_idx, stream_info->frame_id);
stream_info->frame_id++;
+
+ if (stream_info->stream_type == BURST_STREAM)
+ stream_info->
+ runtime_num_burst_capture--;
+
msm_isp_get_done_buf(vfe_dev, stream_info,
pingpong_status, &done_buf);
if (stream_info->stream_type ==
CONTINUOUS_STREAM ||
- stream_info->num_burst_capture > 1) {
+ stream_info->
+ runtime_num_burst_capture > 1) {
rc = msm_isp_cfg_ping_pong_address(
vfe_dev, stream_info,
pingpong_status);
@@ -1015,16 +1113,20 @@
__func__);
continue;
}
- stream_idx = axi_data->free_wm[i] & 0xFF;
+ stream_idx = HANDLE_TO_IDX(axi_data->free_wm[i]);
stream_info = &axi_data->stream_info[stream_idx];
ISP_DBG("%s: stream%d frame id: 0x%x\n",
__func__,
stream_idx, stream_info->frame_id);
stream_info->frame_id++;
+
+ if (stream_info->stream_type == BURST_STREAM)
+ stream_info->runtime_num_burst_capture--;
+
msm_isp_get_done_buf(vfe_dev, stream_info,
pingpong_status, &done_buf);
if (stream_info->stream_type == CONTINUOUS_STREAM ||
- stream_info->num_burst_capture > 1) {
+ stream_info->runtime_num_burst_capture > 1) {
rc = msm_isp_cfg_ping_pong_address(vfe_dev,
stream_info, pingpong_status);
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
index ba845bc..f592a60 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
@@ -46,10 +46,6 @@
int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg);
-void msm_isp_axi_stream_enable_cfg(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info,
- uint32_t *wm_reload_mask);
-
void msm_isp_axi_stream_update(struct vfe_device *vfe_dev);
void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index c47209f..e08dea2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
#include <linux/io.h>
+#include <linux/atomic.h>
#include <media/v4l2-subdev.h>
#include "msm_isp_util.h"
#include "msm_isp_stats_util.h"
@@ -67,6 +68,7 @@
struct msm_isp_buffer *done_buf;
struct msm_vfe_stats_stream *stream_info = NULL;
uint32_t pingpong_status;
+ uint32_t comp_stats_type_mask = 0;
uint32_t stats_comp_mask = 0, stats_irq_mask = 0;
stats_comp_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
get_comp_mask(irq_status0, irq_status1);
@@ -76,13 +78,17 @@
return;
ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
- if (vfe_dev->stats_data.stats_pipeline_policy == STATS_COMP_ALL) {
- if (!stats_comp_mask)
- return;
- stats_irq_mask = 0xFFFFFFFF;
- }
+ if (!stats_comp_mask)
+ stats_irq_mask &=
+ ~atomic_read(&vfe_dev->stats_data.stats_comp_mask);
+ else
+ stats_irq_mask |=
+ atomic_read(&vfe_dev->stats_data.stats_comp_mask);
memset(&buf_event, 0, sizeof(struct msm_isp_event_data));
+ buf_event.timestamp = ts->event_time;
+ buf_event.frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
pingpong_status = vfe_dev->hw_info->
vfe_ops.stats_ops.get_pingpong_status(vfe_dev);
@@ -98,22 +104,32 @@
done_buf->bufq_handle, done_buf->buf_idx,
&ts->buf_time, vfe_dev->axi_data.
src_info[VFE_PIX_0].frame_id);
- if (rc == 0) {
- stats_event->stats_mask |=
+ if (rc != 0)
+ continue;
+
+ stats_event->stats_buf_idxs[stream_info->stats_type] =
+ done_buf->buf_idx;
+ if (!stream_info->composite_flag) {
+ stats_event->stats_mask =
1 << stream_info->stats_type;
- stats_event->stats_buf_idxs[
- stream_info->stats_type] =
- done_buf->buf_idx;
+ ISP_DBG("%s: stats event frame id: 0x%x\n",
+ __func__, buf_event.frame_id);
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_STATS_NOTIFY +
+ stream_info->stats_type, &buf_event);
+ } else {
+ comp_stats_type_mask |=
+ 1 << stream_info->stats_type;
}
}
}
- if (stats_event->stats_mask) {
- buf_event.timestamp = ts->event_time;
- buf_event.frame_id =
- vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
- msm_isp_send_event(vfe_dev, ISP_EVENT_STATS_NOTIFY +
- stream_info->stats_type, &buf_event);
+ if (comp_stats_type_mask) {
+ ISP_DBG("%s: composite stats event frame id: 0x%x mask: 0x%x\n",
+ __func__, buf_event.frame_id, comp_stats_type_mask);
+ stats_event->stats_mask = comp_stats_type_mask;
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_COMP_STATS_NOTIFY, &buf_event);
}
}
@@ -140,36 +156,19 @@
return rc;
}
- if (stats_data->stats_pipeline_policy != STATS_COMP_ALL) {
- if (stream_req_cmd->framedrop_pattern >= MAX_SKIP) {
- pr_err("%s: Invalid framedrop pattern\n", __func__);
- return rc;
- }
+ if (stream_req_cmd->framedrop_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid framedrop pattern\n", __func__);
+ return rc;
+ }
- if (stream_req_cmd->irq_subsample_pattern >= MAX_SKIP) {
- pr_err("%s: Invalid irq subsample pattern\n", __func__);
- return rc;
- }
- } else {
- if (stats_data->comp_framedrop_pattern >= MAX_SKIP) {
- pr_err("%s: Invalid comp framedrop pattern\n",
- __func__);
- return rc;
- }
-
- if (stats_data->comp_irq_subsample_pattern >= MAX_SKIP) {
- pr_err("%s: Invalid comp irq subsample pattern\n",
- __func__);
- return rc;
- }
- stream_req_cmd->framedrop_pattern =
- vfe_dev->stats_data.comp_framedrop_pattern;
- stream_req_cmd->irq_subsample_pattern =
- vfe_dev->stats_data.comp_irq_subsample_pattern;
+ if (stream_req_cmd->irq_subsample_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid irq subsample pattern\n", __func__);
+ return rc;
}
stream_info->session_id = stream_req_cmd->session_id;
stream_info->stream_id = stream_req_cmd->stream_id;
+ stream_info->composite_flag = stream_req_cmd->composite_flag;
stream_info->stats_type = stream_req_cmd->stats_type;
stream_info->buffer_offset = stream_req_cmd->buffer_offset;
stream_info->framedrop_pattern = stream_req_cmd->framedrop_pattern;
@@ -193,6 +192,7 @@
struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
struct msm_vfe_stats_stream *stream_info = NULL;
struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ uint32_t framedrop_period;
uint32_t stats_idx;
rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd);
@@ -204,31 +204,12 @@
stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
stream_info = &stats_data->stream_info[stats_idx];
- switch (stream_info->framedrop_pattern) {
- case NO_SKIP:
- stream_info->framedrop_pattern = VFE_NO_DROP;
- break;
- case EVERY_2FRAME:
- stream_info->framedrop_pattern = VFE_DROP_EVERY_2FRAME;
- break;
- case EVERY_4FRAME:
- stream_info->framedrop_pattern = VFE_DROP_EVERY_4FRAME;
- break;
- case EVERY_8FRAME:
- stream_info->framedrop_pattern = VFE_DROP_EVERY_8FRAME;
- break;
- case EVERY_16FRAME:
- stream_info->framedrop_pattern = VFE_DROP_EVERY_16FRAME;
- break;
- case EVERY_32FRAME:
- stream_info->framedrop_pattern = VFE_DROP_EVERY_32FRAME;
- break;
- default:
- stream_info->framedrop_pattern = VFE_NO_DROP;
- break;
- }
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_req_cmd->framedrop_pattern);
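+ /* Program a single-frame pattern repeated every framedrop_period frames */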
+ stream_info->framedrop_pattern = 0x1;
+ stream_info->framedrop_period = framedrop_period - 1;
- if (stats_data->stats_pipeline_policy == STATS_COMP_NONE)
+ if (!stream_info->composite_flag)
vfe_dev->hw_info->vfe_ops.stats_ops.
cfg_wm_irq_mask(vfe_dev, stream_info);
@@ -257,7 +238,7 @@
rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
}
- if (stats_data->stats_pipeline_policy == STATS_COMP_NONE)
+ if (!stream_info->composite_flag)
vfe_dev->hw_info->vfe_ops.stats_ops.
clear_wm_irq_mask(vfe_dev, stream_info);
@@ -266,18 +247,145 @@
return 0;
}
-int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
+static int msm_isp_init_stats_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int rc = 0;
+ stream_info->bufq_handle =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, stream_info->session_id,
+ stream_info->stream_id);
+ if (stream_info->bufq_handle == 0) {
+ pr_err("%s: no buf configured for stream: 0x%x\n",
+ __func__, stream_info->stream_handle);
+ return -EINVAL;
+ }
+
+ rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PING_FLAG, NULL);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n", __func__);
+ return rc;
+ }
+ rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PONG_FLAG, NULL);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n", __func__);
+ return rc;
+ }
+ return rc;
+}
+
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t stats_mask = 0, comp_stats_mask = 0;
+ uint32_t enable = 0;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ if (stats_data->stream_info[i].state == STATS_START_PENDING ||
+ stats_data->stream_info[i].state ==
+ STATS_STOP_PENDING) {
+ stats_mask |= i;
+ enable = stats_data->stream_info[i].state ==
+ STATS_START_PENDING ? 1 : 0;
+ stats_data->stream_info[i].state =
+ stats_data->stream_info[i].state ==
+ STATS_START_PENDING ?
+ STATS_STARTING : STATS_STOPPING;
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, BIT(i), enable);
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+ vfe_dev, BIT(i), enable);
+ } else if (stats_data->stream_info[i].state == STATS_STARTING ||
+ stats_data->stream_info[i].state == STATS_STOPPING) {
+ if (stats_data->stream_info[i].composite_flag)
+ comp_stats_mask |= i;
+ if (stats_data->stream_info[i].state == STATS_STARTING)
+ atomic_add(BIT(i),
+ &stats_data->stats_comp_mask);
+ else
+ atomic_sub(BIT(i),
+ &stats_data->stats_comp_mask);
+ stats_data->stream_info[i].state =
+ stats_data->stream_info[i].state ==
+ STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE;
+ }
+ }
+ atomic_sub(1, &stats_data->stats_update);
+ if (!atomic_read(&stats_data->stats_update))
+ complete(&vfe_dev->stats_config_complete);
+}
+
+static int msm_isp_stats_wait_for_cfg_done(struct vfe_device *vfe_dev)
+{
+ int rc;
+ init_completion(&vfe_dev->stats_config_complete);
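+ /* Stats streams also settle over two reg-update cycles */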
+ atomic_set(&vfe_dev->stats_data.stats_update, 2);
+ rc = wait_for_completion_interruptible_timeout(
+ &vfe_dev->stats_config_complete,
+ msecs_to_jiffies(500));
+ if (rc == 0) {
+ pr_err("%s: wait timeout\n", __func__);
+ rc = -1;
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
+static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
{
int i, rc = 0;
- struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+ uint32_t stats_mask = 0, comp_stats_mask = 0, idx;
struct msm_vfe_stats_stream *stream_info;
struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
- int idx;
- uint32_t stats_mask = 0;
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+ stream_info = &stats_data->stream_info[idx];
+ if (stream_info->stream_handle !=
+ stream_cfg_cmd->stream_handle[i]) {
+ pr_err("%s: Invalid stream handle: 0x%x received\n",
+ __func__, stream_cfg_cmd->stream_handle[i]);
+ continue;
+ }
+ rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+ if (rc < 0) {
+ pr_err("%s: No buffer for stream%d\n", __func__, idx);
+ return rc;
+ }
- if (stats_data->num_active_stream == 0)
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+ stream_info->state = STATS_START_PENDING;
+ else
+ stream_info->state = STATS_ACTIVE;
+ stats_data->num_active_stream++;
+ stats_mask |= 1 << idx;
+ if (stream_info->composite_flag)
+ comp_stats_mask |= 1 << idx;
+ }
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+ rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+ } else {
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ atomic_add(comp_stats_mask, &stats_data->stats_comp_mask);
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+ vfe_dev, comp_stats_mask, 1);
+ }
+ return rc;
+}
+
+static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i, rc = 0;
+ uint32_t stats_mask = 0, comp_stats_mask = 0, idx;
+ struct msm_vfe_stats_stream *stream_info;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
stream_info = &stats_data->stream_info[idx];
@@ -288,71 +396,39 @@
continue;
}
- if (stream_cfg_cmd->enable) {
- stream_info->bufq_handle =
- vfe_dev->buf_mgr->ops->get_bufq_handle(
- vfe_dev->buf_mgr, stream_info->session_id,
- stream_info->stream_id);
- if (stream_info->bufq_handle == 0) {
- pr_err("%s: no buf configured for stream: 0x%x\n",
- __func__,
- stream_info->stream_handle);
- return -EINVAL;
- }
-
- msm_isp_stats_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PING_FLAG, NULL);
- msm_isp_stats_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PONG_FLAG, NULL);
- stream_info->state = STATS_START_PENDING;
- stats_data->num_active_stream++;
- } else {
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
stream_info->state = STATS_STOP_PENDING;
- stats_data->num_active_stream--;
- }
+ else
+ stream_info->state = STATS_INACTIVE;
+
+ stats_data->num_active_stream--;
stats_mask |= 1 << idx;
+ if (stream_info->composite_flag)
+ comp_stats_mask |= 1 << idx;
}
- vfe_dev->hw_info->vfe_ops.stats_ops.
- enable_module(vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+ rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+ } else {
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ atomic_sub(comp_stats_mask, &stats_data->stats_comp_mask);
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+ vfe_dev, comp_stats_mask, 0);
+ }
return rc;
}
-int msm_isp_cfg_stats_comp_policy(struct vfe_device *vfe_dev, void *arg)
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
{
- int rc = -1;
- struct msm_vfe_stats_comp_policy_cfg *policy_cfg_cmd = arg;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ int rc = 0;
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+ if (vfe_dev->stats_data.num_active_stream == 0)
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
- if (stats_data->num_active_stream != 0) {
- pr_err("%s: Cannot update policy when there are active streams\n",
- __func__);
- return rc;
- }
+ if (stream_cfg_cmd->enable)
+ rc = msm_isp_start_stats_stream(vfe_dev, stream_cfg_cmd);
+ else
+ rc = msm_isp_stop_stats_stream(vfe_dev, stream_cfg_cmd);
- if (policy_cfg_cmd->stats_pipeline_policy >= MAX_STATS_POLICY) {
- pr_err("%s: Invalid stats composite policy\n", __func__);
- return rc;
- }
-
- if (policy_cfg_cmd->comp_framedrop_pattern >= MAX_SKIP) {
- pr_err("%s: Invalid comp framedrop pattern\n", __func__);
- return rc;
- }
-
- if (policy_cfg_cmd->comp_irq_subsample_pattern >= MAX_SKIP) {
- pr_err("%s: Invalid comp irq subsample pattern\n", __func__);
- return rc;
- }
-
- stats_data->stats_pipeline_policy =
- policy_cfg_cmd->stats_pipeline_policy;
- stats_data->comp_framedrop_pattern =
- policy_cfg_cmd->comp_framedrop_pattern;
- stats_data->comp_irq_subsample_pattern =
- policy_cfg_cmd->comp_irq_subsample_pattern;
-
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(vfe_dev);
-
- return 0;
+ return rc;
}
-
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
index 13e1fd6..7b4c4b4 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
@@ -18,8 +18,8 @@
void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
struct msm_isp_timestamp *ts);
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev);
int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg);
-int msm_isp_cfg_stats_comp_policy(struct vfe_device *vfe_dev, void *arg);
#endif /* __MSM_ISP_STATS_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 3035d93..c981901 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -9,8 +9,10 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <linux/mutex.h>
#include <linux/io.h>
#include <media/v4l2-subdev.h>
+#include <linux/ratelimit.h>
#include "msm.h"
#include "msm_isp_util.h"
@@ -19,6 +21,164 @@
#include "msm_camera_io_util.h"
#define MAX_ISP_V4l2_EVENTS 100
+static DEFINE_MUTEX(bandwidth_mgr_mutex);
+static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
+
+#define MSM_ISP_MIN_AB 300000000
+#define MSM_ISP_MIN_IB 450000000
+
+static struct msm_bus_vectors msm_isp_init_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_vectors msm_isp_ping_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = MSM_ISP_MIN_AB,
+ .ib = MSM_ISP_MIN_IB,
+ },
+};
+
+static struct msm_bus_vectors msm_isp_pong_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = MSM_ISP_MIN_AB,
+ .ib = MSM_ISP_MIN_IB,
+ },
+};
+
+static struct msm_bus_paths msm_isp_bus_client_config[] = {
+ {
+ ARRAY_SIZE(msm_isp_init_vectors),
+ msm_isp_init_vectors,
+ },
+ {
+ ARRAY_SIZE(msm_isp_ping_vectors),
+ msm_isp_ping_vectors,
+ },
+ {
+ ARRAY_SIZE(msm_isp_pong_vectors),
+ msm_isp_pong_vectors,
+ },
+};
+
+static struct msm_bus_scale_pdata msm_isp_bus_client_pdata = {
+ msm_isp_bus_client_config,
+ ARRAY_SIZE(msm_isp_bus_client_config),
+ .name = "msm_camera_isp",
+};
+
+int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+ int rc = 0;
+ mutex_lock(&bandwidth_mgr_mutex);
+ isp_bandwidth_mgr.client_info[client].active = 1;
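+ /* Only the first active client registers the bus scaling client */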
+ if (isp_bandwidth_mgr.use_count++) {
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return rc;
+ }
+ isp_bandwidth_mgr.bus_client =
+ msm_bus_scale_register_client(&msm_isp_bus_client_pdata);
+ if (!isp_bandwidth_mgr.bus_client) {
+ pr_err("%s: client register failed\n", __func__);
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return -EINVAL;
+ }
+
+ isp_bandwidth_mgr.bus_vector_active_idx = 1;
+ msm_bus_scale_client_update_request(
+ isp_bandwidth_mgr.bus_client,
+ isp_bandwidth_mgr.bus_vector_active_idx);
+
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return 0;
+}
+
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+ uint64_t ab, uint64_t ib)
+{
+ int i;
+ struct msm_bus_paths *path;
+ mutex_lock(&bandwidth_mgr_mutex);
+ if (!isp_bandwidth_mgr.use_count ||
+ !isp_bandwidth_mgr.bus_client) {
+ pr_err("%s: bandwidth manager inactive\n", __func__);
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return -EINVAL;
+ }
+
+ isp_bandwidth_mgr.client_info[client].ab = ab;
+ isp_bandwidth_mgr.client_info[client].ib = ib;
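+ /* Accumulate all active clients into the inactive bus vector, then switch to it */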
+ ALT_VECTOR_IDX(isp_bandwidth_mgr.bus_vector_active_idx);
+ path =
+ &(msm_isp_bus_client_pdata.usecase[
+ isp_bandwidth_mgr.bus_vector_active_idx]);
+ path->vectors[0].ab = MSM_ISP_MIN_AB;
+ path->vectors[0].ib = MSM_ISP_MIN_IB;
+ for (i = 0; i < MAX_ISP_CLIENT; i++) {
+ if (isp_bandwidth_mgr.client_info[i].active) {
+ path->vectors[0].ab +=
+ isp_bandwidth_mgr.client_info[i].ab;
+ path->vectors[0].ib +=
+ isp_bandwidth_mgr.client_info[i].ib;
+ }
+ }
+ msm_bus_scale_client_update_request(isp_bandwidth_mgr.bus_client,
+ isp_bandwidth_mgr.bus_vector_active_idx);
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return 0;
+}
+
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+ mutex_lock(&bandwidth_mgr_mutex);
+ memset(&isp_bandwidth_mgr.client_info[client], 0,
+ sizeof(struct msm_isp_bandwidth_info));
+ if (--isp_bandwidth_mgr.use_count) {
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return;
+ }
+
+ if (!isp_bandwidth_mgr.bus_client) {
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return;
+ }
+
+ msm_bus_scale_client_update_request(
+ isp_bandwidth_mgr.bus_client, 0);
+ msm_bus_scale_unregister_client(isp_bandwidth_mgr.bus_client);
+ isp_bandwidth_mgr.bus_client = 0;
+ mutex_unlock(&bandwidth_mgr_mutex);
+}
+
+uint32_t msm_isp_get_framedrop_period(
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern)
+{
+ switch (frame_skip_pattern) {
+ case NO_SKIP:
+ case EVERY_2FRAME:
+ case EVERY_3FRAME:
+ case EVERY_4FRAME:
+ case EVERY_5FRAME:
+ case EVERY_6FRAME:
+ case EVERY_7FRAME:
+ case EVERY_8FRAME:
+ return frame_skip_pattern + 1;
+ case EVERY_16FRAME:
+ return 16;
+ break;
+ case EVERY_32FRAME:
+ return 32;
+ break;
+ default:
+ return 1;
+ }
+ return 1;
+}
static inline void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp)
{
@@ -68,28 +228,67 @@
return rc;
}
-int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
- struct msm_vfe_pix_cfg *pix_cfg)
+static int msm_isp_set_clk_rate(struct vfe_device *vfe_dev, uint32_t rate)
{
int rc = 0;
- /*TD Validate config info
- * should check if all streams are off */
+ int clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+ long round_rate =
+ clk_round_rate(vfe_dev->vfe_clk[clk_idx], rate);
+ if (round_rate < 0) {
+ pr_err("%s: Invalid vfe clock rate\n", __func__);
+ return round_rate;
+ }
- vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux = pix_cfg->input_mux;
+ rc = clk_set_rate(vfe_dev->vfe_clk[clk_idx], round_rate);
+ if (rc < 0) {
+ pr_err("%s: Vfe set rate error\n", __func__);
+ return rc;
+ }
+ return 0;
+}
- vfe_dev->hw_info->vfe_ops.core_ops.cfg_camif(vfe_dev, pix_cfg);
+int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
+ struct msm_vfe_input_cfg *input_cfg)
+{
+ int rc = 0;
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+ pr_err("%s: pixel path is active\n", __func__);
+ return -EINVAL;
+ }
+
+ vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
+ input_cfg->input_pix_clk;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
+ input_cfg->d.pix_cfg.input_mux;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+ input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
+
+ rc = msm_isp_set_clk_rate(vfe_dev,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock);
+ if (rc < 0) {
+ pr_err("%s: clock set rate failed\n", __func__);
+ return rc;
+ }
+
+ vfe_dev->hw_info->vfe_ops.core_ops.cfg_camif(
+ vfe_dev, &input_cfg->d.pix_cfg);
return rc;
}
int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
- struct msm_vfe_rdi_cfg *rdi_cfg, enum msm_vfe_input_src input_src)
+ struct msm_vfe_input_cfg *input_cfg)
{
int rc = 0;
- /*TD Validate config info
- * should check if all streams are off */
+ if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
+ pr_err("%s: RAW%d path is active\n", __func__,
+ input_cfg->input_src - VFE_RAW_0);
+ return -EINVAL;
+ }
- vfe_dev->hw_info->vfe_ops.core_ops.
- cfg_rdi_reg(vfe_dev, rdi_cfg, input_src);
+ vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
+ input_cfg->input_pix_clk;
+ vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
+ vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
return rc;
}
@@ -100,16 +299,16 @@
switch (input_cfg->input_src) {
case VFE_PIX_0:
- msm_isp_cfg_pix(vfe_dev, &input_cfg->d.pix_cfg);
+ rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
break;
case VFE_RAW_0:
case VFE_RAW_1:
case VFE_RAW_2:
- msm_isp_cfg_rdi(vfe_dev, &input_cfg->d.rdi_cfg,
- input_cfg->input_src);
+ rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
break;
- case VFE_SRC_MAX:
- break;
+ default:
+ pr_err("%s: Invalid input source\n", __func__);
+ rc = -EINVAL;
}
return rc;
}
@@ -182,11 +381,6 @@
rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
mutex_unlock(&vfe_dev->core_mutex);
break;
- case VIDIOC_MSM_ISP_CFG_STATS_COMP_POLICY:
- mutex_lock(&vfe_dev->core_mutex);
- rc = msm_isp_cfg_stats_comp_policy(vfe_dev, arg);
- mutex_unlock(&vfe_dev->core_mutex);
- break;
case VIDIOC_MSM_ISP_UPDATE_STREAM:
mutex_lock(&vfe_dev->core_mutex);
rc = msm_isp_update_axi_stream(vfe_dev, arg);
@@ -513,6 +707,11 @@
{
int i;
struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+ static DEFINE_RATELIMIT_STATE(rs,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+ static DEFINE_RATELIMIT_STATE(rs_stats,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
if (error_info->error_count == 1 ||
!(error_info->info_dump_frame_count % 100)) {
vfe_dev->hw_info->vfe_ops.core_ops.
@@ -522,7 +721,8 @@
error_info->camif_status = 0;
error_info->violation_status = 0;
for (i = 0; i < MAX_NUM_STREAM; i++) {
- if (error_info->stream_framedrop_count[i] != 0) {
+ if (error_info->stream_framedrop_count[i] != 0 &&
+ __ratelimit(&rs)) {
pr_err("%s: Stream[%d]: dropped %d frames\n",
__func__, i,
error_info->stream_framedrop_count[i]);
@@ -530,7 +730,8 @@
}
}
for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
- if (error_info->stats_framedrop_count[i] != 0) {
+ if (error_info->stats_framedrop_count[i] != 0 &&
+ __ratelimit(&rs_stats)) {
pr_err("%s: Stats stream[%d]: dropped %d frames\n",
__func__, i,
error_info->stats_framedrop_count[i]);
@@ -577,7 +778,8 @@
spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
if (queue_cmd->cmd_used) {
- pr_err("%s: Tasklet queue overflow\n", __func__);
+ pr_err_ratelimited("%s: Tasklet queue overflow: %d\n",
+ __func__, vfe_dev->pdev->id);
list_del(&queue_cmd->list);
} else {
atomic_add(1, &vfe_dev->irq_cnt);
@@ -645,7 +847,6 @@
int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- uint32_t i;
struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
long rc;
ISP_DBG("%s\n", __func__);
@@ -678,9 +879,6 @@
vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
- for (i = 0; i < vfe_dev->hw_info->num_iommu_ctx; i++)
- vfe_dev->buf_mgr->ops->attach_ctx(vfe_dev->buf_mgr,
- vfe_dev->iommu_ctx[i]);
vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr, "msm_isp", 28);
memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
@@ -697,7 +895,6 @@
int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- int i;
long rc;
struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
ISP_DBG("%s\n", __func__);
@@ -715,11 +912,6 @@
pr_err("%s: halt timeout\n", __func__);
vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
-
- for (i = vfe_dev->hw_info->num_iommu_ctx - 1; i >= 0; i--)
- vfe_dev->buf_mgr->ops->detach_ctx(vfe_dev->buf_mgr,
- vfe_dev->iommu_ctx[i]);
-
vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
vfe_dev->vfe_open_cnt--;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
index 3dac7e0..34b9859 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
@@ -22,6 +22,35 @@
#define ISP_DBG(fmt, args...) pr_debug(fmt, ##args)
#endif
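+/* Toggle the active bus vector between usecase 1 (ping) and usecase 2 (pong) */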
+#define ALT_VECTOR_IDX(x) {x = 3 - x; }
+struct msm_isp_bandwidth_info {
+ uint32_t active;
+ uint64_t ab;
+ uint64_t ib;
+};
+
+enum msm_isp_hw_client {
+ ISP_VFE0,
+ ISP_VFE1,
+ ISP_CPP,
+ MAX_ISP_CLIENT,
+};
+
+struct msm_isp_bandwidth_mgr {
+ uint32_t bus_client;
+ uint32_t bus_vector_active_idx;
+ uint32_t use_count;
+ struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+};
+
+uint32_t msm_isp_get_framedrop_period(
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern);
+
+int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client);
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+ uint64_t ab, uint64_t ib);
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client);
+
int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
struct v4l2_event_subscription *sub);
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index d30afb2..962c079 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -11,7 +11,6 @@
*/
#include <linux/delay.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/jiffies.h>
@@ -60,181 +59,25 @@
false : true;
}
-static struct msm_cam_clk_info ispif_8960_clk_info[] = {
- {"csi_pix_clk", 0},
- {"csi_rdi_clk", 0},
- {"csi_pix1_clk", 0},
- {"csi_rdi1_clk", 0},
- {"csi_rdi2_clk", 0},
+static struct msm_cam_clk_info ispif_8974_ahb_clk_info[] = {
+ {"ispif_ahb_clk", -1},
};
-static struct msm_cam_clk_info ispif_8974_clk_info_vfe0[] = {
- {"camss_vfe_vfe_clk", -1},
- {"camss_csi_vfe_clk", -1},
-};
-
-static struct msm_cam_clk_info ispif_8974_clk_info_vfe1[] = {
- {"camss_vfe_vfe_clk1", -1},
- {"camss_csi_vfe_clk1", -1},
-};
-
-static int msm_ispif_clk_enable_one(struct ispif_device *ispif,
- enum msm_ispif_vfe_intf vfe_intf, int enable)
+static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable)
{
int rc = 0;
- if (enable)
- pr_debug("enable clk for VFE%d\n", vfe_intf);
- else
- pr_debug("disable clk for VFE%d\n", vfe_intf);
-
- if (ispif->csid_version < CSID_VERSION_V2) {
- rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
- ispif->ispif_clk[vfe_intf], 2, enable);
- if (rc) {
- pr_err("%s: cannot enable clock, error = %d\n",
- __func__, rc);
- goto end;
- }
- } else if (ispif->csid_version == CSID_VERSION_V2) {
- rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8960_clk_info,
- ispif->ispif_clk[vfe_intf],
- ARRAY_SIZE(ispif_8960_clk_info),
- enable);
- if (rc) {
- pr_err("%s: cannot enable clock, error = %d\n",
- __func__, rc);
- goto end;
- }
- } else if (ispif->csid_version >= CSID_VERSION_V3) {
- if (vfe_intf == VFE0) {
- rc = msm_cam_clk_enable(&ispif->pdev->dev,
- ispif_8974_clk_info_vfe0,
- ispif->ispif_clk[vfe_intf],
- ARRAY_SIZE(ispif_8974_clk_info_vfe0), enable);
- } else {
- rc = msm_cam_clk_enable(&ispif->pdev->dev,
- ispif_8974_clk_info_vfe1,
- ispif->ispif_clk[vfe_intf],
- ARRAY_SIZE(ispif_8974_clk_info_vfe1), enable);
- }
- if (rc) {
- pr_err("%s: cannot enable clock, error = %d, vfeid = %d\n",
- __func__, rc, vfe_intf);
- goto end;
- }
- } else {
- pr_err("%s: unsupported version=%d\n", __func__,
- ispif->csid_version);
- goto end;
+ if (ispif->csid_version < CSID_VERSION_V3) {
+ /* Older ISPIF versions don't need the AHB clock */
+ return 0;
}
-end:
- return rc;
-}
-
-static int msm_ispif_clk_enable(struct ispif_device *ispif,
- struct msm_ispif_param_data *params, int enable)
-{
- int rc = 0;
- int i, j;
- uint32_t vfe_intf_mask = 0;
-
- for (i = 0; i < params->num; i++) {
- if (vfe_intf_mask & (1 << params->entries[i].vfe_intf))
- continue;
- rc = msm_ispif_clk_enable_one(ispif,
- params->entries[i].vfe_intf, 1);
- if (rc < 0 && enable) {
- pr_err("%s: unable to enable clocks for VFE %d",
- __func__, params->entries[i].vfe_intf);
- for (j = 0; j < i; j++) {
- /* if VFE clock is not enabled do
- * not disable the clock */
- if (!(vfe_intf_mask & (1 <<
- params->entries[i].vfe_intf)))
- continue;
- msm_ispif_clk_enable_one(ispif,
- params->entries[j].vfe_intf, 0);
- /* remove the VFE ID from the mask */
- vfe_intf_mask &=
- ~(1 << params->entries[i].vfe_intf);
- }
- break;
- }
- vfe_intf_mask |= 1 << params->entries[i].vfe_intf;
- }
- return rc;
-}
-
-static int msm_ispif_intf_reset(struct ispif_device *ispif,
- struct msm_ispif_param_data *params)
-{
-
- int i, rc = 0;
- enum msm_ispif_intftype intf_type;
- int vfe_intf = 0;
- uint32_t data = 0;
-
- for (i = 0; i < params->num; i++) {
- data = STROBED_RST_EN;
- vfe_intf = params->entries[i].vfe_intf;
- intf_type = params->entries[i].intftype;
- ispif->sof_count[params->entries[i].vfe_intf].
- sof_cnt[intf_type] = 0;
-
- switch (intf_type) {
- case PIX0:
- data |= (PIX_0_VFE_RST_STB | PIX_0_CSID_RST_STB);
- break;
- case RDI0:
- data |= (RDI_0_VFE_RST_STB | RDI_0_CSID_RST_STB);
- break;
- case PIX1:
- data |= (PIX_1_VFE_RST_STB | PIX_1_CSID_RST_STB);
- break;
- case RDI1:
- data |= (RDI_1_VFE_RST_STB | RDI_1_CSID_RST_STB);
- break;
- case RDI2:
- data |= (RDI_2_VFE_RST_STB | RDI_2_CSID_RST_STB);
- break;
- default:
- rc = -EINVAL;
- break;
- }
- if (data > 0x1) {
- unsigned long jiffes = msecs_to_jiffies(500);
- long lrc = 0;
- unsigned long flags;
-
- spin_lock_irqsave(
- &ispif->auto_complete_lock, flags);
- ispif->wait_timeout[vfe_intf] = 0;
- init_completion(&ispif->reset_complete[vfe_intf]);
- spin_unlock_irqrestore(
- &ispif->auto_complete_lock, flags);
-
- if (vfe_intf == VFE0)
- msm_camera_io_w(data, ispif->base +
- ISPIF_RST_CMD_ADDR);
- else
- msm_camera_io_w(data, ispif->base +
- ISPIF_RST_CMD_1_ADDR);
- lrc = wait_for_completion_interruptible_timeout(
- &ispif->reset_complete[vfe_intf], jiffes);
- if (lrc < 0 || !lrc) {
- pr_err("%s: wait timeout ret = %ld, vfe_id = %d\n",
- __func__, lrc, vfe_intf);
- rc = -EIO;
-
- spin_lock_irqsave(
- &ispif->auto_complete_lock, flags);
- ispif->wait_timeout[vfe_intf] = 1;
- spin_unlock_irqrestore(
- &ispif->auto_complete_lock, flags);
- }
- }
+ rc = msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_8974_ahb_clk_info, &ispif->ahb_clk,
+ ARRAY_SIZE(ispif_8974_ahb_clk_info), enable);
+ if (rc < 0) {
+ pr_err("%s: cannot enable clock, error = %d",
+ __func__, rc);
}
return rc;
@@ -243,63 +86,48 @@
static int msm_ispif_reset(struct ispif_device *ispif)
{
int rc = 0;
- long lrc = 0;
- unsigned long jiffes = msecs_to_jiffies(500);
- unsigned long flags;
-
- spin_lock_irqsave(&ispif->auto_complete_lock, flags);
- ispif->wait_timeout[VFE0] = 0;
- init_completion(&ispif->reset_complete[VFE0]);
- if (ispif->csid_version >= CSID_VERSION_V3 &&
- ispif->vfe_info.num_vfe > 1) {
- ispif->wait_timeout[VFE1] = 0;
- init_completion(&ispif->reset_complete[VFE1]);
- }
- spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
+ int i;
BUG_ON(!ispif);
memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
- msm_camera_io_w(ISPIF_RST_CMD_MASK, ispif->base + ISPIF_RST_CMD_ADDR);
+ for (i = 0; i < ispif->vfe_info.num_vfe; i++) {
- lrc = wait_for_completion_interruptible_timeout(
- &ispif->reset_complete[VFE0], jiffes);
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_CTRL_0(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(i));
+ msm_camera_io_w(0xFFFFFFFF, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_0(i));
+ msm_camera_io_w(0xFFFFFFFF, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_1(i));
+ msm_camera_io_w(0xFFFFFFFF, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_2(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_INPUT_SEL(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_INTF_CMD_0(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_INTF_CMD_1(i));
- if (lrc < 0 || !lrc) {
- pr_err("%s: wait timeout ret = %ld, vfeid = %d\n",
- __func__, lrc, VFE0);
- rc = -EIO;
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 0));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 1));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 0));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 1));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 2));
- spin_lock_irqsave(&ispif->auto_complete_lock, flags);
- ispif->wait_timeout[VFE0] = 1;
- spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
-
- goto end;
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CROP(i, 0));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CROP(i, 1));
}
- if (ispif->csid_version >= CSID_VERSION_V3 &&
- ispif->vfe_info.num_vfe > 1) {
- msm_camera_io_w_mb(ISPIF_RST_CMD_1_MASK, ispif->base +
- ISPIF_RST_CMD_1_ADDR);
+ msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+ ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
- lrc = wait_for_completion_interruptible_timeout(
- &ispif->reset_complete[VFE1], jiffes);
-
- if (lrc < 0 || !lrc) {
- pr_err("%s: wait timeout ret = %ld, vfeid = %d\n",
- __func__, lrc, VFE1);
- rc = -EIO;
-
- spin_lock_irqsave(&ispif->auto_complete_lock, flags);
- ispif->wait_timeout[VFE1] = 1;
- spin_unlock_irqrestore(&ispif->auto_complete_lock,
- flags);
- }
-
- }
-
-end:
return rc;
}
@@ -315,7 +143,6 @@
static void msm_ispif_sel_csid_core(struct ispif_device *ispif,
uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
{
- int rc = 0;
uint32_t data;
BUG_ON(!ispif);
@@ -325,19 +152,6 @@
return;
}
- if (ispif->csid_version <= CSID_VERSION_V2) {
- if (ispif->ispif_clk[vfe_intf][intftype] == NULL) {
- CDBG("%s: ispif NULL clk\n", __func__);
- return;
- }
-
- rc = clk_set_rate(ispif->ispif_clk[vfe_intf][intftype], csid);
- if (rc) {
- pr_err("%s: clk_set_rate failed %d\n", __func__, rc);
- return;
- }
- }
-
data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_INPUT_SEL(vfe_intf));
switch (intftype) {
case PIX0:
@@ -361,9 +175,9 @@
data |= (csid << 20);
break;
}
- if (data)
- msm_camera_io_w_mb(data, ispif->base +
- ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+
+ msm_camera_io_w_mb(data, ispif->base +
+ ISPIF_VFE_m_INPUT_SEL(vfe_intf));
}
static void msm_ispif_enable_crop(struct ispif_device *ispif,
@@ -380,6 +194,8 @@
data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
data |= (1 << (intftype + 7));
+ if (intftype == PIX0)
+ data |= 1 << PIX0_LINE_BUF_EN_BIT;
msm_camera_io_w(data,
ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
@@ -478,6 +294,58 @@
return rc;
}
+static void msm_ispif_select_clk_mux(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+ uint32_t data = 0;
+
+ switch (intftype) {
+ case PIX0:
+ data = msm_camera_io_r(ispif->clk_mux_base);
+ data &= ~(0xf << (vfe_intf * 8));
+ data |= (csid << (vfe_intf * 8));
+ msm_camera_io_w(data, ispif->clk_mux_base);
+ break;
+
+ case RDI0:
+ data = msm_camera_io_r(ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ data &= ~(0xf << (vfe_intf * 12));
+ data |= (csid << (vfe_intf * 12));
+ msm_camera_io_w(data, ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ break;
+
+ case PIX1:
+ data = msm_camera_io_r(ispif->clk_mux_base);
+ data &= ~(0xf0 << (vfe_intf * 8));
+ data |= (csid << (4 + (vfe_intf * 8)));
+ msm_camera_io_w(data, ispif->clk_mux_base);
+ break;
+
+ case RDI1:
+ data = msm_camera_io_r(ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ data &= ~(0xf << (4 + (vfe_intf * 12)));
+ data |= (csid << (4 + (vfe_intf * 12)));
+ msm_camera_io_w(data, ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ break;
+
+ case RDI2:
+ data = msm_camera_io_r(ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ data &= ~(0xf << (8 + (vfe_intf * 12)));
+ data |= (csid << (8 + (vfe_intf * 12)));
+ msm_camera_io_w(data, ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ break;
+ }
+ CDBG("%s intftype %d data %x\n", __func__, intftype, data);
+ mb();
+ return;
+}
+
static uint16_t msm_ispif_get_cids_mask_from_cfg(
struct msm_ispif_params_entry *entry)
{
@@ -510,11 +378,6 @@
return rc;
}
- rc = msm_ispif_clk_enable(ispif, params, 1);
- if (rc < 0) {
- pr_err("%s: unable to enable clocks", __func__);
- return rc;
- }
for (i = 0; i < params->num; i++) {
vfe_intf = params->entries[i].vfe_intf;
if (!msm_ispif_is_intf_valid(ispif->csid_version,
@@ -547,6 +410,10 @@
return -EINVAL;
}
+ if (ispif->csid_version >= CSID_VERSION_V3)
+ msm_ispif_select_clk_mux(ispif, intftype,
+ params->entries[i].csid, vfe_intf);
+
rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf);
if (rc) {
pr_err("%s:validate_intf_status failed, rc = %d\n",
@@ -589,8 +456,6 @@
msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
- msm_ispif_clk_enable(ispif, params, 0);
-
return rc;
}
@@ -683,7 +548,7 @@
static int msm_ispif_start_frame_boundary(struct ispif_device *ispif,
struct msm_ispif_param_data *params)
{
- int rc;
+ int rc = 0;
if (ispif->ispif_state != ISPIF_POWER_UP) {
pr_err("%s: ispif invalid state %d\n", __func__,
@@ -692,23 +557,8 @@
return rc;
}
- rc = msm_ispif_clk_enable(ispif, params, 1);
- if (rc < 0) {
- pr_err("%s: unable to enable clocks", __func__);
- return rc;
- }
-
- rc = msm_ispif_intf_reset(ispif, params);
- if (rc) {
- pr_err("%s: msm_ispif_intf_reset failed. rc=%d\n",
- __func__, rc);
- goto end;
- }
-
msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
-end:
- msm_ispif_clk_enable(ispif, params, 0);
return rc;
}
@@ -731,12 +581,6 @@
return rc;
}
- rc = msm_ispif_clk_enable(ispif, params, 1);
- if (rc < 0) {
- pr_err("%s: unable to enable clocks", __func__);
- return rc;
- }
-
for (i = 0; i < params->num; i++) {
if (!msm_ispif_is_intf_valid(ispif->csid_version,
params->entries[i].vfe_intf)) {
@@ -788,8 +632,6 @@
}
end:
- msm_ispif_clk_enable(ispif, params, 0);
-
return rc;
}
@@ -860,15 +702,6 @@
ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
- if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ) {
- unsigned long flags;
- spin_lock_irqsave(&ispif->auto_complete_lock, flags);
- if (ispif->wait_timeout[VFE0] == 0)
- complete(&ispif->reset_complete[VFE0]);
- spin_unlock_irqrestore(
- &ispif->auto_complete_lock, flags);
- }
-
if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
pr_err("%s: VFE0 pix0 overflow.\n", __func__);
@@ -884,15 +717,6 @@
ispif_process_irq(ispif, out, VFE0);
}
if (ispif->vfe_info.num_vfe > 1) {
- if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ) {
- unsigned long flags;
- spin_lock_irqsave(&ispif->auto_complete_lock, flags);
- if (ispif->wait_timeout[VFE1] == 0)
- complete(&ispif->reset_complete[VFE1]);
- spin_unlock_irqrestore(
- &ispif->auto_complete_lock, flags);
- }
-
if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
pr_err("%s: VFE1 pix0 overflow.\n", __func__);
@@ -947,19 +771,20 @@
memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
ispif->csid_version = csid_version;
- rc = msm_ispif_clk_enable_one(ispif, VFE0, 1);
- if (rc < 0) {
- pr_err("%s: unable to enable clocks for VFE0", __func__);
- goto error_clk0;
- }
- if (ispif->csid_version >= CSID_VERSION_V3 &&
- ispif->vfe_info.num_vfe > 1) {
- rc = msm_ispif_clk_enable_one(ispif, VFE1, 1);
- if (rc < 0) {
- pr_err("%s: unable to enable clocks for VFE1",
- __func__);
- goto error_clk1;
+ if (ispif->csid_version >= CSID_VERSION_V3) {
+ if (!ispif->clk_mux_mem || !ispif->clk_mux_io) {
+ pr_err("%s csi clk mux mem %p io %p\n", __func__,
+ ispif->clk_mux_mem, ispif->clk_mux_io);
+ rc = -ENOMEM;
+ return rc;
+ }
+ ispif->clk_mux_base = ioremap(ispif->clk_mux_mem->start,
+ resource_size(ispif->clk_mux_mem));
+ if (!ispif->clk_mux_base) {
+ pr_err("%s: clk_mux_mem ioremap failed\n", __func__);
+ rc = -ENOMEM;
+ return rc;
}
}
@@ -977,31 +802,30 @@
goto error_irq;
}
+ rc = msm_ispif_clk_ahb_enable(ispif, 1);
+ if (rc) {
+ pr_err("%s: ahb_clk enable failed", __func__);
+ goto error_ahb;
+ }
+
rc = msm_ispif_reset(ispif);
if (rc == 0) {
ispif->ispif_state = ISPIF_POWER_UP;
CDBG("%s: power up done\n", __func__);
goto end;
}
+
+error_ahb:
free_irq(ispif->irq->start, ispif);
error_irq:
iounmap(ispif->base);
end:
- if (ispif->csid_version >= CSID_VERSION_V3 &&
- ispif->vfe_info.num_vfe > 1)
- msm_ispif_clk_enable_one(ispif, VFE1, 0);
-
-error_clk1:
- msm_ispif_clk_enable_one(ispif, VFE0, 0);
-
-error_clk0:
return rc;
}
static void msm_ispif_release(struct ispif_device *ispif)
{
- int i;
BUG_ON(!ispif);
if (ispif->ispif_state != ISPIF_POWER_UP) {
@@ -1010,18 +834,16 @@
return;
}
- for (i = 0; i < ispif->vfe_info.num_vfe; i++)
- msm_ispif_clk_enable_one(ispif, i, 1);
-
/* make sure no streaming going on */
msm_ispif_reset(ispif);
+ msm_ispif_clk_ahb_enable(ispif, 0);
+
free_irq(ispif->irq->start, ispif);
iounmap(ispif->base);
- for (i = 0; i < ispif->vfe_info.num_vfe; i++)
- msm_ispif_clk_enable_one(ispif, i, 0);
+ iounmap(ispif->clk_mux_base);
ispif->ispif_state = ISPIF_POWER_DOWN;
}
@@ -1141,7 +963,6 @@
{
int rc;
struct ispif_device *ispif;
- int i;
ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
if (!ispif) {
@@ -1195,13 +1016,20 @@
rc = -EBUSY;
goto error;
}
+ ispif->clk_mux_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "csi_clk_mux");
+ if (ispif->clk_mux_mem) {
+ ispif->clk_mux_io = request_mem_region(
+ ispif->clk_mux_mem->start,
+ resource_size(ispif->clk_mux_mem),
+ ispif->clk_mux_mem->name);
+ if (!ispif->clk_mux_io)
+ pr_err("%s: no valid csi_mux region\n", __func__);
+ }
ispif->pdev = pdev;
ispif->ispif_state = ISPIF_POWER_DOWN;
ispif->open_cnt = 0;
- spin_lock_init(&ispif->auto_complete_lock);
- for (i = 0; i < VFE_MAX; i++)
- ispif->wait_timeout[i] = 0;
return 0;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index 2c77292..faa32aa 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -42,21 +42,21 @@
struct platform_device *pdev;
struct msm_sd_subdev msm_sd;
struct resource *mem;
+ struct resource *clk_mux_mem;
struct resource *irq;
struct resource *io;
+ struct resource *clk_mux_io;
void __iomem *base;
+ void __iomem *clk_mux_base;
struct mutex mutex;
uint8_t start_ack_pending;
- struct completion reset_complete[VFE_MAX];
- spinlock_t auto_complete_lock;
- uint8_t wait_timeout[VFE_MAX];
uint32_t csid_version;
int enb_dump_reg;
uint32_t open_cnt;
struct ispif_sof_count sof_count[VFE_MAX];
struct ispif_intf_cmd applied_intf_cmd[VFE_MAX];
enum msm_ispif_state_t ispif_state;
- struct clk *ispif_clk[VFE_MAX][INTF_MAX];
struct msm_ispif_vfe_info vfe_info;
+ struct clk *ahb_clk;
};
#endif
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
index afd91d1..6396486 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
@@ -16,6 +16,7 @@
/* common registers */
#define ISPIF_RST_CMD_ADDR 0x0000
#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x0124
+#define PIX0_LINE_BUF_EN_BIT 0
#define ISPIF_VFE(m) (0x0)
@@ -49,6 +50,8 @@
+/* CSID CLK MUX SEL REGISTERS */
+#define ISPIF_RDI_CLK_MUX_SEL_ADDR 0x8
/*ISPIF RESET BITS*/
#define VFE_CLK_DOMAIN_RST BIT(31)
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
index 80b32d4..c805c3d 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
@@ -17,6 +17,7 @@
#define ISPIF_RST_CMD_ADDR 0x008
#define ISPIF_RST_CMD_1_ADDR 0x00C
#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x01C
+#define PIX0_LINE_BUF_EN_BIT 6
#define ISPIF_VFE(m) ((m) * 0x200)
@@ -45,6 +46,9 @@
#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x2D0 + ISPIF_VFE(m) + 4*(n))
#define ISPIF_VFE_m_3D_DESKEW_SIZE(m) (0x2E4 + ISPIF_VFE(m))
+/* CSID CLK MUX SEL REGISTERS */
+#define ISPIF_RDI_CLK_MUX_SEL_ADDR 0x8
+
/*ISPIF RESET BITS*/
#define VFE_CLK_DOMAIN_RST BIT(31)
#define PIX_1_CLK_DOMAIN_RST BIT(30)
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index e50ac3a..be9f613 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -30,68 +30,6 @@
#include "msm_vb2.h"
#include "msm_sd.h"
-struct msm_queue_head {
- struct list_head list;
- spinlock_t lock;
- int len;
- int max;
-};
-
-/** msm_event:
- *
- * event sent by imaging server
- **/
-struct msm_event {
- struct video_device *vdev;
- atomic_t on_heap;
-};
-
-struct msm_command {
- struct list_head list;
- struct v4l2_event event;
- atomic_t on_heap;
-};
-
-/** struct msm_command_ack
- *
- * Object of command_ack_q, which is
- * created per open operation
- *
- * contains struct msm_command
- **/
-struct msm_command_ack {
- struct list_head list;
- struct msm_queue_head command_q;
- wait_queue_head_t wait;
- int stream_id;
-};
-
-struct msm_v4l2_subdev {
- /* FIXME: for session close and error handling such
- * as daemon shutdown */
- int close_sequence;
-};
-
-struct msm_session {
- struct list_head list;
-
- /* session index */
- unsigned int session_id;
-
- /* event queue sent by imaging server */
- struct msm_event event_q;
-
- /* ACK by imaging server. Object type of
- * struct msm_command_ack per open,
- * assumption is application can send
- * command on every opened video node */
- struct msm_queue_head command_ack_q;
-
- /* real streams(either data or metadate) owned by one
- * session struct msm_stream */
- struct msm_queue_head stream_q;
-};
-
static struct v4l2_device *msm_v4l2_dev;
static struct msm_queue_head *msm_session_q;
@@ -247,6 +185,17 @@
return (ack->stream_id == *(unsigned int *)d2) ? 1 : 0;
}
+
+struct msm_session *msm_session_find(unsigned int session_id)
+{
+ struct msm_session *session;
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (WARN_ON(!session))
+ return NULL;
+ return session;
+}
+
int msm_create_stream(unsigned int session_id,
unsigned int stream_id, struct vb2_queue *q)
{
@@ -393,6 +342,7 @@
msm_init_queue(&session->command_ack_q);
msm_init_queue(&session->stream_q);
msm_enqueue(msm_session_q, &session->list);
+ mutex_init(&session->lock);
return 0;
}
@@ -408,10 +358,12 @@
list, __msm_queue_find_session, &session_id);
if (!session)
return -EINVAL;
-
+ mutex_lock(&session->lock);
cmd_ack = kzalloc(sizeof(*cmd_ack), GFP_KERNEL);
- if (!cmd_ack)
+ if (!cmd_ack) {
+ mutex_unlock(&session->lock);
return -ENOMEM;
+ }
msm_init_queue(&cmd_ack->command_q);
INIT_LIST_HEAD(&cmd_ack->list);
@@ -420,7 +372,7 @@
msm_enqueue(&session->command_ack_q, &cmd_ack->list);
session->command_ack_q.len++;
-
+ mutex_unlock(&session->lock);
return 0;
}
@@ -488,7 +440,7 @@
list_for_each_entry(sd, &msm_v4l2_dev->subdevs, list)
__msm_sd_close_session_streams(sd, sd_close);
spin_unlock_irqrestore(&msm_v4l2_dev->lock, flags);
-
+ INIT_LIST_HEAD(&stream->queued_list);
return 0;
}
@@ -543,7 +495,7 @@
msm_destroy_session_streams(session);
msm_remove_session_cmd_ack_q(session);
-
+ mutex_destroy(&session->lock);
msm_delete_entry(msm_session_q, struct msm_session,
list, session);
@@ -683,33 +635,44 @@
list, __msm_queue_find_session, &session_id);
if (WARN_ON(!session))
return -EIO;
-
+ mutex_lock(&session->lock);
cmd_ack = msm_queue_find(&session->command_ack_q,
struct msm_command_ack, list,
__msm_queue_find_command_ack_q, &stream_id);
- if (WARN_ON(!cmd_ack))
+ if (WARN_ON(!cmd_ack)) {
+ mutex_unlock(&session->lock);
return -EIO;
+ }
v4l2_event_queue(vdev, event);
- if (timeout < 0)
+ if (timeout < 0) {
+ mutex_unlock(&session->lock);
return rc;
+ }
/* should wait on session based condition */
rc = wait_event_interruptible_timeout(cmd_ack->wait,
!list_empty_careful(&cmd_ack->command_q.list),
msecs_to_jiffies(timeout));
if (list_empty_careful(&cmd_ack->command_q.list)) {
- if (!rc)
+ if (!rc) {
+ pr_err("%s: Timed out\n", __func__);
rc = -ETIMEDOUT;
- if (rc < 0)
+ }
+ if (rc < 0) {
+ pr_err("%s: rc = %d\n", __func__, rc);
+ mutex_unlock(&session->lock);
return rc;
+ }
}
cmd = msm_dequeue(&cmd_ack->command_q,
struct msm_command, list);
- if (!cmd)
+ if (!cmd) {
+ mutex_unlock(&session->lock);
return -EINVAL;
+ }
event_data = (struct msm_v4l2_event_data *)cmd->event.u.data;
@@ -721,6 +684,7 @@
*event = cmd->event;
kzfree(cmd);
+ mutex_unlock(&session->lock);
return rc;
}
@@ -730,7 +694,7 @@
struct msm_v4l2_event_data *event_data =
(struct msm_v4l2_event_data *)&event.u.data[0];
struct msm_session *session = d1;
-
+ mutex_lock(&session->lock);
event.type = MSM_CAMERA_V4L2_EVENT_TYPE;
event.id = MSM_CAMERA_MSM_NOTIFY;
event_data->command = MSM_CAMERA_PRIV_SHUTDOWN;
@@ -739,7 +703,7 @@
msm_destroy_session_streams(session);
msm_remove_session_cmd_ack_q(session);
-
+ mutex_unlock(&session->lock);
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/msm.h b/drivers/media/platform/msm/camera_v2/msm.h
index 39901ad..d57cf8d 100644
--- a/drivers/media/platform/msm/camera_v2/msm.h
+++ b/drivers/media/platform/msm/camera_v2/msm.h
@@ -38,6 +38,69 @@
atomic_t opened;
};
+struct msm_queue_head {
+ struct list_head list;
+ spinlock_t lock;
+ int len;
+ int max;
+};
+
+/** msm_event:
+ *
+ * event sent by imaging server
+ **/
+struct msm_event {
+ struct video_device *vdev;
+ atomic_t on_heap;
+};
+
+struct msm_command {
+ struct list_head list;
+ struct v4l2_event event;
+ atomic_t on_heap;
+};
+
+/** struct msm_command_ack
+ *
+ * Object of command_ack_q, which is
+ * created per open operation
+ *
+ * contains struct msm_command
+ **/
+struct msm_command_ack {
+ struct list_head list;
+ struct msm_queue_head command_q;
+ wait_queue_head_t wait;
+ int stream_id;
+};
+
+struct msm_v4l2_subdev {
+ /* FIXME: for session close and error handling such
+ * as daemon shutdown */
+ int close_sequence;
+};
+
+struct msm_session {
+ struct list_head list;
+
+ /* session index */
+ unsigned int session_id;
+
+ /* event queue sent by imaging server */
+ struct msm_event event_q;
+
+ /* ACK by imaging server. Object type of
+ * struct msm_command_ack per open,
+ * assumption is application can send
+ * command on every opened video node */
+ struct msm_queue_head command_ack_q;
+
+ /* real streams (either data or metadata) owned by one
+ * session, struct msm_stream */
+ struct msm_queue_head stream_q;
+ struct mutex lock;
+};
+
int msm_post_event(struct v4l2_event *event, int timeout);
int msm_create_session(unsigned int session, struct video_device *vdev);
int msm_destroy_session(unsigned int session_id);
@@ -52,5 +115,5 @@
struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id,
unsigned int stream_id);
struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q);
-
+struct msm_session *msm_session_find(unsigned int session_id);
#endif /*_MSM_H */
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
index 29262af..8fa8f8d 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
@@ -48,7 +48,6 @@
}
msm_vb2_buf = container_of(vb, struct msm_vb2_buffer, vb2_buf);
msm_vb2_buf->in_freeq = 0;
- msm_vb2_buf->stream = stream;
return 0;
}
@@ -66,7 +65,7 @@
return;
}
- stream = msm_vb2->stream;
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
return;
@@ -91,7 +90,7 @@
return -EINVAL;
}
- stream = msm_vb2->stream;
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
return -EINVAL;
@@ -122,7 +121,7 @@
return;
}
- stream = msm_vb2->stream;
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
return;
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h
index 027d344..7082f85 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h
@@ -42,7 +42,6 @@
struct vb2_buffer vb2_buf;
struct list_head list;
int in_freeq;
- struct msm_stream *stream;
};
struct msm_vb2_private_data {
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile b/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile
index 2f969d2..c793ef6 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/isp/
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
obj-$(CONFIG_MSM_CPP) += msm_cpp.o
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 2598b07..8cdaa4b 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -34,6 +34,7 @@
#include <media/msmb_pproc.h>
#include <media/msmb_generic_buf_mgr.h>
#include "msm_cpp.h"
+#include "msm_isp_util.h"
#include "msm_camera_io_util.h"
#define MSM_CPP_DRV_NAME "msm_cpp"
@@ -582,6 +583,12 @@
static int cpp_init_hardware(struct cpp_device *cpp_dev)
{
int rc = 0;
+ rc = msm_isp_init_bandwidth_mgr(ISP_CPP);
+ if (rc < 0) {
+ pr_err("%s: Bandwidth registration Failed!\n", __func__);
+ goto bus_scale_register_failed;
+ }
+ msm_isp_update_bandwidth(ISP_CPP, 981345600, 1066680000);
if (cpp_dev->fs_cpp == NULL) {
cpp_dev->fs_cpp =
@@ -667,6 +674,9 @@
regulator_disable(cpp_dev->fs_cpp);
regulator_put(cpp_dev->fs_cpp);
fs_failed:
+ msm_isp_update_bandwidth(ISP_CPP, 0, 0);
+ msm_isp_deinit_bandwidth_mgr(ISP_CPP);
+bus_scale_register_failed:
return rc;
}
@@ -688,6 +698,8 @@
regulator_put(cpp_dev->fs_cpp);
cpp_dev->fs_cpp = NULL;
}
+ msm_isp_update_bandwidth(ISP_CPP, 0, 0);
+ msm_isp_deinit_bandwidth_mgr(ISP_CPP);
}
static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index fbd4a2e..33eaa69 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -185,49 +185,15 @@
{"csi_pclk", -1},
};
-static struct msm_cam_clk_info csid0_8974_clk_info[] = {
+static struct msm_cam_clk_info csid_8974_clk_info[] = {
{"camss_top_ahb_clk", -1},
{"ispif_ahb_clk", -1},
- {"csi0_ahb_clk", -1},
- {"csi0_src_clk", 200000000},
- {"csi0_clk", -1},
- {"csi0_phy_clk", -1},
- {"csi0_pix_clk", -1},
- {"csi0_rdi_clk", -1},
-};
-
-static struct msm_cam_clk_info csid1_8974_clk_info[] = {
- {"csi1_ahb_clk", -1},
- {"csi1_src_clk", 200000000},
- {"csi1_clk", -1},
- {"csi1_phy_clk", -1},
- {"csi1_pix_clk", -1},
- {"csi1_rdi_clk", -1},
-};
-
-static struct msm_cam_clk_info csid2_8974_clk_info[] = {
- {"csi2_ahb_clk", -1},
- {"csi2_src_clk", 200000000},
- {"csi2_clk", -1},
- {"csi2_phy_clk", -1},
- {"csi2_pix_clk", -1},
- {"csi2_rdi_clk", -1},
-};
-
-static struct msm_cam_clk_info csid3_8974_clk_info[] = {
- {"csi3_ahb_clk", -1},
- {"csi3_src_clk", 200000000},
- {"csi3_clk", -1},
- {"csi3_phy_clk", -1},
- {"csi3_pix_clk", -1},
- {"csi3_rdi_clk", -1},
-};
-
-static struct msm_cam_clk_setting csid_8974_clk_info[] = {
- {&csid0_8974_clk_info[0], ARRAY_SIZE(csid0_8974_clk_info)},
- {&csid1_8974_clk_info[0], ARRAY_SIZE(csid1_8974_clk_info)},
- {&csid2_8974_clk_info[0], ARRAY_SIZE(csid2_8974_clk_info)},
- {&csid3_8974_clk_info[0], ARRAY_SIZE(csid3_8974_clk_info)},
+ {"csi_ahb_clk", -1},
+ {"csi_src_clk", 200000000},
+ {"csi_clk", -1},
+ {"csi_phy_clk", -1},
+ {"csi_pix_clk", -1},
+ {"csi_rdi_clk", -1},
};
static struct camera_vreg_t csid_8960_vreg_info[] = {
@@ -241,7 +207,6 @@
static int msm_csid_init(struct csid_device *csid_dev, uint32_t *csid_version)
{
int rc = 0;
- uint8_t core_id = 0;
if (!csid_version) {
pr_err("%s:%d csid_version NULL\n", __func__, __LINE__);
@@ -306,26 +271,14 @@
}
rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
- csid_8974_clk_info[0].clk_info, csid_dev->csid0_clk,
- csid_8974_clk_info[0].num_clk_info, 1);
+ csid_8974_clk_info, csid_dev->csid_clk,
+ ARRAY_SIZE(csid_8974_clk_info), 1);
if (rc < 0) {
pr_err("%s: clock enable failed\n", __func__);
- goto csid0_clk_enable_failed;
- }
- core_id = csid_dev->pdev->id;
- if (core_id) {
- rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
- csid_8974_clk_info[core_id].clk_info,
- csid_dev->csid_clk,
- csid_8974_clk_info[core_id].num_clk_info, 1);
- if (rc < 0) {
- pr_err("%s: clock enable failed\n",
- __func__);
- goto clk_enable_failed;
- }
+ goto clk_enable_failed;
}
}
-
+ CDBG("%s:%d called\n", __func__, __LINE__);
csid_dev->hw_version =
msm_camera_io_r(csid_dev->base + CSID_HW_VERSION_ADDR);
CDBG("%s:%d called csid_dev->hw_version %x\n", __func__, __LINE__,
@@ -341,12 +294,6 @@
return rc;
clk_enable_failed:
- if (CSID_VERSION >= CSID_VERSION_V3) {
- msm_cam_clk_enable(&csid_dev->pdev->dev,
- csid_8974_clk_info[0].clk_info, csid_dev->csid0_clk,
- csid_8974_clk_info[0].num_clk_info, 0);
- }
-csid0_clk_enable_failed:
if (CSID_VERSION <= CSID_VERSION_V2) {
msm_camera_enable_vreg(&csid_dev->pdev->dev,
csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
@@ -375,7 +322,6 @@
static int msm_csid_release(struct csid_device *csid_dev)
{
uint32_t irq;
- uint8_t core_id = 0;
if (csid_dev->csid_state != CSID_POWER_UP) {
pr_err("%s: csid invalid state %d\n", __func__,
@@ -401,16 +347,8 @@
csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 0);
} else if (csid_dev->hw_version >= CSID_VERSION_V3) {
- core_id = csid_dev->pdev->id;
- if (core_id)
- msm_cam_clk_enable(&csid_dev->pdev->dev,
- csid_8974_clk_info[core_id].clk_info,
- csid_dev->csid_clk,
- csid_8974_clk_info[core_id].num_clk_info, 0);
-
- msm_cam_clk_enable(&csid_dev->pdev->dev,
- csid_8974_clk_info[0].clk_info, csid_dev->csid0_clk,
- csid_8974_clk_info[0].num_clk_info, 0);
+ msm_cam_clk_enable(&csid_dev->pdev->dev, csid_8974_clk_info,
+ csid_dev->csid_clk, ARRAY_SIZE(csid_8974_clk_info), 0);
msm_camera_enable_vreg(&csid_dev->pdev->dev,
csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
index 7ae1392..fd4db79 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
@@ -38,7 +38,6 @@
uint32_t hw_version;
enum msm_csid_state_t csid_state;
- struct clk *csid0_clk[11];
struct clk *csid_clk[11];
};
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/csi2.0/msm_csiphy_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/csi2.0/msm_csiphy_hwreg.h
index e5093f8..ba964a2 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/csi2.0/msm_csiphy_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/csi2.0/msm_csiphy_hwreg.h
@@ -14,7 +14,7 @@
#define MSM_CSIPHY_HWREG_H
/*MIPI CSI PHY registers*/
-#define MIPI_CSIPHY_HW_VERSION_ADDR 0x180
+#define MIPI_CSIPHY_HW_VERSION_ADDR 0x17C
#define MIPI_CSIPHY_LNn_CFG1_ADDR 0x0
#define MIPI_CSIPHY_LNn_CFG2_ADDR 0x4
#define MIPI_CSIPHY_LNn_CFG3_ADDR 0x8
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index df3ee60..7d3a1fc 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -43,14 +43,15 @@
uint8_t lane_cnt = 0;
uint16_t lane_mask = 0;
void __iomem *csiphybase;
+ uint8_t csiphy_id = csiphy_dev->pdev->id;
csiphybase = csiphy_dev->base;
if (!csiphybase) {
pr_err("%s: csiphybase NULL\n", __func__);
return -EINVAL;
}
- csiphy_dev->lane_mask[csiphy_dev->pdev->id] |= csiphy_params->lane_mask;
- lane_mask = csiphy_dev->lane_mask[csiphy_dev->pdev->id];
+ csiphy_dev->lane_mask[csiphy_id] |= csiphy_params->lane_mask;
+ lane_mask = csiphy_dev->lane_mask[csiphy_id];
lane_cnt = csiphy_params->lane_cnt;
if (csiphy_params->lane_cnt < 1 || csiphy_params->lane_cnt > 4) {
pr_err("%s: unsupported lane cnt %d\n",
@@ -58,11 +59,28 @@
return rc;
}
- CDBG("%s csiphy_params, mask = %x, cnt = %d, settle cnt = %x\n",
+ CDBG("%s csiphy_params, mask = %x cnt = %d settle cnt = %x csid %d\n",
__func__,
csiphy_params->lane_mask,
csiphy_params->lane_cnt,
- csiphy_params->settle_cnt);
+ csiphy_params->settle_cnt,
+ csiphy_params->csid_core);
+
+ if (csiphy_dev->hw_version >= CSIPHY_VERSION_V3) {
+ val = msm_camera_io_r(csiphy_dev->clk_mux_base);
+ if (csiphy_params->combo_mode &&
+ (csiphy_params->lane_mask & 0x18)) {
+ val &= ~0xf0;
+ val |= csiphy_params->csid_core << 4;
+ } else {
+ val &= ~0xf;
+ val |= csiphy_params->csid_core;
+ }
+ msm_camera_io_w(val, csiphy_dev->clk_mux_base);
+ CDBG("%s clk mux addr %p val 0x%x\n", __func__,
+ csiphy_dev->clk_mux_base, val);
+ mb();
+ }
msm_camera_io_w(0x1, csiphybase + MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR);
msm_camera_io_w(0x1, csiphybase + MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR);
@@ -204,6 +222,22 @@
csiphy_8960_clk_info, csiphy_dev->csiphy_clk,
ARRAY_SIZE(csiphy_8960_clk_info), 1);
} else {
+ if (!csiphy_dev->clk_mux_mem || !csiphy_dev->clk_mux_io) {
+ pr_err("%s clk mux mem %p io %p\n", __func__,
+ csiphy_dev->clk_mux_mem,
+ csiphy_dev->clk_mux_io);
+ rc = -ENOMEM;
+ return rc;
+ }
+ csiphy_dev->clk_mux_base = ioremap(
+ csiphy_dev->clk_mux_mem->start,
+ resource_size(csiphy_dev->clk_mux_mem));
+ if (!csiphy_dev->clk_mux_base) {
+ pr_err("%s: ERROR %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ return rc;
+ }
+
CDBG("%s:%d called\n", __func__, __LINE__);
rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev,
csiphy_8974_clk_info, csiphy_dev->csiphy_clk,
@@ -269,12 +303,31 @@
}
CDBG("%s:%d called\n", __func__, __LINE__);
+
if (CSIPHY_VERSION < CSIPHY_VERSION_V3) {
CDBG("%s:%d called\n", __func__, __LINE__);
rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev,
csiphy_8960_clk_info, csiphy_dev->csiphy_clk,
ARRAY_SIZE(csiphy_8960_clk_info), 1);
} else {
+
+ if (!csiphy_dev->clk_mux_mem || !csiphy_dev->clk_mux_io) {
+ pr_err("%s clk mux mem %p io %p\n", __func__,
+ csiphy_dev->clk_mux_mem,
+ csiphy_dev->clk_mux_io);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ csiphy_dev->clk_mux_base = ioremap(
+ csiphy_dev->clk_mux_mem->start,
+ resource_size(csiphy_dev->clk_mux_mem));
+ if (!csiphy_dev->clk_mux_base) {
+ pr_err("%s: ERROR %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ return rc;
+ }
+
CDBG("%s:%d called\n", __func__, __LINE__);
rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev,
csiphy_8974_clk_info, csiphy_dev->csiphy_clk,
@@ -359,14 +412,16 @@
disable_irq(csiphy_dev->irq->start);
- if (CSIPHY_VERSION < CSIPHY_VERSION_V3)
+ if (CSIPHY_VERSION < CSIPHY_VERSION_V3) {
msm_cam_clk_enable(&csiphy_dev->pdev->dev,
csiphy_8960_clk_info, csiphy_dev->csiphy_clk,
ARRAY_SIZE(csiphy_8960_clk_info), 0);
- else
+ } else {
msm_cam_clk_enable(&csiphy_dev->pdev->dev,
csiphy_8974_clk_info, csiphy_dev->csiphy_clk,
ARRAY_SIZE(csiphy_8974_clk_info), 0);
+ iounmap(csiphy_dev->clk_mux_base);
+ }
iounmap(csiphy_dev->base);
csiphy_dev->base = NULL;
@@ -426,20 +481,23 @@
msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_LNCK_CFG2_ADDR);
msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
- if (CSIPHY_VERSION < CSIPHY_VERSION_V3)
+ if (CSIPHY_VERSION < CSIPHY_VERSION_V3) {
msm_cam_clk_enable(&csiphy_dev->pdev->dev,
csiphy_8960_clk_info, csiphy_dev->csiphy_clk,
ARRAY_SIZE(csiphy_8960_clk_info), 0);
- else
+ } else {
msm_cam_clk_enable(&csiphy_dev->pdev->dev,
csiphy_8974_clk_info, csiphy_dev->csiphy_clk,
ARRAY_SIZE(csiphy_8974_clk_info), 0);
+ iounmap(csiphy_dev->clk_mux_base);
+ }
iounmap(csiphy_dev->base);
csiphy_dev->base = NULL;
csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
return 0;
}
+
#endif
static long msm_csiphy_cmd(struct csiphy_device *csiphy_dev, void *arg)
@@ -588,6 +646,17 @@
}
disable_irq(new_csiphy_dev->irq->start);
+ new_csiphy_dev->clk_mux_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "csiphy_clk_mux");
+ if (new_csiphy_dev->clk_mux_mem) {
+ new_csiphy_dev->clk_mux_io = request_mem_region(
+ new_csiphy_dev->clk_mux_mem->start,
+ resource_size(new_csiphy_dev->clk_mux_mem),
+ new_csiphy_dev->clk_mux_mem->name);
+ if (!new_csiphy_dev->clk_mux_io)
+ pr_err("%s: ERROR %d\n", __func__, __LINE__);
+ }
+
new_csiphy_dev->pdev = pdev;
new_csiphy_dev->msm_sd.sd.internal_ops = &msm_csiphy_internal_ops;
new_csiphy_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index e19be34..a11b958 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -32,9 +32,12 @@
struct msm_sd_subdev msm_sd;
struct v4l2_subdev subdev;
struct resource *mem;
+ struct resource *clk_mux_mem;
struct resource *irq;
struct resource *io;
+ struct resource *clk_mux_io;
void __iomem *base;
+ void __iomem *clk_mux_base;
struct mutex mutex;
uint32_t hw_version;
enum msm_csiphy_state_t csiphy_state;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 6775a23..b56378a 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -25,6 +25,9 @@
/* Length of mandatory fields that must exist in header of video PES */
#define PES_MANDATORY_FIELDS_LEN 9
+/* Index of first byte in TS packet holding STC */
+#define STC_LOCATION_IDX 188
+
#define MAX_PES_LENGTH (SZ_64K)
#define MAX_TS_PACKETS_FOR_SDMX_PROCESS (500)
@@ -95,80 +98,6 @@
static int mpq_sdmx_debug;
module_param(mpq_sdmx_debug, int, S_IRUGO | S_IWUSR);
-
-/**
- * Maximum allowed framing pattern size
- */
-#define MPQ_MAX_PATTERN_SIZE 6
-
-/**
- * Number of patterns to look for when doing framing, per video standard
- */
-#define MPQ_MPEG2_PATTERN_NUM 5
-#define MPQ_H264_PATTERN_NUM 5
-#define MPQ_VC1_PATTERN_NUM 3
-
-/*
- * mpq_framing_pattern_lookup_params - framing pattern lookup parameters.
- *
- * @pattern: the byte pattern to look for.
- * @mask: the byte mask to use (same length as pattern).
- * @size: the length of the pattern, in bytes.
- * @type: the type of the pattern.
- */
-struct mpq_framing_pattern_lookup_params {
- u8 pattern[MPQ_MAX_PATTERN_SIZE];
- u8 mask[MPQ_MAX_PATTERN_SIZE];
- size_t size;
- enum dmx_framing_pattern_type type;
-};
-
-/*
- * Pre-defined video framing lookup pattern information.
- * Note: the first pattern in each patterns database must
- * be the Sequence Header (or equivalent SPS in H.264).
- * The code assumes this is the case when prepending
- * Sequence Header data in case it is required.
- */
-static const struct mpq_framing_pattern_lookup_params
- mpeg2_patterns[MPQ_MPEG2_PATTERN_NUM] = {
- {{0x00, 0x00, 0x01, 0xB3}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
- DMX_FRM_MPEG2_SEQUENCE_HEADER},
- {{0x00, 0x00, 0x01, 0xB8}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
- DMX_FRM_MPEG2_GOP_HEADER},
- {{0x00, 0x00, 0x01, 0x00, 0x00, 0x08},
- {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, 6,
- DMX_FRM_MPEG2_I_PIC},
- {{0x00, 0x00, 0x01, 0x00, 0x00, 0x10},
- {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, 6,
- DMX_FRM_MPEG2_P_PIC},
- {{0x00, 0x00, 0x01, 0x00, 0x00, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, 6,
- DMX_FRM_MPEG2_B_PIC}
-};
-
-static const struct mpq_framing_pattern_lookup_params
- h264_patterns[MPQ_H264_PATTERN_NUM] = {
- {{0x00, 0x00, 0x01, 0x07}, {0xFF, 0xFF, 0xFF, 0x1F}, 4,
- DMX_FRM_H264_SPS},
- {{0x00, 0x00, 0x01, 0x08}, {0xFF, 0xFF, 0xFF, 0x1F}, 4,
- DMX_FRM_H264_PPS},
- {{0x00, 0x00, 0x01, 0x05, 0x80}, {0xFF, 0xFF, 0xFF, 0x1F, 0x80}, 5,
- DMX_FRM_H264_IDR_PIC},
- {{0x00, 0x00, 0x01, 0x01, 0x80}, {0xFF, 0xFF, 0xFF, 0x1F, 0x80}, 5,
- DMX_FRM_H264_NON_IDR_PIC}
-};
-
-static const struct mpq_framing_pattern_lookup_params
- vc1_patterns[MPQ_VC1_PATTERN_NUM] = {
- {{0x00, 0x00, 0x01, 0x0F}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
- DMX_FRM_VC1_SEQUENCE_HEADER},
- {{0x00, 0x00, 0x01, 0x0E}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
- DMX_FRM_VC1_ENTRY_POINT_HEADER},
- {{0x00, 0x00, 0x01, 0x0D}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
- DMX_FRM_VC1_FRAME_START_CODE}
-};
-
/* Global data-structure for managing demux devices */
static struct
{
@@ -211,312 +140,74 @@
/* Check if a framing pattern is a video frame pattern or a header pattern */
static inline int mpq_dmx_is_video_frame(
- enum dmx_indexing_video_standard standard,
- enum dmx_framing_pattern_type pattern_type)
+ enum dmx_video_codec codec,
+ u64 pattern_type)
{
- switch (standard) {
- case DMX_INDEXING_MPEG2:
- if ((pattern_type == DMX_FRM_MPEG2_I_PIC) ||
- (pattern_type == DMX_FRM_MPEG2_P_PIC) ||
- (pattern_type == DMX_FRM_MPEG2_B_PIC))
+ switch (codec) {
+ case DMX_VIDEO_CODEC_MPEG2:
+ if ((pattern_type == DMX_IDX_MPEG_I_FRAME_START) ||
+ (pattern_type == DMX_IDX_MPEG_P_FRAME_START) ||
+ (pattern_type == DMX_IDX_MPEG_B_FRAME_START))
return 1;
return 0;
- case DMX_INDEXING_H264:
- if ((pattern_type == DMX_FRM_H264_IDR_PIC) ||
- (pattern_type == DMX_FRM_H264_NON_IDR_PIC))
+
+ case DMX_VIDEO_CODEC_H264:
+ if ((pattern_type == DMX_IDX_H264_IDR_START) ||
+ (pattern_type == DMX_IDX_H264_NON_IDR_START))
return 1;
return 0;
- case DMX_INDEXING_VC1:
- if (pattern_type == DMX_FRM_VC1_FRAME_START_CODE)
+
+ case DMX_VIDEO_CODEC_VC1:
+ if (pattern_type == DMX_IDX_VC1_FRAME_START)
return 1;
return 0;
+
default:
return -EINVAL;
}
}
/*
- * mpq_framing_pattern_lookup_results - framing lookup results
+ * mpq_dmx_get_pattern_params - Returns the required video
+ * patterns for framing operation based on video codec.
*
- * @offset: The offset in the buffer where the pattern was found.
- * If a pattern is found using a prefix (i.e. started on the
- * previous buffer), offset is zero.
- * @type: the type of the pattern found.
- * @used_prefix_size: the prefix size that was used to find this pattern
- */
-struct mpq_framing_pattern_lookup_results {
- struct {
- u32 offset;
- enum dmx_framing_pattern_type type;
- u32 used_prefix_size;
- } info[MPQ_MAX_FOUND_PATTERNS];
-};
-
-/*
- * Check if two patterns are identical, taking mask into consideration.
- * @pattern1: the first byte pattern to compare.
- * @pattern2: the second byte pattern to compare.
- * @mask: the bit mask to use.
- * @pattern_size: the length of both patterns and the mask, in bytes.
- *
- * Return: 1 if patterns match, 0 otherwise.
- */
-static inline int mpq_dmx_patterns_match(const u8 *pattern1, const u8 *pattern2,
- const u8 *mask, size_t pattern_size)
-{
- int i;
-
- /*
- * Assumption: it is OK to access pattern1, pattern2 and mask.
- * This function performs no sanity checks to keep things fast.
- */
-
- for (i = 0; i < pattern_size; i++)
- if ((pattern1[i] & mask[i]) != (pattern2[i] & mask[i]))
- return 0;
-
- return 1;
-}
-
-/*
- * mpq_dmx_framing_pattern_search -
- * search for framing patterns in a given buffer.
- *
- * Optimized version: first search for a common substring, e.g. 0x00 0x00 0x01.
- * If this string is found, go over all the given patterns (all must start
- * with this string) and search for their ending in the buffer.
- *
- * Assumption: the patterns we look for do not spread over more than two
- * buffers.
- *
- * @paterns: the full patterns information to look for.
- * @patterns_num: the number of patterns to look for.
- * @buf: the buffer to search.
- * @buf_size: the size of the buffer to search. we search the entire buffer.
- * @prefix_size_masks: a bit mask (per pattern) of possible prefix sizes to use
- * when searching for a pattern that started at the last buffer.
- * Updated in this function for use in the next lookup.
- * @results: lookup results (offset, type, used_prefix_size) per found pattern,
- * up to MPQ_MAX_FOUND_PATTERNS.
- *
- * Return:
- * Number of patterns found (up to MPQ_MAX_FOUND_PATTERNS).
- * 0 if pattern was not found.
- * Negative error value on failure.
- */
-static int mpq_dmx_framing_pattern_search(
- const struct mpq_framing_pattern_lookup_params *patterns,
- int patterns_num,
- const u8 *buf,
- size_t buf_size,
- struct mpq_framing_prefix_size_masks *prefix_size_masks,
- struct mpq_framing_pattern_lookup_results *results)
-{
- int i, j;
- unsigned int current_size;
- u32 prefix;
- int found = 0;
- int start_offset = 0;
- /* the starting common substring to look for */
- u8 string[] = {0x00, 0x00, 0x01};
- /* the mask for the starting string */
- u8 string_mask[] = {0xFF, 0xFF, 0xFF};
- /* the size of the starting string (in bytes) */
- size_t string_size = 3;
-
- /* sanity checks - can be commented out for optimization purposes */
- if ((patterns == NULL) || (patterns_num <= 0) || (buf == NULL)) {
- MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
- return -EINVAL;
- }
-
- memset(results, 0, sizeof(struct mpq_framing_pattern_lookup_results));
-
- /*
- * handle prefix - disregard string, simply check all patterns,
- * looking for a matching suffix at the very beginning of the buffer.
- */
- for (j = 0; (j < patterns_num) && !found; j++) {
- prefix = prefix_size_masks->size_mask[j];
- current_size = 32;
- while (prefix) {
- if (prefix & (0x1 << (current_size - 1))) {
- /*
- * check that we don't look further
- * than buf_size boundary
- */
- if ((int)(patterns[j].size - current_size) >
- buf_size)
- break;
-
- if (mpq_dmx_patterns_match(
- (patterns[j].pattern + current_size),
- buf, (patterns[j].mask + current_size),
- (patterns[j].size - current_size))) {
-
- MPQ_DVB_DBG_PRINT(
- "%s: Found matching pattern using prefix of size %d\n",
- __func__, current_size);
- /*
- * pattern found using prefix at the
- * very beginning of the buffer, so
- * offset is 0, but we already zeroed
- * everything in the beginning of the
- * function. that's why the next line
- * is commented.
- */
- /* results->info[found].offset = 0; */
- results->info[found].type =
- patterns[j].type;
- results->info[found].used_prefix_size =
- current_size;
- found++;
- /*
- * save offset to start looking from
- * in the buffer, to avoid reusing the
- * data of a pattern we already found.
- */
- start_offset = (patterns[j].size -
- current_size);
-
- if (found >= MPQ_MAX_FOUND_PATTERNS)
- goto next_prefix_lookup;
- /*
- * we don't want to search for the same
- * pattern with several possible prefix
- * sizes if we have already found it,
- * so we break from the inner loop.
- * since we incremented 'found', we
- * will not search for additional
- * patterns using a prefix - that would
- * imply ambiguous patterns where one
- * pattern can be included in another.
- * the for loop will exit.
- */
- break;
- }
- }
- prefix &= ~(0x1 << (current_size - 1));
- current_size--;
- }
- }
-
- /*
- * Search buffer for entire pattern, starting with the string.
- * Note the external for loop does not execute if buf_size is
- * smaller than string_size (the cast to int is required, since
- * size_t is unsigned).
- */
- for (i = start_offset; i < (int)(buf_size - string_size + 1); i++) {
- if (mpq_dmx_patterns_match(string, (buf + i), string_mask,
- string_size)) {
- /* now search for patterns: */
- for (j = 0; j < patterns_num; j++) {
- /* avoid overflow to next buffer */
- if ((i + patterns[j].size) > buf_size)
- continue;
-
- if (mpq_dmx_patterns_match(
- (patterns[j].pattern + string_size),
- (buf + i + string_size),
- (patterns[j].mask + string_size),
- (patterns[j].size - string_size))) {
-
- results->info[found].offset = i;
- results->info[found].type =
- patterns[j].type;
- /*
- * save offset to start next prefix
- * lookup, to avoid reusing the data
- * of any pattern we already found.
- */
- if ((i + patterns[j].size) >
- start_offset)
- start_offset = (i +
- patterns[j].size);
- /*
- * did not use a prefix to find this
- * pattern, but we zeroed everything
- * in the beginning of the function.
- * So no need to zero used_prefix_size
- * for results->info[found]
- */
-
- found++;
- if (found >= MPQ_MAX_FOUND_PATTERNS)
- goto next_prefix_lookup;
- /*
- * theoretically we don't have to break
- * here, but we don't want to search
- * for the other matching patterns on
- * the very same same place in the
- * buffer. That would mean the
- * (pattern & mask) combinations are
- * not unique. So we break from inner
- * loop and move on to the next place
- * in the buffer.
- */
- break;
- }
- }
- }
- }
-
-next_prefix_lookup:
- /* check for possible prefix sizes for the next buffer */
- for (j = 0; j < patterns_num; j++) {
- prefix_size_masks->size_mask[j] = 0;
- for (i = 1; i < patterns[j].size; i++) {
- /*
- * avoid looking outside of the buffer
- * or reusing previously used data.
- */
- if (i > (buf_size - start_offset))
- break;
-
- if (mpq_dmx_patterns_match(patterns[j].pattern,
- (buf + buf_size - i),
- patterns[j].mask, i)) {
- prefix_size_masks->size_mask[j] |=
- (1 << (i - 1));
- }
- }
- }
-
- return found;
-}
-
-/*
- * mpq_dmx_get_pattern_params -
- * get a pointer to the relevant pattern parameters structure,
- * based on the video parameters.
- *
- * @video_params: the video parameters (e.g. video standard).
- * @patterns: a pointer to a pointer to the pattern parameters,
- * updated by this function.
+ * @video_codec: the video codec.
+ * @patterns: a pointer to the pattern parameters, updated by this function.
* @patterns_num: number of patterns, updated by this function.
*/
static inline int mpq_dmx_get_pattern_params(
- struct dmx_indexing_video_params *video_params,
- const struct mpq_framing_pattern_lookup_params **patterns,
- int *patterns_num)
+ enum dmx_video_codec video_codec,
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+ int *patterns_num)
{
- switch (video_params->standard) {
- case DMX_INDEXING_MPEG2:
- *patterns = mpeg2_patterns;
- *patterns_num = MPQ_MPEG2_PATTERN_NUM;
+ switch (video_codec) {
+ case DMX_VIDEO_CODEC_MPEG2:
+ patterns[0] = dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER);
+ patterns[1] = dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP);
+ patterns[2] = dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START);
+ patterns[3] = dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START);
+ patterns[4] = dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START);
+ *patterns_num = 5;
break;
- case DMX_INDEXING_H264:
- *patterns = h264_patterns;
- *patterns_num = MPQ_H264_PATTERN_NUM;
+
+ case DMX_VIDEO_CODEC_H264:
+ patterns[0] = dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
+ patterns[1] = dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
+ patterns[2] = dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START);
+ patterns[3] = dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START);
+ *patterns_num = 4;
break;
- case DMX_INDEXING_VC1:
- *patterns = vc1_patterns;
- *patterns_num = MPQ_VC1_PATTERN_NUM;
+
+ case DMX_VIDEO_CODEC_VC1:
+ patterns[0] = dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER);
+ patterns[1] = dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT);
+ patterns[2] = dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START);
+ *patterns_num = 3;
break;
+
default:
MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
- *patterns = NULL;
*patterns_num = 0;
return -EINVAL;
}
@@ -622,8 +313,77 @@
mpq_demux->sdmx_process_time_max = process_time;
}
-/* Extend dvb-demux debugfs with HW statistics */
-void mpq_dmx_init_hw_statistics(struct mpq_demux *mpq_demux)
+static int mpq_sdmx_log_level_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t mpq_sdmx_log_level_read(struct file *fp,
+ char __user *user_buffer, size_t count, loff_t *position)
+{
+ char user_str[16];
+ struct mpq_demux *mpq_demux = fp->private_data;
+ int ret;
+
+ ret = scnprintf(user_str, 16, "%d", mpq_demux->sdmx_log_level);
+ ret = simple_read_from_buffer(user_buffer, count, position,
+ user_str, ret+1);
+
+ return ret;
+}
+
+static ssize_t mpq_sdmx_log_level_write(struct file *fp,
+ const char __user *user_buffer, size_t count, loff_t *position)
+{
+ char user_str[16];
+ int ret;
+ int ret_count;
+ int level;
+ struct mpq_demux *mpq_demux = fp->private_data;
+
+ if (count >= 16)
+ return -EINVAL;
+
+ ret_count = simple_write_to_buffer(user_str, 16, position, user_buffer,
+ count);
+ if (ret_count < 0)
+ return ret_count;
+
+ ret = sscanf(user_str, "%d", &level);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (level < SDMX_LOG_NO_PRINT || level > SDMX_LOG_VERBOSE)
+ return -EINVAL;
+
+ mutex_lock(&mpq_demux->mutex);
+ mpq_demux->sdmx_log_level = level;
+ if (mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) {
+ ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle,
+ mpq_demux->sdmx_log_level);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Could not set sdmx log level. ret = %d\n",
+ __func__, ret);
+ mutex_unlock(&mpq_demux->mutex);
+ return -EINVAL;
+ }
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+ return ret_count;
+}
+
+static const struct file_operations sdmx_debug_fops = {
+ .open = mpq_sdmx_log_level_open,
+ .read = mpq_sdmx_log_level_read,
+ .write = mpq_sdmx_log_level_write,
+ .owner = THIS_MODULE,
+};
+
+/* Extend dvb-demux debugfs with common plug-in entries */
+void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux)
{
/*
* Extend dvb-demux debugfs with HW statistics.
@@ -745,8 +505,14 @@
S_IRUGO | S_IWUSR | S_IWGRP,
mpq_demux->demux.dmx.debugfs_demux_dir,
&mpq_demux->sdmx_process_packets_min);
+
+ debugfs_create_file("sdmx_log_level",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ mpq_demux,
+ &sdmx_debug_fops);
}
-EXPORT_SYMBOL(mpq_dmx_init_hw_statistics);
+EXPORT_SYMBOL(mpq_dmx_init_debugfs_entries);
/* Update dvb-demux debugfs with HW notification statistics */
void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux)
@@ -907,6 +673,8 @@
mpq_demux->num_active_feeds = 0;
mpq_demux->sdmx_filter_count = 0;
mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+ mpq_demux->sdmx_eos = 0;
+ mpq_demux->sdmx_log_level = SDMX_LOG_NO_PRINT;
if (mpq_demux->demux.feednum > MPQ_MAX_DMX_FILES) {
MPQ_DVB_ERR_PRINT(
@@ -1127,7 +895,7 @@
goto map_buffer_failed_free_buff;
}
- if (ionflag & ION_SECURE) {
+ if (ionflag & ION_FLAG_SECURE) {
MPQ_DVB_DBG_PRINT("%s: secured buffer\n", __func__);
*kernel_mem = NULL;
} else {
@@ -1207,7 +975,7 @@
return -EINVAL;
}
- if (!(ionflag & ION_SECURE))
+ if (!(ionflag & ION_FLAG_SECURE))
ion_unmap_kernel(mpq_demux->ion_client, ion_handle);
ion_free(mpq_demux->ion_client, ion_handle);
@@ -1320,7 +1088,7 @@
feed_data->buffer_desc.desc[0].read_ptr = 0;
feed_data->buffer_desc.desc[0].write_ptr = 0;
feed_data->buffer_desc.desc[0].handle =
- ion_share_dma_buf(client, temp_handle);
+ ion_share_dma_buf_fd(client, temp_handle);
if (IS_ERR_VALUE(feed_data->buffer_desc.desc[0].handle)) {
MPQ_DVB_ERR_PRINT(
"%s: FAILED to share payload buffer %d\n",
@@ -1571,9 +1339,9 @@
/* get and store framing information if required */
if (!mpq_dmx_info.decoder_framing) {
mpq_dmx_get_pattern_params(
- &mpq_feed->dvb_demux_feed->indexing_params,
- &feed_data->patterns, &feed_data->patterns_num);
- if (feed_data->patterns == NULL) {
+ mpq_feed->dvb_demux_feed->video_codec,
+ feed_data->patterns, &feed_data->patterns_num);
+ if (!feed_data->patterns_num) {
MPQ_DVB_ERR_PRINT(
"%s: FAILED to get framing pattern parameters\n",
__func__);
@@ -1663,10 +1431,10 @@
&feed_data->frame_offset);
feed_data->last_pattern_offset = 0;
feed_data->pending_pattern_len = 0;
- feed_data->last_framing_match_type = DMX_FRM_UNKNOWN;
+ feed_data->last_framing_match_type = 0;
feed_data->found_sequence_header_pattern = 0;
memset(&feed_data->prefix_size, 0,
- sizeof(struct mpq_framing_prefix_size_masks));
+ sizeof(struct dvb_dmx_video_prefix_size_masks));
feed_data->first_prefix_size = 0;
feed_data->saved_pts_dts_info.pts_exist = 0;
feed_data->saved_pts_dts_info.dts_exist = 0;
@@ -2437,6 +2205,14 @@
size_t len = 0;
struct dmx_pts_dts_info *pts_dts;
+ if (meta_data->packet_type == DMX_PES_PACKET) {
+ pts_dts = &meta_data->info.pes.pts_dts_info;
+ data->buf.stc = meta_data->info.pes.stc;
+ } else {
+ pts_dts = &meta_data->info.framing.pts_dts_info;
+ data->buf.stc = meta_data->info.framing.stc;
+ }
+
pts_dts = meta_data->packet_type == DMX_PES_PACKET ?
&meta_data->info.pes.pts_dts_info :
&meta_data->info.framing.pts_dts_info;
@@ -2476,9 +2252,180 @@
feed_data->continuity_errs = 0;
}
+static int mpq_sdmx_dvr_buffer_desc(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *buf_desc)
+{
+ struct dvb_ringbuffer *rbuf = (struct dvb_ringbuffer *)
+ mpq_demux->demux.dmx.dvr_input.ringbuff;
+ struct ion_handle *ion_handle =
+ mpq_demux->demux.dmx.dvr_input.priv_handle;
+ ion_phys_addr_t phys_addr;
+ size_t len;
+ int ret;
+
+ ret = ion_phys(mpq_demux->ion_client, ion_handle, &phys_addr, &len);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to obtain physical address of input buffer. ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ buf_desc->base_addr = (void *)phys_addr;
+ buf_desc->size = rbuf->size;
+
+ return 0;
+}
+
+/**
+ * mpq_dmx_decoder_frame_closure - Helper function to handle closing current
+ * pending frame upon reaching EOS.
+ *
+ * @mpq_demux - mpq demux instance
+ * @mpq_feed - mpq feed object
+ */
+static void mpq_dmx_decoder_frame_closure(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_adapter_video_meta_data meta_data;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dmx_data_ready data;
+
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers).
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return;
+ }
+
+ /* Report last pattern found */
+ if ((feed_data->pending_pattern_len) &&
+ mpq_dmx_is_video_frame(feed->video_codec,
+ feed_data->last_framing_match_type)) {
+ meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.framing.pts_dts_info));
+ mpq_dmx_save_pts_dts(feed_data);
+ packet.user_data_len =
+ sizeof(struct mpq_adapter_video_meta_data);
+ packet.raw_data_len = feed_data->pending_pattern_len;
+ packet.raw_data_offset = feed_data->frame_offset;
+ meta_data.info.framing.pattern_type =
+ feed_data->last_framing_match_type;
+ meta_data.info.framing.stc = feed_data->last_framing_match_stc;
+
+ mpq_streambuffer_get_buffer_handle(stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+
+ mpq_dmx_update_decoder_stat(mpq_demux);
+
+ /* Writing meta-data that includes the framing information */
+ if (mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data) < 0)
+ MPQ_DVB_ERR_PRINT("%s: Couldn't write packet\n",
+ __func__);
+
+ mpq_dmx_prepare_es_event_data(&packet, &meta_data, feed_data,
+ stream_buffer, &data);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ }
+
+ spin_unlock(&feed_data->video_buffer_lock);
+}
+
+/**
+ * mpq_dmx_decoder_pes_closure - Helper function to handle closing current PES
+ * upon reaching EOS.
+ *
+ * @mpq_demux - mpq demux instance
+ * @mpq_feed - mpq feed object
+ */
+static void mpq_dmx_decoder_pes_closure(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_adapter_video_meta_data meta_data;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dmx_data_ready data;
+
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers).
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return;
+ }
+
+ /*
+ * Close previous PES.
+ * Push new packet to the meta-data buffer.
+ */
+ if ((feed->pusi_seen) && (0 == feed_data->pes_header_left_bytes)) {
+ packet.raw_data_len = feed->peslen;
+ mpq_streambuffer_get_buffer_handle(stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+ packet.raw_data_offset = feed_data->frame_offset;
+ packet.user_data_len =
+ sizeof(struct mpq_adapter_video_meta_data);
+
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.pes.pts_dts_info));
+ mpq_dmx_save_pts_dts(feed_data);
+
+ meta_data.packet_type = DMX_PES_PACKET;
+ meta_data.info.pes.stc = feed_data->prev_stc;
+
+ mpq_dmx_update_decoder_stat(mpq_demux);
+
+ if (mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data) < 0)
+ MPQ_DVB_ERR_PRINT("%s: Couldn't write packet\n",
+ __func__);
+
+ /* Save write offset where new PES will begin */
+ mpq_streambuffer_get_data_rw_offset(stream_buffer, NULL,
+ &feed_data->frame_offset);
+
+ mpq_dmx_prepare_es_event_data(&packet, &meta_data, feed_data,
+ stream_buffer, &data);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ }
+ /* Reset PES info */
+ feed->peslen = 0;
+ feed_data->pes_header_offset = 0;
+ feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+
+ spin_unlock(&feed_data->video_buffer_lock);
+}
+
static int mpq_dmx_process_video_packet_framing(
struct dvb_demux_feed *feed,
- const u8 *buf)
+ const u8 *buf,
+ u64 curr_stc)
{
int bytes_avail;
u32 ts_payload_offset;
@@ -2489,7 +2436,7 @@
struct mpq_demux *mpq_demux;
struct mpq_feed *mpq_feed;
- struct mpq_framing_pattern_lookup_results framing_res;
+ struct dvb_dmx_video_patterns_results framing_res;
struct mpq_streambuffer_packet_header packet;
struct mpq_adapter_video_meta_data meta_data;
int bytes_written = 0;
@@ -2619,7 +2566,7 @@
* the decoder requires demux to do framing,
* so search for the patterns now.
*/
- found_patterns = mpq_dmx_framing_pattern_search(
+ found_patterns = dvb_dmx_video_pattern_search(
feed_data->patterns,
feed_data->patterns_num,
(buf + ts_payload_offset),
@@ -2627,17 +2574,17 @@
&feed_data->prefix_size,
&framing_res);
- if (!(feed_data->found_sequence_header_pattern)) {
+ if (!feed_data->found_sequence_header_pattern) {
for (i = 0; i < found_patterns; i++) {
if ((framing_res.info[i].type ==
- DMX_FRM_MPEG2_SEQUENCE_HEADER) ||
+ DMX_IDX_MPEG_SEQ_HEADER) ||
(framing_res.info[i].type ==
- DMX_FRM_H264_SPS) ||
- (framing_res.info[i].type ==
- DMX_FRM_VC1_SEQUENCE_HEADER)) {
+ DMX_IDX_H264_SPS) ||
+ (framing_res.info[i].type ==
+ DMX_IDX_VC1_SEQ_HEADER)) {
MPQ_DVB_DBG_PRINT(
- "%s: Found Sequence Pattern, buf %p, i = %d, offset = %d, type = %d\n",
+ "%s: Found Sequence Pattern, buf %p, i = %d, offset = %d, type = %lld\n",
__func__, buf, i,
framing_res.info[i].offset,
framing_res.info[i].type);
@@ -2663,7 +2610,8 @@
* pass data to decoder only after sequence header
* or equivalent is found. Otherwise the data is dropped.
*/
- if (!(feed_data->found_sequence_header_pattern)) {
+ if (!feed_data->found_sequence_header_pattern) {
+ feed_data->prev_stc = curr_stc;
spin_unlock(&feed_data->video_buffer_lock);
return 0;
}
@@ -2680,10 +2628,10 @@
if (feed_data->first_pts_dts_copy) {
for (i = first_pattern; i < found_patterns; i++) {
is_video_frame = mpq_dmx_is_video_frame(
- feed->indexing_params.standard,
+ feed->video_codec,
framing_res.info[i].type);
- if (is_video_frame) {
+ if (is_video_frame == 1) {
mpq_dmx_save_pts_dts(feed_data);
feed_data->first_pts_dts_copy = 0;
break;
@@ -2693,12 +2641,12 @@
/*
* write prefix used to find first Sequence pattern, if needed.
- * feed_data->patterns[0].pattern always contains the Sequence
- * pattern.
+ * feed_data->patterns[0]->pattern always contains the sequence
+ * header pattern.
*/
if (feed_data->first_prefix_size) {
if (mpq_streambuffer_data_write(stream_buffer,
- (feed_data->patterns[0].pattern),
+ (feed_data->patterns[0]->pattern),
feed_data->first_prefix_size) < 0) {
mpq_demux->decoder_drop_count +=
feed_data->first_prefix_size;
@@ -2766,6 +2714,12 @@
framing_res.info[i].type;
feed_data->last_pattern_offset =
framing_res.info[i].offset;
+ if (framing_res.info[i].used_prefix_size)
+ feed_data->last_framing_match_stc =
+ feed_data->prev_stc;
+ else
+ feed_data->last_framing_match_stc =
+ curr_stc;
continue;
}
/*
@@ -2800,9 +2754,8 @@
}
is_video_frame = mpq_dmx_is_video_frame(
- feed->indexing_params.standard,
+ feed->video_codec,
feed_data->last_framing_match_type);
-
if (is_video_frame == 1) {
mpq_dmx_write_pts_dts(feed_data,
&(meta_data.info.framing.pts_dts_info));
@@ -2812,6 +2765,8 @@
packet.raw_data_offset = feed_data->frame_offset;
meta_data.info.framing.pattern_type =
feed_data->last_framing_match_type;
+ meta_data.info.framing.stc =
+ feed_data->last_framing_match_stc;
mpq_streambuffer_get_buffer_handle(
stream_buffer,
@@ -2852,8 +2807,13 @@
framing_res.info[i].type;
feed_data->last_pattern_offset =
framing_res.info[i].offset;
+ if (framing_res.info[i].used_prefix_size)
+ feed_data->last_framing_match_stc = feed_data->prev_stc;
+ else
+ feed_data->last_framing_match_stc = curr_stc;
}
+ feed_data->prev_stc = curr_stc;
feed_data->first_prefix_size = 0;
if (pending_data_len) {
@@ -2878,7 +2838,8 @@
static int mpq_dmx_process_video_packet_no_framing(
struct dvb_demux_feed *feed,
- const u8 *buf)
+ const u8 *buf,
+ u64 curr_stc)
{
int bytes_avail;
u32 ts_payload_offset;
@@ -2954,6 +2915,7 @@
mpq_dmx_save_pts_dts(feed_data);
meta_data.packet_type = DMX_PES_PACKET;
+ meta_data.info.pes.stc = feed_data->prev_stc;
mpq_dmx_update_decoder_stat(mpq_demux);
@@ -2996,6 +2958,8 @@
} else {
feed->pusi_seen = 1;
}
+
+ feed_data->prev_stc = curr_stc;
}
/*
@@ -3145,10 +3109,25 @@
struct dvb_demux_feed *feed,
const u8 *buf)
{
+ u64 curr_stc;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+ (mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+ curr_stc = 0;
+ } else {
+ curr_stc = buf[STC_LOCATION_IDX + 2] << 16;
+ curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+ curr_stc += buf[STC_LOCATION_IDX];
+ curr_stc *= 256; /* convert from 105.47 kHz to 27 MHz */
+ }
+
if (mpq_dmx_info.decoder_framing)
- return mpq_dmx_process_video_packet_no_framing(feed, buf);
+ return mpq_dmx_process_video_packet_no_framing(feed, buf,
+ curr_stc);
else
- return mpq_dmx_process_video_packet_framing(feed, buf);
+ return mpq_dmx_process_video_packet_framing(feed, buf,
+ curr_stc);
}
EXPORT_SYMBOL(mpq_dmx_process_video_packet);
@@ -3219,9 +3198,9 @@
(mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
stc = 0;
} else {
- stc = buf[190] << 16;
- stc += buf[189] << 8;
- stc += buf[188];
+ stc = buf[STC_LOCATION_IDX + 2] << 16;
+ stc += buf[STC_LOCATION_IDX + 1] << 8;
+ stc += buf[STC_LOCATION_IDX];
stc *= 256; /* convert from 105.47 KHZ to 27MHz */
}
@@ -3234,6 +3213,48 @@
}
EXPORT_SYMBOL(mpq_dmx_process_pcr_packet);
+static int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed)
+{
+ struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_streambuffer_packet_header oob_packet;
+ struct mpq_adapter_video_meta_data oob_meta_data;
+ int ret;
+
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ memset(&oob_packet, 0, sizeof(oob_packet));
+ oob_packet.user_data_len = sizeof(oob_meta_data);
+ oob_meta_data.packet_type = DMX_EOS_PACKET;
+
+ ret = mpq_streambuffer_pkt_write(stream_buffer, &oob_packet,
+ (u8 *)&oob_meta_data);
+
+ spin_unlock(&feed_data->video_buffer_lock);
+ return ret;
+}
+
+void mpq_dmx_convert_tts(struct dvb_demux_feed *feed,
+ const u8 timestamp[TIMESTAMP_LEN],
+ u64 *timestampIn27Mhz)
+{
+ if (unlikely(!timestampIn27Mhz))
+ return;
+
+ *timestampIn27Mhz = timestamp[2] << 16;
+ *timestampIn27Mhz += timestamp[1] << 8;
+ *timestampIn27Mhz += timestamp[0];
+ *timestampIn27Mhz *= 256; /* convert from 105.47 kHz to 27 MHz */
+}
+EXPORT_SYMBOL(mpq_dmx_convert_tts);
+
int mpq_sdmx_open_session(struct mpq_demux *mpq_demux)
{
enum sdmx_status ret = SDMX_SUCCESS;
@@ -3296,6 +3317,15 @@
return -EINVAL;
}
+ ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle,
+ mpq_demux->sdmx_log_level);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT("%s: Could not set log level. ret=%d\n",
+ __func__, ret);
+ /* Don't fail open session if just log level setting failed */
+ ret = 0;
+ }
+
mpq_demux->sdmx_process_count = 0;
mpq_demux->sdmx_process_time_sum = 0;
mpq_demux->sdmx_process_time_average = 0;
@@ -3329,6 +3359,7 @@
__func__, status);
return -EINVAL;
}
+ mpq_demux->sdmx_eos = 0;
mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
}
@@ -4055,6 +4086,12 @@
data_event.data_length = 0;
feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
}
+
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ data_event.data_length = 0;
+ data_event.status = DMX_OK_EOS;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+ }
}
static void mpq_sdmx_section_filter_results(struct mpq_demux *mpq_demux,
@@ -4080,7 +4117,7 @@
__func__);
if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
- return;
+ goto section_filter_check_eos;
mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
mpq_feed->sdmx_buf.pwrite = sts->data_write_offset;
@@ -4101,6 +4138,19 @@
DVB_RINGBUFFER_SKIP(&mpq_feed->sdmx_buf, header.payload_length);
}
+
+section_filter_check_eos:
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ event.data_length = 0;
+ event.status = DMX_OK_EOS;
+ f = feed->filter;
+
+ while (f && sec->is_filtering) {
+ feed->data_ready_cb.sec(&f->filter, &event);
+ f = f->next;
+ }
+ }
+
}
static void mpq_sdmx_decoder_filter_results(struct mpq_demux *mpq_demux,
@@ -4122,7 +4172,7 @@
struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
- goto decoder_filter_check_overflow;
+ goto decoder_filter_check_flags;
/* Update meta data buffer write pointer */
mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
@@ -4187,6 +4237,9 @@
pes_header = (struct pes_packet_header *)
&metadata_buf[pes_header_offset];
meta_data.packet_type = DMX_PES_PACKET;
+ /* TODO - set to real STC when SDMX supports it */
+ meta_data.info.pes.stc = 0;
+
if (pes_header->pts_dts_flag & 0x2) {
meta_data.info.pes.pts_dts_info.pts_exist = 1;
meta_data.info.pes.pts_dts_info.pts =
@@ -4220,7 +4273,8 @@
mpq_feed->video_info.ts_packets_num =
counters.pes_ts_count;
mpq_feed->video_info.ts_dropped_bytes =
- counters.drop_count * mpq_demux->demux.ts_packet_size;
+ counters.drop_count *
+ mpq_demux->demux.ts_packet_size;
sbuf = mpq_feed->video_info.video_buffer;
if (sbuf == NULL) {
@@ -4261,7 +4315,7 @@
spin_unlock(&mpq_feed->video_info.video_buffer_lock);
}
-decoder_filter_check_overflow:
+decoder_filter_check_flags:
if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
(sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) {
MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
@@ -4270,6 +4324,21 @@
mpq_feed->dvb_demux_feed->data_ready_cb.ts(
&mpq_feed->dvb_demux_feed->feed.ts, &data_event);
}
+
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ /* Notify decoder via the stream buffer */
+ ret = mpq_dmx_decoder_eos_cmd(mpq_feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to notify decoder on EOS, ret=%d\n",
+ __func__, ret);
+
+ /* Notify user filter */
+ data_event.data_length = 0;
+ data_event.status = DMX_OK_EOS;
+ mpq_feed->dvb_demux_feed->data_ready_cb.ts(
+ &mpq_feed->dvb_demux_feed->feed.ts, &data_event);
+ }
}
static void mpq_sdmx_pcr_filter_results(struct mpq_demux *mpq_demux,
@@ -4289,10 +4358,8 @@
MPQ_DVB_ERR_PRINT("%s: internal PCR buffer overflowed!\n",
__func__);
- /* MPQ_TODO: Parse rest of error indicators ? */
-
if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
- return;
+ goto pcr_filter_check_eos;
if (DMX_TSP_FORMAT_192_TAIL == mpq_demux->demux.tsp_format)
stc_len = 4;
@@ -4338,6 +4405,13 @@
feed->data_ready_cb.ts(&feed->feed.ts, &data);
}
}
+
+pcr_filter_check_eos:
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ data.data_length = 0;
+ data.status = DMX_OK_EOS;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ }
}
static void mpq_sdmx_raw_filter_results(struct mpq_demux *mpq_demux,
@@ -4352,7 +4426,7 @@
feed->feed.ts.buffer.ringbuff;
if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
- goto raw_filter_check_overflow;
+ goto raw_filter_check_flags;
new_data = sts->data_write_offset -
buf->pwrite;
@@ -4374,7 +4448,7 @@
MPQ_DVB_DBG_PRINT("%s: Callback DMX_OK, size=%d\n",
__func__, data_event.data_length);
-raw_filter_check_overflow:
+raw_filter_check_flags:
if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
(sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
MPQ_DVB_DBG_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
@@ -4382,6 +4456,13 @@
data_event.data_length = 0;
feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
}
+
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ data_event.data_length = 0;
+ data_event.status = DMX_OK_EOS;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+ }
+
}
static void mpq_sdmx_process_results(struct mpq_demux *mpq_demux)
@@ -4457,8 +4538,7 @@
{
struct sdmx_filter_status *sts;
struct mpq_feed *mpq_feed;
- /* MPQ_TODO: EOS handling */
- u8 flags = mpq_sdmx_debug ? SDMX_INPUT_FLAG_DBG_ENABLE : 0;
+ u8 flags = 0;
u32 errors;
u32 status;
u32 prev_read_offset;
@@ -4485,6 +4565,12 @@
return 0;
}
+ /* Set input flags */
+ if (mpq_demux->sdmx_eos)
+ flags |= SDMX_INPUT_FLAG_EOS;
+ if (mpq_sdmx_debug)
+ flags |= SDMX_INPUT_FLAG_DBG_ENABLE;
+
/* Build up to date filter status array */
for (i = 0; i < MPQ_MAX_DMX_FILES; i++) {
mpq_feed = &mpq_demux->feeds[i];
@@ -4508,7 +4594,7 @@
}
MPQ_DVB_DBG_PRINT(
- "\n\n%s: Before SDMX_process: input read_offset=%u, fill count=%u\n",
+ "%s: Before SDMX_process: input read_offset=%u, fill count=%u\n",
__func__, read_offset, fill_count);
process_start_time = current_kernel_time();
@@ -4551,14 +4637,19 @@
int mpq_sdmx_process(struct mpq_demux *mpq_demux,
struct sdmx_buff_descr *input,
u32 fill_count,
- u32 read_offset)
+ u32 read_offset,
+ size_t tsp_size)
{
int ret;
int todo;
int total_bytes_read = 0;
- int limit = mpq_sdmx_proc_limit * mpq_demux->demux.ts_packet_size;
+ int limit = mpq_sdmx_proc_limit * tsp_size;
- while (fill_count >= mpq_demux->demux.ts_packet_size) {
+ MPQ_DVB_DBG_PRINT(
+ "\n\n%s: read_offset=%u, fill_count=%u, tsp_size=%u\n",
+ __func__, read_offset, fill_count, tsp_size);
+
+ while (fill_count >= tsp_size) {
todo = fill_count > limit ? limit : fill_count;
ret = mpq_sdmx_process_buffer(mpq_demux, input, todo,
read_offset);
@@ -4590,11 +4681,7 @@
size_t count)
{
struct sdmx_buff_descr buf_desc;
- struct dvb_ringbuffer *rbuf = (struct dvb_ringbuffer *)
- mpq_demux->demux.dmx.dvr_input.ringbuff;
- ion_phys_addr_t phys_addr;
u32 read_offset;
- size_t len;
int ret;
if (mpq_demux == NULL || input_handle == NULL) {
@@ -4602,19 +4689,17 @@
return -EINVAL;
}
- ret = ion_phys(mpq_demux->ion_client, input_handle, &phys_addr, &len);
+ ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc);
if (ret) {
MPQ_DVB_ERR_PRINT(
- "%s: Failed to obtain physical address of input buffer. ret = %d\n",
+ "%s: Failed to init input buffer descriptor. ret = %d\n",
__func__, ret);
return ret;
}
+ read_offset = mpq_demux->demux.dmx.dvr_input.ringbuff->pread;
- buf_desc.base_addr = (void *)phys_addr;
- buf_desc.size = rbuf->size;
- read_offset = rbuf->pread;
-
- return mpq_sdmx_process(mpq_demux, &buf_desc, count, read_offset);
+ return mpq_sdmx_process(mpq_demux, &buf_desc, count,
+ read_offset, mpq_demux->demux.ts_packet_size);
}
int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count)
@@ -4649,10 +4734,9 @@
* process managed to consume, unless some sdmx error occurred, for
* which should process the whole buffer
*/
- if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds) {
+ if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
dvb_dmx_swfilter_format(dvb_demux, buf, ret,
dvb_demux->tsp_format);
- }
if (signal_pending(current))
return -EINTR;
@@ -4676,3 +4760,78 @@
return mpq_dmx_info.secure_demux_app_loaded;
}
EXPORT_SYMBOL(mpq_sdmx_is_loaded);
+
+int mpq_dmx_oob_command(struct dvb_demux_feed *feed,
+ struct dmx_oob_command *cmd)
+{
+ struct mpq_feed *mpq_feed = feed->priv;
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+ struct dmx_data_ready event;
+ int ret = 0;
+
+ mutex_lock(&mpq_demux->mutex);
+ mpq_feed = feed->priv;
+
+ event.data_length = 0;
+
+ switch (cmd->type) {
+ case DMX_OOB_CMD_EOS:
+ event.status = DMX_OK_EOS;
+ if (!feed->secure_mode.is_secured) {
+ if (dvb_dmx_is_video_feed(feed)) {
+ if (mpq_dmx_info.decoder_framing)
+ mpq_dmx_decoder_pes_closure(mpq_demux,
+ mpq_feed);
+ else
+ mpq_dmx_decoder_frame_closure(mpq_demux,
+ mpq_feed);
+ ret = mpq_dmx_decoder_eos_cmd(mpq_feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: Couldn't write oob eos packet\n",
+ __func__);
+ }
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &event);
+ } else if (!mpq_demux->sdmx_eos) {
+ struct sdmx_buff_descr buf_desc;
+
+ mpq_demux->sdmx_eos = 1;
+ ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc);
+ if (!ret) {
+ mutex_unlock(&mpq_demux->mutex);
+ mpq_sdmx_process_buffer(mpq_demux, &buf_desc,
+ 0, 0);
+ return 0;
+ }
+ }
+ break;
+ case DMX_OOB_CMD_MARKER:
+ event.status = DMX_OK_MARKER;
+ event.marker.id = cmd->params.marker.id;
+
+ if (feed->type == DMX_TYPE_SEC) {
+ struct dvb_demux_filter *f = feed->filter;
+ struct dmx_section_feed *sec = &feed->feed.sec;
+
+ while (f && sec->is_filtering) {
+ ret = feed->data_ready_cb.sec(&f->filter,
+ &event);
+ if (ret)
+ break;
+ f = f->next;
+ }
+ } else {
+ /* MPQ_TODO: Notify decoder via the stream buffer */
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &event);
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(mpq_dmx_oob_command);
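[Editor's illustration, not part of the patch] The TTS attached by the transport
hardware ticks at 27 MHz / 256 (about 105.47 kHz), which is why mpq_dmx_convert_tts()
and the curr_stc path above assemble the 24-bit little-endian value and multiply it
by 256. A minimal standalone sketch of the same arithmetic (the sample timestamp
bytes are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 24-bit TTS sample, least significant byte first */
        const uint8_t timestamp[3] = { 0x34, 0x12, 0x00 };

        uint64_t tts = ((uint64_t)timestamp[2] << 16) |
                       ((uint64_t)timestamp[1] << 8) |
                        (uint64_t)timestamp[0];

        /* scale from ~105.47 kHz ticks to 27 MHz ticks */
        uint64_t stc_27mhz = tts * 256;

        printf("tts=%llu -> stc=%llu (27 MHz units)\n",
               (unsigned long long)tts,
               (unsigned long long)stc_27mhz);
        return 0;
    }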
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
index 7affcc6..ca7c15a 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -31,8 +31,6 @@
*/
#define TSIF_NAME_LENGTH 20
-#define MPQ_MAX_FOUND_PATTERNS 5
-
/**
* struct ts_packet_header - Transport packet header
* as defined in MPEG2 transport stream standard.
@@ -200,17 +198,6 @@
#endif
} __packed;
-/*
- * mpq_framing_prefix_size_masks - possible prefix sizes.
- *
- * @size_mask: a bit mask (per pattern) of possible prefix sizes to use
- * when searching for a pattern that started in the last buffer.
- * Updated in mpq_dmx_framing_pattern_search for use in the next lookup
- */
-struct mpq_framing_prefix_size_masks {
- u32 size_mask[MPQ_MAX_FOUND_PATTERNS];
-};
-
/**
* mpq_decoder_buffers_desc - decoder buffer(s) management information.
*
@@ -242,7 +229,7 @@
* @pes_header: Used for feeds that output data to decoder,
* holds PES header of current processed PES.
* @pes_header_left_bytes: Used for feeds that output data to decoder,
- * holds remainning PES header bytes of current processed PES.
+ * holds remaining PES header bytes of current processed PES.
* @pes_header_offset: Holds the offset within the current processed
* pes header.
* @fullness_wait_cancel: Flag used to signal to abort waiting for
@@ -257,6 +244,8 @@
* reported for this frame.
* @last_framing_match_type: Used for saving the type of
* the previous pattern match found in this video feed.
+ * @last_framing_match_stc: Used for saving the STC attached to TS packet
+ * of the previous pattern match found in this video feed.
* @found_sequence_header_pattern: Flag used to note that an MPEG-2
* Sequence Header, H.264 SPS or VC-1 Sequence Header pattern
* (whichever is relevant according to the video standard) had already
@@ -285,6 +274,7 @@
* buffer space.
* @last_pkt_index: used to save the last streambuffer packet index reported in
* a new elementary stream data event.
+ * @prev_stc: STC attached to the previous video TS packet
*/
struct mpq_video_feed_info {
struct mpq_streambuffer *video_buffer;
@@ -295,14 +285,16 @@
u32 pes_header_offset;
int fullness_wait_cancel;
enum mpq_adapter_stream_if stream_interface;
- const struct mpq_framing_pattern_lookup_params *patterns;
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
int patterns_num;
u32 frame_offset;
u32 last_pattern_offset;
u32 pending_pattern_len;
- enum dmx_framing_pattern_type last_framing_match_type;
+ u64 last_framing_match_type;
+ u64 last_framing_match_stc;
int found_sequence_header_pattern;
- struct mpq_framing_prefix_size_masks prefix_size;
+ struct dvb_dmx_video_prefix_size_masks prefix_size;
u32 first_prefix_size;
struct dmx_pts_dts_info saved_pts_dts_info;
struct dmx_pts_dts_info new_pts_dts_info;
@@ -315,6 +307,7 @@
u32 ts_packets_num;
u32 ts_dropped_bytes;
int last_pkt_index;
+ u64 prev_stc;
};
/**
@@ -369,6 +362,7 @@
* Used before each call to sdmx_process() to build up to date state.
* @sdmx_session_handle: Secure demux open session handle
* @sdmx_filter_count: Number of active secure demux filters
+ * @sdmx_eos: End-of-stream indication flag for current sdmx session
* @plugin_priv: Underlying plugin's own private data
* @hw_notification_interval: Notification interval in msec,
* exposed in debugfs.
@@ -415,6 +409,7 @@
int sdmx_session_handle;
int sdmx_session_ref_count;
int sdmx_filter_count;
+ int sdmx_eos;
void *plugin_priv;
/* debug-fs */
@@ -436,6 +431,7 @@
u32 sdmx_process_packets_sum;
u32 sdmx_process_packets_average;
u32 sdmx_process_packets_min;
+ enum sdmx_log_level sdmx_log_level;
struct timespec decoder_out_last_time;
struct timespec last_notification_time;
@@ -620,12 +616,13 @@
int mpq_dmx_process_pcr_packet(struct dvb_demux_feed *feed, const u8 *buf);
/**
- * mpq_dmx_init_hw_statistics -
- * Extend dvb-demux debugfs with HW statistics.
+ * mpq_dmx_init_debugfs_entries -
+ * Extend dvb-demux debugfs with mpq related entries (HW statistics and secure
+ * demux log level).
*
* @mpq_demux: The mpq_demux device to initialize.
*/
-void mpq_dmx_init_hw_statistics(struct mpq_demux *mpq_demux);
+void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux);
/**
* mpq_dmx_update_hw_statistics -
@@ -647,6 +644,20 @@
struct dmx_secure_mode *secure_mode);
/**
+ * mpq_dmx_convert_tts - Convert timestamp attached by HW to each TS
+ * packet to 27MHz.
+ *
+ * @feed: The feed with TTS attached
+ * @timestamp: Buffer holding the timestamp attached by the HW
+ * @timestampIn27Mhz: Timestamp result in 27MHz
+ *
+ * Stores the converted value in @timestampIn27Mhz.
+ */
+void mpq_dmx_convert_tts(struct dvb_demux_feed *feed,
+ const u8 timestamp[TIMESTAMP_LEN],
+ u64 *timestampIn27Mhz);
+
+/**
* mpq_sdmx_open_session - Handle the details of opening a new secure demux
* session for the specified mpq demux instance. Multiple calls to this
* is allowed, reference counting is managed to open it only when needed.
@@ -710,13 +721,15 @@
* @input: input buffer descriptor
* @fill_count: number of data bytes in input buffer that can be read
* @read_offset: offset in buffer for reading
+ * @tsp_size: size of single TS packet
*
* Return number of bytes read or error code
*/
int mpq_sdmx_process(struct mpq_demux *mpq_demux,
struct sdmx_buff_descr *input,
u32 fill_count,
- u32 read_offset);
+ u32 read_offset,
+ size_t tsp_size);
/**
* mpq_sdmx_loaded - Returns 1 if secure demux application is loaded,
@@ -725,6 +738,22 @@
*/
int mpq_sdmx_is_loaded(void);
+/**
+ * mpq_dmx_oob_command - Handles OOB command from dvb-demux.
+ *
+ * OOB marker commands trigger callback to the dmxdev.
+ * Handling of EOS command may trigger current (last on stream) PES/Frame to
+ * be reported, in addition to callback to the dmxdev.
+ * In case secure demux is active for the feed, EOS command is passed to the
+ * secure demux for handling.
+ *
+ * @feed: dvb demux feed object
+ * @cmd: oob command data
+ *
+ * returns 0 on success or error
+ */
+int mpq_dmx_oob_command(struct dvb_demux_feed *feed,
+ struct dmx_oob_command *cmd);
#endif /* _MPQ_DMX_PLUGIN_COMMON_H */
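[Editor's illustration, not part of the patch] A minimal sketch of how a caller
would use the new OOB hook to flush the last pending PES/frame of a feed at end of
stream. It assumes struct dmx_oob_command is laid out as the plugin code above uses
it (a type field plus marker parameters):

    #include "mpq_dmx_plugin_common.h"

    /* Signal end-of-stream on a feed through the OOB command path. */
    static int example_signal_eos(struct dvb_demux_feed *feed)
    {
        struct dmx_oob_command cmd = {
            .type = DMX_OOB_CMD_EOS,
        };

        return mpq_dmx_oob_command(feed, &cmd);
    }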
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c
index 3d48441..8855e85 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c
@@ -687,6 +687,8 @@
mpq_demux->demux.decoder_buffer_status = mpq_dmx_decoder_buffer_status;
mpq_demux->demux.reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer;
mpq_demux->demux.set_secure_mode = NULL;
+ mpq_demux->demux.oob_command = mpq_dmx_oob_command;
+ mpq_demux->demux.convert_ts = mpq_dmx_convert_tts;
/* Initialize dvb_demux object */
result = dvb_dmx_init(&mpq_demux->demux);
@@ -718,7 +720,7 @@
}
/* Extend dvb-demux debugfs with TSIF statistics. */
- mpq_dmx_init_hw_statistics(mpq_demux);
+ mpq_dmx_init_debugfs_entries(mpq_demux);
return 0;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
index beb4cce..43a65e9 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
@@ -382,7 +382,8 @@
buff_current_addr_phys - buff_start_addr_phys);
mpq_sdmx_process(mpq_demux, &input, aggregate_len,
- buff_current_addr_phys - buff_start_addr_phys);
+ buff_current_addr_phys - buff_start_addr_phys,
+ TSPP_RAW_TTS_SIZE);
}
for (i = 0; i < aggregate_count; i++)
@@ -1742,6 +1743,8 @@
mpq_demux->demux.decoder_buffer_status = mpq_dmx_decoder_buffer_status;
mpq_demux->demux.reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer;
mpq_demux->demux.set_secure_mode = mpq_dmx_set_secure_mode;
+ mpq_demux->demux.oob_command = mpq_dmx_oob_command;
+ mpq_demux->demux.convert_ts = mpq_dmx_convert_tts;
/* Initialize dvb_demux object */
result = dvb_dmx_init(&mpq_demux->demux);
@@ -1773,7 +1776,7 @@
}
/* Extend dvb-demux debugfs with TSPP statistics. */
- mpq_dmx_init_hw_statistics(mpq_demux);
+ mpq_dmx_init_debugfs_entries(mpq_demux);
return 0;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v2.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v2.c
index 60ce9e5..c306488 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v2.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -127,6 +127,8 @@
mpq_demux->demux.decoder_buffer_status = NULL;
mpq_demux->demux.reuse_decoder_buffer = NULL;
mpq_demux->demux.set_secure_mode = NULL;
+ mpq_demux->demux.oob_command = NULL;
+ mpq_demux->demux.convert_ts = NULL;
/* Initialize dvb_demux object */
result = dvb_dmx_init(&mpq_demux->demux);
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
index 946b055..14d3a39 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
@@ -38,7 +38,9 @@
SDMX_PROCESS_CMD,
SDMX_GET_DBG_COUNTERS_CMD,
SDMX_RESET_DBG_COUNTERS_CMD,
- SDMX_GET_VERSION_CMD
+ SDMX_GET_VERSION_CMD,
+ SDMX_INVALIDATE_KL_CMD,
+ SDMX_SET_LOG_LEVEL_CMD
};
struct sdmx_proc_req {
@@ -184,6 +186,15 @@
int32_t version;
};
+struct sdmx_set_log_level_req {
+ enum sdmx_cmd_id cmd_id;
+ enum sdmx_log_level level;
+ u32 session_handle;
+};
+
+struct sdmx_set_log_level_rsp {
+ enum sdmx_status ret;
+};
static void get_cmd_rsp_buffers(int handle_index,
void **cmd,
int *cmd_len,
@@ -935,3 +946,48 @@
return ret;
}
EXPORT_SYMBOL(sdmx_reset_dbg_counters);
+
+/*
+ * Set debug log verbosity level
+ *
+ * @session_handle: secure demux instance
+ * @level: requested log level
+ *
+ * Return error code
+ */
+int sdmx_set_log_level(int session_handle, enum sdmx_log_level level)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_set_log_level_req *cmd;
+ struct sdmx_set_log_level_rsp *rsp;
+ enum sdmx_status ret;
+
+ cmd_len = sizeof(struct sdmx_set_log_level_req);
+ rsp_len = sizeof(struct sdmx_set_log_level_rsp);
+
+ /* Get command and response buffers */
+ get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_SET_LOG_LEVEL_CMD;
+ cmd->session_handle = session_handle;
+ cmd->level = level;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+ ret = rsp->ret;
+
+ /* Unlock */
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return ret;
+}
+
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
index f9d85aa..6b669e4 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
@@ -78,6 +78,13 @@
SDMX_195_BYTE_PKT = 195,
};
+enum sdmx_log_level {
+ SDMX_LOG_NO_PRINT,
+ SDMX_LOG_MSG_ERROR,
+ SDMX_LOG_DEBUG,
+ SDMX_LOG_VERBOSE
+};
+
enum sdmx_status {
SDMX_SUCCESS = 0,
SDMX_STATUS_GENERAL_FAILURE = -1,
@@ -250,4 +257,6 @@
int sdmx_reset_dbg_counters(int session_handle);
+int sdmx_set_log_level(int session_handle, enum sdmx_log_level level);
+
#endif /* _MPQ_SDMX_H */
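[Editor's illustration, not part of the patch] The new sdmx_set_log_level() call is
treated as best effort in mpq_sdmx_open_session() above: a failure is logged but does
not tear down the session. A condensed sketch of that usage, assuming an already
opened session handle:

    #include <linux/printk.h>
    #include "mpq_sdmx.h"

    /* Raise secure demux verbosity to error prints only; non-fatal on failure. */
    static void example_sdmx_enable_error_logs(int session_handle)
    {
        int ret = sdmx_set_log_level(session_handle, SDMX_LOG_MSG_ERROR);

        if (ret != SDMX_SUCCESS)
            pr_warn("sdmx: could not set log level, ret=%d\n", ret);
    }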
diff --git a/drivers/media/platform/msm/dvb/include/mpq_adapter.h b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
index 23121b2..a2ade18 100644
--- a/drivers/media/platform/msm/dvb/include/mpq_adapter.h
+++ b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
@@ -14,6 +14,7 @@
#define _MPQ_ADAPTER_H
#include "dvbdev.h"
+#include "dvb_demux.h"
#include "mpq_stream_buffer.h"
@@ -36,36 +37,11 @@
MPQ_ADAPTER_MAX_NUM_OF_INTERFACES,
};
-
-enum dmx_framing_pattern_type {
- /* MPEG-2 */
- DMX_FRM_MPEG2_SEQUENCE_HEADER,
- DMX_FRM_MPEG2_GOP_HEADER,
- DMX_FRM_MPEG2_I_PIC,
- DMX_FRM_MPEG2_P_PIC,
- DMX_FRM_MPEG2_B_PIC,
- /* H.264 */
- DMX_FRM_H264_SPS,
- DMX_FRM_H264_PPS,
- /* H.264 First Coded slice of an IDR Picture */
- DMX_FRM_H264_IDR_PIC,
- /* H.264 First Coded slice of a non-IDR Picture */
- DMX_FRM_H264_NON_IDR_PIC,
- /* VC-1 Sequence Header*/
- DMX_FRM_VC1_SEQUENCE_HEADER,
- /* VC-1 Entry Point Header (Advanced Profile only) */
- DMX_FRM_VC1_ENTRY_POINT_HEADER,
- /* VC-1 Frame Start Code */
- DMX_FRM_VC1_FRAME_START_CODE,
- /* Unknown or invalid framing information */
- DMX_FRM_UNKNOWN
-};
-
enum dmx_packet_type {
- DMX_PADDING_PACKET,
DMX_PES_PACKET,
DMX_FRAMING_INFO_PACKET,
- DMX_EOS_PACKET
+ DMX_EOS_PACKET,
+ DMX_MARKER_PACKET
};
struct dmx_pts_dts_info {
@@ -83,15 +59,27 @@
};
struct dmx_framing_packet_info {
- /** framing pattern type */
- enum dmx_framing_pattern_type pattern_type;
+ /** framing pattern type, one of DMX_IDX_* definitions */
+ u64 pattern_type;
+
/** PTS/DTS information */
struct dmx_pts_dts_info pts_dts_info;
+
+ /** STC value attached to first TS packet holding the pattern */
+ u64 stc;
};
struct dmx_pes_packet_info {
/** PTS/DTS information */
struct dmx_pts_dts_info pts_dts_info;
+
+ /** STC value attached to first TS packet holding the PES */
+ u64 stc;
+};
+
+struct dmx_marker_info {
+ /* marker id */
+ u64 id;
};
/** The meta-data used for video interface */
@@ -103,6 +91,7 @@
union {
struct dmx_framing_packet_info framing;
struct dmx_pes_packet_info pes;
+ struct dmx_marker_info marker;
} info;
} __packed;
diff --git a/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c b/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
index 45a9dd5..0908a6e 100644
--- a/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
+++ b/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
@@ -131,24 +131,24 @@
switch (meta_data.packet_type) {
case DMX_FRAMING_INFO_PACKET:
switch (meta_data.info.framing.pattern_type) {
- case DMX_FRM_H264_SPS:
- case DMX_FRM_MPEG2_SEQUENCE_HEADER:
- case DMX_FRM_VC1_SEQUENCE_HEADER:
+ case DMX_IDX_H264_SPS:
+ case DMX_IDX_MPEG_SEQ_HEADER:
+ case DMX_IDX_VC1_SEQ_HEADER:
DBG("SPS FOUND\n");
frame_found = false;
break;
- case DMX_FRM_H264_PPS:
- case DMX_FRM_MPEG2_GOP_HEADER:
- case DMX_FRM_VC1_ENTRY_POINT_HEADER:
+ case DMX_IDX_H264_PPS:
+ case DMX_IDX_MPEG_GOP:
+ case DMX_IDX_VC1_ENTRY_POINT:
DBG("PPS FOUND\n");
frame_found = false;
break;
- case DMX_FRM_H264_IDR_PIC:
- case DMX_FRM_H264_NON_IDR_PIC:
- case DMX_FRM_MPEG2_I_PIC:
- case DMX_FRM_MPEG2_P_PIC:
- case DMX_FRM_MPEG2_B_PIC:
- case DMX_FRM_VC1_FRAME_START_CODE:
+ case DMX_IDX_H264_IDR_START:
+ case DMX_IDX_H264_NON_IDR_START:
+ case DMX_IDX_MPEG_I_FRAME_START:
+ case DMX_IDX_MPEG_P_FRAME_START:
+ case DMX_IDX_MPEG_B_FRAME_START:
+ case DMX_IDX_VC1_FRAME_START:
DBG("FRAME FOUND\n");
frame_found = true;
break;
@@ -186,7 +186,9 @@
case DMX_EOS_PACKET:
break;
case DMX_PES_PACKET:
- case DMX_PADDING_PACKET:
+ case DMX_MARKER_PACKET:
+ break;
+ default:
break;
}
} while (!frame_found);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index addd235..7fc8810 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -896,6 +896,19 @@
pkt->size += sizeof(u32) + sizeof(struct hfi_bitrate);
break;
}
+ case HAL_CONFIG_VENC_MAX_BITRATE:
+ {
+ struct hfi_bitrate *hfi;
+
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE;
+ hfi = (struct hfi_bitrate *) &pkt->rg_property_data[1];
+ hfi->bit_rate = ((struct hal_bitrate *)pdata)->bit_rate;
+ hfi->layer_id = ((struct hal_bitrate *)pdata)->layer_id;
+
+ pkt->size += sizeof(u32) + sizeof(struct hfi_bitrate);
+ break;
+ }
case HAL_PARAM_PROFILE_LEVEL_CURRENT:
{
struct hfi_profile_level *hfi;
@@ -1044,6 +1057,51 @@
pkt->size += sizeof(u32) + sizeof(struct hfi_quantization);
break;
}
+ case HAL_PARAM_VENC_SESSION_QP_RANGE:
+ {
+ struct hfi_quantization_range *hfi;
+ struct hfi_quantization_range *hal_range =
+ (struct hfi_quantization_range *) pdata;
+ u32 min_qp, max_qp;
+
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
+ hfi = (struct hfi_quantization_range *)
+ &pkt->rg_property_data[1];
+
+ min_qp = hal_range->min_qp;
+ max_qp = hal_range->max_qp;
+
+ /* We'll be packing in the qp, so make sure we
+ * won't be losing data when masking */
+ if (min_qp > 0xff || max_qp > 0xff) {
+ dprintk(VIDC_ERR, "qp value out of range\n");
+ rc = -ERANGE;
+ break;
+ }
+
+ /* When creating the packet, pack the qp value as
+ * 0xiippbb, where ii = qp range for I-frames,
+ * pp = qp range for P-frames, etc. */
+ hfi->min_qp = min_qp | min_qp << 8 | min_qp << 16;
+ hfi->max_qp = max_qp | max_qp << 8 | max_qp << 16;
+ hfi->layer_id = hal_range->layer_id;
+
+ pkt->size += sizeof(u32) +
+ sizeof(struct hfi_quantization_range);
+ break;
+ }
+ case HAL_PARAM_VENC_MAX_NUM_B_FRAMES:
+ {
+ struct hfi_max_num_b_frames *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
+ hfi = (struct hfi_max_num_b_frames *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_max_num_b_frames *) pdata,
+ sizeof(struct hfi_max_num_b_frames));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_max_num_b_frames);
+ break;
+ }
case HAL_CONFIG_VENC_INTRA_PERIOD:
{
struct hfi_intra_period *hfi;
@@ -1170,8 +1228,36 @@
pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
break;
}
+ case HAL_PARAM_VENC_H264_VUI_TIMING_INFO:
+ {
+ struct hfi_h264_vui_timing_info *hfi;
+ struct hal_h264_vui_timing_info *timing_info = pdata;
+
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO;
+
+ hfi = (struct hfi_h264_vui_timing_info *)&pkt->
+ rg_property_data[1];
+ hfi->enable = timing_info->enable;
+ hfi->fixed_frame_rate = timing_info->fixed_frame_rate;
+ hfi->time_scale = timing_info->time_scale;
+
+ pkt->size += sizeof(u32) +
+ sizeof(struct hfi_h264_vui_timing_info);
+ break;
+ }
case HAL_CONFIG_VPE_DEINTERLACE:
break;
+ case HAL_PARAM_VENC_H264_GENERATE_AUDNAL:
+ {
+ struct hfi_enable *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL;
+ hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+ hfi->enable = ((struct hal_enable *) pdata)->enable;
+ pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ break;
+ }
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
case HAL_CONFIG_BUFFER_REQUIREMENTS:
case HAL_CONFIG_PRIORITY:
@@ -1200,6 +1286,7 @@
case HAL_PARAM_VENC_LOW_LATENCY:
default:
dprintk(VIDC_ERR, "DEFAULT: Calling 0x%x", ptype);
+ rc = -ENOTSUPP;
break;
}
return rc;
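[Editor's illustration, not part of the patch] The session QP range case above packs
one 8-bit QP value into three byte lanes (0xiippbb: I-, P- and B-frame ranges), which
is also why values above 0xff are rejected before masking. A standalone sketch of the
packing:

    #include <stdint.h>
    #include <stdio.h>

    /* Replicate a QP value across the I/P/B byte lanes, as done for
     * hfi->min_qp and hfi->max_qp in the packetizer. */
    static uint32_t pack_qp(uint32_t qp)
    {
        return qp | qp << 8 | qp << 16;
    }

    int main(void)
    {
        /* min_qp=10 -> 0x0a0a0a, max_qp=51 -> 0x333333 */
        printf("min=0x%06x max=0x%06x\n", pack_qp(10), pack_qp(51));
        return 0;
    }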
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 102e1ec..43a3dad 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -174,8 +174,8 @@
switch (pkt->event_id) {
case HFI_EVENT_SYS_ERROR:
- dprintk(VIDC_ERR, "HFI_EVENT_SYS_ERROR: %d\n",
- pkt->event_data1);
+ dprintk(VIDC_ERR, "HFI_EVENT_SYS_ERROR: %d, 0x%x\n",
+ pkt->event_data1, pkt->event_data2);
hfi_process_sys_error(callback, device_id);
break;
case HFI_EVENT_SESSION_ERROR:
@@ -1003,12 +1003,6 @@
return;
}
- sess_close = (struct hal_session *)pkt->session_id;
- dprintk(VIDC_INFO, "deleted the session: 0x%x",
- sess_close->session_id);
- list_del(&sess_close->list);
- kfree(sess_close);
-
memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
cmd_done.device_id = device_id;
cmd_done.session_id =
@@ -1016,6 +1010,11 @@
cmd_done.status = hfi_map_err_status((u32)pkt->error_type);
cmd_done.data = NULL;
cmd_done.size = 0;
+ sess_close = (struct hal_session *)pkt->session_id;
+ dprintk(VIDC_INFO, "deleted the session: 0x%x",
+ sess_close->session_id);
+ list_del(&sess_close->list);
+ kfree(sess_close);
callback(SESSION_END_DONE, &cmd_done);
}
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 3b12a26..79a492e 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -152,16 +152,7 @@
rc = ion_handle_get_flags(client->clnt, hndl, &ionflags);
if (rc) {
dprintk(VIDC_ERR, "Failed to get ion flags: %d\n", rc);
- goto fail_map;
- }
- if (ION_IS_CACHED(ionflags)) {
- mem->kvaddr = ion_map_kernel(client->clnt, hndl);
- if (!mem->kvaddr) {
- dprintk(VIDC_ERR,
- "Failed to map shared mem in kernel\n");
- rc = -EIO;
- goto fail_map;
- }
+ goto fail_device_address;
}
mem->flags = ionflags;
@@ -184,9 +175,6 @@
mem->device_addr, mem->size);
return rc;
fail_device_address:
- if (mem->kvaddr)
- ion_unmap_kernel(client->clnt, hndl);
-fail_map:
ion_free(client->clnt, hndl);
fail_import_fd:
return rc;
@@ -366,20 +354,14 @@
rc = -EINVAL;
goto cache_op_failed;
}
- if (mem->kvaddr) {
- rc = msm_ion_do_cache_op(client->clnt,
- (struct ion_handle *)mem->smem_priv,
- (unsigned long *) mem->kvaddr,
- (unsigned long)mem->size,
- msm_cache_ops);
- if (rc) {
- dprintk(VIDC_ERR,
+ rc = msm_ion_do_cache_op(client->clnt,
+ (struct ion_handle *)mem->smem_priv,
+ 0, (unsigned long)mem->size,
+ msm_cache_ops);
+ if (rc) {
+ dprintk(VIDC_ERR,
"cache operation failed %d\n", rc);
- goto cache_op_failed;
- }
- } else {
- dprintk(VIDC_WARN,
- "cache operation failed as no kernel mapping\n");
+ goto cache_op_failed;
}
}
cache_op_failed:
diff --git a/drivers/media/platform/msm/vidc/msm_smem.h b/drivers/media/platform/msm/vidc/msm_smem.h
index b80d63e..4425909 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.h
+++ b/drivers/media/platform/msm/vidc/msm_smem.h
@@ -25,7 +25,7 @@
enum smem_prop {
SMEM_CACHED = ION_FLAG_CACHED,
- SMEM_SECURE = ION_SECURE,
+ SMEM_SECURE = ION_FLAG_SECURE,
};
enum hal_buffer {
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 2ca5008..f458a0a 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -24,7 +24,7 @@
#define MAX_NUM_OUTPUT_BUFFERS 6
enum msm_vdec_ctrl_cluster {
- MSM_VDEC_CTRL_CLUSTER_MAX = 1,
+ MSM_VDEC_CTRL_CLUSTER_MAX = 1 << 0,
};
static const char *const mpeg_video_vidc_divx_format[] = {
@@ -241,7 +241,7 @@
static u32 get_frame_size_compressed(int plane,
u32 height, u32 width)
{
- return (width * height * 3/2)/2;
+ return (width * height * 3/2)/4;
}
struct msm_vidc_format vdec_formats[] = {
@@ -621,13 +621,6 @@
frame_sz.height = inst->prop.height;
dprintk(VIDC_DBG, "width = %d, height = %d\n",
frame_sz.width, frame_sz.height);
- rc = msm_comm_try_set_prop(inst,
- HAL_PARAM_FRAME_SIZE, &frame_sz);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s: Failed : Frame size setting\n", __func__);
- goto exit;
- }
rc = msm_comm_try_get_bufreqs(inst);
if (rc) {
dprintk(VIDC_ERR,
@@ -1287,7 +1280,7 @@
break;
}
- if (property_id) {
+ if (!rc && property_id) {
dprintk(VIDC_DBG,
"Control: HAL property=%d,ctrl_id=%d,ctrl_value=%d\n",
property_id,
@@ -1364,7 +1357,7 @@
return NULL;
for (c = 0; c < NUM_CTRLS; c++) {
- if (msm_vdec_ctrls[c].cluster == type) {
+ if (msm_vdec_ctrls[c].cluster & type) {
cluster[sz] = msm_vdec_ctrls[c].priv;
++sz;
}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index bf29a95..5f47ae1 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -35,6 +35,7 @@
#define P_FRAME_QP 28
#define B_FRAME_QP 30
#define MAX_INTRA_REFRESH_MBS 300
+#define MAX_NUM_B_FRAMES 4
#define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
#define CODING V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY
@@ -107,16 +108,17 @@
};
enum msm_venc_ctrl_cluster {
- MSM_VENC_CTRL_CLUSTER_QP = 1,
- MSM_VENC_CTRL_CLUSTER_INTRA_PERIOD,
- MSM_VENC_CTRL_CLUSTER_H264_PROFILE_LEVEL,
- MSM_VENC_CTRL_CLUSTER_MPEG_PROFILE_LEVEL,
- MSM_VENC_CTRL_CLUSTER_H263_PROFILE_LEVEL,
- MSM_VENC_CTRL_CLUSTER_H264_ENTROPY,
- MSM_VENC_CTRL_CLUSTER_SLICING,
- MSM_VENC_CTRL_CLUSTER_INTRA_REFRESH,
- MSM_VENC_CTRL_CLUSTER_BITRATE,
- MSM_VENC_CTRL_CLUSTER_MAX,
+ MSM_VENC_CTRL_CLUSTER_QP = 1 << 0,
+ MSM_VENC_CTRL_CLUSTER_INTRA_PERIOD = 1 << 1,
+ MSM_VENC_CTRL_CLUSTER_H264_PROFILE_LEVEL = 1 << 2,
+ MSM_VENC_CTRL_CLUSTER_MPEG_PROFILE_LEVEL = 1 << 3,
+ MSM_VENC_CTRL_CLUSTER_H263_PROFILE_LEVEL = 1 << 4,
+ MSM_VENC_CTRL_CLUSTER_H264_ENTROPY = 1 << 5,
+ MSM_VENC_CTRL_CLUSTER_SLICING = 1 << 6,
+ MSM_VENC_CTRL_CLUSTER_INTRA_REFRESH = 1 << 7,
+ MSM_VENC_CTRL_CLUSTER_BITRATE = 1 << 8,
+ MSM_VENC_CTRL_CLUSTER_TIMING = 1 << 9,
+ MSM_VENC_CTRL_CLUSTER_MAX = 1 << 10,
};
static struct msm_vidc_ctrl msm_venc_ctrls[] = {
@@ -130,7 +132,7 @@
.step = 1,
.menu_skip_mask = 0,
.qmenu = NULL,
- .cluster = 0,
+ .cluster = MSM_VENC_CTRL_CLUSTER_TIMING,
},
{
.id = V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD,
@@ -208,7 +210,8 @@
(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR)
),
.qmenu = mpeg_video_rate_control,
- .cluster = MSM_VENC_CTRL_CLUSTER_BITRATE,
+ .cluster = MSM_VENC_CTRL_CLUSTER_BITRATE |
+ MSM_VENC_CTRL_CLUSTER_TIMING,
},
{
.id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
@@ -238,6 +241,18 @@
.cluster = MSM_VENC_CTRL_CLUSTER_BITRATE,
},
{
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ .name = "Peak Bit Rate",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = MIN_BIT_RATE,
+ .maximum = MAX_BIT_RATE,
+ .default_value = DEFAULT_BIT_RATE,
+ .step = BIT_RATE_STEP,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ .cluster = MSM_VENC_CTRL_CLUSTER_BITRATE,
+ },
+ {
.id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
.name = "Entropy Mode",
.type = V4L2_CTRL_TYPE_MENU,
@@ -405,6 +420,30 @@
.cluster = MSM_VENC_CTRL_CLUSTER_QP,
},
{
+ .id = V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+ .name = "H264 Minimum QP",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = 51,
+ .default_value = 1,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ .cluster = MSM_VENC_CTRL_CLUSTER_QP,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ .name = "H264 Maximum QP",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = 51,
+ .default_value = 51,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ .cluster = MSM_VENC_CTRL_CLUSTER_QP,
+ },
+ {
.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
.name = "Slice Mode",
.type = V4L2_CTRL_TYPE_MENU,
@@ -617,6 +656,25 @@
.qmenu = mpeg_video_vidc_extradata,
.step = 0,
},
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO,
+ .name = "H264 VUI Timing Info",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_DISABLED,
+ .maximum = V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_ENABLED,
+ .default_value =
+ V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_DISABLED,
+ .cluster = MSM_VENC_CTRL_CLUSTER_TIMING,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER,
+ .name = "H264 AU Delimiter",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED,
+ .maximum = V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_ENABLED,
+ .default_value =
+ V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -704,7 +762,7 @@
struct v4l2_ctrl *ctrl = NULL;
u32 extradata = 0;
if (!q || !q->drv_priv) {
- dprintk(VIDC_ERR, "Invalid input, q = %p\n", q);
+ dprintk(VIDC_ERR, "Invalid input\n");
return -EINVAL;
}
inst = q->drv_priv;
@@ -1104,8 +1162,10 @@
struct hal_multi_slice_control multi_slice_control;
struct hal_h264_db_control h264_db_control;
struct hal_enable enable;
+ struct hal_h264_vui_timing_info vui_timing_info;
+ struct hal_quantization_range qp_range;
u32 property_id = 0, property_val = 0;
- void *pdata;
+ void *pdata = NULL;
struct v4l2_ctrl *temp_ctrl = NULL;
struct hfi_device *hdev;
@@ -1178,11 +1238,24 @@
break;
case V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES:
temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES);
-
- property_id =
- HAL_CONFIG_VENC_INTRA_PERIOD;
intra_period.bframes = ctrl->val;
intra_period.pframes = temp_ctrl->val;
+ if (intra_period.bframes) {
+ u32 max_num_b_frames = MAX_NUM_B_FRAMES;
+ property_id =
+ HAL_PARAM_VENC_MAX_NUM_B_FRAMES;
+ pdata = &max_num_b_frames;
+ rc = call_hfi_op(hdev, session_set_property,
+ (void *)inst->session, property_id, pdata);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed : Setprop MAX_NUM_B_FRAMES"
+ "%d", rc);
+ break;
+ }
+ }
+ property_id =
+ HAL_CONFIG_VENC_INTRA_PERIOD;
pdata = &intra_period;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME:
@@ -1255,6 +1328,29 @@
bitrate.layer_id = 0;
pdata = &bitrate;
break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ {
+ struct v4l2_ctrl *avg_bitrate = TRY_GET_CTRL(
+ V4L2_CID_MPEG_VIDEO_BITRATE);
+
+ if (ctrl->val < avg_bitrate->val) {
+ dprintk(VIDC_ERR,
+ "Peak bitrate (%d) is lower than average bitrate (%d)",
+ ctrl->val, avg_bitrate->val);
+ rc = -EINVAL;
+ break;
+ } else if (ctrl->val < avg_bitrate->val * 2) {
+ dprintk(VIDC_WARN,
+ "Peak bitrate (%d) ideally should be twice the average bitrate (%d)",
+ ctrl->val, avg_bitrate->val);
+ }
+
+ property_id = HAL_CONFIG_VENC_MAX_BITRATE;
+ bitrate.bit_rate = ctrl->val;
+ bitrate.layer_id = 0;
+ pdata = &bitrate;
+ break;
+ }
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
temp_ctrl = TRY_GET_CTRL(
V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL);
@@ -1410,6 +1506,44 @@
pdata = &quantization;
break;
}
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: {
+ struct v4l2_ctrl *qp_max;
+
+ qp_max = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_MAX_QP);
+ if (ctrl->val >= qp_max->val) {
+ dprintk(VIDC_ERR, "Bad range: Min QP (%d) > Max QP(%d)",
+ ctrl->val, qp_max->val);
+ rc = -ERANGE;
+ break;
+ }
+
+ property_id = HAL_PARAM_VENC_SESSION_QP_RANGE;
+ qp_range.layer_id = 0;
+ qp_range.max_qp = qp_max->val;
+ qp_range.min_qp = ctrl->val;
+
+ pdata = &qp_range;
+ break;
+ }
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: {
+ struct v4l2_ctrl *qp_min;
+
+ qp_min = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_MIN_QP);
+ if (ctrl->val <= qp_min->val) {
+ dprintk(VIDC_ERR, "Bad range: Max QP (%d) < Min QP(%d)",
+ ctrl->val, qp_min->val);
+ rc = -ERANGE;
+ break;
+ }
+
+ property_id = HAL_PARAM_VENC_SESSION_QP_RANGE;
+ qp_range.layer_id = 0;
+ qp_range.max_qp = ctrl->val;
+ qp_range.min_qp = qp_min->val;
+
+ pdata = &qp_range;
+ break;
+ }
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: {
int temp = 0;
@@ -1589,14 +1723,74 @@
pdata = &extra;
break;
}
+ case V4L2_CID_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO:
+ {
+ struct v4l2_ctrl *rc_mode, *frame_rate;
+ bool cfr = false;
+
+ property_id = HAL_PARAM_VENC_H264_VUI_TIMING_INFO;
+ rc_mode = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL);
+ frame_rate = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE);
+
+ switch (rc_mode->val) {
+ case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR:
+ case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR:
+ cfr = true;
+ break;
+ default:
+ cfr = false;
+ break;
+ }
+
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_DISABLED:
+ vui_timing_info.enable = 0;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_ENABLED:
+ /* Only support this in CFR mode because we
+ * don't really know how to fill out vui_timing_info.
+ * time_scale in vfr mode. The assumed framerate
+ * might be incorrect. */
+ if (!cfr) {
+ dprintk(VIDC_ERR, "Can't set %x in VFR mode\n",
+ ctrl->id);
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ vui_timing_info.enable = 1;
+ vui_timing_info.fixed_frame_rate = cfr;
+ vui_timing_info.time_scale = frame_rate->val;
+ }
+
+ pdata = &vui_timing_info;
+ break;
+ }
+ case V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER:
+ property_id = HAL_PARAM_VENC_H264_GENERATE_AUDNAL;
+
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED:
+ enable.enable = 0;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_ENABLED:
+ enable.enable = 1;
+ break;
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ pdata = &enable;
+ break;
default:
rc = -ENOTSUPP;
break;
}
#undef TRY_GET_CTRL
- if (property_id) {
- dprintk(VIDC_DBG, "Control: HAL property=%d,ctrl_value=%d\n",
+ if (!rc && property_id) {
+ dprintk(VIDC_DBG, "Control: HAL property=%x,ctrl_value=%d\n",
property_id,
ctrl->val);
rc = call_hfi_op(hdev, session_set_property,
@@ -1622,10 +1816,13 @@
for (c = 0; c < ctrl->ncontrols; ++c) {
if (ctrl->cluster[c]->is_new) {
- rc = try_set_ctrl(inst, ctrl->cluster[c]);
+ struct v4l2_ctrl *temp = ctrl->cluster[c];
+
+ rc = try_set_ctrl(inst, temp);
if (rc) {
- dprintk(VIDC_ERR, "Failed setting %x",
- ctrl->cluster[c]->id);
+ dprintk(VIDC_ERR, "Failed setting %s (%x)",
+ v4l2_ctrl_get_name(temp->id),
+ temp->id);
break;
}
}
@@ -1979,7 +2176,7 @@
{
int rc = 0;
int i;
- struct vidc_buffer_addr_info buffer_info;
+ struct vidc_buffer_addr_info buffer_info = {0};
struct hfi_device *hdev;
int extra_idx = 0;
@@ -2042,7 +2239,7 @@
struct v4l2_buffer *b)
{
int i, rc = 0, extra_idx = 0;
- struct vidc_buffer_addr_info buffer_info;
+ struct vidc_buffer_addr_info buffer_info = {0};
struct hfi_device *hdev;
if (!inst || !inst->core || !inst->core->device) {
@@ -2187,7 +2384,7 @@
return NULL;
for (c = 0; c < NUM_CTRLS; c++) {
- if (msm_venc_ctrls[c].cluster == type) {
+ if (msm_venc_ctrls[c].cluster & type) {
cluster[sz] = msm_venc_ctrls[c].priv;
++sz;
}
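[Editor's illustration, not part of the patch] Turning the control cluster IDs into
power-of-two flags and matching with '&' instead of '==' lets one control belong to
several clusters at once, e.g. the rate-control control now sits in both the BITRATE
and TIMING clusters. A standalone sketch of the membership test:

    #include <stdio.h>

    enum ctrl_cluster {
        CLUSTER_BITRATE = 1 << 8,
        CLUSTER_TIMING  = 1 << 9,
    };

    int main(void)
    {
        unsigned int rc_ctrl = CLUSTER_BITRATE | CLUSTER_TIMING;

        /* With '==' this control would match neither cluster exactly;
         * with '&' it is picked up while building either cluster. */
        printf("in BITRATE: %d\n", !!(rc_ctrl & CLUSTER_BITRATE));
        printf("in TIMING:  %d\n", !!(rc_ctrl & CLUSTER_TIMING));
        return 0;
    }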
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 2cf9928..1ee9c67 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -22,7 +22,7 @@
#include "msm_smem.h"
#include "msm_vidc_debug.h"
-#define HW_RESPONSE_TIMEOUT 200
+#define HW_RESPONSE_TIMEOUT 1000
#define IS_ALREADY_IN_STATE(__p, __d) ({\
int __rc = (__p >= __d);\
@@ -373,7 +373,6 @@
{
struct msm_vidc_cb_cmd_done *response = data;
struct msm_vidc_inst *inst;
- struct v4l2_control control = {0};
struct msm_vidc_cb_event *event_notify;
int event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
int rc = 0;
@@ -382,15 +381,7 @@
event_notify = (struct msm_vidc_cb_event *) response->data;
switch (event_notify->hal_event_type) {
case HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES:
- event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
- control.id =
- V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER;
- rc = v4l2_g_ctrl(&inst->ctrl_handler, &control);
- if (rc)
- dprintk(VIDC_WARN,
- "Failed to get Smooth streamng flag\n");
- if (!rc && control.value == true)
- event = V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
+ event = V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
break;
case HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES:
event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
@@ -1291,7 +1282,6 @@
}
hdev = inst->core->device;
-
if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_LOAD_RESOURCES)) {
dprintk(VIDC_INFO, "inst: %p is already in state: %d\n",
inst, inst->state);
@@ -1449,7 +1439,7 @@
"Failed to send close\n");
goto exit;
}
- change_inst_state(inst, MSM_VIDC_OPEN);
+ change_inst_state(inst, MSM_VIDC_CLOSE);
exit:
return rc;
}
@@ -1677,6 +1667,7 @@
core->state == VIDC_CORE_INVALID) {
dprintk(VIDC_ERR,
"Core is in bad state can't change the state");
+ rc = -EINVAL;
goto exit;
}
flipped_state = get_flipped_state(inst->state, state);
@@ -1910,8 +1901,14 @@
dprintk(VIDC_ERR, "%s invalid parameters", __func__);
return -EINVAL;
}
- hdev = inst->core->device;
+ if (inst->state == MSM_VIDC_CORE_INVALID ||
+ inst->core->state == VIDC_CORE_INVALID) {
+ dprintk(VIDC_ERR,
+ "Core is in bad state can't query get_bufreqs()");
+ return -EINVAL;
+ }
+ hdev = inst->core->device;
mutex_lock(&inst->sync_lock);
if (inst->state < MSM_VIDC_OPEN_DONE || inst->state >= MSM_VIDC_CLOSE) {
dprintk(VIDC_ERR,
@@ -2397,6 +2394,7 @@
if (inst->capability.capability_set) {
if (msm_vp8_low_tier &&
+ inst->core->hfi_type == VIDC_HFI_VENUS &&
inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_VP8) {
capability->width.max = DEFAULT_WIDTH;
capability->height.max = DEFAULT_HEIGHT;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 8031c74..2e92f1f 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -282,6 +282,7 @@
u32 packet_size_in_words, new_read_idx;
u32 *read_ptr;
struct vidc_iface_q_info *qinfo;
+ int rc = 0;
if (!info || !packet || !pb_tx_req_is_set) {
dprintk(VIDC_ERR, "Invalid Params");
@@ -317,17 +318,27 @@
new_read_idx = queue->qhdr_read_idx + packet_size_in_words;
dprintk(VIDC_DBG, "Read Ptr: %d", (u32) new_read_idx);
- if (new_read_idx < queue->qhdr_q_size) {
- memcpy(packet, read_ptr,
- packet_size_in_words << 2);
- } else {
- new_read_idx -= queue->qhdr_q_size;
- memcpy(packet, read_ptr,
+ if (((packet_size_in_words << 2) <= VIDC_IFACEQ_MED_PKT_SIZE)
+ && queue->qhdr_read_idx <= queue->qhdr_q_size) {
+ if (new_read_idx < queue->qhdr_q_size) {
+ memcpy(packet, read_ptr,
+ packet_size_in_words << 2);
+ } else {
+ new_read_idx -= queue->qhdr_q_size;
+ memcpy(packet, read_ptr,
(packet_size_in_words - new_read_idx) << 2);
- memcpy(packet + ((packet_size_in_words -
- new_read_idx) << 2),
- (u8 *)qinfo->q_array.align_virtual_addr,
- new_read_idx << 2);
+ memcpy(packet + ((packet_size_in_words -
+ new_read_idx) << 2),
+ (u8 *)qinfo->q_array.align_virtual_addr,
+ new_read_idx << 2);
+ }
+ } else {
+ dprintk(VIDC_WARN,
+ "BAD packet received, read_idx: 0x%x, pkt_size: %d\n",
+ queue->qhdr_read_idx, packet_size_in_words << 2);
+ dprintk(VIDC_WARN, "Dropping this packet\n");
+ new_read_idx = queue->qhdr_write_idx;
+ rc = -ENODATA;
}
queue->qhdr_read_idx = new_read_idx;
@@ -340,7 +351,7 @@
*pb_tx_req_is_set = (1 == queue->qhdr_tx_req) ? 1 : 0;
venus_hfi_hal_sim_modify_msg_packet(packet);
dprintk(VIDC_DBG, "Out : ");
- return 0;
+ return rc;
}
static int venus_hfi_alloc(void *mem, void *clnt, u32 size, u32 align,
@@ -547,7 +558,7 @@
q_info = &device->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
if (!q_info) {
dprintk(VIDC_ERR, "cannot write to shared Q's");
- goto err_q_write;
+ goto err_q_null;
}
mutex_lock(&device->clock_lock);
result = venus_hfi_clk_gating_off(device);
@@ -572,8 +583,9 @@
dprintk(VIDC_ERR, "venus_hfi_iface_cmdq_write:queue_full");
}
err_q_write:
- mutex_unlock(&device->write_lock);
mutex_unlock(&device->clock_lock);
+err_q_null:
+ mutex_unlock(&device->write_lock);
return result;
}
@@ -592,7 +604,7 @@
q_array.align_virtual_addr == 0) {
dprintk(VIDC_ERR, "cannot read from shared MSG Q's");
rc = -ENODATA;
- goto read_error;
+ goto read_error_null;
}
q_info = &device->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
mutex_lock(&device->clock_lock);
@@ -614,8 +626,9 @@
rc = -ENODATA;
}
read_error:
- mutex_unlock(&device->read_lock);
mutex_unlock(&device->clock_lock);
+read_error_null:
+ mutex_unlock(&device->read_lock);
return rc;
}
@@ -634,7 +647,7 @@
q_array.align_virtual_addr == 0) {
dprintk(VIDC_ERR, "cannot read from shared DBG Q's");
rc = -ENODATA;
- goto dbg_error;
+ goto dbg_error_null;
}
mutex_lock(&device->clock_lock);
rc = venus_hfi_clk_gating_off(device);
@@ -656,8 +669,9 @@
rc = -ENODATA;
}
dbg_error:
- mutex_unlock(&device->read_lock);
mutex_unlock(&device->clock_lock);
+dbg_error_null:
+ mutex_unlock(&device->read_lock);
return rc;
}
@@ -1070,8 +1084,8 @@
disable_irq_nosync(dev->hal_data->irq);
dev->intr_status = 0;
venus_hfi_interface_queues_release(dev);
+ mutex_unlock(&dev->clock_lock);
}
- mutex_unlock(&dev->clock_lock);
dprintk(VIDC_INFO, "HAL exited\n");
return 0;
}
@@ -1867,21 +1881,46 @@
if (((ctrl_status & VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK)
!= 0) && !rc)
venus_hfi_clk_gating_on(device);
- mutex_unlock(&device->write_lock);
mutex_unlock(&device->clock_lock);
+ mutex_unlock(&device->write_lock);
return rc;
}
+static void venus_hfi_process_msg_event_notify(
+ struct venus_hfi_device *device, void *packet)
+{
+ struct hfi_sfr_struct *vsfr = NULL;
+ struct hfi_msg_event_notify_packet *event_pkt;
+ struct vidc_hal_msg_pkt_hdr *msg_hdr;
+ msg_hdr = (struct vidc_hal_msg_pkt_hdr *)packet;
+ event_pkt =
+ (struct hfi_msg_event_notify_packet *)msg_hdr;
+ if (event_pkt && event_pkt->event_id ==
+ HFI_EVENT_SYS_ERROR) {
+ vsfr = (struct hfi_sfr_struct *)
+ device->sfr.align_virtual_addr;
+ if (vsfr)
+ dprintk(VIDC_ERR, "SFR Message from FW : %s",
+ vsfr->rg_data);
+ }
+}
static void venus_hfi_response_handler(struct venus_hfi_device *device)
{
u8 packet[VIDC_IFACEQ_MED_PKT_SIZE];
u32 rc = 0;
+ struct hfi_sfr_struct *vsfr = NULL;
dprintk(VIDC_INFO, "#####venus_hfi_response_handler#####\n");
if (device) {
if ((device->intr_status &
VIDC_WRAPPER_INTR_CLEAR_A2HWD_BMSK)) {
dprintk(VIDC_ERR, "Received: Watchdog timeout %s",
__func__);
+ vsfr = (struct hfi_sfr_struct *)
+ device->sfr.align_virtual_addr;
+ if (vsfr)
+ dprintk(VIDC_ERR,
+ "SFR Message from FW : %s",
+ vsfr->rg_data);
venus_hfi_process_sys_watchdog_timeout(device);
}
@@ -1889,6 +1928,9 @@
rc = hfi_process_msg_packet(device->callback,
device->device_id,
(struct vidc_hal_msg_pkt_hdr *) packet);
+ if (rc == HFI_MSG_EVENT_NOTIFY)
+ venus_hfi_process_msg_event_notify(
+ device, (void *)packet);
}
while (!venus_hfi_iface_dbgq_read(device, packet)) {
struct hfi_msg_sys_debug_packet *pkt =
@@ -2749,9 +2791,10 @@
return;
}
if (device->resources.fw.cookie) {
+ flush_workqueue(device->vidc_workq);
venus_hfi_disable_clks(device);
- venus_hfi_iommu_detach(device);
subsystem_put(device->resources.fw.cookie);
+ venus_hfi_iommu_detach(device);
device->resources.fw.cookie = NULL;
}
}
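
The venus_hfi.c error paths above were reworked so that each lock gets its own unwind label and the locks are dropped in the reverse order they were taken: clock_lock is released before the outer write/read lock, and never released if it was never acquired. A hedged sketch of that goto-label unwind pattern follows, using POSIX mutexes and made-up helper names purely for illustration.

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t clock_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-ins for the queue lookup and the actual write. */
static void *lookup_cmd_queue(void) { return (void *)1; }
static int write_to_queue(void *q) { (void)q; return 0; }

static int cmdq_write_sketch(void)
{
	void *q;
	int rc;

	pthread_mutex_lock(&write_lock);
	q = lookup_cmd_queue();
	if (!q) {
		rc = -ENODEV;
		goto err_q_null;	/* clock_lock not taken: skip its unlock */
	}

	pthread_mutex_lock(&clock_lock);
	rc = write_to_queue(q);
	if (rc)
		goto err_q_write;

	rc = 0;
/* Unwind in reverse order of acquisition. */
err_q_write:
	pthread_mutex_unlock(&clock_lock);
err_q_null:
	pthread_mutex_unlock(&write_lock);
	return rc;
}

int main(void)
{
	return cmdq_write_sketch() ? 1 : 0;
}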
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 075b391..1311752 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -113,8 +113,6 @@
#define HFI_PROPERTY_SYS_OX_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
-#define HFI_PROPERTY_SYS_IDLE_INDICATOR \
- (HFI_PROPERTY_SYS_OX_START + 0x001)
#define HFI_PROPERTY_PARAM_OX_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
@@ -333,7 +331,6 @@
#define HFI_MSG_SYS_OX_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x0000)
-#define HFI_MSG_SYS_IDLE (HFI_MSG_SYS_OX_START + 0x1)
#define HFI_MSG_SYS_PING_ACK (HFI_MSG_SYS_OX_START + 0x2)
#define HFI_MSG_SYS_PROPERTY_INFO (HFI_MSG_SYS_OX_START + 0x3)
#define HFI_MSG_SYS_SESSION_ABORT_DONE (HFI_MSG_SYS_OX_START + 0x4)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index e20348d..3fbfec4 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -130,6 +130,7 @@
HAL_PARAM_VENC_H264_DEBLOCK_CONTROL,
HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF,
HAL_PARAM_VENC_SESSION_QP,
+ HAL_PARAM_VENC_SESSION_QP_RANGE,
HAL_CONFIG_VENC_INTRA_PERIOD,
HAL_CONFIG_VENC_IDR_PERIOD,
HAL_CONFIG_VPE_OPERATIONS,
@@ -166,6 +167,10 @@
HAL_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER,
HAL_PARAM_VDEC_SYNC_FRAME_DECODE,
HAL_PARAM_VENC_H264_ENTROPY_CABAC_MODEL,
+ HAL_CONFIG_VENC_MAX_BITRATE,
+ HAL_PARAM_VENC_H264_VUI_TIMING_INFO,
+ HAL_PARAM_VENC_H264_GENERATE_AUDNAL,
+ HAL_PARAM_VENC_MAX_NUM_B_FRAMES,
};
enum hal_domain {
@@ -593,6 +598,12 @@
u32 layer_id;
};
+struct hal_quantization_range {
+ u32 min_qp;
+ u32 max_qp;
+ u32 layer_id;
+};
+
struct hal_intra_period {
u32 pframes;
u32 bframes;
@@ -764,6 +775,13 @@
u32 time_stamp_scale;
};
+
+struct hal_h264_vui_timing_info {
+ u32 enable;
+ u32 fixed_frame_rate;
+ u32 time_scale;
+};
+
enum vidc_resource_id {
VIDC_RESOURCE_OCMEM = 0x00000001,
VIDC_UNUSED_RESORUCE = 0x10000000,
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index baf7bc4..6234dba 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -195,6 +195,8 @@
(HFI_PROPERTY_SYS_COMMON_START + 0x002)
#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ \
(HFI_PROPERTY_SYS_COMMON_START + 0x003)
+#define HFI_PROPERTY_SYS_IDLE_INDICATOR \
+ (HFI_PROPERTY_SYS_COMMON_START + 0x004)
#define HFI_PROPERTY_PARAM_COMMON_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
@@ -294,10 +296,18 @@
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x019)
#define HFI_PROPERTY_PARAM_VENC_HIER_P_NUM_ENH_LAYER \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01A)
-
#define HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01B)
-
+#define HFI_PROPERTY_PARAM_VENC_H264_LTRMODE \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01C)
+#define HFI_PROPERTY_PARAM_VENC_VIDEO_FULL_RANGE \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01D)
+#define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
+#define HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01F)
+#define HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x020)
#define HFI_PROPERTY_CONFIG_VENC_COMMON_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE \
@@ -417,6 +427,10 @@
u32 idr_period;
};
+struct hfi_max_num_b_frames {
+ u32 max_num_b_frames;
+};
+
struct hfi_intra_period {
u32 pframes;
u32 bframes;
@@ -500,6 +514,12 @@
u32 height;
};
+struct hfi_h264_vui_timing_info {
+ u32 enable;
+ u32 fixed_frame_rate;
+ u32 time_scale;
+};
+
#define HFI_COLOR_FORMAT_MONOCHROME (HFI_COMMON_BASE + 0x1)
#define HFI_COLOR_FORMAT_NV12 (HFI_COMMON_BASE + 0x2)
#define HFI_COLOR_FORMAT_NV21 (HFI_COMMON_BASE + 0x3)
@@ -679,6 +699,7 @@
#define HFI_MSG_SYS_DEBUG (HFI_MSG_SYS_COMMON_START + 0x4)
#define HFI_MSG_SYS_SESSION_INIT_DONE (HFI_MSG_SYS_COMMON_START + 0x6)
#define HFI_MSG_SYS_SESSION_END_DONE (HFI_MSG_SYS_COMMON_START + 0x7)
+#define HFI_MSG_SYS_IDLE (HFI_MSG_SYS_COMMON_START + 0x8)
#define HFI_MSG_SESSION_COMMON_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + \
@@ -883,5 +904,4 @@
u32 packet_type;
u32 trigger_type;
};
-
#endif
diff --git a/drivers/media/platform/msm/wfd/enc-mfc-subdev.c b/drivers/media/platform/msm/wfd/enc-mfc-subdev.c
index fee7b47..ceb0149 100644
--- a/drivers/media/platform/msm/wfd/enc-mfc-subdev.c
+++ b/drivers/media/platform/msm/wfd/enc-mfc-subdev.c
@@ -2040,7 +2040,7 @@
}
heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
heap_mask |= inst->secure ? 0 : ION_HEAP(ION_IOMMU_HEAP_ID);
- ion_flags |= inst->secure ? ION_SECURE : 0;
+ ion_flags |= inst->secure ? ION_FLAG_SECURE : 0;
if (vcd_get_ion_status()) {
for (i = 0; i < 4; ++i) {
diff --git a/drivers/media/platform/msm/wfd/enc-venus-subdev.c b/drivers/media/platform/msm/wfd/enc-venus-subdev.c
index b719b3f..4f7fb44 100644
--- a/drivers/media/platform/msm/wfd/enc-venus-subdev.c
+++ b/drivers/media/platform/msm/wfd/enc-venus-subdev.c
@@ -28,6 +28,7 @@
#define BUF_TYPE_INPUT V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
static struct ion_client *venc_ion_client;
+static long venc_secure(struct v4l2_subdev *sd);
struct index_bitmap {
unsigned long *bitmap;
@@ -321,8 +322,9 @@
goto venc_open_fail;
}
- inst->secure = false;
inst->vmops = *vmops;
+ inst->secure = vmops->secure; /* We need to inform vidc, but defer
+ until after s_fmt() */
INIT_LIST_HEAD(&inst->registered_output_bufs.list);
INIT_LIST_HEAD(&inst->registered_input_bufs.list);
init_completion(&inst->dq_complete);
@@ -903,6 +905,15 @@
WFD_MSG_ERR("Failed to format for input port\n");
goto venc_set_format_fail;
}
+
+ /* If the device was secured previously, we need to inform vidc _now_ */
+ if (inst->secure) {
+ rc = venc_secure(sd);
+ if (rc) {
+ WFD_MSG_ERR("Failed secure vidc\n");
+ goto venc_set_format_fail;
+ }
+ }
venc_set_format_fail:
return rc;
}
@@ -1329,12 +1340,6 @@
rc = -EEXIST;
}
- if (inst->secure) {
- /* Nothing to do! */
- rc = 0;
- goto secure_fail;
- }
-
ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE;
rc = msm_vidc_s_ctrl(inst->vidc_context, &ctrl);
if (rc) {
@@ -1342,7 +1347,6 @@
goto secure_fail;
}
- inst->secure = true;
secure_fail:
return rc;
}
@@ -1419,9 +1423,6 @@
case SET_FRAMERATE_MODE:
rc = venc_set_framerate_mode(sd, arg);
break;
- case ENC_SECURE:
- rc = venc_secure(sd);
- break;
default:
WFD_MSG_ERR("Unknown ioctl %d to enc-subdev\n", cmd);
rc = -ENOTSUPP;
diff --git a/drivers/media/platform/msm/wfd/mdp-4-subdev.c b/drivers/media/platform/msm/wfd/mdp-4-subdev.c
index d2ecd22..465ec21 100644
--- a/drivers/media/platform/msm/wfd/mdp-4-subdev.c
+++ b/drivers/media/platform/msm/wfd/mdp-4-subdev.c
@@ -11,7 +11,6 @@
*
*/
#include <linux/msm_mdp.h>
-#include <linux/switch.h>
#include <mach/iommu_domains.h>
#include <media/videobuf2-core.h>
#include "enc-subdev.h"
@@ -24,7 +23,6 @@
u32 width;
bool secure;
bool uses_iommu_split_domain;
- struct switch_dev sdev;
};
int mdp_init(struct v4l2_subdev *sd, u32 val)
@@ -56,13 +54,7 @@
rc = -ENODEV;
goto mdp_open_fail;
}
- inst->sdev.name = "wfd";
- /* Register wfd node to switch driver */
- rc = switch_dev_register(&inst->sdev);
- if (rc) {
- WFD_MSG_ERR("WFD switch registration failed\n");
- goto mdp_open_fail;
- }
+
msm_fb_writeback_init(fbi);
inst->mdp = fbi;
inst->secure = mops->secure;
@@ -92,8 +84,6 @@
rc = -ENODEV;
goto exit;
}
- switch_set_state(&inst->sdev, true);
- WFD_MSG_DBG("wfd state switched to %d\n", inst->sdev.state);
}
exit:
return rc;
@@ -110,8 +100,6 @@
return rc;
}
fbi = (struct fb_info *)inst->mdp;
- switch_set_state(&inst->sdev, false);
- WFD_MSG_DBG("wfd state switched to %d\n", inst->sdev.state);
}
return 0;
}
@@ -123,8 +111,6 @@
fbi = (struct fb_info *)inst->mdp;
msm_fb_writeback_terminate(fbi);
kfree(inst);
- /* Unregister wfd node from switch driver */
- switch_dev_unregister(&inst->sdev);
}
return 0;
}
diff --git a/drivers/media/platform/msm/wfd/mdp-5-subdev.c b/drivers/media/platform/msm/wfd/mdp-5-subdev.c
index 16de0d4..3c546d0 100644
--- a/drivers/media/platform/msm/wfd/mdp-5-subdev.c
+++ b/drivers/media/platform/msm/wfd/mdp-5-subdev.c
@@ -11,7 +11,6 @@
*
*/
#include <linux/msm_mdp.h>
-#include <linux/switch.h>
#include <mach/iommu_domains.h>
#include <media/videobuf2-core.h>
#include "enc-subdev.h"
@@ -23,9 +22,10 @@
u32 height;
u32 width;
bool secure;
- struct switch_dev sdev;
};
+static int mdp_secure(struct v4l2_subdev *sd, void *arg);
+
int mdp_init(struct v4l2_subdev *sd, u32 val)
{
return 0;
@@ -47,10 +47,6 @@
WFD_MSG_ERR("Invalid arguments\n");
rc = -EINVAL;
goto mdp_open_fail;
- } else if (mops->secure) {
- /* Deprecated API; use MDP_SECURE ioctl */
- WFD_MSG_ERR("Deprecated API for securing subdevice\n");
- return -ENOTSUPP;
}
fbi = msm_fb_get_writeback_fb();
@@ -59,19 +55,24 @@
rc = -ENODEV;
goto mdp_open_fail;
}
- inst->sdev.name = "wfd";
- /* Register wfd node to switch driver */
- rc = switch_dev_register(&inst->sdev);
- if (rc) {
- WFD_MSG_ERR("WFD switch registration failed\n");
- goto mdp_open_fail;
- }
+
msm_fb_writeback_init(fbi);
+
inst->mdp = fbi;
inst->secure = mops->secure;
+ if (mops->secure) {
+ rc = mdp_secure(sd, inst);
+ if (rc) {
+ WFD_MSG_ERR("Couldn't secure MDP\n");
+ goto mdp_secure_fail;
+ }
+ }
+
mops->cookie = inst;
- return rc;
+ return 0;
+mdp_secure_fail:
+ msm_fb_writeback_terminate(inst->mdp);
mdp_open_fail:
kfree(inst);
return rc;
@@ -94,8 +95,6 @@
rc = -ENODEV;
goto exit;
}
- switch_set_state(&inst->sdev, true);
- WFD_MSG_DBG("wfd state switched to %d\n", inst->sdev.state);
}
exit:
return rc;
@@ -112,13 +111,13 @@
WFD_MSG_ERR("Failed to stop writeback mode\n");
return rc;
}
+
fbi = (struct fb_info *)inst->mdp;
- switch_set_state(&inst->sdev, false);
- WFD_MSG_DBG("wfd state switched to %d\n", inst->sdev.state);
}
return 0;
}
-int mdp_close(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_close(struct v4l2_subdev *sd, void *arg)
{
struct mdp_instance *inst = arg;
struct fb_info *fbi = NULL;
@@ -127,13 +126,12 @@
if (inst->secure)
msm_fb_writeback_set_secure(inst->mdp, false);
msm_fb_writeback_terminate(fbi);
- /* Unregister wfd node from switch driver */
- switch_dev_unregister(&inst->sdev);
kfree(inst);
}
return 0;
}
-int mdp_q_buffer(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_q_buffer(struct v4l2_subdev *sd, void *arg)
{
int rc = 0;
struct mdp_buf_info *binfo = arg;
@@ -161,7 +159,8 @@
WFD_MSG_ERR("Failed to queue buffer\n");
return rc;
}
-int mdp_dq_buffer(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_dq_buffer(struct v4l2_subdev *sd, void *arg)
{
int rc = 0;
struct mdp_buf_info *obuf = arg;
@@ -184,7 +183,8 @@
obuf->cookie = (void *)fbdata.priv;
return rc;
}
-int mdp_set_prop(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_set_prop(struct v4l2_subdev *sd, void *arg)
{
struct mdp_prop *prop = (struct mdp_prop *)arg;
struct mdp_instance *inst = prop->inst;
@@ -197,7 +197,7 @@
return 0;
}
-int mdp_mmap(struct v4l2_subdev *sd, void *arg)
+static int mdp_mmap(struct v4l2_subdev *sd, void *arg)
{
int rc = 0, align = 0;
struct mem_region_map *mmap = arg;
@@ -250,7 +250,7 @@
return rc;
}
-int mdp_munmap(struct v4l2_subdev *sd, void *arg)
+static int mdp_munmap(struct v4l2_subdev *sd, void *arg)
{
struct mem_region_map *mmap = arg;
struct mem_region *mregion;
@@ -278,7 +278,7 @@
return 0;
}
-int mdp_secure(struct v4l2_subdev *sd, void *arg)
+static int mdp_secure(struct v4l2_subdev *sd, void *arg)
{
struct mdp_instance *inst = NULL;
int rc = 0;
@@ -331,9 +331,6 @@
case MDP_MUNMAP:
rc = mdp_munmap(sd, arg);
break;
- case MDP_SECURE:
- rc = mdp_secure(sd, arg);
- break;
default:
WFD_MSG_ERR("IOCTL: %u not supported\n", cmd);
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/wfd/wfd-ioctl.c b/drivers/media/platform/msm/wfd/wfd-ioctl.c
index e589878..b1b1980 100644
--- a/drivers/media/platform/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/platform/msm/wfd/wfd-ioctl.c
@@ -168,7 +168,7 @@
if (secure) {
alloc_regions = ION_HEAP(ION_CP_MM_HEAP_ID);
- ion_flags = ION_SECURE;
+ ion_flags = ION_FLAG_SECURE;
align = SZ_1M;
} else {
alloc_regions = ION_HEAP(ION_IOMMU_HEAP_ID);
@@ -1006,6 +1006,7 @@
spin_unlock_irqrestore(&inst->inst_lock, flags);
WFD_MSG_DBG("Calling videobuf_streamoff\n");
vb2_streamoff(&inst->vid_bufq, i);
+ wake_up(&inst->event_handler.wait);
return 0;
}
static int wfdioc_dqbuf(struct file *filp, void *fh,
@@ -1040,30 +1041,9 @@
{
int rc = 0;
struct wfd_device *wfd_dev = video_drvdata(filp);
- struct wfd_inst *inst = file_to_inst(filp);
- switch (a->id) {
- case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
- rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
- ioctl, ENC_SECURE, NULL);
- if (rc) {
- WFD_MSG_ERR("Couldn't secure encoder");
- break;
- }
-
- rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core,
- ioctl, MDP_SECURE, (void *)inst->mdp_inst);
- if (rc) {
- WFD_MSG_ERR("Couldn't secure MDP");
- break;
- }
-
- wfd_dev->secure = true;
- break;
- default:
- rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
- ioctl, SET_PROP, a);
- }
+ rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
+ ioctl, SET_PROP, a);
if (rc)
WFD_MSG_ERR("Failed to set encoder property\n");
@@ -1566,14 +1546,22 @@
unsigned int wfd_poll(struct file *filp, struct poll_table_struct *pt)
{
struct wfd_inst *inst = file_to_inst(filp);
- unsigned int flags = 0;
+ unsigned int poll_flags = 0;
+ unsigned long flags;
+ bool streamoff = false;
poll_wait(filp, &inst->event_handler.wait, pt);
- if (v4l2_event_pending(&inst->event_handler))
- flags |= POLLPRI;
+ spin_lock_irqsave(&inst->inst_lock, flags);
+ streamoff = inst->streamoff;
+ spin_unlock_irqrestore(&inst->inst_lock, flags);
- return flags;
+ if (v4l2_event_pending(&inst->event_handler))
+ poll_flags |= POLLPRI;
+ if (streamoff)
+ poll_flags |= POLLERR;
+
+ return poll_flags;
}
static const struct v4l2_file_operations g_wfd_fops = {
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 7c73d6c..046faac 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -15,6 +15,7 @@
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
+#include <linux/ratelimit.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h>
#include <linux/mfd/wcd9xxx/core.h>
@@ -33,8 +34,6 @@
#define SLIMBUS_PRESENT_TIMEOUT 100
#define MAX_WCD9XXX_DEVICE 4
-#define TABLA_I2C_MODE 0x03
-#define SITAR_I2C_MODE 0x01
#define CODEC_DT_MAX_PROP_SIZE 40
#define WCD9XXX_I2C_GSBI_SLAVE_ID "3-000d"
#define WCD9XXX_I2C_TOP_SLAVE_ADDR 0x0d
@@ -58,18 +57,9 @@
int mod_id;
};
-static char *taiko_supplies[] = {
- WCD9XXX_SUPPLY_BUCK_NAME, "cdc-vdd-tx-h", "cdc-vdd-rx-h", "cdc-vddpx-1",
- "cdc-vdd-a-1p2v", "cdc-vddcx-1", "cdc-vddcx-2",
-};
-
-static char *tapan_supplies[] = {
- WCD9XXX_SUPPLY_BUCK_NAME, "cdc-vdd-h", "cdc-vdd-px",
- "cdc-vdd-a-1p2v", "cdc-vdd-cx"
-};
-
static int wcd9xxx_dt_parse_vreg_info(struct device *dev,
- struct wcd9xxx_regulator *vreg, const char *vreg_name);
+ struct wcd9xxx_regulator *vreg,
+ const char *vreg_name, bool ondemand);
static int wcd9xxx_dt_parse_micbias_info(struct device *dev,
struct wcd9xxx_micbias_setting *micbias);
static struct wcd9xxx_pdata *wcd9xxx_populate_dt_pdata(struct device *dev);
@@ -291,49 +281,60 @@
},
};
-static struct wcd9xx_codec_type {
- u8 byte[4];
- struct mfd_cell *dev;
- int size;
- int num_irqs;
- int version; /* -1 to retrive version from chip version register */
- enum wcd9xxx_slim_slave_addr_type slim_slave_type;
-} wcd9xxx_codecs[] = {
+
+enum wcd9xxx_chipid_major {
+ TABLA_MAJOR = cpu_to_le16(0x100),
+ SITAR_MAJOR = cpu_to_le16(0x101),
+ TAIKO_MAJOR = cpu_to_le16(0x102),
+ TAPAN_MAJOR = cpu_to_le16(0x103),
+};
+
+static const struct wcd9xxx_codec_type wcd9xxx_codecs[] = {
{
- {0x2, 0x0, 0x0, 0x1}, tabla_devs, ARRAY_SIZE(tabla_devs),
- TABLA_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA
+ TABLA_MAJOR, cpu_to_le16(0x1), tabla1x_devs,
+ ARRAY_SIZE(tabla1x_devs), TABLA_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x03,
},
{
- {0x1, 0x0, 0x0, 0x1}, tabla1x_devs, ARRAY_SIZE(tabla1x_devs),
- TABLA_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA
- },
- { /* wcd9320 version 1 */
- {0x0, 0x0, 0x2, 0x1}, taiko_devs, ARRAY_SIZE(taiko_devs),
- TAIKO_NUM_IRQS, 1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO
- },
- { /* wcd9320 version 2 */
- {0x1, 0x0, 0x2, 0x1}, taiko_devs, ARRAY_SIZE(taiko_devs),
- TAIKO_NUM_IRQS, 2, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO
+ TABLA_MAJOR, cpu_to_le16(0x2), tabla_devs,
+ ARRAY_SIZE(tabla_devs), TABLA_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x03
},
{
- {0x0, 0x0, 0x3, 0x1}, tapan_devs, ARRAY_SIZE(tapan_devs),
- TAPAN_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO
+ /* Sitar version 1 has the same major chip id as Tabla */
+ TABLA_MAJOR, cpu_to_le16(0x0), sitar_devs,
+ ARRAY_SIZE(sitar_devs), SITAR_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x01
},
{
- {0x1, 0x0, 0x3, 0x1}, tapan_devs, ARRAY_SIZE(tapan_devs),
- TAPAN_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO
+ SITAR_MAJOR, cpu_to_le16(0x1), sitar_devs,
+ ARRAY_SIZE(sitar_devs), SITAR_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x01
},
{
- {0x0, 0x0, 0x0, 0x1}, sitar_devs, ARRAY_SIZE(sitar_devs),
- SITAR_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA
+ SITAR_MAJOR, cpu_to_le16(0x2), sitar_devs,
+ ARRAY_SIZE(sitar_devs), SITAR_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x01
},
{
- {0x1, 0x0, 0x1, 0x1}, sitar_devs, ARRAY_SIZE(sitar_devs),
- SITAR_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA
+ TAIKO_MAJOR, cpu_to_le16(0x0), taiko_devs,
+ ARRAY_SIZE(taiko_devs), TAIKO_NUM_IRQS, 1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x01
},
{
- {0x2, 0x0, 0x1, 0x1}, sitar_devs, ARRAY_SIZE(sitar_devs),
- SITAR_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA
+ TAIKO_MAJOR, cpu_to_le16(0x1), taiko_devs,
+ ARRAY_SIZE(taiko_devs), TAIKO_NUM_IRQS, 2,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x01
+ },
+ {
+ TAPAN_MAJOR, cpu_to_le16(0x0), tapan_devs,
+ ARRAY_SIZE(tapan_devs), TAPAN_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x03
+ },
+ {
+ TAPAN_MAJOR, cpu_to_le16(0x1), tapan_devs,
+ ARRAY_SIZE(tapan_devs), TAPAN_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x03
},
};
@@ -383,65 +384,80 @@
wcd9xxx->reset_gpio = 0;
}
}
-static int wcd9xxx_check_codec_type(struct wcd9xxx *wcd9xxx,
- struct mfd_cell **wcd9xxx_dev,
- int *wcd9xxx_dev_size,
- int *wcd9xxx_dev_num_irqs)
+
+static const struct wcd9xxx_codec_type
+*wcd9xxx_check_codec_type(struct wcd9xxx *wcd9xxx, u8 *version)
{
- int i;
- int ret;
- i = WCD9XXX_A_CHIP_ID_BYTE_0;
- while (i <= WCD9XXX_A_CHIP_ID_BYTE_3) {
- ret = wcd9xxx_reg_read(wcd9xxx, i);
- if (ret < 0)
- goto exit;
- wcd9xxx->idbyte[i-WCD9XXX_A_CHIP_ID_BYTE_0] = (u8)ret;
- pr_debug("%s: wcd9xx read = %x, byte = %x\n", __func__, ret,
- i);
- i++;
+ int i, rc;
+ const struct wcd9xxx_codec_type *c, *d = NULL;
+
+ rc = wcd9xxx_bulk_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_0,
+ sizeof(wcd9xxx->id_minor),
+ (u8 *)&wcd9xxx->id_minor);
+ if (rc < 0)
+ goto exit;
+
+ rc = wcd9xxx_bulk_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_2,
+ sizeof(wcd9xxx->id_major),
+ (u8 *)&wcd9xxx->id_major);
+ if (rc < 0)
+ goto exit;
+ dev_dbg(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
+ __func__, wcd9xxx->id_major, wcd9xxx->id_minor);
+
+ for (i = 0, c = &wcd9xxx_codecs[0]; i < ARRAY_SIZE(wcd9xxx_codecs);
+ i++, c++) {
+ if (c->id_major == wcd9xxx->id_major) {
+ if (c->id_minor == wcd9xxx->id_minor) {
+ d = c;
+ dev_dbg(wcd9xxx->dev,
+ "%s: exact match %s\n", __func__,
+ d->dev->name);
+ break;
+ } else if (!d) {
+ d = c;
+ } else {
+ if ((d->id_minor < c->id_minor) ||
+ (d->id_minor == c->id_minor &&
+ d->version < c->version))
+ d = c;
+ }
+ dev_dbg(wcd9xxx->dev,
+ "%s: best match %s, major 0x%x, minor 0x%x\n",
+ __func__, d->dev->name, d->id_major,
+ d->id_minor);
+ }
}
- /* Read codec version */
- ret = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_VERSION);
- if (ret < 0)
- goto exit;
- wcd9xxx->version = (u8)ret & 0x1F;
- i = 0;
- while (i < ARRAY_SIZE(wcd9xxx_codecs)) {
- if ((wcd9xxx_codecs[i].byte[0] == wcd9xxx->idbyte[0]) &&
- (wcd9xxx_codecs[i].byte[1] == wcd9xxx->idbyte[1]) &&
- (wcd9xxx_codecs[i].byte[2] == wcd9xxx->idbyte[2]) &&
- (wcd9xxx_codecs[i].byte[3] == wcd9xxx->idbyte[3])) {
- pr_info("%s: codec is %s", __func__,
- wcd9xxx_codecs[i].dev->name);
- *wcd9xxx_dev = wcd9xxx_codecs[i].dev;
- *wcd9xxx_dev_size = wcd9xxx_codecs[i].size;
- *wcd9xxx_dev_num_irqs = wcd9xxx_codecs[i].num_irqs;
- wcd9xxx->slim_slave_type =
- wcd9xxx_codecs[i].slim_slave_type;
- if (wcd9xxx_codecs[i].version > -1)
- wcd9xxx->version = wcd9xxx_codecs[i].version;
- break;
+ if (!d) {
+ dev_warn(wcd9xxx->dev,
+ "%s: driver for id major 0x%x, minor 0x%x not found\n",
+ __func__, wcd9xxx->id_major, wcd9xxx->id_minor);
+ } else {
+ if (d->version > -1) {
+ *version = d->version;
+ } else {
+ rc = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_VERSION);
+ if (rc < 0) {
+ d = NULL;
+ goto exit;
+ }
+ *version = (u8)rc & 0x1F;
}
- i++;
+ dev_info(wcd9xxx->dev,
+ "%s: detected %s, major 0x%x, minor 0x%x, ver 0x%x\n",
+ __func__, d->dev->name, d->id_major, d->id_minor,
+ *version);
}
- if (*wcd9xxx_dev == NULL || *wcd9xxx_dev_size == 0)
- ret = -ENODEV;
- pr_info("%s: Read codec idbytes & version\n"
- "byte_0[%08x] byte_1[%08x] byte_2[%08x]\n"
- " byte_3[%08x] version = %x\n", __func__,
- wcd9xxx->idbyte[0], wcd9xxx->idbyte[1],
- wcd9xxx->idbyte[2], wcd9xxx->idbyte[3],
- wcd9xxx->version);
exit:
- return ret;
+ return d;
}
static int wcd9xxx_device_init(struct wcd9xxx *wcd9xxx)
{
int ret;
- struct mfd_cell *wcd9xxx_dev = NULL;
- int wcd9xxx_dev_size = 0;
+ u8 version;
+ const struct wcd9xxx_codec_type *found;
mutex_init(&wcd9xxx->io_lock);
mutex_init(&wcd9xxx->xfer_lock);
@@ -457,10 +473,14 @@
wcd9xxx_bring_up(wcd9xxx);
- ret = wcd9xxx_check_codec_type(wcd9xxx, &wcd9xxx_dev, &wcd9xxx_dev_size,
- &wcd9xxx->num_irqs);
- if (ret < 0)
+ found = wcd9xxx_check_codec_type(wcd9xxx, &version);
+ if (!found) {
+ ret = -ENODEV;
goto err_irq;
+ } else {
+ wcd9xxx->codec_type = found;
+ wcd9xxx->version = version;
+ }
if (wcd9xxx->irq != -1) {
ret = wcd9xxx_irq_init(wcd9xxx);
@@ -470,7 +490,7 @@
}
}
- ret = mfd_add_devices(wcd9xxx->dev, -1, wcd9xxx_dev, wcd9xxx_dev_size,
+ ret = mfd_add_devices(wcd9xxx->dev, -1, found->dev, found->size,
NULL, 0);
if (ret != 0) {
dev_err(wcd9xxx->dev, "Failed to add children: %d\n", ret);
@@ -604,8 +624,8 @@
};
#endif
-static int wcd9xxx_enable_supplies(struct wcd9xxx *wcd9xxx,
- struct wcd9xxx_pdata *pdata)
+static int wcd9xxx_init_supplies(struct wcd9xxx *wcd9xxx,
+ struct wcd9xxx_pdata *pdata)
{
int ret;
int i;
@@ -619,7 +639,7 @@
wcd9xxx->num_of_supplies = 0;
- if (ARRAY_SIZE(pdata->regulator) > MAX_REGULATOR) {
+ if (ARRAY_SIZE(pdata->regulator) > WCD9XXX_MAX_REGULATOR) {
pr_err("%s: Array Size out of bound\n", __func__);
ret = -EINVAL;
goto err;
@@ -641,8 +661,12 @@
}
for (i = 0; i < wcd9xxx->num_of_supplies; i++) {
+ if (regulator_count_voltages(wcd9xxx->supplies[i].consumer) <=
+ 0)
+ continue;
ret = regulator_set_voltage(wcd9xxx->supplies[i].consumer,
- pdata->regulator[i].min_uV, pdata->regulator[i].max_uV);
+ pdata->regulator[i].min_uV,
+ pdata->regulator[i].max_uV);
if (ret) {
pr_err("%s: Setting regulator voltage failed for "
"regulator %s err = %d\n", __func__,
@@ -651,30 +675,19 @@
}
ret = regulator_set_optimum_mode(wcd9xxx->supplies[i].consumer,
- pdata->regulator[i].optimum_uA);
+ pdata->regulator[i].optimum_uA);
if (ret < 0) {
pr_err("%s: Setting regulator optimum mode failed for "
"regulator %s err = %d\n", __func__,
wcd9xxx->supplies[i].supply, ret);
goto err_get;
+ } else {
+ ret = 0;
}
}
- ret = regulator_bulk_enable(wcd9xxx->num_of_supplies,
- wcd9xxx->supplies);
- if (ret != 0) {
- dev_err(wcd9xxx->dev, "Failed to enable supplies: err = %d\n",
- ret);
- goto err_configure;
- }
return ret;
-err_configure:
- for (i = 0; i < wcd9xxx->num_of_supplies; i++) {
- regulator_set_voltage(wcd9xxx->supplies[i].consumer, 0,
- pdata->regulator[i].max_uV);
- regulator_set_optimum_mode(wcd9xxx->supplies[i].consumer, 0);
- }
err_get:
regulator_bulk_free(wcd9xxx->num_of_supplies, wcd9xxx->supplies);
err_supplies:
@@ -683,6 +696,33 @@
return ret;
}
+static int wcd9xxx_enable_static_supplies(struct wcd9xxx *wcd9xxx,
+ struct wcd9xxx_pdata *pdata)
+{
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < wcd9xxx->num_of_supplies; i++) {
+ if (pdata->regulator[i].ondemand)
+ continue;
+ ret = regulator_enable(wcd9xxx->supplies[i].consumer);
+ if (ret) {
+ pr_err("%s: Failed to enable %s\n", __func__,
+ wcd9xxx->supplies[i].supply);
+ break;
+ } else {
+ pr_debug("%s: Enabled regulator %s\n", __func__,
+ wcd9xxx->supplies[i].supply);
+ }
+ }
+
+ while (ret && --i)
+ if (!pdata->regulator[i].ondemand)
+ regulator_disable(wcd9xxx->supplies[i].consumer);
+
+ return ret;
+}
+
static void wcd9xxx_disable_supplies(struct wcd9xxx *wcd9xxx,
struct wcd9xxx_pdata *pdata)
{
@@ -691,8 +731,11 @@
regulator_bulk_disable(wcd9xxx->num_of_supplies,
wcd9xxx->supplies);
for (i = 0; i < wcd9xxx->num_of_supplies; i++) {
+ if (regulator_count_voltages(wcd9xxx->supplies[i].consumer) <=
+ 0)
+ continue;
regulator_set_voltage(wcd9xxx->supplies[i].consumer, 0,
- pdata->regulator[i].max_uV);
+ pdata->regulator[i].max_uV);
regulator_set_optimum_mode(wcd9xxx->supplies[i].consumer, 0);
}
regulator_bulk_free(wcd9xxx->num_of_supplies, wcd9xxx->supplies);
@@ -855,7 +898,6 @@
struct wcd9xxx_pdata *pdata = NULL;
int val = 0;
int ret = 0;
- int i2c_mode = 0;
int wcd9xx_index = 0;
struct device *dev;
@@ -912,18 +954,26 @@
wcd9xxx->slim_device_bootup = true;
if (client->dev.of_node)
wcd9xxx->mclk_rate = pdata->mclk_rate;
- ret = wcd9xxx_enable_supplies(wcd9xxx, pdata);
+
+ ret = wcd9xxx_init_supplies(wcd9xxx, pdata);
if (ret) {
pr_err("%s: Fail to enable Codec supplies\n",
__func__);
goto err_codec;
}
+ ret = wcd9xxx_enable_static_supplies(wcd9xxx, pdata);
+ if (ret) {
+ pr_err("%s: Fail to enable Codec pre-reset supplies\n",
+ __func__);
+ goto err_codec;
+ }
usleep_range(5, 5);
+
ret = wcd9xxx_reset(wcd9xxx);
if (ret) {
pr_err("%s: Resetting Codec failed\n", __func__);
- goto err_supplies;
+ goto err_supplies;
}
ret = wcd9xxx_i2c_get_client_index(client, &wcd9xx_index);
@@ -947,18 +997,14 @@
goto err_device_init;
}
- if ((wcd9xxx->idbyte[0] == 0x2) || (wcd9xxx->idbyte[0] == 0x1))
- i2c_mode = TABLA_I2C_MODE;
- else if (wcd9xxx->idbyte[0] == 0x0)
- i2c_mode = SITAR_I2C_MODE;
-
ret = wcd9xxx_read(wcd9xxx, WCD9XXX_A_CHIP_STATUS, 1, &val, 0);
+ if (ret < 0)
+ pr_err("%s: failed to read the wcd9xxx status (%d)\n",
+ __func__, ret);
+ if (val != wcd9xxx->codec_type->i2c_chip_status)
+ pr_err("%s: unknown chip status 0x%x\n", __func__, val);
- if ((ret < 0) || (val != i2c_mode))
- pr_err("failed to read the wcd9xxx status ret = %d\n",
- ret);
-
- wcd9xxx_intf = WCD9XXX_INTERFACE_TYPE_I2C;
+ wcd9xxx_intf = WCD9XXX_INTERFACE_TYPE_I2C;
return ret;
} else
@@ -987,7 +1033,9 @@
}
static int wcd9xxx_dt_parse_vreg_info(struct device *dev,
- struct wcd9xxx_regulator *vreg, const char *vreg_name)
+ struct wcd9xxx_regulator *vreg,
+ const char *vreg_name,
+ bool ondemand)
{
int len, ret = 0;
const __be32 *prop;
@@ -1005,6 +1053,7 @@
return -ENODEV;
}
vreg->name = vreg_name;
+ vreg->ondemand = ondemand;
snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
"qcom,%s-voltage", vreg_name);
@@ -1013,7 +1062,7 @@
if (!prop || (len != (2 * sizeof(__be32)))) {
dev_err(dev, "%s %s property\n",
prop ? "invalid format" : "no", prop_name);
- return -ENODEV;
+ return -EINVAL;
} else {
vreg->min_uV = be32_to_cpup(&prop[0]);
vreg->max_uV = be32_to_cpup(&prop[1]);
@@ -1026,12 +1075,12 @@
if (ret) {
dev_err(dev, "Looking up %s property in node %s failed",
prop_name, dev->of_node->full_name);
- return -ENODEV;
+ return -EFAULT;
}
vreg->optimum_uA = prop_val;
- dev_info(dev, "%s: vol=[%d %d]uV, curr=[%d]uA\n", vreg->name,
- vreg->min_uV, vreg->max_uV, vreg->optimum_uA);
+ dev_info(dev, "%s: vol=[%d %d]uV, curr=[%d]uA, ond %d\n", vreg->name,
+ vreg->min_uV, vreg->max_uV, vreg->optimum_uA, vreg->ondemand);
return 0;
}
@@ -1149,40 +1198,66 @@
static struct wcd9xxx_pdata *wcd9xxx_populate_dt_pdata(struct device *dev)
{
struct wcd9xxx_pdata *pdata;
- int ret, i;
- char **codec_supplies;
- u32 num_of_supplies = 0;
+ int ret, static_cnt, ond_cnt, idx, i;
+ const char *name = NULL;
u32 mclk_rate = 0;
u32 dmic_sample_rate = 0;
+ const char *static_prop_name = "qcom,cdc-static-supplies";
+ const char *ond_prop_name = "qcom,cdc-on-demand-supplies";
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(dev, "could not allocate memory for platform data\n");
return NULL;
}
- if (!strcmp(dev_name(dev), "taiko-slim-pgd") ||
- (!strcmp(dev_name(dev), WCD9XXX_I2C_GSBI_SLAVE_ID))) {
- codec_supplies = taiko_supplies;
- num_of_supplies = ARRAY_SIZE(taiko_supplies);
- } else if (!strcmp(dev_name(dev), "tapan-slim-pgd")) {
- codec_supplies = tapan_supplies;
- num_of_supplies = ARRAY_SIZE(tapan_supplies);
- } else {
- dev_err(dev, "%s unsupported device %s\n",
- __func__, dev_name(dev));
+
+ static_cnt = of_property_count_strings(dev->of_node, static_prop_name);
+ if (IS_ERR_VALUE(static_cnt)) {
+ dev_err(dev, "%s: Failed to get static supplies %d\n", __func__,
+ static_cnt);
goto err;
}
- if (num_of_supplies > ARRAY_SIZE(pdata->regulator)) {
+ /* On-demand supply list is an optional property */
+ ond_cnt = of_property_count_strings(dev->of_node, ond_prop_name);
+ if (IS_ERR_VALUE(ond_cnt))
+ ond_cnt = 0;
+
+ BUG_ON(static_cnt <= 0 || ond_cnt < 0);
+ if ((static_cnt + ond_cnt) > ARRAY_SIZE(pdata->regulator)) {
dev_err(dev, "%s: Num of supplies %u > max supported %u\n",
- __func__, num_of_supplies, ARRAY_SIZE(pdata->regulator));
-
+ __func__, static_cnt, ARRAY_SIZE(pdata->regulator));
goto err;
}
- for (i = 0; i < num_of_supplies; i++) {
- ret = wcd9xxx_dt_parse_vreg_info(dev, &pdata->regulator[i],
- codec_supplies[i]);
+ for (idx = 0; idx < static_cnt; idx++) {
+ ret = of_property_read_string_index(dev->of_node,
+ static_prop_name, idx,
+ &name);
+ if (ret) {
+ dev_err(dev, "%s: of read string %s idx %d error %d\n",
+ __func__, static_prop_name, idx, ret);
+ goto err;
+ }
+
+ dev_dbg(dev, "%s: Found static cdc supply %s\n", __func__,
+ name);
+ ret = wcd9xxx_dt_parse_vreg_info(dev, &pdata->regulator[idx],
+ name, false);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < ond_cnt; i++, idx++) {
+ ret = of_property_read_string_index(dev->of_node, ond_prop_name,
+ i, &name);
+ if (ret)
+ goto err;
+
+ dev_dbg(dev, "%s: Found on-demand cdc supply %s\n", __func__,
+ name);
+ ret = wcd9xxx_dt_parse_vreg_info(dev, &pdata->regulator[idx],
+ name, true);
if (ret)
goto err;
}
@@ -1324,9 +1399,17 @@
wcd9xxx->mclk_rate = pdata->mclk_rate;
wcd9xxx->slim_device_bootup = true;
- ret = wcd9xxx_enable_supplies(wcd9xxx, pdata);
- if (ret)
+ ret = wcd9xxx_init_supplies(wcd9xxx, pdata);
+ if (ret) {
+ pr_err("%s: Fail to init Codec supplies %d\n", __func__, ret);
goto err_codec;
+ }
+ ret = wcd9xxx_enable_static_supplies(wcd9xxx, pdata);
+ if (ret) {
+ pr_err("%s: Fail to enable Codec pre-reset supplies\n",
+ __func__);
+ goto err_codec;
+ }
usleep_range(5, 5);
ret = wcd9xxx_reset(wcd9xxx);
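
The wcd9xxx-core.c rework above drops the exact four-byte id compare in favour of a table walk keyed on the chip's major/minor id: an exact match wins immediately, otherwise the closest entry for the same major id is kept as the best guess. A hedged, standalone sketch of that selection strategy follows; the table contents, types and names are invented, and the real driver additionally compares a version field when minors tie.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct codec_type {
	uint16_t id_major;
	uint16_t id_minor;
	const char *name;
};

static const struct codec_type codecs[] = {
	{ 0x100, 0x1, "tabla1x"  },
	{ 0x100, 0x2, "tabla"    },
	{ 0x102, 0x0, "taiko-v1" },
	{ 0x102, 0x1, "taiko-v2" },
};

static const struct codec_type *find_codec(uint16_t major, uint16_t minor)
{
	const struct codec_type *best = NULL;
	size_t i;

	for (i = 0; i < sizeof(codecs) / sizeof(codecs[0]); i++) {
		const struct codec_type *c = &codecs[i];

		if (c->id_major != major)
			continue;
		if (c->id_minor == minor)
			return c;	/* exact major/minor match wins */
		/* otherwise remember the highest minor for this major */
		if (!best || best->id_minor < c->id_minor)
			best = c;
	}
	return best;
}

int main(void)
{
	/* A chip newer than anything in the table still gets the closest entry. */
	const struct codec_type *c = find_codec(0x102, 0x5);

	printf("best match: %s\n", c ? c->name : "none");
	return 0;
}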
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index f2c3959..111131a 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
+#include <linux/ratelimit.h>
#include <mach/cpuidle.h>
#define BYTE_BIT_MASK(nr) (1UL << ((nr) % BITS_PER_BYTE))
@@ -218,16 +219,19 @@
static int wcd9xxx_num_irq_regs(const struct wcd9xxx *wcd9xxx)
{
- return (wcd9xxx->num_irqs / 8) + ((wcd9xxx->num_irqs % 8) ? 1 : 0);
+ return (wcd9xxx->codec_type->num_irqs / 8) +
+ ((wcd9xxx->codec_type->num_irqs % 8) ? 1 : 0);
}
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
int ret;
int i;
+ char linebuf[128];
struct wcd9xxx *wcd9xxx = data;
int num_irq_regs = wcd9xxx_num_irq_regs(wcd9xxx);
- u8 status[num_irq_regs];
+ u8 status[num_irq_regs], status1[num_irq_regs];
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
if (unlikely(wcd9xxx_lock_sleep(wcd9xxx) == false)) {
dev_err(wcd9xxx->dev, "Failed to hold suspend\n");
@@ -249,12 +253,17 @@
for (i = 0; i < num_irq_regs; i++)
status[i] &= ~wcd9xxx->irq_masks_cur[i];
+ memcpy(status1, status, sizeof(status1));
+
/* Find out which interrupt was triggered and call that interrupt's
* handler function
*/
if (status[BIT_BYTE(WCD9XXX_IRQ_SLIMBUS)] &
- BYTE_BIT_MASK(WCD9XXX_IRQ_SLIMBUS))
+ BYTE_BIT_MASK(WCD9XXX_IRQ_SLIMBUS)) {
wcd9xxx_irq_dispatch(wcd9xxx, WCD9XXX_IRQ_SLIMBUS);
+ status1[BIT_BYTE(WCD9XXX_IRQ_SLIMBUS)] &=
+ ~BYTE_BIT_MASK(WCD9XXX_IRQ_SLIMBUS);
+ }
/* Since codec has only one hardware irq line which is shared by
* codec's different internal interrupts, so it's possible master irq
@@ -263,12 +272,41 @@
* machine's order */
for (i = WCD9XXX_IRQ_MBHC_INSERTION;
i >= WCD9XXX_IRQ_MBHC_REMOVAL; i--) {
- if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
+ if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i)) {
wcd9xxx_irq_dispatch(wcd9xxx, i);
+ status1[BIT_BYTE(i)] &= ~BYTE_BIT_MASK(i);
+ }
}
- for (i = WCD9XXX_IRQ_BG_PRECHARGE; i < wcd9xxx->num_irqs; i++) {
- if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
+ for (i = WCD9XXX_IRQ_BG_PRECHARGE; i < wcd9xxx->codec_type->num_irqs;
+ i++) {
+ if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i)) {
wcd9xxx_irq_dispatch(wcd9xxx, i);
+ status1[BIT_BYTE(i)] &= ~BYTE_BIT_MASK(i);
+ }
+ }
+
+ /*
+ * As a failsafe, if an unhandled irq is found, clear it to prevent an
+ * interrupt storm.
+ * Note that we can only declare an irq unhandled when no irq at all was
+ * handled by the nested irq handlers, since Taiko routes a few irqs to
+ * the qdsp as their destination. Therefore the driver shouldn't clear
+ * pending irqs when some were handled while others were not.
+ */
+ if (unlikely(!memcmp(status, status1, sizeof(status)))) {
+ if (__ratelimit(&ratelimit)) {
+ pr_warn("%s: Unhandled irq found\n", __func__);
+ hex_dump_to_buffer(status, sizeof(status), 16, 1,
+ linebuf, sizeof(linebuf), false);
+ pr_warn("%s: status0 : %s\n", __func__, linebuf);
+ hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
+ linebuf, sizeof(linebuf), false);
+ pr_warn("%s: status1 : %s\n", __func__, linebuf);
+ }
+
+ memset(status, 0, num_irq_regs);
+ wcd9xxx_bulk_write(wcd9xxx, WCD9XXX_A_INTR_STATUS0,
+ num_irq_regs, status);
}
wcd9xxx_unlock_sleep(wcd9xxx);
@@ -301,7 +339,7 @@
pr_debug("%s: enter\n", __func__);
- for (irq = 0; irq < wcd9xxx->num_irqs; irq++) {
+ for (irq = 0; irq < wcd9xxx->codec_type->num_irqs; irq++) {
/* Map OF irq */
virq = wcd9xxx_map_irq(wcd9xxx, irq);
pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
@@ -365,7 +403,7 @@
/* mask all the interrupts */
memset(irq_level, 0, wcd9xxx_num_irq_regs(wcd9xxx));
- for (i = 0; i < wcd9xxx->num_irqs; i++) {
+ for (i = 0; i < wcd9xxx->codec_type->num_irqs; i++) {
wcd9xxx->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
wcd9xxx->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
irq_level[BIT_BYTE(i)] |=
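
The wcd9xxx-irq.c hunks above keep a second copy of the interrupt status bytes, clear a bit in the copy each time an irq is actually dispatched, and only when the copy is still identical to the original (nothing at all was handled) write the pending bits back with a rate-limited warning, so a stuck line cannot storm. The sketch below mirrors only that bookkeeping in plain userspace C; the handler table, register count and the time-based rate limit standing in for the kernel's __ratelimit() are all assumptions for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define NUM_IRQ_REGS 4

/* Crude stand-in for a kernel ratelimit: at most one warning per 5 seconds. */
static void ratelimited_warn(const char *msg)
{
	static time_t last;
	time_t now = time(NULL);

	if (last && now - last < 5)
		return;
	last = now;
	fprintf(stderr, "warning: %s\n", msg);
}

/* Pretend only irqs 0..15 have registered handlers. */
static int dispatch_irq(int irq)
{
	return irq < 16;
}

static void handle_status(uint8_t status[NUM_IRQ_REGS])
{
	uint8_t pending[NUM_IRQ_REGS];
	int i, bit;

	memcpy(pending, status, sizeof(pending));

	for (i = 0; i < NUM_IRQ_REGS; i++)
		for (bit = 0; bit < 8; bit++)
			if ((status[i] & (1u << bit)) &&
			    dispatch_irq(i * 8 + bit))
				pending[i] &= ~(1u << bit);

	/* Identical copy means no handler ran at all: warn and clear the
	 * pending bits instead of letting them fire forever. */
	if (!memcmp(status, pending, sizeof(pending))) {
		ratelimited_warn("unhandled irq status, clearing");
		memset(status, 0, NUM_IRQ_REGS);
	}
}

int main(void)
{
	uint8_t status[NUM_IRQ_REGS] = { 0x00, 0x00, 0x80, 0x00 };

	handle_status(status);	/* irq 23 has no handler here */
	return 0;
}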
diff --git a/drivers/mfd/wcd9xxx-slimslave.c b/drivers/mfd/wcd9xxx-slimslave.c
index f2d71b6..81262b58 100644
--- a/drivers/mfd/wcd9xxx-slimslave.c
+++ b/drivers/mfd/wcd9xxx-slimslave.c
@@ -31,7 +31,8 @@
static int wcd9xxx_configure_ports(struct wcd9xxx *wcd9xxx)
{
- if (wcd9xxx->slim_slave_type == WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA) {
+ if (wcd9xxx->codec_type->slim_slave_type ==
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA) {
sh_ch.rx_port_ch_reg_base = 0x180;
sh_ch.port_rx_cfg_reg_base = 0x040;
sh_ch.port_tx_cfg_reg_base = 0x040;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 7d76b43..fcadc30 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1,5 +1,3 @@
-
-
/*Qualcomm Secure Execution Environment Communicator (QSEECOM) driver
*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
@@ -52,6 +50,10 @@
#define QSEEE_VERSION_00 0x400000
#define QSEE_VERSION_01 0x401000
#define QSEE_VERSION_02 0x402000
+#define QSEE_VERSION_03 0x403000
+#define QSEE_VERSION_04 0x404000
+#define QSEE_VERSION_05 0x405000
+
#define QSEOS_CHECK_VERSION_CMD 0x00001803
@@ -59,12 +61,31 @@
#define QSEE_CE_CLK_100MHZ 100000000
#define QSEECOM_MAX_SG_ENTRY 512
+#define QSEECOM_DISK_ENCRYTPION_KEY_ID 0
+
+/* Save partition image hash for authentication check */
+#define SCM_SAVE_PARTITION_HASH_ID 0x01
+
+/* Check if enterprise security is activated */
+#define SCM_IS_ACTIVATED_ID 0x02
enum qseecom_clk_definitions {
CLK_DFAB = 0,
CLK_SFPB,
};
+enum qseecom_client_handle_type {
+ QSEECOM_CLIENT_APP = 0,
+ QSEECOM_LISTENER_SERVICE,
+ QSEECOM_SECURE_SERVICE,
+ QSEECOM_GENERIC,
+};
+
+enum qseecom_ce_hw_instance {
+ CLK_QSEE = 0,
+ CLK_CE_DRV,
+};
+
static struct class *driver_class;
static dev_t qseecom_device_no;
static struct cdev qseecom_cdev;
@@ -76,6 +97,7 @@
static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
+static DEFINE_MUTEX(clk_access_lock);
struct qseecom_registered_listener_list {
struct list_head list;
@@ -101,11 +123,19 @@
struct qseecom_handle *handle;
};
+struct ce_hw_usage_info {
+ uint32_t qsee_ce_hw_instance;
+ uint32_t hlos_ce_hw_instance;
+ uint32_t disk_encrypt_pipe;
+};
+
struct qseecom_clk {
+ enum qseecom_ce_hw_instance instance;
struct clk *ce_core_clk;
struct clk *ce_clk;
struct clk *ce_core_src_clk;
struct clk *ce_bus_clk;
+ uint32_t clk_access_cnt;
};
struct qseecom_control {
@@ -126,12 +156,14 @@
uint32_t qsee_version;
struct device *pdev;
bool commonlib_loaded;
+ struct ce_hw_usage_info ce_info;
int qsee_bw_count;
int qsee_sfpb_bw_count;
uint32_t qsee_perf_client;
struct qseecom_clk qsee;
+ struct qseecom_clk ce_drv;
};
struct qseecom_client_handle {
@@ -141,8 +173,6 @@
uint32_t user_virt_sb_base;
size_t sb_length;
struct ion_handle *ihandle; /* Retrieve phy addr */
- bool perf_enabled;
- bool fast_load_enabled;
};
struct qseecom_listener_handle {
@@ -152,7 +182,7 @@
static struct qseecom_control qseecom;
struct qseecom_dev_handle {
- bool service;
+ enum qseecom_client_handle_type type;
union {
struct qseecom_client_handle client;
struct qseecom_listener_handle listener;
@@ -161,6 +191,22 @@
int abort;
wait_queue_head_t abort_wq;
atomic_t ioctl_count;
+ bool perf_enabled;
+ bool fast_load_enabled;
+};
+
+enum qseecom_set_clear_key_flag {
+ QSEECOM_CLEAR_CE_KEY_CMD = 0,
+ QSEECOM_SET_CE_KEY_CMD,
+};
+
+struct qseecom_set_key_parameter {
+ uint32_t ce_hw;
+ uint32_t pipe;
+ uint32_t flags;
+ uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+ unsigned char hash32[QSEECOM_HASH_SIZE];
+ enum qseecom_set_clear_key_flag set_clear_key_flag;
};
struct qseecom_sg_entry {
@@ -307,7 +353,7 @@
return ret;
}
data->listener.id = 0;
- data->service = true;
+ data->type = QSEECOM_LISTENER_SERVICE;
if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
pr_err("Service is not unique and is already registered\n");
data->released = true;
@@ -495,7 +541,6 @@
struct qseecom_client_listener_data_irsp send_data_rsp;
struct qseecom_registered_listener_list *ptr_svc = NULL;
-
while (resp->result == QSEOS_RESULT_INCOMPLETE) {
lstnr = resp->data;
/*
@@ -522,13 +567,14 @@
if (wait_event_freezable(qseecom.send_resp_wq,
__qseecom_listener_has_sent_rsp(data))) {
pr_warning("Interrupted: exiting send_cmd loop\n");
- return -ERESTARTSYS;
+ ret = -ERESTARTSYS;
}
- if (data->abort) {
- pr_err("Aborting listener service %d\n",
- data->listener.id);
- rc = -ENODEV;
+ if ((data->abort) || (ret == -ERESTARTSYS)) {
+ pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
+ data->client.app_id, lstnr, ret);
+ if (data->abort)
+ rc = -ENODEV;
send_data_rsp.status = QSEOS_RESULT_FAILURE;
} else {
send_data_rsp.status = QSEOS_RESULT_SUCCESS;
@@ -544,13 +590,14 @@
sizeof(*resp));
if (ret) {
pr_err("scm_call() failed with err: %d (app_id = %d)\n",
- ret, data->client.app_id);
+ ret, data->client.app_id);
return ret;
}
- if (resp->result == QSEOS_RESULT_FAILURE) {
- pr_err("Response result %d FAIL (app_id = %d)\n",
- resp->result, data->client.app_id);
- return -EINVAL;
+ if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+ (resp->result != QSEOS_RESULT_INCOMPLETE)) {
+ pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+ resp->result, data->client.app_id, lstnr);
+ ret = -EINVAL;
}
}
if (rc)
@@ -628,7 +675,7 @@
app_id = ret;
if (app_id) {
- pr_warn("App id %d (%s) already exists\n", app_id,
+ pr_debug("App id %d (%s) already exists\n", app_id,
(char *)(req.app_name));
spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
list_for_each_entry(entry,
@@ -754,6 +801,17 @@
return 1;
}
+static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
+{
+ int ret = 0;
+ if (!IS_ERR_OR_NULL(data->client.ihandle)) {
+ ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
+ ion_free(qseecom.ion_clnt, data->client.ihandle);
+ data->client.ihandle = NULL;
+ }
+ return ret;
+}
+
static int qseecom_unload_app(struct qseecom_dev_handle *data)
{
unsigned long flags;
@@ -775,7 +833,7 @@
break;
} else {
ptr_app->ref_cnt--;
- pr_warn("Can't unload app(%d) inuse\n",
+ pr_debug("Can't unload app(%d) inuse\n",
ptr_app->app_id);
break;
}
@@ -836,11 +894,7 @@
}
}
}
- if (!IS_ERR_OR_NULL(data->client.ihandle)) {
- ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
- ion_free(qseecom.ion_clnt, data->client.ihandle);
- data->client.ihandle = NULL;
- }
+ qseecom_unmap_ion_allocated_memory(data);
data->released = true;
return ret;
}
@@ -943,6 +997,96 @@
return ret;
}
+int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
+ struct qseecom_send_svc_cmd_req *req_ptr,
+ struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
+{
+ int ret = 0;
+ if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+ pr_err("Error with pointer: req_ptr = %p, send_svc_ptr = %p\n",
+ req_ptr, send_svc_ireq_ptr);
+ return -EINVAL;
+ }
+ send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+ send_svc_ireq_ptr->key_type =
+ ((struct qseecom_rpmb_provision_key *)req_ptr->cmd_req_buf)->key_type;
+ send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+ send_svc_ireq_ptr->rsp_ptr = (void *)(__qseecom_uvirt_to_kphys(data_ptr,
+ (uint32_t)req_ptr->resp_buf));
+ send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+ pr_debug("CMD ID (%x), KEY_TYPE (%d)\n", send_svc_ireq_ptr->qsee_cmd_id,
+ ((struct qseecom_rpmb_provision_key *)req_ptr->cmd_req_buf)->key_type);
+ return ret;
+}
+
+static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ int ret = 0;
+ struct qseecom_client_send_service_ireq send_svc_ireq;
+ struct qseecom_command_scm_resp resp;
+ struct qseecom_send_svc_cmd_req req;
+ /*struct qseecom_command_scm_resp resp;*/
+
+ if (__copy_from_user(&req,
+ (void __user *)argp,
+ sizeof(req))) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ if (req.resp_buf == NULL) {
+ pr_err("cmd buffer or response buffer is null\n");
+ return -EINVAL;
+ }
+
+ data->type = QSEECOM_SECURE_SERVICE;
+
+ switch (req.cmd_id) {
+ case QSEE_RPMB_PROVISION_KEY_COMMAND:
+ case QSEE_RPMB_ERASE_COMMAND:
+ if (__qseecom_process_rpmb_svc_cmd(data, &req,
+ &send_svc_ireq))
+ return -EINVAL;
+ break;
+ default:
+ pr_err("Unsupported cmd_id %d\n", req.cmd_id);
+ return -EINVAL;
+ }
+
+ ret = scm_call(SCM_SVC_TZSCHEDULER, 1, (const void *) &send_svc_ireq,
+ sizeof(send_svc_ireq),
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("qseecom_scm_call failed with err: %d\n", ret);
+ return ret;
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ pr_err("qseos_result_incomplete\n");
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd fail: err: %d\n",
+ ret);
+ }
+ break;
+ case QSEOS_RESULT_FAILURE:
+ pr_err("process_incomplete_cmd failed err: %d\n", ret);
+ break;
+ default:
+ pr_err("Response result %d not supported\n",
+ resp.result);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+
+}
+
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
struct qseecom_send_cmd_req *req)
{
@@ -1504,9 +1648,8 @@
return -ENOMEM;
}
data->abort = 0;
- data->service = false;
+ data->type = QSEECOM_CLIENT_APP;
data->released = false;
- data->client.app_id = ret;
data->client.sb_length = size;
data->client.user_virt_sb_base = 0;
data->client.ihandle = NULL;
@@ -1551,7 +1694,7 @@
*handle = NULL;
return -EINVAL;
}
-
+ data->client.app_id = ret;
if (ret > 0) {
pr_warn("App id %d for [%s] app exists\n", ret,
(char *)app_ireq.app_name);
@@ -1662,9 +1805,9 @@
pr_err("Unable to find the handle, exiting\n");
else
ret = qseecom_unload_app(data);
- if (data->client.fast_load_enabled == true)
+ if (data->fast_load_enabled == true)
qsee_disable_clock_vote(data, CLK_SFPB);
- if (data->client.perf_enabled == true)
+ if (data->perf_enabled == true)
qsee_disable_clock_vote(data, CLK_DFAB);
if (ret == 0) {
kzfree(data);
@@ -1764,12 +1907,23 @@
return 0;
}
-static int __qseecom_enable_clk(void)
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
int rc = 0;
struct qseecom_clk *qclk;
- qclk = &qseecom.qsee;
+ if (ce == CLK_QSEE)
+ qclk = &qseecom.qsee;
+ else
+ qclk = &qseecom.ce_drv;
+
+ mutex_lock(&clk_access_lock);
+ if (qclk->clk_access_cnt > 0) {
+ qclk->clk_access_cnt++;
+ mutex_unlock(&clk_access_lock);
+ return rc;
+ }
+
/* Enable CE core clk */
rc = clk_prepare_enable(qclk->ce_core_clk);
if (rc) {
@@ -1788,6 +1942,8 @@
pr_err("Unable to enable/prepare CE bus clk\n");
goto ce_bus_clk_err;
}
+ qclk->clk_access_cnt++;
+ mutex_unlock(&clk_access_lock);
return 0;
ce_bus_clk_err:
@@ -1795,20 +1951,30 @@
ce_clk_err:
clk_disable_unprepare(qclk->ce_core_clk);
err:
+ mutex_unlock(&clk_access_lock);
return -EIO;
}
-static void __qseecom_disable_clk(void)
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
{
struct qseecom_clk *qclk;
- qclk = &qseecom.qsee;
- if (qclk->ce_clk != NULL)
- clk_disable_unprepare(qclk->ce_clk);
- if (qclk->ce_core_clk != NULL)
- clk_disable_unprepare(qclk->ce_core_clk);
- if (qclk->ce_bus_clk != NULL)
- clk_disable_unprepare(qclk->ce_bus_clk);
+ if (ce == CLK_QSEE)
+ qclk = &qseecom.qsee;
+ else
+ qclk = &qseecom.ce_drv;
+
+ mutex_lock(&clk_access_lock);
+ if (qclk->clk_access_cnt == 1) {
+ if (qclk->ce_clk != NULL)
+ clk_disable_unprepare(qclk->ce_clk);
+ if (qclk->ce_core_clk != NULL)
+ clk_disable_unprepare(qclk->ce_core_clk);
+ if (qclk->ce_bus_clk != NULL)
+ clk_disable_unprepare(qclk->ce_bus_clk);
+ }
+ qclk->clk_access_cnt--;
+ mutex_unlock(&clk_access_lock);
}
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
@@ -1830,14 +1996,14 @@
qseecom.qsee_perf_client, 3);
else {
if (qclk->ce_core_src_clk != NULL)
- ret = __qseecom_enable_clk();
+ ret = __qseecom_enable_clk(CLK_QSEE);
if (!ret) {
ret =
msm_bus_scale_client_update_request(
qseecom.qsee_perf_client, 1);
if ((ret) &&
(qclk->ce_core_src_clk != NULL))
- __qseecom_disable_clk();
+ __qseecom_disable_clk(CLK_QSEE);
}
}
if (ret)
@@ -1845,11 +2011,11 @@
ret);
else {
qseecom.qsee_bw_count++;
- data->client.perf_enabled = true;
+ data->perf_enabled = true;
}
} else {
qseecom.qsee_bw_count++;
- data->client.perf_enabled = true;
+ data->perf_enabled = true;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -1861,14 +2027,14 @@
qseecom.qsee_perf_client, 3);
else {
if (qclk->ce_core_src_clk != NULL)
- ret = __qseecom_enable_clk();
+ ret = __qseecom_enable_clk(CLK_QSEE);
if (!ret) {
ret =
msm_bus_scale_client_update_request(
qseecom.qsee_perf_client, 2);
if ((ret) &&
(qclk->ce_core_src_clk != NULL))
- __qseecom_disable_clk();
+ __qseecom_disable_clk(CLK_QSEE);
}
}
@@ -1877,11 +2043,11 @@
ret);
else {
qseecom.qsee_sfpb_bw_count++;
- data->client.fast_load_enabled = true;
+ data->fast_load_enabled = true;
}
} else {
qseecom.qsee_sfpb_bw_count++;
- data->client.fast_load_enabled = true;
+ data->fast_load_enabled = true;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -1919,18 +2085,18 @@
ret = msm_bus_scale_client_update_request(
qseecom.qsee_perf_client, 0);
if ((!ret) && (qclk->ce_core_src_clk != NULL))
- __qseecom_disable_clk();
+ __qseecom_disable_clk(CLK_QSEE);
}
if (ret)
pr_err("SFPB Bandwidth req fail (%d)\n",
ret);
else {
qseecom.qsee_bw_count--;
- data->client.perf_enabled = false;
+ data->perf_enabled = false;
}
} else {
qseecom.qsee_bw_count--;
- data->client.perf_enabled = false;
+ data->perf_enabled = false;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -1949,18 +2115,18 @@
ret = msm_bus_scale_client_update_request(
qseecom.qsee_perf_client, 0);
if ((!ret) && (qclk->ce_core_src_clk != NULL))
- __qseecom_disable_clk();
+ __qseecom_disable_clk(CLK_QSEE);
}
if (ret)
pr_err("SFPB Bandwidth req fail (%d)\n",
ret);
else {
qseecom.qsee_sfpb_bw_count--;
- data->client.fast_load_enabled = false;
+ data->fast_load_enabled = false;
}
} else {
qseecom.qsee_sfpb_bw_count--;
- data->client.fast_load_enabled = false;
+ data->fast_load_enabled = false;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -2151,7 +2317,7 @@
pr_err(" scm call to check if app is loaded failed");
return ret; /* scm call failed */
} else if (ret > 0) {
- pr_warn("App id %d (%s) already exists\n", ret,
+ pr_debug("App id %d (%s) already exists\n", ret,
(char *)(req.app_name));
spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
list_for_each_entry(entry,
@@ -2176,6 +2342,384 @@
}
}
+static int __qseecom_get_ce_pipe_info(
+ enum qseecom_key_management_usage_type usage,
+ uint32_t *pipe, uint32_t *ce_hw)
+{
+ int ret;
+ switch (usage) {
+ case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+ if (qseecom.ce_info.disk_encrypt_pipe == 0xFF ||
+ qseecom.ce_info.hlos_ce_hw_instance == 0xFF) {
+ pr_err("nfo unavailable: disk encr pipe %d ce_hw %d\n",
+ qseecom.ce_info.disk_encrypt_pipe,
+ qseecom.ce_info.hlos_ce_hw_instance);
+ ret = -EINVAL;
+ } else {
+ *pipe = qseecom.ce_info.disk_encrypt_pipe;
+ *ce_hw = qseecom.ce_info.hlos_ce_hw_instance;
+ ret = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
+ enum qseecom_key_management_usage_type usage,
+ uint8_t *key_id, uint32_t flags)
+{
+ struct qseecom_key_generate_ireq ireq;
+ struct qseecom_command_scm_resp resp;
+ int ret;
+
+ if (usage != QSEOS_KM_USAGE_DISK_ENCRYPTION) {
+ pr_err("Error:: unsupported usage %d\n", usage);
+ return -EFAULT;
+ }
+
+ memcpy(ireq.key_id, key_id, QSEECOM_KEY_ID_SIZE);
+ ireq.flags = flags;
+ ireq.qsee_command_id = QSEOS_GENERATE_KEY;
+
+ __qseecom_enable_clk(CLK_QSEE);
+
+ ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(struct qseecom_key_generate_ireq),
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm call to generate key failed : %d\n", ret);
+ __qseecom_disable_clk(CLK_QSEE);
+ return ret;
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+ pr_warn("process_incomplete_cmd return Key ID exits.\n");
+ ret = 0;
+ } else {
+ pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+ resp.result);
+ }
+ }
+ break;
+ case QSEOS_RESULT_FAILURE:
+ default:
+ pr_err("gen key scm call failed resp.result %d\n", resp.result);
+ ret = -EINVAL;
+ break;
+ }
+ __qseecom_disable_clk(CLK_QSEE);
+ return ret;
+}
+
+static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
+ enum qseecom_key_management_usage_type usage,
+ uint8_t *key_id, uint32_t flags)
+{
+ struct qseecom_key_delete_ireq ireq;
+ struct qseecom_command_scm_resp resp;
+ int ret;
+
+ if (usage != QSEOS_KM_USAGE_DISK_ENCRYPTION) {
+ pr_err("Error:: unsupported usage %d\n", usage);
+ return -EFAULT;
+ }
+
+ memcpy(ireq.key_id, key_id, QSEECOM_KEY_ID_SIZE);
+ ireq.flags = flags;
+ ireq.qsee_command_id = QSEOS_DELETE_KEY;
+
+ __qseecom_enable_clk(CLK_QSEE);
+
+ ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(struct qseecom_key_delete_ireq),
+ &resp, sizeof(struct qseecom_command_scm_resp));
+ if (ret) {
+ pr_err("scm call to delete key failed : %d\n", ret);
+ __qseecom_disable_clk(CLK_QSEE);
+ return ret;
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret)
+ pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+ resp.result);
+ break;
+ case QSEOS_RESULT_FAILURE:
+ default:
+ pr_err("Delete key scm call failed resp.result %d\n",
+ resp.result);
+ ret = -EINVAL;
+ break;
+ }
+ __qseecom_disable_clk(CLK_QSEE);
+ return ret;
+}
+
+static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
+ enum qseecom_key_management_usage_type usage,
+ struct qseecom_set_key_parameter *set_key_para)
+{
+ struct qseecom_key_select_ireq ireq;
+ struct qseecom_command_scm_resp resp;
+ int ret;
+
+ if (usage != QSEOS_KM_USAGE_DISK_ENCRYPTION) {
+ pr_err("Error:: unsupported usage %d\n", usage);
+ return -EFAULT;
+ }
+
+ if (qseecom.qsee.instance == qseecom.ce_drv.instance)
+ __qseecom_enable_clk(CLK_QSEE);
+ else
+ __qseecom_enable_clk(CLK_CE_DRV);
+
+ memcpy(ireq.key_id, set_key_para->key_id, QSEECOM_KEY_ID_SIZE);
+ ireq.qsee_command_id = QSEOS_SET_KEY;
+ ireq.ce = set_key_para->ce_hw;
+ ireq.pipe = set_key_para->pipe;
+ ireq.flags = set_key_para->flags;
+
+ /* set PIPE_ENC */
+ ireq.pipe_type = QSEOS_PIPE_ENC;
+
+ if (set_key_para->set_clear_key_flag ==
+ QSEECOM_SET_CE_KEY_CMD)
+ memcpy((void *)ireq.hash, (void *)set_key_para->hash32,
+ QSEECOM_HASH_SIZE);
+ else
+ memset((void *)ireq.hash, 0, QSEECOM_HASH_SIZE);
+
+ ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(struct qseecom_key_select_ireq),
+ &resp, sizeof(struct qseecom_command_scm_resp));
+ if (ret) {
+ pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n", ret);
+ return ret;
+ }
+
+ /* set PIPE_ENC_XTS */
+ ireq.pipe_type = QSEOS_PIPE_ENC_XTS;
+ ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(struct qseecom_key_select_ireq),
+ &resp, sizeof(struct qseecom_command_scm_resp));
+ if (ret) {
+ pr_err("scm call to set QSEOS_PIPE_ENC_XTS key failed : %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret)
+ pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+ resp.result);
+ break;
+ case QSEOS_RESULT_FAILURE:
+ default:
+ pr_err("Set key scm call failed resp.result %d\n", resp.result);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (qseecom.qsee.instance == qseecom.ce_drv.instance)
+ __qseecom_disable_clk(CLK_QSEE);
+ else
+ __qseecom_disable_clk(CLK_CE_DRV);
+
+ return ret;
+}
+
+static int qseecom_create_key(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ uint32_t ce_hw = 0;
+ uint32_t pipe = 0;
+ uint8_t key_id[QSEECOM_KEY_ID_SIZE] = {0};
+ int ret = 0;
+ uint32_t flags = 0;
+ struct qseecom_set_key_parameter set_key_para;
+ struct qseecom_create_key_req create_key_req;
+
+ ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+
+ if (create_key_req.usage != QSEOS_KM_USAGE_DISK_ENCRYPTION) {
+ pr_err("Error:: unsupported usage %d\n", create_key_req.usage);
+ return -EFAULT;
+ }
+
+ ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw);
+ if (ret) {
+ pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+ return -EINVAL;
+ }
+
+ ret = __qseecom_generate_and_save_key(data, create_key_req.usage,
+ key_id, flags);
+ if (ret) {
+ pr_err("Failed to generate key on storage: %d\n", ret);
+ return -EFAULT;
+ }
+
+ set_key_para.ce_hw = ce_hw;
+ set_key_para.pipe = pipe;
+ memcpy(set_key_para.key_id, key_id, QSEECOM_KEY_ID_SIZE);
+ set_key_para.flags = flags;
+ set_key_para.set_clear_key_flag = QSEECOM_SET_CE_KEY_CMD;
+ memcpy((void *)set_key_para.hash32, (void *)create_key_req.hash32,
+ QSEECOM_HASH_SIZE);
+
+ ret = __qseecom_set_clear_ce_key(data, create_key_req.usage,
+ &set_key_para);
+ if (ret) {
+ pr_err("Failed to create key: pipe %d, ce %d: %d\n",
+ pipe, ce_hw, ret);
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+static int qseecom_wipe_key(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ uint32_t ce_hw = 0;
+ uint32_t pipe = 0;
+ uint8_t key_id[QSEECOM_KEY_ID_SIZE] = {0};
+ int ret = 0;
+ uint32_t flags = 0;
+ int i;
+ struct qseecom_wipe_key_req wipe_key_req;
+ struct qseecom_set_key_parameter clear_key_para;
+
+ ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+
+ if (wipe_key_req.usage != QSEOS_KM_USAGE_DISK_ENCRYPTION) {
+ pr_err("Error:: unsupported usage %d\n", wipe_key_req.usage);
+ return -EFAULT;
+ }
+
+ ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw);
+ if (ret) {
+ pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+ return -EINVAL;
+ }
+
+ ret = __qseecom_delete_saved_key(data, wipe_key_req.usage, key_id,
+ flags);
+ if (ret) {
+ pr_err("Failed to delete key from ssd storage: %d\n", ret);
+ return -EFAULT;
+ }
+
+ /* a key_id of all 0xff bytes is used to indicate a clear-key request */
+ for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
+ clear_key_para.key_id[i] = 0xff;
+ clear_key_para.ce_hw = ce_hw;
+ clear_key_para.pipe = pipe;
+ clear_key_para.flags = flags;
+ clear_key_para.set_clear_key_flag = QSEECOM_CLEAR_CE_KEY_CMD;
+ ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
+ &clear_key_para);
+ if (ret) {
+ pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
+ pipe, ce_hw, ret);
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+static int qseecom_is_es_activated(void __user *argp)
+{
+ struct qseecom_is_es_activated_req req;
+ int ret;
+ int resp_buf;
+
+ if (qseecom.qsee_version < QSEE_VERSION_04) {
+ pr_err("invalid qsee version");
+ return -ENODEV;
+ }
+
+ if (argp == NULL) {
+ pr_err("arg is null");
+ return -EINVAL;
+ }
+
+ ret = scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID, NULL, 0,
+ (void *) &resp_buf, sizeof(resp_buf));
+ if (ret) {
+ pr_err("scm_call failed");
+ return ret;
+ }
+
+ req.is_activated = resp_buf;
+ ret = copy_to_user(argp, &req, sizeof(req));
+ if (ret) {
+ pr_err("copy_to_user failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qseecom_save_partition_hash(void __user *argp)
+{
+ struct qseecom_save_partition_hash_req req;
+ int ret;
+
+ if (qseecom.qsee_version < QSEE_VERSION_04) {
+ pr_err("invalid qsee version ");
+ return -ENODEV;
+ }
+
+ if (argp == NULL) {
+ pr_err("arg is null");
+ return -EINVAL;
+ }
+
+ ret = copy_from_user(&req, argp, sizeof(req));
+ if (ret) {
+ pr_err("copy_from_user failed");
+ return ret;
+ }
+
+ ret = scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
+ (void *) &req, sizeof(req), NULL, 0);
+ if (ret) {
+ pr_err("scm_call failed");
+ return ret;
+ }
+
+ return 0;
+}
+
static long qseecom_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
@@ -2352,6 +2896,70 @@
mutex_unlock(&app_access_lock);
break;
}
+ case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+ if (qseecom.qsee_version < QSEE_VERSION_03) {
+ pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee version %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_send_service_cmd(data, argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_CREATE_KEY_REQ: {
+ if (qseecom.qsee_version < QSEE_VERSION_05) {
+ pr_err("Create Key feature not supported in qsee version %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_create_key(data, argp);
+ if (ret)
+ pr_err("failed to create encryption key: %d\n", ret);
+
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_WIPE_KEY_REQ: {
+ if (qseecom.qsee_version < QSEE_VERSION_05) {
+ pr_err("Wipe Key feature not supported in qsee version %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_wipe_key(data, argp);
+ if (ret)
+ pr_err("failed to wipe encryption key: %d\n", ret);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_save_partition_hash(argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_is_es_activated(argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
default:
return -EINVAL;
}
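For context, the new key-management ioctls above are expected to be driven by a privileged userspace caller (for example, a disk-encryption HAL). The sketch below is illustrative only: the /dev/qseecom node name, the UAPI header name, and the exact layout of struct qseecom_create_key_req are assumptions inferred from the fields the driver dereferences (usage, hash32), not a definitive ABI description.

/*
 * Hypothetical userspace sketch. Node name, header name, and struct
 * layout are assumptions inferred from the driver code above.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/qseecom.h>		/* assumed UAPI header */

static int create_disk_encryption_key(const unsigned char *hash)
{
	struct qseecom_create_key_req req;
	int fd, ret;

	fd = open("/dev/qseecom", O_RDWR);	/* node name assumed */
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.usage = QSEOS_KM_USAGE_DISK_ENCRYPTION;
	memcpy(req.hash32, hash, sizeof(req.hash32));

	/* Generates the key in TZ and programs it on the HLOS CE pipe. */
	ret = ioctl(fd, QSEECOM_IOCTL_CREATE_KEY_REQ, &req);
	close(fd);
	return ret;
}

A wipe request would follow the same shape with struct qseecom_wipe_key_req and QSEECOM_IOCTL_WIPE_KEY_REQ; note that both handlers mark the handle released (data->released = true) before doing the work, as the cases above show.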
@@ -2370,7 +2978,7 @@
}
file->private_data = data;
data->abort = 0;
- data->service = false;
+ data->type = QSEECOM_GENERIC;
data->released = false;
init_waitqueue_head(&data->abort_wq);
atomic_set(&data->ioctl_count, 0);
@@ -2401,18 +3009,31 @@
if (data->released == false) {
pr_warn("data->released == false\n");
- if (data->service)
+ switch (data->type) {
+ case QSEECOM_LISTENER_SERVICE:
ret = qseecom_unregister_listener(data);
- else
+ break;
+ case QSEECOM_CLIENT_APP:
ret = qseecom_unload_app(data);
- if (ret) {
- pr_err("Close failed\n");
- return ret;
+ break;
+ case QSEECOM_SECURE_SERVICE:
+ case QSEECOM_GENERIC:
+ ret = qseecom_unmap_ion_allocated_memory(data);
+ if (ret) {
+ pr_err("Close failed\n");
+ return ret;
+ }
+ break;
+ default:
+ pr_err("Unsupported clnt_handle_type %d",
+ data->type);
+ break;
}
}
- if (data->client.fast_load_enabled == true)
+
+ if (data->fast_load_enabled == true)
qsee_disable_clock_vote(data, CLK_SFPB);
- if (data->client.perf_enabled == true)
+ if (data->perf_enabled == true)
qsee_disable_clock_vote(data, CLK_DFAB);
if (qseecom.qseos_version == QSEOS_VERSION_13) {
@@ -2434,18 +3055,43 @@
.release = qseecom_release
};
-static int __qseecom_init_clk(void)
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
{
int rc = 0;
struct device *pdev;
struct qseecom_clk *qclk;
+ char *core_clk_src = NULL;
+ char *core_clk = NULL;
+ char *iface_clk = NULL;
+ char *bus_clk = NULL;
- qclk = &qseecom.qsee;
-
+ switch (ce) {
+ case CLK_QSEE: {
+ core_clk_src = "core_clk_src";
+ core_clk = "core_clk";
+ iface_clk = "iface_clk";
+ bus_clk = "bus_clk";
+ qclk = &qseecom.qsee;
+ qclk->instance = CLK_QSEE;
+ break;
+ };
+ case CLK_CE_DRV: {
+ core_clk_src = "ce_drv_core_clk_src";
+ core_clk = "ce_drv_core_clk";
+ iface_clk = "ce_drv_iface_clk";
+ bus_clk = "ce_drv_bus_clk";
+ qclk = &qseecom.ce_drv;
+ qclk->instance = CLK_CE_DRV;
+ break;
+ };
+ default:
+ pr_err("Invalid ce hw instance: %d!\n", ce);
+ return -EIO;
+ }
pdev = qseecom.pdev;
- /* Get CE3 src core clk. */
- qclk->ce_core_src_clk = clk_get(pdev, "core_clk_src");
+ /* Get CE3 src core clk. */
+ qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
if (!IS_ERR(qclk->ce_core_src_clk)) {
/* Set the core src clk @100Mhz */
rc = clk_set_rate(qclk->ce_core_src_clk, QSEE_CE_CLK_100MHZ);
@@ -2460,7 +3106,7 @@
}
/* Get CE core clk */
- qclk->ce_core_clk = clk_get(pdev, "core_clk");
+ qclk->ce_core_clk = clk_get(pdev, core_clk);
if (IS_ERR(qclk->ce_core_clk)) {
rc = PTR_ERR(qclk->ce_core_clk);
pr_err("Unable to get CE core clk\n");
@@ -2470,7 +3116,7 @@
}
/* Get CE Interface clk */
- qclk->ce_clk = clk_get(pdev, "iface_clk");
+ qclk->ce_clk = clk_get(pdev, iface_clk);
if (IS_ERR(qclk->ce_clk)) {
rc = PTR_ERR(qclk->ce_clk);
pr_err("Unable to get CE interface clk\n");
@@ -2481,7 +3127,7 @@
}
/* Get CE AXI clk */
- qclk->ce_bus_clk = clk_get(pdev, "bus_clk");
+ qclk->ce_bus_clk = clk_get(pdev, bus_clk);
if (IS_ERR(qclk->ce_bus_clk)) {
rc = PTR_ERR(qclk->ce_bus_clk);
pr_err("Unable to get CE BUS interface clk\n");
@@ -2494,11 +3140,14 @@
return rc;
}
-static void __qseecom_deinit_clk(void)
+static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
{
struct qseecom_clk *qclk;
- qclk = &qseecom.qsee;
+ if (ce == CLK_QSEE)
+ qclk = &qseecom.qsee;
+ else
+ qclk = &qseecom.ce_drv;
if (qclk->ce_clk != NULL) {
clk_put(qclk->ce_clk);
@@ -2536,6 +3185,11 @@
qseecom.qsee.ce_core_src_clk = NULL;
qseecom.qsee.ce_bus_clk = NULL;
+ qseecom.ce_drv.ce_core_clk = NULL;
+ qseecom.ce_drv.ce_clk = NULL;
+ qseecom.ce_drv.ce_core_src_clk = NULL;
+ qseecom.ce_drv.ce_bus_clk = NULL;
+
rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
if (rc < 0) {
pr_err("alloc_chrdev_region failed %d\n", rc);
@@ -2610,10 +3264,66 @@
/* register client for bus scaling */
if (pdev->dev.of_node) {
- ret = __qseecom_init_clk();
+
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,disk-encrypt-pipe-pair",
+ &qseecom.ce_info.disk_encrypt_pipe)) {
+ pr_err("Fail to get disk-encrypt pipe pair information.\n");
+ qseecom.ce_info.disk_encrypt_pipe = 0xff;
+ rc = -EINVAL;
+ goto err;
+ } else {
+ pr_warn("bam_pipe_pair=0x%x",
+ qseecom.ce_info.disk_encrypt_pipe);
+ }
+
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,qsee-ce-hw-instance",
+ &qseecom.ce_info.qsee_ce_hw_instance)) {
+ pr_err("Fail to get qsee ce hw instance information.\n");
+ qseecom.ce_info.qsee_ce_hw_instance = 0xff;
+ rc = -EINVAL;
+ goto err;
+ } else {
+ pr_warn("qsee-ce-hw-instance=0x%x",
+ qseecom.ce_info.qsee_ce_hw_instance);
+ }
+
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,hlos-ce-hw-instance",
+ &qseecom.ce_info.hlos_ce_hw_instance)) {
+ pr_err("Fail to get hlos ce hw instance information.\n");
+ qseecom.ce_info.hlos_ce_hw_instance = 0xff;
+ rc = -EINVAL;
+ goto err;
+ } else {
+ pr_warn("hlos-ce-hw-instance=0x%x",
+ qseecom.ce_info.hlos_ce_hw_instance);
+ }
+
+ qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
+ qseecom.ce_drv.instance = qseecom.ce_info.hlos_ce_hw_instance;
+
+ ret = __qseecom_init_clk(CLK_QSEE);
if (ret)
goto err;
+ if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
+ ret = __qseecom_init_clk(CLK_CE_DRV);
+ if (ret) {
+ __qseecom_deinit_clk(CLK_QSEE);
+ goto err;
+ }
+ } else {
+ struct qseecom_clk *qclk;
+
+ qclk = &qseecom.qsee;
+ qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
+ qseecom.ce_drv.ce_clk = qclk->ce_clk;
+ qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
+ qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
+ }
+
qseecom_platform_support = (struct msm_bus_scale_pdata *)
msm_bus_cl_get_pdata(pdev);
if (qseecom.qsee_version >= (QSEE_VERSION_02)) {
@@ -2724,9 +3434,11 @@
msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
0);
/* register client for bus scaling */
- if (pdev->dev.of_node)
- __qseecom_deinit_clk();
-
+ if (pdev->dev.of_node) {
+ __qseecom_deinit_clk(CLK_QSEE);
+ if (qseecom.qsee.instance != qseecom.ce_drv.instance)
+ __qseecom_deinit_clk(CLK_CE_DRV);
+ }
return ret;
};
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index dbb4f5e..73cae32 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -1571,6 +1571,7 @@
int id;
int table_idx;
u32 val;
+ unsigned long flags;
struct sps_connect *config;
struct tspp_device *pdev;
@@ -1591,6 +1592,15 @@
if (!channel->used)
return 0;
+ /*
+ * Need to protect access to used and waiting fields, as they are
+ * used by the tasklet which is invoked from interrupt context
+ */
+ spin_lock_irqsave(&pdev->spinlock, flags);
+ channel->used = 0;
+ channel->waiting = NULL;
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+
if (channel->expiration_period_ms)
del_timer(&channel->expiration_timer);
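The locking added here follows a common pattern for state shared between process context and a tasklet: the closing path takes the lock with interrupts disabled, while the tasklet side can take the plain spinlock. A minimal sketch of the pattern, with illustrative names rather than the tspp driver's own structures (the driver's ISR/tasklet split may impose additional constraints):

/* Illustrative pattern only -- not the actual tspp structures. */
#include <linux/spinlock.h>

struct chan_state {
	spinlock_t lock;
	int used;
	void *waiting;
};

/* Runs in softirq context (tasklet). */
static void chan_tasklet_fn(unsigned long data)
{
	struct chan_state *c = (struct chan_state *)data;

	spin_lock(&c->lock);
	if (c->used && c->waiting) {
		/* hand the waiting buffer to the consumer */
	}
	spin_unlock(&c->lock);
}

/* Runs in process context while closing the channel. */
static void chan_close(struct chan_state *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->used = 0;
	c->waiting = NULL;
	spin_unlock_irqrestore(&c->lock, flags);
}

The irqsave variant used by tspp is the conservative choice: it stays correct even if the same lock is ever taken from hard-IRQ context.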
@@ -1644,9 +1654,7 @@
channel->buffer_count = 0;
channel->data = NULL;
channel->read = NULL;
- channel->waiting = NULL;
channel->locked = NULL;
- channel->used = 0;
if (tspp_channels_in_use(pdev) == 0) {
wake_unlock(&pdev->wake_lock);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b075435..9f12142 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -67,12 +67,19 @@
(rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER 0x01
#define PACKED_CMD_WR 0x02
+#define PACKED_TRIGGER_MAX_ELEMENTS 5000
#define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \
do { \
if (stats->enabled) \
stats->pack_stop_reason[reason]++; \
} while (0)
+#define PCKD_TRGR_INIT_MEAN_POTEN 17
+#define PCKD_TRGR_POTEN_LOWER_BOUND 5
+#define PCKD_TRGR_URGENT_PENALTY 2
+#define PCKD_TRGR_LOWER_BOUND 5
+#define PCKD_TRGR_PRECISION_MULTIPLIER 100
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -646,6 +653,7 @@
mrq.cmd = &cmd;
+ mmc_rpm_hold(card->host, &card->dev);
mmc_claim_host(card->host);
err = mmc_blk_part_switch(card, md);
@@ -695,6 +703,7 @@
cmd_rel_host:
mmc_release_host(card->host);
+ mmc_rpm_release(card->host, &card->dev);
cmd_done:
mmc_blk_put(md);
@@ -775,6 +784,7 @@
goto idata_free;
}
+ mmc_rpm_hold(card->host, &card->dev);
mmc_claim_host(card->host);
err = mmc_blk_part_switch(card, md);
@@ -871,6 +881,7 @@
cmd_rel_host:
mmc_release_host(card->host);
+ mmc_rpm_release(card->host, &card->dev);
idata_free:
for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
@@ -1858,6 +1869,80 @@
}
EXPORT_SYMBOL(mmc_blk_disable_wr_packing);
+static int get_packed_trigger(int potential, struct mmc_card *card,
+ struct request *req, int curr_trigger)
+{
+ static int num_mean_elements = 1;
+ static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
+ unsigned int trigger = curr_trigger;
+ unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes;
+
+ /* scale down the upper bound to 75% */
+ pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4;
+
+ /*
+ * most calls to this function arrive with small potential write
+ * counts; to keep them from skewing the packed trigger, set a lower
+ * bound and ignore calls whose potential is below it
+ */
+ if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND)
+ return trigger;
+
+ /*
+ * this is to prevent integer overflow in the following calculation:
+ * once every PACKED_TRIGGER_MAX_ELEMENTS reset the algorithm
+ */
+ if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) {
+ num_mean_elements = 1;
+ mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
+ }
+
+ /*
+ * get next mean value based on previous mean value and current
+ * potential packed writes. Calculation is as follows:
+ * mean_pot[i+1] =
+ * ((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1)
+ */
+ mean_potential *= num_mean_elements;
+ /*
+ * add num_mean_elements so that the division of two integers doesn't
+ * lower mean_potential too much
+ */
+ if (potential > mean_potential)
+ mean_potential += num_mean_elements;
+ mean_potential += potential;
+ /* this is for gaining more precision when dividing two integers */
+ mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER;
+ /* this completes the mean calculation */
+ mean_potential /= ++num_mean_elements;
+ mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER;
+
+ /*
+ * if the current potential packed-write count exceeds the mean, the
+ * heuristic is that the upcoming workload will contain many write
+ * requests, so we lower the packed trigger. In the opposite case we
+ * raise the trigger in order to get fewer packing events.
+ */
+ if (potential >= mean_potential)
+ trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ?
+ PCKD_TRGR_LOWER_BOUND : trigger - 1;
+ else
+ trigger = (trigger >= pckd_trgr_upper_bound) ?
+ pckd_trgr_upper_bound : trigger + 1;
+
+ /*
+ * an urgent read request indicates a packed list being interrupted
+ * by this read, therefore we aim for less packing, hence the trigger
+ * gets increased
+ */
+ if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ))
+ trigger += PCKD_TRGR_URGENT_PENALTY;
+
+ return trigger;
+}
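As a worked example of the integer running mean above: with mean_potential = 17 (PCKD_TRGR_INIT_MEAN_POTEN), num_mean_elements = 1 and an incoming potential of 33, the code computes 17 * 1 = 17, adds num_mean_elements because 33 > 17 (giving 18), adds the potential (51), scales by 100 (5100), divides by the incremented element count of 2 (2550), and divides the scale back out, yielding a new mean of 25 -- the same as the exact average (17 + 33) / 2. The trigger itself then moves by at most one step per call, clamped between PCKD_TRGR_LOWER_BOUND and 75% of max_packed_writes, with an extra PCKD_TRGR_URGENT_PENALTY added when an urgent read interrupts a packed list.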
+
static void mmc_blk_write_packing_control(struct mmc_queue *mq,
struct request *req)
{
@@ -1885,6 +1970,10 @@
if (mq->num_of_potential_packed_wr_reqs >
mq->num_wr_reqs_to_start_packing)
mq->wr_packing_enabled = true;
+ mq->num_wr_reqs_to_start_packing =
+ get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
+ mq->card, req,
+ mq->num_wr_reqs_to_start_packing);
mq->num_of_potential_packed_wr_reqs = 0;
return;
}
@@ -1893,6 +1982,12 @@
if (data_dir == READ) {
mmc_blk_disable_wr_packing(mq);
+ mq->num_wr_reqs_to_start_packing =
+ get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
+ mq->card, req,
+ mq->num_wr_reqs_to_start_packing);
+ mq->num_of_potential_packed_wr_reqs = 0;
+ mq->wr_packing_enabled = false;
return;
} else if (data_dir == WRITE) {
mq->num_of_potential_packed_wr_reqs++;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 38b453b..73a1b41 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -45,6 +45,9 @@
#include "sd_ops.h"
#include "sdio_ops.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
static void mmc_clk_scaling(struct mmc_host *host, bool from_wq);
/* If the device is not responding */
@@ -164,10 +167,12 @@
static inline void mmc_update_clk_scaling(struct mmc_host *host)
{
- if (host->clk_scaling.enable)
+ if (host->clk_scaling.enable) {
host->clk_scaling.busy_time_us +=
ktime_to_us(ktime_sub(ktime_get(),
host->clk_scaling.start_busy));
+ host->clk_scaling.start_busy = ktime_get();
+ }
}
/**
* mmc_request_done - finish processing an MMC request
@@ -808,6 +813,12 @@
context_info->is_urgent = false;
context_info->is_new_req = false;
if (mmc_should_stop_curr_req(host)) {
+ /*
+ * We are going to stop the ongoing request.
+ * Update stuff that we ought to do when the
+ * request actually completes.
+ */
+ mmc_update_clk_scaling(host);
err = mmc_stop_request(host);
if (err && !context_info->is_done_rcv) {
err = MMC_BLK_ABORT;
@@ -820,14 +831,6 @@
context_info->is_done_rcv = false;
break; /* return err */
} else {
- /*
- * We have stopped the ongoing request
- * and are sure that mmc_request_done()
- * is not going to get called. Update
- * stuff that we ought to do when the
- * request actually completes.
- */
- mmc_update_clk_scaling(host);
mmc_host_clk_release(host);
}
err = host->areq->update_interrupted_req(
@@ -1486,6 +1489,19 @@
if (ios->clock > 0)
mmc_set_ungated(host);
host->ops->set_ios(host, ios);
+ if (ios->old_rate != ios->clock) {
+ if (likely(ios->clk_ts)) {
+ char trace_info[80];
+ snprintf(trace_info, 80,
+ "%s: freq_KHz %d --> %d | t = %d",
+ mmc_hostname(host), ios->old_rate / 1000,
+ ios->clock / 1000, jiffies_to_msecs(
+ (long)jiffies - (long)ios->clk_ts));
+ trace_mmc_clk(trace_info);
+ }
+ ios->old_rate = ios->clock;
+ ios->clk_ts = jiffies;
+ }
}
EXPORT_SYMBOL(mmc_set_ios);
@@ -3600,6 +3616,20 @@
EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif
+#ifdef CONFIG_PM_RUNTIME
+void mmc_dump_dev_pm_state(struct mmc_host *host, struct device *dev)
+{
+ pr_err("%s: %s: err: runtime_error: %d\n", dev_name(dev),
+ mmc_hostname(host), dev->power.runtime_error);
+ pr_err("%s: %s: disable_depth: %d runtime_status: %d idle_notification: %d\n",
+ dev_name(dev), mmc_hostname(host), dev->power.disable_depth,
+ dev->power.runtime_status,
+ dev->power.idle_notification);
+ pr_err("%s: %s: request_pending: %d, request: %d\n",
+ dev_name(dev), mmc_hostname(host),
+ dev->power.request_pending, dev->power.request);
+}
+
void mmc_rpm_hold(struct mmc_host *host, struct device *dev)
{
int ret = 0;
@@ -3608,13 +3638,16 @@
return;
ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pr_err("%s: %s: %s: error resuming device: %d\n",
+ if ((ret < 0) &&
+ (dev->power.runtime_error || (dev->power.disable_depth > 0))) {
+ pr_err("%s: %s: %s: pm_runtime_get_sync: err: %d\n",
dev_name(dev), mmc_hostname(host), __func__, ret);
+ mmc_dump_dev_pm_state(host, dev);
if (pm_runtime_suspended(dev))
BUG_ON(1);
}
}
+
EXPORT_SYMBOL(mmc_rpm_hold);
void mmc_rpm_release(struct mmc_host *host, struct device *dev)
@@ -3625,11 +3658,22 @@
return;
ret = pm_runtime_put_sync(dev);
- if (ret < 0 && ret != -EBUSY)
- pr_err("%s: %s: %s: put sync ret: %d\n",
+ if ((ret < 0) &&
+ (dev->power.runtime_error || (dev->power.disable_depth > 0))) {
+ pr_err("%s: %s: %s: pm_runtime_put_sync: err: %d\n",
dev_name(dev), mmc_hostname(host), __func__, ret);
+ mmc_dump_dev_pm_state(host, dev);
+ }
}
+
EXPORT_SYMBOL(mmc_rpm_release);
+#else
+void mmc_rpm_hold(struct mmc_host *host, struct device *dev) {}
+EXPORT_SYMBOL(mmc_rpm_hold);
+
+void mmc_rpm_release(struct mmc_host *host, struct device *dev) {}
+EXPORT_SYMBOL(mmc_rpm_release);
+#endif
/**
* mmc_init_context_info() - init synchronization context
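A short usage sketch of the hold/release pair, mirroring the bracketing added to the mmc_blk ioctl paths earlier in this series; the function and its body are placeholders, and the header locations are assumed to match this tree:

/* Illustrative bracketing only; the work between claim and release is a stub. */
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

static int example_card_access(struct mmc_card *card)
{
	int err = 0;

	mmc_rpm_hold(card->host, &card->dev);	/* runtime-resume before touching HW */
	mmc_claim_host(card->host);

	/* ... issue commands while the host is claimed ... */

	mmc_release_host(card->host);
	mmc_rpm_release(card->host, &card->dev);	/* allow runtime suspend again */
	return err;
}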
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 4a063fd..3f3687b 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -4683,10 +4683,10 @@
}
/* Now save the sps pipe handle */
ep->pipe_handle = sps_pipe_handle;
- pr_debug("%s: %s, success !!! %s: pipe_handle=0x%x,"
- " desc_fifo.phys_base=0x%x\n", mmc_hostname(host->mmc),
+ pr_debug("%s: %s, success !!! %s: pipe_handle=0x%x,"\
+ " desc_fifo.phys_base=%pa\n", mmc_hostname(host->mmc),
__func__, is_producer ? "READ" : "WRITE",
- (u32)sps_pipe_handle, sps_config->desc.phys_base);
+ (u32)sps_pipe_handle, &sps_config->desc.phys_base);
goto out;
reg_event_err:
@@ -4929,11 +4929,8 @@
host->bam_base = ioremap(host->bam_memres->start,
resource_size(host->bam_memres));
if (!host->bam_base) {
- pr_err("%s: BAM ioremap() failed!!! phys_addr=0x%x,"
- " size=0x%x", mmc_hostname(host->mmc),
- host->bam_memres->start,
- (host->bam_memres->end -
- host->bam_memres->start));
+ pr_err("%s: BAM ioremap() failed!!! resource: %pr\n",
+ mmc_hostname(host->mmc), host->bam_memres);
rc = -ENOMEM;
goto out;
}
@@ -4954,11 +4951,15 @@
*/
bam.summing_threshold = SPS_MIN_XFER_SIZE;
/* SPS driver wll handle the SDCC BAM IRQ */
- bam.irq = (u32)host->bam_irqres->start;
+ bam.irq = host->bam_irqres->start;
bam.manage = SPS_BAM_MGR_LOCAL;
bam.callback = msmsdcc_sps_bam_global_irq_cb;
bam.user = (void *)host;
+ /* bam reset messages will be limited to 5 times */
+ bam.constrained_logging = true;
+ bam.logging_number = 5;
+
pr_info("%s: bam physical base=0x%x\n", mmc_hostname(host->mmc),
(u32)bam.phys_addr);
pr_info("%s: bam virtual base=0x%x\n", mmc_hostname(host->mmc),
@@ -4986,10 +4987,8 @@
if (rc)
goto cons_conn_err;
- pr_info("%s: Qualcomm MSM SDCC-BAM at 0x%016llx irq %d\n",
- mmc_hostname(host->mmc),
- (unsigned long long)host->bam_memres->start,
- (unsigned int)host->bam_irqres->start);
+ pr_info("%s: Qualcomm MSM SDCC-BAM at %pr %pr\n",
+ mmc_hostname(host->mmc), host->bam_memres, host->bam_irqres);
goto out;
cons_conn_err:
@@ -5176,15 +5175,16 @@
}
static void msmsdcc_print_regs(const char *name, void __iomem *base,
- u32 phys_base, unsigned int no_of_regs)
+ resource_size_t phys_base,
+ unsigned int no_of_regs)
{
unsigned int i;
if (!base)
return;
- pr_err("===== %s: Register Dumps @phys_base=0x%x, @virt_base=0x%x"
- " =====\n", name, phys_base, (u32)base);
+ pr_err("===== %s: Register Dumps @phys_base=%pa, @virt_base=0x%x"\
+ " =====\n", name, &phys_base, (u32)base);
for (i = 0; i < no_of_regs; i = i + 4) {
pr_err("Reg=0x%.2x: 0x%.8x, 0x%.8x, 0x%.8x, 0x%.8x\n", i*4,
(u32)readl_relaxed(base + i*4),
@@ -6271,10 +6271,8 @@
mmc->clk_scaling.polling_delay_ms = 100;
mmc->caps2 |= MMC_CAP2_CLK_SCALE;
- pr_info("%s: Qualcomm MSM SDCC-core at 0x%016llx irq %d,%d dma %d"
- " dmacrcri %d\n", mmc_hostname(mmc),
- (unsigned long long)core_memres->start,
- (unsigned int) core_irqres->start,
+ pr_info("%s: Qualcomm MSM SDCC-core %pr %pr,%d dma %d dmacrcri %d\n",
+ mmc_hostname(mmc), core_memres, core_irqres,
(unsigned int) plat->status_irq, host->dma.channel,
host->dma.crci);
@@ -6296,11 +6294,11 @@
if (is_dma_mode(host) && host->dma.channel != -1
&& host->dma.crci != -1) {
- pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
- mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
- pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
- mmc_hostname(mmc), host->dma.cmd_busaddr,
- host->dma.cmdptr_busaddr);
+ pr_info("%s: DM non-cached buffer at %p, dma_addr: %pa\n",
+ mmc_hostname(mmc), host->dma.nc, &host->dma.nc_busaddr);
+ pr_info("%s: DM cmd busaddr: %pa, cmdptr busaddr: %pa\n",
+ mmc_hostname(mmc), &host->dma.cmd_busaddr,
+ &host->dma.cmdptr_busaddr);
} else if (is_sps_mode(host)) {
pr_info("%s: SPS-BAM data transfer mode available\n",
mmc_hostname(mmc));
diff --git a/drivers/mmc/host/msm_sdcc_dml.c b/drivers/mmc/host/msm_sdcc_dml.c
index 91ab7e3..2562436 100644
--- a/drivers/mmc/host/msm_sdcc_dml.c
+++ b/drivers/mmc/host/msm_sdcc_dml.c
@@ -166,17 +166,13 @@
host->dml_base = ioremap(host->dml_memres->start,
resource_size(host->dml_memres));
if (!host->dml_base) {
- pr_err("%s: DML ioremap() failed!!! phys_addr=0x%x,"
- " size=0x%x", mmc_hostname(host->mmc),
- host->dml_memres->start,
- (host->dml_memres->end -
- host->dml_memres->start));
+ pr_err("%s: DML ioremap() failed!!! %pr\n",
+ mmc_hostname(host->mmc), host->dml_memres);
rc = -ENOMEM;
goto out;
}
- pr_info("%s: Qualcomm MSM SDCC-DML at 0x%016llx\n",
- mmc_hostname(host->mmc),
- (unsigned long long)host->dml_memres->start);
+ pr_info("%s: Qualcomm MSM SDCC-DML %pr\n",
+ mmc_hostname(host->mmc), host->dml_memres);
}
dml_base = host->dml_base;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 2038d3d..15e36a8 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -38,6 +38,7 @@
#include <linux/dma-mapping.h>
#include <mach/gpio.h>
#include <mach/msm_bus.h>
+#include <linux/iopoll.h>
#include "sdhci-pltfm.h"
@@ -81,6 +82,31 @@
#define CORE_CLK_PWRSAVE (1 << 1)
#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
+#define CORE_MCI_DATA_CTRL 0x2C
+#define CORE_MCI_DPSM_ENABLE (1 << 0)
+
+#define CORE_TESTBUS_CONFIG 0x0CC
+#define CORE_TESTBUS_ENA (1 << 3)
+#define CORE_TESTBUS_SEL2 (1 << 4)
+
+/*
+ * Waiting until end of potential AHB access for data:
+ * 16 AHB cycles (160ns for 100MHz and 320ns for 50MHz) +
+ * delay on AHB (2us) = maximum 2.32us
+ * A 10x margin is applied
+ */
+#define CORE_AHB_DATA_DELAY_US 23
+/* Waiting until end of potential AHB access for descriptor:
+ * Single (1 AHB cycle) + delay on AHB bus = max 2us
+ * INCR4 (4 AHB cycles) + delay on AHB bus = max 2us
+ * Single (1 AHB cycle) + delay on AHB bus = max 2us
+ * Total 8 us delay with margin
+ */
+#define CORE_AHB_DESC_DELAY_US 8
+
+#define CORE_SDCC_DEBUG_REG 0x124
+#define CORE_DEBUG_REG_AHB_HTRANS (3 << 12)
+
/* 8KB descriptors */
#define SDHCI_MSM_MAX_SEGMENTS (1 << 13)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
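For reference, the two delay constants above follow directly from the stated timing: 16 AHB cycles at 50 MHz (20 ns per cycle) is 320 ns, plus the ~2 us AHB delay gives about 2.32 us, and the 10x margin rounds to CORE_AHB_DATA_DELAY_US = 23 us. For descriptor fetches, the three listed accesses (Single, INCR4, Single) of at most 2 us each total about 6 us, padded with margin to CORE_AHB_DESC_DELAY_US = 8 us.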
@@ -2044,6 +2070,51 @@
return 0;
}
+/*
+ * sdhci_msm_disable_data_xfer - disable an ongoing AHB bus data transfer
+ *
+ * Write 0 to bit 0 in MCI_DATA_CTL (offset 0x2C), clearing the TxActive bit
+ * through the legacy registers. This stops the current burst and prevents the
+ * next one from starting.
+ *
+ * Polling bits 13:12 of CORE_SDCC_DEBUG_REG (offset 0x124) until they read 0,
+ * within a CORE_AHB_DATA_DELAY_US timeout, confirms that the AHB burst has
+ * completed and that a new one has not started.
+ *
+ * Finally, wait CORE_AHB_DESC_DELAY_US while the AHB finishes descriptor
+ * fetches.
+ */
+static void sdhci_msm_disable_data_xfer(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ u32 value;
+ int ret;
+
+ value = readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CTRL);
+ value &= ~(u32)CORE_MCI_DPSM_ENABLE;
+ writel_relaxed(value, msm_host->core_mem + CORE_MCI_DATA_CTRL);
+
+ /* Enable the test bus for device slot */
+ writel_relaxed(CORE_TESTBUS_ENA | CORE_TESTBUS_SEL2,
+ msm_host->core_mem + CORE_TESTBUS_CONFIG);
+
+ ret = readl_poll_timeout_noirq(msm_host->core_mem
+ + CORE_SDCC_DEBUG_REG, value,
+ !(value & CORE_DEBUG_REG_AHB_HTRANS),
+ CORE_AHB_DATA_DELAY_US, 1);
+ if (ret) {
+ pr_err("%s: %s: can't stop ongoing AHB bus access by ADMA\n",
+ mmc_hostname(host->mmc), __func__);
+ BUG();
+ }
+ /* Disable the test bus for device slot */
+ value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
+ value &= ~CORE_TESTBUS_ENA;
+ writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
+
+ udelay(CORE_AHB_DESC_DELAY_US);
+}
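readl_poll_timeout_noirq() is an MSM-specific polling helper whose exact parameter semantics are not visible in this hunk; the intent, however, is a bounded busy-wait on the debug register, roughly equivalent to the open-coded loop below (a sketch only, reusing the CORE_* constants defined above and assuming a ~1 us poll interval):

/* Rough open-coded equivalent of the bounded poll above (sketch). */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int poll_ahb_idle(void __iomem *debug_reg)
{
	u32 value;
	int us;

	for (us = 0; us < CORE_AHB_DATA_DELAY_US; us++) {
		value = readl_relaxed(debug_reg);
		if (!(value & CORE_DEBUG_REG_AHB_HTRANS))
			return 0;	/* no AHB transfer in flight */
		udelay(1);
	}
	return -ETIMEDOUT;
}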
+
static struct sdhci_ops sdhci_msm_ops = {
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.check_power_status = sdhci_msm_check_power_status,
@@ -2054,6 +2125,7 @@
.platform_bus_voting = sdhci_msm_bus_voting,
.get_min_clock = sdhci_msm_get_min_clock,
.get_max_clock = sdhci_msm_get_max_clock,
+ .disable_data_xfer = sdhci_msm_disable_data_xfer,
};
static int __devinit sdhci_msm_probe(struct platform_device *pdev)
@@ -2064,7 +2136,7 @@
struct resource *core_memres = NULL;
int ret = 0, dead = 0;
u32 vdd_max_current;
- u32 host_version;
+ u16 host_version;
pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
@@ -2188,8 +2260,11 @@
host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
host->quirks2 |= SDHCI_QUIRK2_IGNORE_CMDCRC_FOR_TUNING;
host->quirks2 |= SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE;
+ host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
+ host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
- host_version = readl_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
+ host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
SDHCI_VENDOR_VER_SHIFT));
@@ -2259,6 +2334,7 @@
msm_host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
msm_host->mmc->caps2 |= MMC_CAP2_POWEROFF_NOTIFY;
msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
+ msm_host->mmc->caps2 |= MMC_CAP2_STOP_REQUEST;
if (msm_host->pdata->nonremovable)
msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
@@ -2416,6 +2492,9 @@
goto out;
}
+ if (msm_host->msm_bus_vote.client_handle)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+
return sdhci_msm_runtime_suspend(dev);
out:
return ret;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d58379f..3efea77 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -741,10 +741,12 @@
break;
}
- if (count >= 0xF) {
- DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
- mmc_hostname(host->mmc), count, cmd->opcode);
- count = 0xE;
+ if (!(host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT)) {
+ if (count >= 0xF) {
+ DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
+ count = 0xE;
+ }
}
return count;
@@ -1094,6 +1096,8 @@
cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
flags |= SDHCI_CMD_DATA;
+ if (cmd->data)
+ host->data_start_time = ktime_get();
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
@@ -2087,6 +2091,9 @@
if (host->version < SDHCI_SPEC_300)
return;
+ if (host->quirks2 & SDHCI_QUIRK2_BROKEN_PRESET_VALUE)
+ return;
+
spin_lock_irqsave(&host->lock, flags);
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -2117,6 +2124,53 @@
sdhci_runtime_pm_put(host);
}
+static int sdhci_stop_request(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ struct mmc_data *data;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (!host->mrq || !host->data)
+ goto out;
+
+ data = host->data;
+
+ if (host->ops->disable_data_xfer)
+ host->ops->disable_data_xfer(host);
+
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ if (host->flags & SDHCI_USE_ADMA) {
+ sdhci_adma_table_post(host, data);
+ } else {
+ if (!data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+ }
+ del_timer(&host->timer);
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+ return 0;
+}
+
+static unsigned int sdhci_get_xfer_remain(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 present_state = 0;
+
+ present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
+
+ return present_state & SDHCI_DOING_WRITE;
+}
+
static const struct mmc_host_ops sdhci_ops = {
.pre_req = sdhci_pre_req,
.post_req = sdhci_post_req,
@@ -2130,6 +2184,8 @@
.enable_preset_value = sdhci_enable_preset_value,
.enable = sdhci_enable,
.disable = sdhci_disable,
+ .stop_request = sdhci_stop_request,
+ .get_xfer_remain = sdhci_get_xfer_remain,
};
/*****************************************************************************\
@@ -2361,7 +2417,6 @@
sdhci_finish_command(host);
}
-#ifdef CONFIG_MMC_DEBUG
static void sdhci_show_adma_error(struct sdhci_host *host)
{
const char *name = mmc_hostname(host->mmc);
@@ -2377,7 +2432,7 @@
len = (__le16 *)(desc + 2);
attr = *desc;
- DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+ pr_info("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
desc += 8;
@@ -2386,9 +2441,6 @@
break;
}
}
-#else
-static void sdhci_show_adma_error(struct sdhci_host *host) { }
-#endif
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
@@ -2418,6 +2470,9 @@
sdhci_finish_command(host);
return;
}
+ if (host->quirks2 &
+ SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD)
+ return;
}
pr_err("%s: Got data interrupt 0x%08x even "
@@ -2453,9 +2508,10 @@
pr_msg = true;
}
if (pr_msg) {
- pr_err("%s: data txfr (0x%08x) error: %d\n",
+ pr_err("%s: data txfr (0x%08x) error: %d after %lld ms\n",
mmc_hostname(host->mmc), intmask,
- host->data->error);
+ host->data->error, ktime_to_ms(ktime_sub(
+ ktime_get(), host->data_start_time)));
sdhci_dumpregs(host);
}
sdhci_finish_data(host);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c6bef8a..a3d8442 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -286,6 +286,7 @@
void (*toggle_cdr)(struct sdhci_host *host, bool enable);
unsigned int (*get_max_segments)(void);
void (*platform_bus_voting)(struct sdhci_host *host, u32 enable);
+ void (*disable_data_xfer)(struct sdhci_host *host);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
index c37a4a4..fe8bdd0 100644
--- a/drivers/mtd/devices/msm_qpic_nand.c
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -28,8 +28,9 @@
#include <linux/bitrev.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/ctype.h>
#include <mach/sps.h>
-
+#include <mach/msm_smsm.h>
#define PAGE_SIZE_2K 2048
#define PAGE_SIZE_4K 4096
#define WRITE 1
@@ -285,6 +286,34 @@
uint16_t integrity_crc;
} __attribute__((__packed__));
+#define FLASH_PART_MAGIC1 0x55EE73AA
+#define FLASH_PART_MAGIC2 0xE35EBDDB
+#define FLASH_PTABLE_V3 3
+#define FLASH_PTABLE_V4 4
+#define FLASH_PTABLE_MAX_PARTS_V3 16
+#define FLASH_PTABLE_MAX_PARTS_V4 32
+#define FLASH_PTABLE_HDR_LEN (4*sizeof(uint32_t))
+#define FLASH_PTABLE_ENTRY_NAME_SIZE 16
+
+struct flash_partition_entry {
+ char name[FLASH_PTABLE_ENTRY_NAME_SIZE];
+ u32 offset; /* Offset in blocks from beginning of device */
+ u32 length; /* Length of the partition in blocks */
+ u8 attr; /* Flags for this partition */
+};
+
+struct flash_partition_table {
+ u32 magic1;
+ u32 magic2;
+ u32 version;
+ u32 numparts;
+ struct flash_partition_entry part_entry[FLASH_PTABLE_MAX_PARTS_V4];
+};
+
+static struct flash_partition_table ptable;
+
+static struct mtd_partition mtd_part[FLASH_PTABLE_MAX_PARTS_V4];
+
/*
* Get the DMA memory for requested amount of size. It returns the pointer
* to free memory available from the allocated pool. Returns NULL if there
@@ -660,6 +689,14 @@
if (ret < 0)
goto free_dma;
+ /* Lookup the 'APPS' partition's first page address */
+ for (i = 0; i < FLASH_PTABLE_MAX_PARTS_V4; i++) {
+ if (!strncmp("apps", mtd_part[i].name,
+ strlen(mtd_part[i].name))) {
+ page_address = mtd_part[i].offset << 6;
+ break;
+ }
+ }
data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
data.exec = 1;
data.cfg.addr0 = (page_address << 16) |
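A note on the shift above: SMEM partition offsets are expressed in erase blocks, so the lookup converts the block offset of the "apps" partition to a flash page address. The << 6 assumes 64 pages per erase block, i.e. page_address = block_offset * 64 (64 pages per block holds, for example, for 2 KiB pages with 128 KiB blocks or 4 KiB pages with 256 KiB blocks); parts with a different pages-per-block ratio would need a different conversion.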
@@ -2338,6 +2375,75 @@
}
+#ifdef CONFIG_MSM_SMD
+static int msm_nand_parse_smem_ptable(int *nr_parts)
+{
+
+ uint32_t i, j;
+ uint32_t len = FLASH_PTABLE_HDR_LEN;
+ struct flash_partition_entry *pentry;
+ char *delimiter = ":";
+
+ pr_info("Parsing partition table info from SMEM\n");
+ /* Read only the header portion of ptable */
+ ptable = *(struct flash_partition_table *)
+ (smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len));
+ /* Verify ptable magic */
+ if (ptable.magic1 != FLASH_PART_MAGIC1 ||
+ ptable.magic2 != FLASH_PART_MAGIC2) {
+ pr_err("Partition table magic verification failed\n");
+ goto out;
+ }
+ /* Ensure that # of partitions is less than the max we have allocated */
+ if (ptable.numparts > FLASH_PTABLE_MAX_PARTS_V4) {
+ pr_err("Partition numbers exceed the max limit\n");
+ goto out;
+ }
+ /* Find out length of partition data based on table version. */
+ if (ptable.version <= FLASH_PTABLE_V3) {
+ len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V3 *
+ sizeof(struct flash_partition_entry);
+ } else if (ptable.version == FLASH_PTABLE_V4) {
+ len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V4 *
+ sizeof(struct flash_partition_entry);
+ } else {
+ pr_err("Unknown ptable version (%d)", ptable.version);
+ goto out;
+ }
+
+ *nr_parts = ptable.numparts;
+ ptable = *(struct flash_partition_table *)
+ (smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len));
+ for (i = 0; i < ptable.numparts; i++) {
+ pentry = &ptable.part_entry[i];
+ if (pentry->name[0] == '\0')
+ continue;
+ /* Convert name to lower case and discard the initial chars */
+ mtd_part[i].name = pentry->name;
+ for (j = 0; j < strlen(mtd_part[i].name); j++)
+ *(mtd_part[i].name + j) =
+ tolower(*(mtd_part[i].name + j));
+ strsep(&(mtd_part[i].name), delimiter);
+ mtd_part[i].offset = pentry->offset;
+ mtd_part[i].mask_flags = pentry->attr;
+ mtd_part[i].size = pentry->length;
+ pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
+ i, pentry->name, pentry->offset, pentry->length,
+ pentry->attr);
+ }
+ pr_info("SMEM partition table found: ver: %d len: %d\n",
+ ptable.version, ptable.numparts);
+ return 0;
+out:
+ return -EINVAL;
+}
+#else
+static int msm_nand_parse_smem_ptable(int *nr_parts)
+{
+ return -ENODEV;
+}
+#endif
+
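As a worked example of the name handling in msm_nand_parse_smem_ptable(): boot firmware typically populates SMEM entries as "<index>:<NAME>" (an assumption about the table contents, not something this patch states), so an entry named "0:APPS" is lowercased to "0:apps", and strsep() then advances the mtd_part[].name pointer past the first ':' delimiter, leaving the MTD partition registered as just "apps" -- which is also the string the boot-partition lookup added earlier in this patch matches against.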
/*
* This function gets called when its device named msm-nand is added to
* device tree .dts file with all its resources such as physical addresses
@@ -2352,26 +2458,13 @@
{
struct msm_nand_info *info;
struct resource *res;
- int err;
- struct device_node *pnode;
- struct mtd_part_parser_data parser_data;
-
- if (!pdev->dev.of_node) {
- pr_err("No valid device tree info for NANDc\n");
- err = -ENODEV;
- goto out;
- }
+ int i, err, nr_parts;
/*
* The partition information can also be passed from kernel command
* line. Also, the MTD core layer supports adding the whole device as
* one MTD device when no partition information is available at all.
- * Hence, do not bail out when partition information is not availabe
- * in device tree.
*/
- pnode = of_find_node_by_path("/qcom,mtd-partitions");
- if (!pnode)
- pr_info("No partition info available in device tree\n");
info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
GFP_KERNEL);
if (!info) {
@@ -2379,7 +2472,6 @@
err = -ENOMEM;
goto out;
}
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"nand_phys");
if (!res || !res->start) {
@@ -2438,14 +2530,22 @@
pr_err("Failed to enable DMA in NANDc\n");
goto free_bam;
}
+ err = msm_nand_parse_smem_ptable(&nr_parts);
+ if (err < 0) {
+ pr_err("Failed to parse partition table in SMEM\n");
+ goto free_bam;
+ }
if (msm_nand_scan(&info->mtd)) {
pr_err("No nand device found\n");
err = -ENXIO;
goto free_bam;
}
- parser_data.of_node = pnode;
- err = mtd_device_parse_register(&info->mtd, NULL, &parser_data,
- NULL, 0);
+ for (i = 0; i < nr_parts; i++) {
+ mtd_part[i].offset *= info->mtd.erasesize;
+ mtd_part[i].size *= info->mtd.erasesize;
+ }
+ err = mtd_device_parse_register(&info->mtd, NULL, NULL,
+ &mtd_part[0], nr_parts);
if (err < 0) {
pr_err("Unable to register MTD partitions %d\n", err);
goto free_bam;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4e12bb7..147e378 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1127,6 +1127,33 @@
}
#endif
+static inline unsigned long get_vm_size(struct vm_area_struct *vma)
+{
+ return vma->vm_end - vma->vm_start;
+}
+
+static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
+{
+ return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
+}
+
+/*
+ * Set a new vm offset.
+ *
+ * Verify that the incoming offset really works as a page offset,
+ * and that the offset and size fit in a resource_size_t.
+ */
+static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
+{
+ pgoff_t pgoff = off >> PAGE_SHIFT;
+ if (off != (resource_size_t) pgoff << PAGE_SHIFT)
+ return -EINVAL;
+ if (off + get_vm_size(vma) - 1 < off)
+ return -EINVAL;
+ vma->vm_pgoff = pgoff;
+ return 0;
+}
+
/*
* set up a mapping for shared memory segments
*/
@@ -1136,20 +1163,29 @@
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct map_info *map = mtd->priv;
- unsigned long start;
- unsigned long off;
- u32 len;
+ resource_size_t start, off;
+ unsigned long len, vma_len;
if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
- off = vma->vm_pgoff << PAGE_SHIFT;
+ off = get_vm_offset(vma);
start = map->phys;
len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
start &= PAGE_MASK;
- if ((vma->vm_end - vma->vm_start + off) > len)
+ vma_len = get_vm_size(vma);
+
+ /* Overflow in off+len? */
+ if (vma_len + off < off)
+ return -EINVAL;
+ /* Does it fit in the mapping? */
+ if (vma_len + off > len)
return -EINVAL;
off += start;
- vma->vm_pgoff = off >> PAGE_SHIFT;
+ /* Did that overflow? */
+ if (off < start)
+ return -EINVAL;
+ if (set_vm_offset(vma, off) < 0)
+ return -EINVAL;
vma->vm_flags |= VM_IO | VM_RESERVED;
#ifdef pgprot_noncached
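As a worked example of the overflow checks above: with a 32-bit resource_size_t, an offset of 0xFFFFF000 and a 0x2000-byte VMA wrap around, since off + size - 1 truncates to 0x00000FFF, which is less than off; both set_vm_offset() and the vma_len + off < off test in mmap reject such a request before the page-offset conversion could silently truncate it.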
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/net/ethernet/msm/ecm_ipa.c
index ed67df4..38a36bb 100644
--- a/drivers/net/ethernet/msm/ecm_ipa.c
+++ b/drivers/net/ethernet/msm/ecm_ipa.c
@@ -18,15 +18,18 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/sched.h>
-#include <linux/spinlock.h>
+#include <linux/atomic.h>
#include <mach/ecm_ipa.h>
#define DRIVER_NAME "ecm_ipa"
-#define DRIVER_VERSION "20-Mar-2013"
#define ECM_IPA_IPV4_HDR_NAME "ecm_eth_ipv4"
#define ECM_IPA_IPV6_HDR_NAME "ecm_eth_ipv6"
#define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS
#define INACTIVITY_MSEC_DELAY 100
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+#define DEBUGFS_TEMP_BUF_SIZE 4
+
#define ECM_IPA_ERROR(fmt, args...) \
pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
fmt, __func__, __LINE__, current->comm, ## args)
@@ -45,8 +48,6 @@
/**
* struct ecm_ipa_dev - main driver context parameters
- * @ack_spinlock: protect last sent skb
- * @last_out_skb: last sent skb saved until Tx notify is received from IPA
* @net: network interface struct implemented by this driver
* @folder: debugfs folder for various debuging switches
* @tx_enable: flag that enable/disable Tx path to continue to IPA
@@ -56,15 +57,20 @@
* @tx_file: saved debugfs entry to allow cleanup
* @rx_file: saved debugfs entry to allow cleanup
* @rm_file: saved debugfs entry to allow cleanup
+ * @outstanding_file: saved debugfs entry to allow cleanup
+ * @outstanding_high_file: saved debugfs entry to allow cleanup
+ * @outstanding_low_file: saved debugfs entry to allow cleanup
* @dma_file: saved debugfs entry to allow cleanup
* @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
* @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
* @usb_to_ipa_hdl: save handle for IPA pipe operations
* @ipa_to_usb_hdl: save handle for IPA pipe operations
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high: number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets below which the netdev
+ * queue is restarted (after being stopped at outstanding_high)
*/
struct ecm_ipa_dev {
- spinlock_t ack_spinlock;
- struct sk_buff *last_out_skb;
struct net_device *net;
bool tx_enable;
bool rx_enable;
@@ -74,11 +80,17 @@
struct dentry *tx_file;
struct dentry *rx_file;
struct dentry *rm_file;
+ struct dentry *outstanding_high_file;
+ struct dentry *outstanding_low_file;
struct dentry *dma_file;
+ struct dentry *outstanding_file;
uint32_t eth_ipv4_hdr_hdl;
uint32_t eth_ipv6_hdr_hdl;
u32 usb_to_ipa_hdl;
u32 ipa_to_usb_hdl;
+ atomic_t outstanding_pkts;
+ u8 outstanding_high;
+ u8 outstanding_low;
};
/**
@@ -104,7 +116,7 @@
static void ecm_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
unsigned long data);
static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *dev);
-static void ecm_ipa_destory_rm_resource(void);
+static void ecm_ipa_destory_rm_resource(struct ecm_ipa_dev *dev);
static bool rx_filter(struct sk_buff *skb);
static bool tx_filter(struct sk_buff *skb);
static bool rm_enabled(struct ecm_ipa_dev *dev);
@@ -119,8 +131,11 @@
static int ecm_ipa_debugfs_rx_open(struct inode *inode, struct file *file);
static int ecm_ipa_debugfs_rm_open(struct inode *inode, struct file *file);
static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file);
+static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file);
static ssize_t ecm_ipa_debugfs_enable_read(struct file *file,
char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ecm_ipa_debugfs_atomic_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos);
static ssize_t ecm_ipa_debugfs_enable_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static ssize_t ecm_ipa_debugfs_enable_write_dma(struct file *file,
@@ -159,6 +174,11 @@
.write = ecm_ipa_debugfs_enable_write_dma,
};
+const struct file_operations ecm_ipa_debugfs_atomic_ops = {
+ .open = ecm_ipa_debugfs_atomic_open,
+ .read = ecm_ipa_debugfs_atomic_read,
+};
+
/**
* ecm_ipa_init() - initializes internal data structures
* @ecm_ipa_rx_dp_notify: supplied callback to be called by the IPA
@@ -185,10 +205,12 @@
struct net_device *net;
struct ecm_ipa_dev *dev;
ECM_IPA_LOG_ENTRY();
- pr_debug("%s version %s\n", DRIVER_NAME, DRIVER_VERSION);
+ pr_debug("%s initializing\n", DRIVER_NAME);
NULL_CHECK(ecm_ipa_rx_dp_notify);
NULL_CHECK(ecm_ipa_tx_dp_notify);
NULL_CHECK(priv);
+ pr_debug("rx_cb=0x%p, tx_cb=0x%p priv=0x%p\n",
+ ecm_ipa_rx_dp_notify, ecm_ipa_tx_dp_notify, *priv);
net = alloc_etherdev(sizeof(struct ecm_ipa_dev));
if (!net) {
ret = -ENOMEM;
@@ -200,7 +222,9 @@
memset(dev, 0, sizeof(*dev));
dev->tx_enable = true;
dev->rx_enable = true;
- spin_lock_init(&dev->ack_spinlock);
+ atomic_set(&dev->outstanding_pkts, 0);
+ dev->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+ dev->outstanding_low = DEFAULT_OUTSTANDING_LOW;
dev->net = net;
ecm_ipa_ctx = dev;
*priv = (void *)dev;
@@ -408,8 +432,8 @@
NULL_CHECK(dev);
net = dev->net;
NULL_CHECK(net);
- pr_debug("host_ethaddr=%pM device_ethaddr=%pM\n",
- host_ethaddr, device_ethaddr);
+ pr_debug("priv=0x%p, host_ethaddr=%pM device_ethaddr=%pM\n",
+ priv, host_ethaddr, device_ethaddr);
result = ecm_ipa_create_rm_resource(dev);
if (result) {
ECM_IPA_ERROR("fail on RM create\n");
@@ -448,7 +472,7 @@
fail_register_tx:
fail_set_device_ethernet:
ecm_ipa_rules_destroy(dev);
- ecm_ipa_destory_rm_resource();
+ ecm_ipa_destory_rm_resource(dev);
free_netdev(net);
return result;
}
@@ -460,8 +484,8 @@
struct ecm_ipa_dev *dev = priv;
ECM_IPA_LOG_ENTRY();
NULL_CHECK(priv);
- pr_debug("usb_to_ipa_hdl = %d, ipa_to_usb_hdl = %d\n",
- usb_to_ipa_hdl, ipa_to_usb_hdl);
+ pr_debug("usb_to_ipa_hdl = %d, ipa_to_usb_hdl = %d, priv=0x%p\n",
+ usb_to_ipa_hdl, ipa_to_usb_hdl, priv);
if (!usb_to_ipa_hdl || usb_to_ipa_hdl >= IPA_CLIENT_MAX) {
ECM_IPA_ERROR("usb_to_ipa_hdl(%d) is not a valid ipa handle\n",
usb_to_ipa_hdl);
@@ -490,6 +514,7 @@
struct ecm_ipa_dev *dev = priv;
ECM_IPA_LOG_ENTRY();
NULL_CHECK(dev);
+ pr_debug("priv=0x%p\n", priv);
netif_carrier_off(dev->net);
ECM_IPA_LOG_EXIT();
return 0;
@@ -517,6 +542,10 @@
struct ipa_rm_create_params create_params = {0};
int result;
ECM_IPA_LOG_ENTRY();
+ if (!dev->rm_enable) {
+ pr_debug("RM feature not used\n");
+ return 0;
+ }
create_params.name = IPA_RM_RESOURCE_STD_ECM_PROD;
create_params.reg_params.user_data = dev;
create_params.reg_params.notify_cb = ecm_ipa_rm_notify;
@@ -550,10 +579,11 @@
return result;
}
-static void ecm_ipa_destory_rm_resource(void)
+static void ecm_ipa_destory_rm_resource(struct ecm_ipa_dev *dev)
{
ECM_IPA_LOG_ENTRY();
-
+ if (!dev->rm_enable)
+ return;
ipa_rm_delete_dependency(IPA_RM_RESOURCE_STD_ECM_PROD,
IPA_RM_RESOURCE_USB_CONS);
ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_STD_ECM_PROD);
@@ -605,18 +635,18 @@
{
struct ecm_ipa_dev *dev = priv;
ECM_IPA_LOG_ENTRY();
+ pr_debug("priv=0x%p\n", priv);
if (!dev) {
ECM_IPA_ERROR("dev NULL pointer\n");
return;
}
- if (rm_enabled(dev)) {
- ecm_ipa_destory_rm_resource();
- ecm_ipa_debugfs_destroy(dev);
- }
- if (!dev->net) {
- unregister_netdev(dev->net);
- free_netdev(dev->net);
- }
+
+ ecm_ipa_destory_rm_resource(dev);
+ ecm_ipa_debugfs_destroy(dev);
+
+ unregister_netdev(dev->net);
+ free_netdev(dev->net);
+
pr_debug("cleanup done\n");
ecm_ipa_ctx = NULL;
ECM_IPA_LOG_EXIT();
@@ -662,7 +692,6 @@
int ret;
netdev_tx_t status = NETDEV_TX_BUSY;
struct ecm_ipa_dev *dev = netdev_priv(net);
- unsigned long flags;
if (unlikely(netif_queue_stopped(net))) {
ECM_IPA_ERROR("interface queue is stopped\n");
@@ -682,23 +711,24 @@
goto resource_busy;
}
- spin_lock_irqsave(&dev->ack_spinlock, flags);
- if (dev->last_out_skb) {
- pr_debug("No Tx-ack received for previous packet\n");
- spin_unlock_irqrestore(&dev->ack_spinlock, flags);
+ pr_debug("Before sending packet the outstanding packets counter is %d\n",
+ atomic_read(&dev->outstanding_pkts));
+
+ if (atomic_read(&dev->outstanding_pkts) >= dev->outstanding_high) {
+ pr_debug("Outstanding high boundary reached (%d)- stopping queue\n",
+ dev->outstanding_high);
netif_stop_queue(net);
status = -NETDEV_TX_BUSY;
goto out;
- } else {
- dev->last_out_skb = skb;
}
- spin_unlock_irqrestore(&dev->ack_spinlock, flags);
ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
if (ret) {
ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret);
goto fail_tx_packet;
}
+
+ atomic_inc(&dev->outstanding_pkts);
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
status = NETDEV_TX_OK;
@@ -766,7 +796,6 @@
{
struct sk_buff *skb = (struct sk_buff *)data;
struct ecm_ipa_dev *dev = priv;
- unsigned long flags;
if (!dev) {
ECM_IPA_ERROR("dev is NULL pointer\n");
@@ -776,15 +805,16 @@
ECM_IPA_ERROR("unsupported event on Tx callback\n");
return;
}
- spin_lock_irqsave(&dev->ack_spinlock, flags);
- if (skb != dev->last_out_skb)
- ECM_IPA_ERROR("ACKed/Sent not the same(FIFO expected)\n");
- dev->last_out_skb = NULL;
- spin_unlock_irqrestore(&dev->ack_spinlock, flags);
- if (netif_queue_stopped(dev->net)) {
- pr_debug("waking up queue\n");
+ atomic_dec(&dev->outstanding_pkts);
+ if (netif_queue_stopped(dev->net) &&
+ atomic_read(&dev->outstanding_pkts) < (dev->outstanding_low)) {
+ pr_debug("Outstanding low boundary reached (%d) - waking up queue\n",
+ dev->outstanding_low);
netif_wake_queue(dev->net);
}
+ pr_debug("After Tx-complete the outstanding packets counter is %d\n",
+ atomic_read(&dev->outstanding_pkts));
+
dev_kfree_skb_any(skb);
return;
}
@@ -816,6 +846,15 @@
return 0;
}
+static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
+{
+ struct ecm_ipa_dev *dev = inode->i_private;
+ ECM_IPA_LOG_ENTRY();
+ file->private_data = &(dev->outstanding_pkts);
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+
static ssize_t ecm_ipa_debugfs_enable_write_dma(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
@@ -887,10 +926,23 @@
return size;
}
+static ssize_t ecm_ipa_debugfs_atomic_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int nbytes;
+ u8 atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0};
+ atomic_t *atomic_var = file->private_data;
+ nbytes = scnprintf(atomic_str, sizeof(atomic_str), "%d\n",
+ atomic_read(atomic_var));
+ return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes);
+}
+
static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *dev)
{
- const mode_t flags = S_IRUSR | S_IRGRP | S_IROTH |
- S_IWUSR | S_IWGRP | S_IWOTH;
+ const mode_t flags = S_IRUGO | S_IWUGO;
+ const mode_t flags_read_only = S_IRUGO;
+
int ret = -EINVAL;
ECM_IPA_LOG_ENTRY();
if (!dev)
@@ -929,6 +981,30 @@
ret = -EFAULT;
goto fail_file;
}
+
+ dev->outstanding_high_file = debugfs_create_u8("outstanding_high",
+ flags, dev->folder, &dev->outstanding_high);
+ if (!dev->outstanding_high_file) {
+ ECM_IPA_ERROR("could not create outstanding_high file\n");
+ ret = -EFAULT;
+ goto fail_file;
+ }
+ dev->outstanding_low_file = debugfs_create_u8("outstanding_low",
+ flags, dev->folder, &dev->outstanding_low);
+ if (!dev->outstanding_low_file) {
+ ECM_IPA_ERROR("could not create outstanding_low file\n");
+ ret = -EFAULT;
+ goto fail_file;
+ }
+ dev->outstanding_file = debugfs_create_file("outstanding",
+ flags_read_only, dev->folder, dev,
+ &ecm_ipa_debugfs_atomic_ops);
+ if (!dev->outstanding_file) {
+ ECM_IPA_ERROR("could not create outstanding file\n");
+ ret = -EFAULT;
+ goto fail_file;
+ }
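+
+/*
+ * Usage sketch: outstanding_high/outstanding_low tune the Tx watermarks
+ * used above to stop and wake the netif queue, and "outstanding" exposes
+ * the current outstanding_pkts counter read-only. Assuming debugfs is
+ * mounted at /sys/kernel/debug and dev->folder was created under it
+ * (the folder name is set up elsewhere, not in this hunk):
+ *
+ *   cat /sys/kernel/debug/<ecm folder>/outstanding
+ *   echo 50 > /sys/kernel/debug/<ecm folder>/outstanding_high
+ */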
+
ECM_IPA_LOG_EXIT();
return 0;
fail_file:
@@ -947,7 +1023,6 @@
{
ECM_IPA_LOG_ENTRY();
strlcpy(drv_info->driver, DRIVER_NAME, sizeof(drv_info->driver));
- strlcpy(drv_info->version, DRIVER_VERSION, sizeof(drv_info->version));
ECM_IPA_LOG_EXIT();
}
diff --git a/drivers/net/ethernet/msm/msm_rmnet_smux.c b/drivers/net/ethernet/msm/msm_rmnet_smux.c
index 7b27b73..5fe724e 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_smux.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_smux.c
@@ -804,7 +804,7 @@
for (i = 0; i < RMNET_SMUX_DEVICE_COUNT; i++) {
p = netdev_priv(netdevs[i]);
- if ((p != NULL) && (p->device_state == DEVICE_INACTIVE)) {
+ if (p != NULL) {
r = msm_smux_open(p->ch_id,
netdevs[i],
rmnet_smux_notify,
@@ -828,7 +828,7 @@
for (i = 0; i < RMNET_SMUX_DEVICE_COUNT; i++) {
p = netdev_priv(netdevs[i]);
- if ((p != NULL) && (p->device_state == DEVICE_ACTIVE)) {
+ if (p != NULL) {
r = msm_smux_close(p->ch_id);
if (r < 0) {
diff --git a/drivers/net/ethernet/msm/msm_rmnet_wwan.c b/drivers/net/ethernet/msm/msm_rmnet_wwan.c
index fe1ac46..f90ee3d 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_wwan.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_wwan.c
@@ -34,6 +34,7 @@
#include <mach/ipa.h>
#define WWAN_DEV_NAME "rmnet%d"
+#define WWAN_METADATA_SHFT 16
#define WWAN_METADATA_MASK 0x00FF0000
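+/*
+ * Worked example (derived from the shift/mask above): the channel id is
+ * placed in bits 23:16 of the RX metadata, so ch_id 3 becomes
+ * 3 << WWAN_METADATA_SHFT = 0x00030000, which lands inside
+ * WWAN_METADATA_MASK (0x00FF0000).
+ */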
#define IPA_RM_INACTIVITY_TIMER 1000
#define WWAN_DEVICE_COUNT (8)
@@ -304,13 +305,15 @@
rx_ipv4_property = &rx_properties.prop[0];
rx_ipv4_property->ip = IPA_IP_v4;
rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
- rx_ipv4_property->attrib.meta_data = wwan_ptr->ch_id;
+ rx_ipv4_property->attrib.meta_data =
+ wwan_ptr->ch_id << WWAN_METADATA_SHFT;
rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
rx_ipv4_property->src_pipe = IPA_CLIENT_A2_EMBEDDED_PROD;
rx_ipv6_property = &rx_properties.prop[1];
rx_ipv6_property->ip = IPA_IP_v6;
rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
- rx_ipv6_property->attrib.meta_data = wwan_ptr->ch_id;
+ rx_ipv6_property->attrib.meta_data =
+ wwan_ptr->ch_id << WWAN_METADATA_SHFT;
rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
rx_ipv6_property->src_pipe = IPA_CLIENT_A2_EMBEDDED_PROD;
rx_properties.num_props = 2;
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 0158235..1a175c9 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -91,9 +91,6 @@
#define CCU_PRONTO_LAST_ADDR1_OFFSET 0x10
#define CCU_PRONTO_LAST_ADDR2_OFFSET 0x14
-#define MSM_PRONTO_CCPU_CTL_BASE 0xfb21d000
-#define BOOT_REMAP_OFFSET 0x04
-
#define WCNSS_CTRL_CHANNEL "WCNSS_CTRL"
#define WCNSS_MAX_FRAME_SIZE 500
#define WCNSS_VERSION_LEN 30
@@ -200,7 +197,7 @@
void __iomem *riva_ccu_base;
void __iomem *pronto_a2xb_base;
void __iomem *pronto_ccpu_base;
- void __iomem *pronto_ctl_base;
+ void __iomem *fiq_reg;
} *penv = NULL;
static ssize_t wcnss_serial_number_show(struct device *dev,
@@ -334,10 +331,6 @@
reg = readl_relaxed(reg_addr);
pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
- reg_addr = penv->pronto_ctl_base + BOOT_REMAP_OFFSET;
- reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: BOOT_REMAP_ADDR %08x\n", __func__, reg);
-
tst_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_OFFSET;
tst_ctrl_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_CTRL_OFFSET;
@@ -409,7 +402,7 @@
if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
wcnss_pronto_log_debug_regs();
wmb();
- __raw_writel(1 << 16, MSM_APCS_GCC_BASE + 0x8);
+ __raw_writel(1 << 16, penv->fiq_reg);
} else {
wcnss_riva_log_debug_regs();
wmb();
@@ -1076,6 +1069,7 @@
struct qcom_wcnss_opts *pdata;
unsigned long wcnss_phys_addr;
int size = 0;
+ struct resource *res;
int has_pronto_hw = of_property_read_bool(pdev->dev.of_node,
"qcom,has_pronto_hw");
@@ -1191,11 +1185,19 @@
pr_err("%s: ioremap wcnss physical failed\n", __func__);
goto fail_ioremap2;
}
- penv->pronto_ctl_base = ioremap(MSM_PRONTO_CCPU_CTL_BASE,
- SZ_32);
- if (!penv->pronto_ctl_base) {
+ /* for reset FIQ */
+ res = platform_get_resource_byname(penv->pdev,
+ IORESOURCE_MEM, "wcnss_fiq");
+ if (!res) {
+ dev_err(&pdev->dev, "insufficient irq mem resources\n");
+ ret = -ENOENT;
+ goto fail_ioremap3;
+ }
+ penv->fiq_reg = ioremap_nocache(res->start, resource_size(res));
+ if (!penv->fiq_reg) {
+ pr_err("wcnss: %s: ioremap_nocache() failed fiq_reg addr:%pr\n",
+ __func__, &res->start);
ret = -ENOMEM;
- pr_err("%s: ioremap wcnss physical failed\n", __func__);
goto fail_ioremap3;
}
}
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
index b7eca61..2b6ce75 100644
--- a/drivers/platform/msm/ipa/Makefile
+++ b/drivers/platform/msm/ipa/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_IPA) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
- ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o ipa_intf.o teth_bridge.o \
+ ipa_utils.o ipa_nat.o a2_service.o ipa_bridge.o ipa_intf.o teth_bridge.o \
ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
index 4b5f0a2..1b33dc0 100644
--- a/drivers/platform/msm/ipa/a2_service.c
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -205,6 +205,7 @@
smsm_change_state(SMSM_APPS_STATE,
clear_bit & SMSM_A2_POWER_CONTROL_ACK,
~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_apps_acks);
clear_bit = ~clear_bit;
}
@@ -216,10 +217,13 @@
if (a2_mux_ctx->bam_dmux_uplink_vote == vote)
IPADBG("%s: warning - duplicate power vote\n", __func__);
a2_mux_ctx->bam_dmux_uplink_vote = vote;
- if (vote)
+ if (vote) {
smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
- else
+ IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_on_reqs_out);
+ } else {
smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_off_reqs_out);
+ }
}
static inline void ul_powerdown(void)
@@ -634,12 +638,14 @@
last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
if (new_state & SMSM_A2_POWER_CONTROL) {
IPADBG("%s: MODEM PWR CTRL 1\n", __func__);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_on_reqs_in);
grab_wakelock();
(void) connect_to_bam();
queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
&a2_mux_ctx->kickoff_ul_request_resource);
} else if (!(new_state & SMSM_A2_POWER_CONTROL)) {
IPADBG("%s: MODEM PWR CTRL 0\n", __func__);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_off_reqs_in);
(void) disconnect_to_bam();
release_wakelock();
} else {
@@ -653,6 +659,7 @@
{
IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
new_state);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_modem_acks);
complete_all(&a2_mux_ctx->ul_wakeup_ack_completion);
}
@@ -1492,6 +1499,7 @@
a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
a2_props.num_pipes = A2_NUM_PIPES;
a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
+ a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
/* need to free on tear down */
rc = sps_register_bam_device(&a2_props, &h);
if (rc < 0) {
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index db7f7f0..466e694 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -41,6 +41,7 @@
#define IPA_DMA_POOL_SIZE (512)
#define IPA_DMA_POOL_ALIGNMENT (4)
#define IPA_DMA_POOL_BOUNDARY (1024)
+#define IPA_NUM_DESC_PER_SW_TX (2)
#define IPA_ROUTING_RULE_BYTE_SIZE (4)
#define IPA_BAM_CNFG_BITS_VAL (0x7FFFE004)
@@ -49,6 +50,13 @@
#define IPA_AGGR_STR_IN_BYTES(str) \
(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+/*
+ * This equals a timer value of 162.56us. The value was
+ * determined empirically and gives good bi-directional
+ * WLAN throughput.
+ */
+#define IPA_HOLB_TMR_DEFAULT_VAL 0x7f
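+/*
+ * Worked numbers (assuming the 162.56us figure above is exact for 0x7f):
+ * 0x7f = 127 timer units, i.e. about 1.28us per unit (162.56/127), so the
+ * register value scales the head-of-line-blocking timeout in ~1.28us steps.
+ */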
+
static struct ipa_plat_drv_res ipa_res = {0, };
static struct of_device_id ipa_plat_drv_match[] = {
{
@@ -72,6 +80,18 @@
.ab = 0,
.ib = 0,
},
+ {
+ .src = MSM_BUS_MASTER_BAM_DMA,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+ {
+ .src = MSM_BUS_MASTER_BAM_DMA,
+ .dst = MSM_BUS_SLAVE_OCIMEM,
+ .ab = 0,
+ .ib = 0,
+ },
};
static struct msm_bus_vectors ipa_max_perf_vectors[] = {
@@ -81,6 +101,18 @@
.ab = 50000000,
.ib = 960000000,
},
+ {
+ .src = MSM_BUS_MASTER_BAM_DMA,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 50000000,
+ .ib = 960000000,
+ },
+ {
+ .src = MSM_BUS_MASTER_BAM_DMA,
+ .dst = MSM_BUS_SLAVE_OCIMEM,
+ .ab = 50000000,
+ .ib = 960000000,
+ },
};
static struct msm_bus_paths ipa_usecases[] = {
@@ -900,7 +932,7 @@
/* LAN-WAN OUT (A5->IPA) */
memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
sys_in.client = IPA_CLIENT_A5_LAN_WAN_PROD;
- sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
@@ -1122,7 +1154,7 @@
u32 producer_hdl = 0;
u32 consumer_hdl = 0;
- rmnet_bridge_get_client_handles(&producer_hdl, &consumer_hdl);
+ teth_bridge_get_client_handles(&producer_hdl, &consumer_hdl);
/* configure aggregation on producer */
memset(&agg_params, 0, sizeof(struct ipa_ep_cfg_aggr));
@@ -1459,6 +1491,41 @@
WARN_ON(1);
}
+/**
+* ipa_inc_client_enable_clks() - Increase active clients counter, and
+* enable ipa clocks if necessary
+*
+* Return codes:
+* None
+*/
+void ipa_inc_client_enable_clks(void)
+{
+ mutex_lock(&ipa_ctx->ipa_active_clients_lock);
+ ipa_ctx->ipa_active_clients++;
+ if (ipa_ctx->ipa_active_clients == 1)
+ if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
+ ipa_enable_clks();
+ mutex_unlock(&ipa_ctx->ipa_active_clients_lock);
+}
+
+/**
+* ipa_dec_client_disable_clks() - Decrease active clients counter, and
+* disable ipa clocks if necessary
+*
+* Return codes:
+* None
+*/
+void ipa_dec_client_disable_clks(void)
+{
+ mutex_lock(&ipa_ctx->ipa_active_clients_lock);
+ ipa_ctx->ipa_active_clients--;
+ if (ipa_ctx->ipa_active_clients == 0)
+ if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
+ ipa_disable_clks();
+ mutex_unlock(&ipa_ctx->ipa_active_clients_lock);
+}
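+
+/*
+ * Usage sketch for the two helpers above - callers bracket any IPA HW
+ * access so the clock reference count stays balanced (this is the pattern
+ * used by ipa_send_cmd(), ipa_bridge_setup() and the RX worker below):
+ *
+ *	ipa_inc_client_enable_clks();
+ *	... touch IPA registers / queue descriptors ...
+ *	ipa_dec_client_disable_clks();
+ */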
+
+
static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
{
void *bam_cnfg_bits;
@@ -1594,6 +1661,8 @@
result = -ENOMEM;
goto fail_mem;
}
+ ipa_ctx->hol_en = 0x1;
+ ipa_ctx->hol_timer = IPA_HOLB_TMR_DEFAULT_VAL;
IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
ipa_ctx->polling_mode = polling_mode;
@@ -1676,6 +1745,7 @@
bam_props.num_pipes = IPA_NUM_PIPES;
bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+ bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
if (result) {
@@ -1761,15 +1831,20 @@
* This is an issue with IPA HW v1.0 only.
*/
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
- ipa_ctx->one_kb_no_straddle_pool = dma_pool_create("ipa_1k",
+ ipa_ctx->dma_pool = dma_pool_create("ipa_1k",
NULL,
IPA_DMA_POOL_SIZE, IPA_DMA_POOL_ALIGNMENT,
IPA_DMA_POOL_BOUNDARY);
- if (!ipa_ctx->one_kb_no_straddle_pool) {
- IPAERR("cannot setup 1kb alloc DMA pool.\n");
- result = -ENOMEM;
- goto fail_dma_pool;
- }
+ } else {
+ ipa_ctx->dma_pool = dma_pool_create("ipa_tx", NULL,
+ IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
+ 0, 0);
+ }
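+/*
+ * A single dma_pool now serves both cases: on IPA HW v1.0 it holds the
+ * 1KB no-straddle bounce buffers, and on later HW it backs the
+ * IPA_NUM_DESC_PER_SW_TX-entry sps_iovec arrays used for multi-descriptor
+ * SW transfers (the ipa_dp.c hunk below switches those allocations from
+ * dma_alloc_coherent to dma_pool_alloc accordingly).
+ */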
+
+ if (!ipa_ctx->dma_pool) {
+ IPAERR("cannot alloc DMA pool.\n");
+ result = -ENOMEM;
+ goto fail_dma_pool;
}
ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
@@ -1839,13 +1914,14 @@
ipa_ctx->flt_rule_hdl_tree = RB_ROOT;
ipa_ctx->tag_tree = RB_ROOT;
- atomic_set(&ipa_ctx->ipa_active_clients, 0);
+ mutex_init(&ipa_ctx->ipa_active_clients_lock);
+ ipa_ctx->ipa_active_clients = 0;
result = ipa_bridge_init();
if (result) {
IPAERR("ipa bridge init err.\n");
result = -ENODEV;
- goto fail_bridge_init;
+ goto fail_a5_pipes;
}
/* setup the A5-IPA pipes */
@@ -1966,8 +2042,6 @@
ipa_cleanup_rx();
ipa_teardown_a5_pipes();
fail_a5_pipes:
- ipa_bridge_cleanup();
-fail_bridge_init:
destroy_workqueue(ipa_ctx->tx_wq);
fail_tx_wq:
destroy_workqueue(ipa_ctx->rx_wq);
@@ -1976,7 +2050,7 @@
* DMA pool need to be released only for IPA HW v1.0 only.
*/
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
- dma_pool_destroy(ipa_ctx->one_kb_no_straddle_pool);
+ dma_pool_destroy(ipa_ctx->dma_pool);
fail_dma_pool:
kmem_cache_destroy(ipa_ctx->tree_node_cache);
fail_tree_node_cache:
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
index eeb98e9..3ff604c 100644
--- a/drivers/platform/msm/ipa/ipa_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -12,10 +12,52 @@
#include <linux/delay.h>
#include <linux/ratelimit.h>
+#include <mach/msm_smsm.h>
#include "ipa_i.h"
-#define A2_EMBEDDED_PIPE_TX 4
-#define A2_EMBEDDED_PIPE_RX 5
+/*
+ * EP0 (teth)
+ * A2_BAM(1)->(12)DMA_BAM->DMA_BAM(13)->(6)IPA_BAM->IPA_BAM(10)->USB_BAM(0)
+ * A2_BAM(0)<-(15)DMA_BAM<-DMA_BAM(14)<-(7)IPA_BAM<-IPA_BAM(11)<-USB_BAM(1)
+ *
+ * EP2 (emb)
+ * A2_BAM(5)->(16)DMA_BAM->DMA_BAM(17)->(8)IPA_BAM->
+ * A2_BAM(4)<-(19)DMA_BAM<-DMA_BAM(18)<-(9)IPA_BAM<-
+ */
+
+#define A2_TETHERED_PIPE_UL 0
+#define DMA_A2_TETHERED_PIPE_UL 15
+#define DMA_IPA_TETHERED_PIPE_UL 14
+#define A2_TETHERED_PIPE_DL 1
+#define DMA_A2_TETHERED_PIPE_DL 12
+#define DMA_IPA_TETHERED_PIPE_DL 13
+
+#define A2_EMBEDDED_PIPE_UL 4
+#define DMA_A2_EMBEDDED_PIPE_UL 19
+#define DMA_IPA_EMBEDDED_PIPE_UL 18
+#define A2_EMBEDDED_PIPE_DL 5
+#define DMA_A2_EMBEDDED_PIPE_DL 16
+#define DMA_IPA_EMBEDDED_PIPE_DL 17
+
+#define IPA_SMEM_PIPE_MEM_SZ 32768
+
+#define IPA_UL_DATA_FIFO_SZ 0xc00
+#define IPA_UL_DESC_FIFO_SZ 0x530
+#define IPA_DL_DATA_FIFO_SZ 0x2400
+#define IPA_DL_DESC_FIFO_SZ 0x8a0
+
+#define IPA_SMEM_UL_DATA_FIFO_OFST 0x3dd0
+#define IPA_SMEM_UL_DESC_FIFO_OFST 0x49d0
+#define IPA_SMEM_DL_DATA_FIFO_OFST 0x4f00
+#define IPA_SMEM_DL_DESC_FIFO_OFST 0x7300
+
+#define IPA_OCIMEM_UL_DATA_FIFO_OFST 0
+#define IPA_OCIMEM_UL_DESC_FIFO_OFST (IPA_OCIMEM_UL_DATA_FIFO_OFST + \
+ IPA_UL_DATA_FIFO_SZ)
+#define IPA_OCIMEM_DL_DATA_FIFO_OFST (IPA_OCIMEM_UL_DESC_FIFO_OFST + \
+ IPA_UL_DESC_FIFO_SZ)
+#define IPA_OCIMEM_DL_DESC_FIFO_OFST (IPA_OCIMEM_DL_DATA_FIFO_OFST + \
+ IPA_DL_DATA_FIFO_SZ)
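+/*
+ * The OCIMEM FIFOs are packed back to back; with the sizes above this
+ * yields offsets 0x0 (UL data), 0xc00 (UL desc), 0x1130 (DL data) and
+ * 0x3530 (DL desc), for a total of 0x3dd0 bytes.
+ */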
enum ipa_pipe_type {
IPA_DL_FROM_A2,
@@ -25,678 +67,383 @@
IPA_PIPE_TYPE_MAX
};
-static int polling_min_sleep[IPA_BRIDGE_DIR_MAX] = { 950, 950 };
-static int polling_max_sleep[IPA_BRIDGE_DIR_MAX] = { 1050, 1050 };
-static int polling_inactivity[IPA_BRIDGE_DIR_MAX] = { 4, 4 };
-
-struct ipa_pkt_info {
- void *buffer;
- dma_addr_t dma_address;
- uint32_t len;
- struct list_head link;
-};
-
struct ipa_bridge_pipe_context {
- struct list_head head_desc_list;
struct sps_pipe *pipe;
- struct sps_connect connection;
- struct sps_mem_buffer desc_mem_buf;
- struct sps_register_event register_event;
- struct list_head free_desc_list;
+ bool ipa_facing;
bool valid;
};
struct ipa_bridge_context {
struct ipa_bridge_pipe_context pipe[IPA_PIPE_TYPE_MAX];
- struct workqueue_struct *ul_wq;
- struct workqueue_struct *dl_wq;
- struct work_struct ul_work;
- struct work_struct dl_work;
enum ipa_bridge_type type;
};
static struct ipa_bridge_context bridge[IPA_BRIDGE_TYPE_MAX];
-static void ipa_do_bridge_work(enum ipa_bridge_dir dir,
- struct ipa_bridge_context *ctx);
-
-static void ul_work_func(struct work_struct *work)
+static void ipa_get_dma_pipe_num(enum ipa_bridge_dir dir,
+ enum ipa_bridge_type type, int *a2, int *ipa)
{
- struct ipa_bridge_context *ctx = container_of(work,
- struct ipa_bridge_context, ul_work);
- ipa_do_bridge_work(IPA_BRIDGE_DIR_UL, ctx);
+ if (type == IPA_BRIDGE_TYPE_TETHERED) {
+ if (dir == IPA_BRIDGE_DIR_UL) {
+ *a2 = DMA_A2_TETHERED_PIPE_UL;
+ *ipa = DMA_IPA_TETHERED_PIPE_UL;
+ } else {
+ *a2 = DMA_A2_TETHERED_PIPE_DL;
+ *ipa = DMA_IPA_TETHERED_PIPE_DL;
+ }
+ } else {
+ if (dir == IPA_BRIDGE_DIR_UL) {
+ *a2 = DMA_A2_EMBEDDED_PIPE_UL;
+ *ipa = DMA_IPA_EMBEDDED_PIPE_UL;
+ } else {
+ *a2 = DMA_A2_EMBEDDED_PIPE_DL;
+ *ipa = DMA_IPA_EMBEDDED_PIPE_DL;
+ }
+ }
}
-static void dl_work_func(struct work_struct *work)
+static int ipa_get_desc_fifo_sz(enum ipa_bridge_dir dir,
+ enum ipa_bridge_type type)
{
- struct ipa_bridge_context *ctx = container_of(work,
- struct ipa_bridge_context, dl_work);
- ipa_do_bridge_work(IPA_BRIDGE_DIR_DL, ctx);
+ int sz;
+
+ if (type == IPA_BRIDGE_TYPE_TETHERED) {
+ if (dir == IPA_BRIDGE_DIR_UL)
+ sz = IPA_UL_DESC_FIFO_SZ;
+ else
+ sz = IPA_DL_DESC_FIFO_SZ;
+ } else {
+ if (dir == IPA_BRIDGE_DIR_UL)
+ sz = IPA_UL_DESC_FIFO_SZ;
+ else
+ sz = IPA_DL_DESC_FIFO_SZ;
+ }
+
+ return sz;
}
-static int ipa_switch_to_intr_mode(enum ipa_bridge_dir dir,
- struct ipa_bridge_context *ctx)
+static int ipa_get_data_fifo_sz(enum ipa_bridge_dir dir,
+ enum ipa_bridge_type type)
+{
+ int sz;
+
+ if (type == IPA_BRIDGE_TYPE_TETHERED) {
+ if (dir == IPA_BRIDGE_DIR_UL)
+ sz = IPA_UL_DATA_FIFO_SZ;
+ else
+ sz = IPA_DL_DATA_FIFO_SZ;
+ } else {
+ if (dir == IPA_BRIDGE_DIR_UL)
+ sz = IPA_UL_DATA_FIFO_SZ;
+ else
+ sz = IPA_DL_DATA_FIFO_SZ;
+ }
+
+ return sz;
+}
+
+static int ipa_get_a2_pipe_num(enum ipa_bridge_dir dir,
+ enum ipa_bridge_type type)
+{
+ int ep;
+
+ if (type == IPA_BRIDGE_TYPE_TETHERED) {
+ if (dir == IPA_BRIDGE_DIR_UL)
+ ep = A2_TETHERED_PIPE_UL;
+ else
+ ep = A2_TETHERED_PIPE_DL;
+ } else {
+ if (dir == IPA_BRIDGE_DIR_UL)
+ ep = A2_EMBEDDED_PIPE_UL;
+ else
+ ep = A2_EMBEDDED_PIPE_DL;
+ }
+
+ return ep;
+}
+
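+/*
+ * FIFO placement for the A2 side of the bridge: the embedded bridge uses
+ * the fixed SMEM pipe-memory offsets defined above (translated with
+ * smem_virt_to_phys), while the tethered bridge carves its FIFOs out of
+ * OCIMEM via sps_setup_bam2bam_fifo().
+ */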
+int ipa_setup_a2_dma_fifos(enum ipa_bridge_dir dir,
+ enum ipa_bridge_type type,
+ struct sps_mem_buffer *desc,
+ struct sps_mem_buffer *data)
{
int ret;
- struct ipa_bridge_pipe_context *sys = &ctx->pipe[2 * dir];
- ret = sps_get_config(sys->pipe, &sys->connection);
- if (ret) {
- IPAERR("sps_get_config() failed %d type=%d dir=%d\n",
- ret, ctx->type, dir);
- goto fail;
- }
- sys->register_event.options = SPS_O_EOT;
- ret = sps_register_event(sys->pipe, &sys->register_event);
- if (ret) {
- IPAERR("sps_register_event() failed %d type=%d dir=%d\n",
- ret, ctx->type, dir);
- goto fail;
- }
- sys->connection.options =
- SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
- ret = sps_set_config(sys->pipe, &sys->connection);
- if (ret) {
- IPAERR("sps_set_config() failed %d type=%d dir=%d\n",
- ret, ctx->type, dir);
- goto fail;
- }
- ret = 0;
-fail:
- return ret;
-}
+ if (type == IPA_BRIDGE_TYPE_EMBEDDED) {
+ if (dir == IPA_BRIDGE_DIR_UL) {
+ desc->base = ipa_ctx->smem_pipe_mem +
+ IPA_SMEM_UL_DESC_FIFO_OFST;
+ desc->phys_base = smem_virt_to_phys(desc->base);
+ desc->size = ipa_get_desc_fifo_sz(dir, type);
+ data->base = ipa_ctx->smem_pipe_mem +
+ IPA_SMEM_UL_DATA_FIFO_OFST;
+ data->phys_base = smem_virt_to_phys(data->base);
+ data->size = ipa_get_data_fifo_sz(dir, type);
+ } else {
+ desc->base = ipa_ctx->smem_pipe_mem +
+ IPA_SMEM_DL_DESC_FIFO_OFST;
+ desc->phys_base = smem_virt_to_phys(desc->base);
+ desc->size = ipa_get_desc_fifo_sz(dir, type);
+ data->base = ipa_ctx->smem_pipe_mem +
+ IPA_SMEM_DL_DATA_FIFO_OFST;
+ data->phys_base = smem_virt_to_phys(data->base);
+ data->size = ipa_get_data_fifo_sz(dir, type);
+ }
+ } else {
+ if (dir == IPA_BRIDGE_DIR_UL) {
+ ret = sps_setup_bam2bam_fifo(data,
+ IPA_OCIMEM_UL_DATA_FIFO_OFST,
+ ipa_get_data_fifo_sz(dir, type), 1);
+ if (ret) {
+ IPAERR("DAFIFO setup fail %d dir %d type %d\n",
+ ret, dir, type);
+ return ret;
+ }
-static int ipa_switch_to_poll_mode(enum ipa_bridge_dir dir,
- enum ipa_bridge_type type)
-{
- int ret;
- struct ipa_bridge_pipe_context *sys = &bridge[type].pipe[2 * dir];
+ ret = sps_setup_bam2bam_fifo(desc,
+ IPA_OCIMEM_UL_DESC_FIFO_OFST,
+ ipa_get_desc_fifo_sz(dir, type), 1);
+ if (ret) {
+ IPAERR("DEFIFO setup fail %d dir %d type %d\n",
+ ret, dir, type);
+ return ret;
+ }
+ } else {
+ ret = sps_setup_bam2bam_fifo(data,
+ IPA_OCIMEM_DL_DATA_FIFO_OFST,
+ ipa_get_data_fifo_sz(dir, type), 1);
+ if (ret) {
+ IPAERR("DAFIFO setup fail %d dir %d type %d\n",
+ ret, dir, type);
+ return ret;
+ }
- ret = sps_get_config(sys->pipe, &sys->connection);
- if (ret) {
- IPAERR("sps_get_config() failed %d type=%d dir=%d\n",
- ret, type, dir);
- goto fail;
- }
- sys->connection.options =
- SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- ret = sps_set_config(sys->pipe, &sys->connection);
- if (ret) {
- IPAERR("sps_set_config() failed %d type=%d dir=%d\n",
- ret, type, dir);
- goto fail;
- }
- ret = 0;
-fail:
- return ret;
-}
-
-static int queue_rx_single(enum ipa_bridge_dir dir, enum ipa_bridge_type type)
-{
- struct ipa_bridge_pipe_context *sys_rx = &bridge[type].pipe[2 * dir];
- struct ipa_pkt_info *info;
- int ret;
-
- info = kmalloc(sizeof(struct ipa_pkt_info), GFP_KERNEL);
- if (!info) {
- IPAERR("unable to alloc rx_pkt_info type=%d dir=%d\n",
- type, dir);
- goto fail_pkt;
+ ret = sps_setup_bam2bam_fifo(desc,
+ IPA_OCIMEM_DL_DESC_FIFO_OFST,
+ ipa_get_desc_fifo_sz(dir, type), 1);
+ if (ret) {
+ IPAERR("DEFIFO setup fail %d dir %d type %d\n",
+ ret, dir, type);
+ return ret;
+ }
+ }
}
- info->buffer = kmalloc(IPA_RX_SKB_SIZE, GFP_KERNEL | GFP_DMA);
- if (!info->buffer) {
- IPAERR("unable to alloc rx_pkt_buffer type=%d dir=%d\n",
- type, dir);
- goto fail_buffer;
- }
+ IPADBG("dir=%d type=%d Dpa=%x Dsz=%u Dva=%p dpa=%x dsz=%u dva=%p\n",
+ dir, type, data->phys_base, data->size, data->base,
+ desc->phys_base, desc->size, desc->base);
- info->dma_address = dma_map_single(NULL, info->buffer, IPA_RX_SKB_SIZE,
- DMA_BIDIRECTIONAL);
- if (info->dma_address == 0 || info->dma_address == ~0) {
- IPAERR("dma_map_single failure %p for %p type=%d dir=%d\n",
- (void *)info->dma_address, info->buffer,
- type, dir);
- goto fail_dma;
- }
-
- list_add_tail(&info->link, &sys_rx->head_desc_list);
- ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
- IPA_RX_SKB_SIZE, info,
- SPS_IOVEC_FLAG_INT);
- if (ret) {
- list_del(&info->link);
- dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
- DMA_BIDIRECTIONAL);
- IPAERR("sps_transfer_one failed %d type=%d dir=%d\n", ret,
- type, dir);
- goto fail_dma;
- }
return 0;
-
-fail_dma:
- kfree(info->buffer);
-fail_buffer:
- kfree(info);
-fail_pkt:
- IPAERR("failed type=%d dir=%d\n", type, dir);
- return -ENOMEM;
}
-static int ipa_reclaim_tx(struct ipa_bridge_pipe_context *sys_tx, bool all)
-{
- struct sps_iovec iov;
- struct ipa_pkt_info *tx_pkt;
- int cnt = 0;
- int ret;
-
- do {
- iov.addr = 0;
- ret = sps_get_iovec(sys_tx->pipe, &iov);
- if (ret || iov.addr == 0) {
- break;
- } else {
- tx_pkt = list_first_entry(&sys_tx->head_desc_list,
- struct ipa_pkt_info,
- link);
- list_move_tail(&tx_pkt->link,
- &sys_tx->free_desc_list);
- cnt++;
- }
- } while (all);
-
- return cnt;
-}
-
-static void ipa_do_bridge_work(enum ipa_bridge_dir dir,
- struct ipa_bridge_context *ctx)
-{
- struct ipa_bridge_pipe_context *sys_rx = &ctx->pipe[2 * dir];
- struct ipa_bridge_pipe_context *sys_tx = &ctx->pipe[2 * dir + 1];
- struct ipa_pkt_info *tx_pkt;
- struct ipa_pkt_info *rx_pkt;
- struct ipa_pkt_info *tmp_pkt;
- struct sps_iovec iov;
- int ret;
- int inactive_cycles = 0;
-
- while (1) {
- ++inactive_cycles;
-
- if (ipa_reclaim_tx(sys_tx, false))
- inactive_cycles = 0;
-
- iov.addr = 0;
- ret = sps_get_iovec(sys_rx->pipe, &iov);
- if (ret || iov.addr == 0) {
- /* no-op */
- } else {
- inactive_cycles = 0;
-
- rx_pkt = list_first_entry(&sys_rx->head_desc_list,
- struct ipa_pkt_info,
- link);
- list_del(&rx_pkt->link);
- rx_pkt->len = iov.size;
-
-retry_alloc_tx:
- if (list_empty(&sys_tx->free_desc_list)) {
- tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
- GFP_KERNEL);
- if (!tmp_pkt) {
- pr_debug_ratelimited("%s: unable to alloc tx_pkt_info type=%d dir=%d\n",
- __func__, ctx->type, dir);
- usleep_range(polling_min_sleep[dir],
- polling_max_sleep[dir]);
- goto retry_alloc_tx;
- }
-
- tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
- GFP_KERNEL | GFP_DMA);
- if (!tmp_pkt->buffer) {
- pr_debug_ratelimited("%s: unable to alloc tx_pkt_buffer type=%d dir=%d\n",
- __func__, ctx->type, dir);
- kfree(tmp_pkt);
- usleep_range(polling_min_sleep[dir],
- polling_max_sleep[dir]);
- goto retry_alloc_tx;
- }
-
- tmp_pkt->dma_address = dma_map_single(NULL,
- tmp_pkt->buffer,
- IPA_RX_SKB_SIZE,
- DMA_BIDIRECTIONAL);
- if (tmp_pkt->dma_address == 0 ||
- tmp_pkt->dma_address == ~0) {
- pr_debug_ratelimited("%s: dma_map_single failure %p for %p type=%d dir=%d\n",
- __func__,
- (void *)tmp_pkt->dma_address,
- tmp_pkt->buffer, ctx->type, dir);
- }
-
- list_add_tail(&tmp_pkt->link,
- &sys_tx->free_desc_list);
- }
-
- tx_pkt = list_first_entry(&sys_tx->free_desc_list,
- struct ipa_pkt_info,
- link);
- list_del(&tx_pkt->link);
-
-retry_add_rx:
- list_add_tail(&tx_pkt->link,
- &sys_rx->head_desc_list);
- ret = sps_transfer_one(sys_rx->pipe,
- tx_pkt->dma_address,
- IPA_RX_SKB_SIZE,
- tx_pkt,
- SPS_IOVEC_FLAG_INT);
- if (ret) {
- list_del(&tx_pkt->link);
- pr_debug_ratelimited("%s: sps_transfer_one failed %d type=%d dir=%d\n",
- __func__, ret, ctx->type, dir);
- usleep_range(polling_min_sleep[dir],
- polling_max_sleep[dir]);
- goto retry_add_rx;
- }
-
-retry_add_tx:
- list_add_tail(&rx_pkt->link,
- &sys_tx->head_desc_list);
- ret = sps_transfer_one(sys_tx->pipe,
- rx_pkt->dma_address,
- iov.size,
- rx_pkt,
- SPS_IOVEC_FLAG_INT |
- SPS_IOVEC_FLAG_EOT);
- if (ret) {
- pr_debug_ratelimited("%s: fail to add to TX type=%d dir=%d\n",
- __func__, ctx->type, dir);
- list_del(&rx_pkt->link);
- ipa_reclaim_tx(sys_tx, true);
- usleep_range(polling_min_sleep[dir],
- polling_max_sleep[dir]);
- goto retry_add_tx;
- }
- IPA_STATS_INC_BRIDGE_CNT(ctx->type, dir,
- ipa_ctx->stats.bridged_pkts);
- }
-
- if (inactive_cycles >= polling_inactivity[dir]) {
- ipa_switch_to_intr_mode(dir, ctx);
- break;
- }
- }
-}
-
-static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
-{
- enum ipa_bridge_type type = (enum ipa_bridge_type) notify->user;
-
- switch (notify->event_id) {
- case SPS_EVENT_EOT:
- ipa_switch_to_poll_mode(IPA_BRIDGE_DIR_UL, type);
- queue_work(bridge[type].ul_wq, &bridge[type].ul_work);
- break;
- default:
- IPAERR("recieved unexpected event id %d type %d\n",
- notify->event_id, type);
- }
-}
-
-static int setup_bridge_to_ipa(enum ipa_bridge_dir dir,
+static int setup_dma_bam_bridge(enum ipa_bridge_dir dir,
enum ipa_bridge_type type,
struct ipa_sys_connect_params *props,
u32 *clnt_hdl)
{
- struct ipa_bridge_pipe_context *sys;
- dma_addr_t dma_addr;
- enum ipa_pipe_type pipe_type;
- int ipa_ep_idx;
- int ret;
- int i;
-
- ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, props->client);
- if (ipa_ep_idx == -1) {
- IPAERR("Invalid client=%d mode=%d type=%d dir=%d\n",
- props->client, ipa_ctx->mode, type, dir);
- ret = -EINVAL;
- goto alloc_endpoint_failed;
- }
-
- if (ipa_ctx->ep[ipa_ep_idx].valid) {
- IPAERR("EP %d already allocated type=%d dir=%d\n", ipa_ep_idx,
- type, dir);
- ret = -EINVAL;
- goto alloc_endpoint_failed;
- }
-
- pipe_type = (dir == IPA_BRIDGE_DIR_DL) ? IPA_DL_TO_IPA :
- IPA_UL_FROM_IPA;
-
- sys = &bridge[type].pipe[pipe_type];
- sys->pipe = sps_alloc_endpoint();
- if (sys->pipe == NULL) {
- IPAERR("alloc endpoint failed type=%d dir=%d\n", type, dir);
- ret = -ENOMEM;
- goto alloc_endpoint_failed;
- }
- ret = sps_get_config(sys->pipe, &sys->connection);
- if (ret) {
- IPAERR("get config failed %d type=%d dir=%d\n", ret, type, dir);
- ret = -EINVAL;
- goto get_config_failed;
- }
-
- if (dir == IPA_BRIDGE_DIR_DL) {
- sys->connection.source = SPS_DEV_HANDLE_MEM;
- sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
- sys->connection.destination = ipa_ctx->bam_handle;
- sys->connection.dest_pipe_index = ipa_ep_idx;
- sys->connection.mode = SPS_MODE_DEST;
- sys->connection.options =
- SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- } else {
- sys->connection.source = ipa_ctx->bam_handle;
- sys->connection.src_pipe_index = ipa_ep_idx;
- sys->connection.destination = SPS_DEV_HANDLE_MEM;
- sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
- sys->connection.mode = SPS_MODE_SRC;
- sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
- SPS_O_ACK_TRANSFERS;
- }
-
- sys->desc_mem_buf.size = props->desc_fifo_sz;
- sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
- sys->desc_mem_buf.size,
- &dma_addr,
- 0);
- if (sys->desc_mem_buf.base == NULL) {
- IPAERR("memory alloc failed type=%d dir=%d\n", type, dir);
- ret = -ENOMEM;
- goto get_config_failed;
- }
- sys->desc_mem_buf.phys_base = dma_addr;
- memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
- sys->connection.desc = sys->desc_mem_buf;
- sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
-
- ret = sps_connect(sys->pipe, &sys->connection);
- if (ret < 0) {
- IPAERR("connect error %d type=%d dir=%d\n", ret, type, dir);
- goto connect_failed;
- }
-
- INIT_LIST_HEAD(&sys->head_desc_list);
- INIT_LIST_HEAD(&sys->free_desc_list);
-
- memset(&ipa_ctx->ep[ipa_ep_idx], 0,
- sizeof(struct ipa_ep_context));
-
- ipa_ctx->ep[ipa_ep_idx].valid = 1;
- ipa_ctx->ep[ipa_ep_idx].client_notify = props->notify;
- ipa_ctx->ep[ipa_ep_idx].priv = props->priv;
-
- ret = ipa_cfg_ep(ipa_ep_idx, &props->ipa_ep_cfg);
- if (ret < 0) {
- IPAERR("ep cfg set error %d type=%d dir=%d\n", ret, type, dir);
- ipa_ctx->ep[ipa_ep_idx].valid = 0;
- goto event_reg_failed;
- }
-
- if (dir == IPA_BRIDGE_DIR_UL) {
- sys->register_event.options = SPS_O_EOT;
- sys->register_event.mode = SPS_TRIGGER_CALLBACK;
- sys->register_event.xfer_done = NULL;
- sys->register_event.callback = ipa_sps_irq_rx_notify;
- sys->register_event.user = (void *)type;
- ret = sps_register_event(sys->pipe, &sys->register_event);
- if (ret < 0) {
- IPAERR("register event error %d type=%d dir=%d\n", ret,
- type, dir);
- goto event_reg_failed;
- }
-
- for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
- ret = queue_rx_single(dir, type);
- if (ret < 0)
- IPAERR("queue fail dir=%d type=%d iter=%d\n",
- dir, type, i);
- }
- }
-
- *clnt_hdl = ipa_ep_idx;
- sys->valid = true;
-
- return 0;
-
-event_reg_failed:
- sps_disconnect(sys->pipe);
-connect_failed:
- dma_free_coherent(NULL,
- sys->desc_mem_buf.size,
- sys->desc_mem_buf.base,
- sys->desc_mem_buf.phys_base);
-get_config_failed:
- sps_free_endpoint(sys->pipe);
-alloc_endpoint_failed:
- return ret;
-}
-
-static void bam_mux_rx_notify(struct sps_event_notify *notify)
-{
- enum ipa_bridge_type type = (enum ipa_bridge_type) notify->user;
-
- switch (notify->event_id) {
- case SPS_EVENT_EOT:
- ipa_switch_to_poll_mode(IPA_BRIDGE_DIR_DL, type);
- queue_work(bridge[type].dl_wq, &bridge[type].dl_work);
- break;
- default:
- IPAERR("recieved unexpected event id %d type %d\n",
- notify->event_id, type);
- }
-}
-
-static int setup_bridge_to_a2(enum ipa_bridge_dir dir,
- enum ipa_bridge_type type,
- u32 desc_fifo_sz)
-{
- struct ipa_bridge_pipe_context *sys;
- struct a2_mux_pipe_connection pipe_conn = { 0 };
- dma_addr_t dma_addr;
- u32 a2_handle;
+ struct ipa_connect_params ipa_in_params;
+ struct ipa_sps_params sps_out_params;
+ int dma_a2_pipe;
+ int dma_ipa_pipe;
+ struct sps_pipe *pipe;
+ struct sps_pipe *pipe_a2;
+ struct sps_connect _connection;
+ struct sps_connect *connection = &_connection;
+ struct a2_mux_pipe_connection pipe_conn = {0};
enum a2_mux_pipe_direction pipe_dir;
- enum ipa_pipe_type pipe_type;
+ u32 dma_hdl = sps_dma_get_bam_handle();
+ u32 a2_hdl;
u32 pa;
int ret;
- int i;
+
+ memset(&ipa_in_params, 0, sizeof(ipa_in_params));
+ memset(&sps_out_params, 0, sizeof(sps_out_params));
pipe_dir = (dir == IPA_BRIDGE_DIR_UL) ? IPA_TO_A2 : A2_TO_IPA;
ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_conn);
if (ret) {
- IPAERR("ipa_get_a2_mux_pipe_info failed type=%d dir=%d\n",
- type, dir);
- ret = -EINVAL;
- goto alloc_endpoint_failed;
+ IPAERR("ipa_get_a2_mux_pipe_info failed dir=%d type=%d\n",
+ dir, type);
+ goto fail_get_a2_prop;
}
pa = (dir == IPA_BRIDGE_DIR_UL) ? pipe_conn.dst_phy_addr :
pipe_conn.src_phy_addr;
- ret = sps_phy2h(pa, &a2_handle);
+ ret = sps_phy2h(pa, &a2_hdl);
if (ret) {
- IPAERR("sps_phy2h failed (A2 BAM) %d type=%d dir=%d\n",
- ret, type, dir);
- ret = -EINVAL;
- goto alloc_endpoint_failed;
+ IPAERR("sps_phy2h failed (A2 BAM) %d dir=%d type=%d\n",
+ ret, dir, type);
+ goto fail_get_a2_prop;
}
- pipe_type = (dir == IPA_BRIDGE_DIR_UL) ? IPA_UL_TO_A2 : IPA_DL_FROM_A2;
+ ipa_get_dma_pipe_num(dir, type, &dma_a2_pipe, &dma_ipa_pipe);
- sys = &bridge[type].pipe[pipe_type];
- sys->pipe = sps_alloc_endpoint();
- if (sys->pipe == NULL) {
- IPAERR("alloc endpoint failed type=%d dir=%d\n", type, dir);
+ ipa_in_params.ipa_ep_cfg = props->ipa_ep_cfg;
+ ipa_in_params.client = props->client;
+ ipa_in_params.client_bam_hdl = dma_hdl;
+ ipa_in_params.client_ep_idx = dma_ipa_pipe;
+ ipa_in_params.priv = props->priv;
+ ipa_in_params.notify = props->notify;
+ ipa_in_params.desc_fifo_sz = ipa_get_desc_fifo_sz(dir, type);
+ ipa_in_params.data_fifo_sz = ipa_get_data_fifo_sz(dir, type);
+
+ if (ipa_connect(&ipa_in_params, &sps_out_params, clnt_hdl)) {
+ IPAERR("ipa connect failed dir=%d type=%d\n", dir, type);
+ ret = -EINVAL;
+ goto fail_get_a2_prop;
+ }
+
+ pipe = sps_alloc_endpoint();
+ if (pipe == NULL) {
+ IPAERR("sps_alloc_endpoint failed dir=%d type=%d\n", dir, type);
ret = -ENOMEM;
- goto alloc_endpoint_failed;
+ goto fail_sps_alloc;
}
- ret = sps_get_config(sys->pipe, &sys->connection);
+
+ memset(&_connection, 0, sizeof(_connection));
+ ret = sps_get_config(pipe, connection);
if (ret) {
- IPAERR("get config failed %d type=%d dir=%d\n", ret, type, dir);
- ret = -EINVAL;
- goto get_config_failed;
+ IPAERR("sps_get_config failed %d dir=%d type=%d\n", ret, dir,
+ type);
+ goto fail_sps_get_config;
}
- if (dir == IPA_BRIDGE_DIR_UL) {
- sys->connection.source = SPS_DEV_HANDLE_MEM;
- sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
- sys->connection.destination = a2_handle;
- if (type == IPA_BRIDGE_TYPE_TETHERED)
- sys->connection.dest_pipe_index =
- pipe_conn.dst_pipe_index;
- else
- sys->connection.dest_pipe_index = A2_EMBEDDED_PIPE_TX;
- sys->connection.mode = SPS_MODE_DEST;
- sys->connection.options =
- SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- } else {
- sys->connection.source = a2_handle;
- if (type == IPA_BRIDGE_TYPE_TETHERED)
- sys->connection.src_pipe_index =
- pipe_conn.src_pipe_index;
- else
- sys->connection.src_pipe_index = A2_EMBEDDED_PIPE_RX;
- sys->connection.destination = SPS_DEV_HANDLE_MEM;
- sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
- sys->connection.mode = SPS_MODE_SRC;
- sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
- SPS_O_ACK_TRANSFERS;
- }
-
- sys->desc_mem_buf.size = desc_fifo_sz;
- sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
- sys->desc_mem_buf.size,
- &dma_addr,
- 0);
- if (sys->desc_mem_buf.base == NULL) {
- IPAERR("memory alloc failed type=%d dir=%d\n", type, dir);
- ret = -ENOMEM;
- goto get_config_failed;
- }
- sys->desc_mem_buf.phys_base = dma_addr;
- memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
- sys->connection.desc = sys->desc_mem_buf;
- sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
-
- ret = sps_connect(sys->pipe, &sys->connection);
- if (ret < 0) {
- IPAERR("connect error %d type=%d dir=%d\n", ret, type, dir);
- ret = -EINVAL;
- goto connect_failed;
- }
-
- INIT_LIST_HEAD(&sys->head_desc_list);
- INIT_LIST_HEAD(&sys->free_desc_list);
-
if (dir == IPA_BRIDGE_DIR_DL) {
- sys->register_event.options = SPS_O_EOT;
- sys->register_event.mode = SPS_TRIGGER_CALLBACK;
- sys->register_event.xfer_done = NULL;
- sys->register_event.callback = bam_mux_rx_notify;
- sys->register_event.user = (void *)type;
- ret = sps_register_event(sys->pipe, &sys->register_event);
- if (ret < 0) {
- IPAERR("register event error %d type=%d dir=%d\n",
- ret, type, dir);
- ret = -EINVAL;
- goto event_reg_failed;
- }
-
- for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
- ret = queue_rx_single(dir, type);
- if (ret < 0)
- IPAERR("queue fail dir=%d type=%d iter=%d\n",
- dir, type, i);
- }
+ connection->mode = SPS_MODE_SRC;
+ connection->source = dma_hdl;
+ connection->destination = sps_out_params.ipa_bam_hdl;
+ connection->src_pipe_index = dma_ipa_pipe;
+ connection->dest_pipe_index = sps_out_params.ipa_ep_idx;
+ } else {
+ connection->mode = SPS_MODE_DEST;
+ connection->source = sps_out_params.ipa_bam_hdl;
+ connection->destination = dma_hdl;
+ connection->src_pipe_index = sps_out_params.ipa_ep_idx;
+ connection->dest_pipe_index = dma_ipa_pipe;
}
- sys->valid = true;
+ connection->event_thresh = IPA_EVENT_THRESHOLD;
+ connection->data = sps_out_params.data;
+ connection->desc = sps_out_params.desc;
+ connection->options = SPS_O_AUTO_ENABLE;
+
+ ret = sps_connect(pipe, connection);
+ if (ret) {
+ IPAERR("sps_connect failed %d dir=%d type=%d\n", ret, dir,
+ type);
+ goto fail_sps_get_config;
+ }
+
+ if (dir == IPA_BRIDGE_DIR_DL) {
+ bridge[type].pipe[IPA_DL_TO_IPA].pipe = pipe;
+ bridge[type].pipe[IPA_DL_TO_IPA].ipa_facing = true;
+ bridge[type].pipe[IPA_DL_TO_IPA].valid = true;
+ } else {
+ bridge[type].pipe[IPA_UL_FROM_IPA].pipe = pipe;
+ bridge[type].pipe[IPA_UL_FROM_IPA].ipa_facing = true;
+ bridge[type].pipe[IPA_UL_FROM_IPA].valid = true;
+ }
+
+ IPADBG("dir=%d type=%d (ipa) src(0x%x:%u)->dst(0x%x:%u)\n", dir, type,
+ connection->source, connection->src_pipe_index,
+ connection->destination, connection->dest_pipe_index);
+
+ pipe_a2 = sps_alloc_endpoint();
+ if (pipe_a2 == NULL) {
+ IPAERR("sps_alloc_endpoint failed2 dir=%d type=%d\n", dir,
+ type);
+ ret = -ENOMEM;
+ goto fail_sps_alloc_a2;
+ }
+
+ memset(&_connection, 0, sizeof(_connection));
+ ret = sps_get_config(pipe_a2, connection);
+ if (ret) {
+ IPAERR("sps_get_config failed2 %d dir=%d type=%d\n", ret, dir,
+ type);
+ goto fail_sps_get_config_a2;
+ }
+
+ if (dir == IPA_BRIDGE_DIR_DL) {
+ connection->mode = SPS_MODE_DEST;
+ connection->source = a2_hdl;
+ connection->destination = dma_hdl;
+ connection->src_pipe_index = ipa_get_a2_pipe_num(dir, type);
+ connection->dest_pipe_index = dma_a2_pipe;
+ } else {
+ connection->mode = SPS_MODE_SRC;
+ connection->source = dma_hdl;
+ connection->destination = a2_hdl;
+ connection->src_pipe_index = dma_a2_pipe;
+ connection->dest_pipe_index = ipa_get_a2_pipe_num(dir, type);
+ }
+
+ connection->event_thresh = IPA_EVENT_THRESHOLD;
+
+ if (ipa_setup_a2_dma_fifos(dir, type, &connection->desc,
+ &connection->data)) {
+ IPAERR("fail to setup A2-DMA FIFOs dir=%d type=%d\n",
+ dir, type);
+ ret = -EINVAL;
+ goto fail_sps_get_config_a2;
+ }
+
+ connection->options = SPS_O_AUTO_ENABLE;
+
+ ret = sps_connect(pipe_a2, connection);
+ if (ret) {
+ IPAERR("sps_connect failed2 %d dir=%d type=%d\n", ret, dir,
+ type);
+ goto fail_sps_get_config_a2;
+ }
+
+ if (dir == IPA_BRIDGE_DIR_DL) {
+ bridge[type].pipe[IPA_DL_FROM_A2].pipe = pipe_a2;
+ bridge[type].pipe[IPA_DL_FROM_A2].valid = true;
+ } else {
+ bridge[type].pipe[IPA_UL_TO_A2].pipe = pipe_a2;
+ bridge[type].pipe[IPA_UL_TO_A2].valid = true;
+ }
+
+ IPADBG("dir=%d type=%d (a2) src(0x%x:%u)->dst(0x%x:%u)\n", dir, type,
+ connection->source, connection->src_pipe_index,
+ connection->destination, connection->dest_pipe_index);
return 0;
-event_reg_failed:
- sps_disconnect(sys->pipe);
-connect_failed:
- dma_free_coherent(NULL,
- sys->desc_mem_buf.size,
- sys->desc_mem_buf.base,
- sys->desc_mem_buf.phys_base);
-get_config_failed:
- sps_free_endpoint(sys->pipe);
-alloc_endpoint_failed:
+fail_sps_get_config_a2:
+ sps_free_endpoint(pipe_a2);
+fail_sps_alloc_a2:
+ sps_disconnect(pipe);
+fail_sps_get_config:
+ sps_free_endpoint(pipe);
+fail_sps_alloc:
+ ipa_disconnect(*clnt_hdl);
+fail_get_a2_prop:
return ret;
}
/**
- * ipa_bridge_init() - create workqueues and work items serving SW bridges
+ * ipa_bridge_init() - allocate SMEM pipe memory and initialize SW bridge contexts
*
* Return codes: 0: success, -ENOMEM: failure
*/
int ipa_bridge_init(void)
{
- int ret;
int i;
- bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq =
- create_singlethread_workqueue("ipa_ul_teth");
- if (!bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq) {
- IPAERR("ipa ul teth wq alloc failed\n");
- ret = -ENOMEM;
- goto fail_ul_teth;
+ ipa_ctx->smem_pipe_mem = smem_alloc(SMEM_BAM_PIPE_MEMORY,
+ IPA_SMEM_PIPE_MEM_SZ);
+ if (!ipa_ctx->smem_pipe_mem) {
+ IPAERR("smem alloc failed\n");
+ return -ENOMEM;
}
+ IPADBG("smem_pipe_mem = %p\n", ipa_ctx->smem_pipe_mem);
- bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq =
- create_singlethread_workqueue("ipa_dl_teth");
- if (!bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq) {
- IPAERR("ipa dl teth wq alloc failed\n");
- ret = -ENOMEM;
- goto fail_dl_teth;
- }
-
- bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq =
- create_singlethread_workqueue("ipa_ul_emb");
- if (!bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq) {
- IPAERR("ipa ul emb wq alloc failed\n");
- ret = -ENOMEM;
- goto fail_ul_emb;
- }
-
- bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq =
- create_singlethread_workqueue("ipa_dl_emb");
- if (!bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq) {
- IPAERR("ipa dl emb wq alloc failed\n");
- ret = -ENOMEM;
- goto fail_dl_emb;
- }
-
- for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++) {
- INIT_WORK(&bridge[i].ul_work, ul_work_func);
- INIT_WORK(&bridge[i].dl_work, dl_work_func);
+ for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++)
bridge[i].type = i;
- }
return 0;
-
-fail_dl_emb:
- destroy_workqueue(bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq);
-fail_ul_emb:
- destroy_workqueue(bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq);
-fail_dl_teth:
- destroy_workqueue(bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq);
-fail_ul_teth:
- return ret;
}
/**
@@ -720,66 +467,29 @@
if (props == NULL || clnt_hdl == NULL ||
type >= IPA_BRIDGE_TYPE_MAX || dir >= IPA_BRIDGE_DIR_MAX ||
- props->client >= IPA_CLIENT_MAX || props->desc_fifo_sz == 0) {
+ props->client >= IPA_CLIENT_MAX) {
IPAERR("Bad param props=%p clnt_hdl=%p type=%d dir=%d\n",
props, clnt_hdl, type, dir);
return -EINVAL;
}
- if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1) {
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_enable_clks();
- }
+ ipa_inc_client_enable_clks();
- if (setup_bridge_to_ipa(dir, type, props, clnt_hdl)) {
+ if (setup_dma_bam_bridge(dir, type, props, clnt_hdl)) {
IPAERR("fail to setup SYS pipe to IPA dir=%d type=%d\n",
dir, type);
ret = -EINVAL;
goto bail_ipa;
}
- if (setup_bridge_to_a2(dir, type, props->desc_fifo_sz)) {
- IPAERR("fail to setup SYS pipe to A2 dir=%d type=%d\n",
- dir, type);
- ret = -EINVAL;
- goto bail_a2;
- }
-
-
return 0;
-bail_a2:
- ipa_bridge_teardown(dir, type, *clnt_hdl);
bail_ipa:
- if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0) {
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
- }
+ ipa_dec_client_disable_clks();
return ret;
}
EXPORT_SYMBOL(ipa_bridge_setup);
-static void ipa_bridge_free_pkt(struct ipa_pkt_info *pkt)
-{
- list_del(&pkt->link);
- dma_unmap_single(NULL, pkt->dma_address, IPA_RX_SKB_SIZE,
- DMA_BIDIRECTIONAL);
- kfree(pkt->buffer);
- kfree(pkt);
-}
-
-static void ipa_bridge_free_resources(struct ipa_bridge_pipe_context *pipe)
-{
- struct ipa_pkt_info *pkt;
- struct ipa_pkt_info *n;
-
- list_for_each_entry_safe(pkt, n, &pipe->head_desc_list, link)
- ipa_bridge_free_pkt(pkt);
-
- list_for_each_entry_safe(pkt, n, &pipe->free_desc_list, link)
- ipa_bridge_free_pkt(pkt);
-}
-
/**
* ipa_bridge_teardown() - teardown SW bridge leg
* @dir: downlink or uplink (from air interface perspective)
@@ -814,39 +524,18 @@
for (; lo <= hi; lo++) {
sys = &bridge[type].pipe[lo];
if (sys->valid) {
+ if (sys->ipa_facing)
+ ipa_disconnect(clnt_hdl);
sps_disconnect(sys->pipe);
- dma_free_coherent(NULL, sys->desc_mem_buf.size,
- sys->desc_mem_buf.base,
- sys->desc_mem_buf.phys_base);
sps_free_endpoint(sys->pipe);
- ipa_bridge_free_resources(sys);
sys->valid = false;
}
}
memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
- if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0) {
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
- }
+ ipa_dec_client_disable_clks();
return 0;
}
EXPORT_SYMBOL(ipa_bridge_teardown);
-
-/**
- * ipa_bridge_cleanup() - destroy workqueues serving the SW bridges
- *
- * Return codes:
- * None
- */
-void ipa_bridge_cleanup(void)
-{
- int i;
-
- for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++) {
- destroy_workqueue(bridge[i].dl_wq);
- destroy_workqueue(bridge[i].ul_wq);
- }
-}
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
index a4b7e22..a78879d 100644
--- a/drivers/platform/msm/ipa/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -13,8 +13,6 @@
#include <linux/delay.h>
#include "ipa_i.h"
-#define IPA_HOLB_TMR_VAL 0xff
-
static void ipa_enable_data_path(u32 clnt_hdl)
{
struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl];
@@ -33,17 +31,56 @@
static int ipa_disable_data_path(u32 clnt_hdl)
{
+ DECLARE_COMPLETION_ONSTACK(tag_rsp);
+ struct ipa_desc desc = {0};
+ struct ipa_ip_packet_tag cmd;
struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl];
+ struct ipa_tree_node *node;
+ int result = 0;
if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_VIRTUAL) {
/* IPA_HW_MODE_VIRTUAL lacks support for TAG IC & EP suspend */
return 0;
}
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ result = -ENOMEM;
+ goto fail_alloc;
+ }
+
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1 && !ep->suspended) {
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_CTRL_n_OFST(clnt_hdl), 1);
+ cmd.tag = (u32) &tag_rsp;
+
+ desc.pyld = &cmd;
+ desc.len = sizeof(struct ipa_ip_packet_tag);
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.opcode = IPA_IP_PACKET_TAG;
+
+ IPADBG("Wait on TAG %p clnt=%d\n", &tag_rsp, clnt_hdl);
+
+ node->hdl = cmd.tag;
+ mutex_lock(&ipa_ctx->lock);
+ if (ipa_insert(&ipa_ctx->tag_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ result = -EINVAL;
+ mutex_unlock(&ipa_ctx->lock);
+ goto fail_insert;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ if (ipa_send_cmd(1, &desc)) {
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_CTRL_n_OFST(clnt_hdl), 0);
+ IPAERR("fail to send TAG command\n");
+ result = -EPERM;
+ goto fail_send;
+ }
+ wait_for_completion(&tag_rsp);
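+ /*
+ * Completion flow: the TAG immediate command carries the address of
+ * tag_rsp; when the tagged status packet loops back on the A5 RX pipe,
+ * the IPA_A5_MUX_HDR_EXCP_FLAG_TAG handler in ipa_dp.c looks the tag up
+ * in tag_tree and completes it, unblocking this wait.
+ */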
if (IPA_CLIENT_IS_CONS(ep->client) &&
ep->cfg.aggr.aggr_en == IPA_ENABLE_AGGR &&
ep->cfg.aggr.aggr_time_limit)
@@ -52,6 +89,13 @@
}
return 0;
+
+fail_send:
+ rb_erase(&node->node, &ipa_ctx->tag_tree);
+fail_insert:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+fail_alloc:
+ return result;
}
static int ipa_connect_configure_sps(const struct ipa_connect_params *in,
@@ -158,9 +202,7 @@
int result = -EFAULT;
struct ipa_ep_context *ep;
- if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_enable_clks();
+ ipa_inc_client_enable_clks();
if (in == NULL || sps == NULL || clnt_hdl == NULL ||
in->client >= IPA_CLIENT_MAX ||
@@ -238,6 +280,9 @@
ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
ep->connect.options = SPS_O_AUTO_ENABLE; /* BAM-to-BAM */
+ if (IPA_CLIENT_IS_CONS(in->client))
+ ep->connect.options |= SPS_O_NO_DISABLE;
+
result = sps_connect(ep->ep_hdl, &ep->connect);
if (result) {
IPAERR("sps_connect fails.\n");
@@ -255,13 +300,13 @@
in->client == IPA_CLIENT_HSIC3_CONS ||
in->client == IPA_CLIENT_HSIC4_CONS) {
IPADBG("disable holb for ep=%d tmr=%d\n", ipa_ep_idx,
- IPA_HOLB_TMR_VAL);
+ ipa_ctx->hol_timer);
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_HOL_BLOCK_EN_n_OFST(ipa_ep_idx),
- 0x1);
+ ipa_ctx->hol_en);
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OFST(ipa_ep_idx),
- IPA_HOLB_TMR_VAL);
+ ipa_ctx->hol_timer);
}
IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
@@ -293,11 +338,7 @@
ipa_cfg_ep_fail:
memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
fail:
- if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0) {
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
- }
-
+ ipa_dec_client_disable_clks();
return result;
}
EXPORT_SYMBOL(ipa_connect);
@@ -372,10 +413,7 @@
ipa_enable_data_path(clnt_hdl);
memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
- if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0) {
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
- }
+ ipa_dec_client_disable_clks();
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
index 51a950d..fb69817 100644
--- a/drivers/platform/msm/ipa/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -94,6 +94,8 @@
static struct dentry *dent;
static struct dentry *dfile_gen_reg;
static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_ep_hol_en;
+static struct dentry *dfile_ep_hol_timer;
static struct dentry *dfile_hdr;
static struct dentry *dfile_ip4_rt;
static struct dentry *dfile_ip6_rt;
@@ -144,6 +146,58 @@
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
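+/*
+ * The two write-only handlers below accept hex values (kstrtou32 with
+ * base 16) and also cache the value in ipa_ctx so newly connected consumer
+ * pipes pick it up in ipa_connect(). Usage sketch, assuming debugfs is
+ * mounted at /sys/kernel/debug:
+ *
+ *   echo 1 > /sys/kernel/debug/ipa/hol_en
+ *   echo 7f > /sys/kernel/debug/ipa/hol_timer
+ */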
+static ssize_t ipa_write_ep_hol_en_reg(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ u32 endp_reg_val;
+ unsigned long missing;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtou32(dbg_buff, 16, &endp_reg_val))
+ return -EFAULT;
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_n_OFST(ep_reg_idx),
+ endp_reg_val);
+
+ ipa_ctx->hol_en = endp_reg_val;
+
+ return count;
+}
+
+static ssize_t ipa_write_ep_hol_timer_reg(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ u32 endp_reg_val;
+ unsigned long missing;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtou32(dbg_buff, 16, &endp_reg_val))
+ return -EFAULT;
+
+ ipa_write_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OFST(ep_reg_idx),
+ endp_reg_val);
+
+ ipa_ctx->hol_timer = endp_reg_val;
+
+ return count;
+}
+
static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -556,15 +610,27 @@
"x_intr_repost=%u\n"
"rx_q_len=%u\n"
"act_clnt=%u\n"
- "con_clnt_bmap=0x%x\n",
+ "con_clnt_bmap=0x%x\n"
+ "a2_power_on_reqs_in=%u\n"
+ "a2_power_on_reqs_out=%u\n"
+ "a2_power_off_reqs_in=%u\n"
+ "a2_power_off_reqs_out=%u\n"
+ "a2_power_modem_acks=%u\n"
+ "a2_power_apps_acks=%u\n",
ipa_ctx->stats.tx_sw_pkts,
ipa_ctx->stats.tx_hw_pkts,
ipa_ctx->stats.rx_pkts,
ipa_ctx->stats.rx_repl_repost,
ipa_ctx->stats.x_intr_repost,
ipa_ctx->stats.rx_q_len,
- atomic_read(&ipa_ctx->ipa_active_clients),
- connect);
+ ipa_ctx->ipa_active_clients,
+ connect,
+ ipa_ctx->stats.a2_power_on_reqs_in,
+ ipa_ctx->stats.a2_power_on_reqs_out,
+ ipa_ctx->stats.a2_power_off_reqs_in,
+ ipa_ctx->stats.a2_power_off_reqs_out,
+ ipa_ctx->stats.a2_power_modem_acks,
+ ipa_ctx->stats.a2_power_apps_acks);
cnt += nbytes;
for (i = 0; i < MAX_NUM_EXCP; i++) {
@@ -663,6 +729,13 @@
.write = ipa_write_ep_reg,
};
+const struct file_operations ipa_ep_hol_en_ops = {
+ .write = ipa_write_ep_hol_en_reg,
+};
+const struct file_operations ipa_ep_hol_timer_ops = {
+ .write = ipa_write_ep_hol_timer_reg,
+};
+
const struct file_operations ipa_hdr_ops = {
.read = ipa_read_hdr,
};
@@ -695,6 +768,7 @@
const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
S_IWUSR | S_IWGRP | S_IWOTH;
+ const mode_t write_only_mode = S_IWUSR | S_IWGRP | S_IWOTH;
dent = debugfs_create_dir("ipa", 0);
if (IS_ERR(dent)) {
@@ -716,6 +790,20 @@
goto fail;
}
+ dfile_ep_hol_en = debugfs_create_file("hol_en", write_only_mode, dent,
+ 0, &ipa_ep_hol_en_ops);
+ if (!dfile_ep_hol_en || IS_ERR(dfile_ep_hol_en)) {
+ IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n");
+ goto fail;
+ }
+
+ dfile_ep_hol_timer = debugfs_create_file("hol_timer", write_only_mode,
+ dent, 0, &ipa_ep_hol_timer_ops);
+ if (!dfile_ep_hol_timer || IS_ERR(dfile_ep_hol_timer)) {
+ IPAERR("fail to create file for debug_fs dfile_ep_hol_timer\n");
+ goto fail;
+ }
+
dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
&ipa_hdr_ops);
if (!dfile_hdr || IS_ERR(dfile_hdr)) {
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
index 86eebf6..228c77fe 100644
--- a/drivers/platform/msm/ipa/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -42,7 +42,7 @@
* the order for sent packet is the same as expected
* - delete all the tx packet descriptors from the system
* pipe context (not needed anymore)
- * - return the tx buffer back to one_kb_no_straddle_pool
+ * - return the tx buffer back to dma_pool
*/
void ipa_wq_write_done(struct work_struct *work)
{
@@ -80,7 +80,7 @@
list_del(&tx_pkt->link);
spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
- dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
+ dma_pool_free(ipa_ctx->dma_pool,
tx_pkt->bounce,
tx_pkt->mem.phys_base);
} else {
@@ -97,7 +97,7 @@
}
if (mult.phys_base)
- dma_free_coherent(NULL, mult.size, mult.base, mult.phys_base);
+ dma_pool_free(ipa_ctx->dma_pool, mult.base, mult.phys_base);
}
/**
@@ -144,7 +144,7 @@
* does not cross a 1KB boundary
*/
tx_pkt->bounce = dma_pool_alloc(
- ipa_ctx->one_kb_no_straddle_pool,
+ ipa_ctx->dma_pool,
mem_flag, &dma_address);
if (!tx_pkt->bounce) {
dma_address = 0;
@@ -208,7 +208,7 @@
list_del(&tx_pkt->link);
spin_unlock_irqrestore(&sys->spinlock, irq_flags);
if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
- dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ dma_pool_free(ipa_ctx->dma_pool, tx_pkt->bounce,
dma_address);
else
dma_unmap_single(NULL, dma_address, desc->len, DMA_TO_DEVICE);
@@ -259,7 +259,7 @@
if (unlikely(!in_atomic))
mem_flag = GFP_KERNEL;
- transfer.iovec = dma_alloc_coherent(NULL, size, &dma_addr, mem_flag);
+ transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag, &dma_addr);
transfer.iovec_phys = dma_addr;
transfer.iovec_count = num_desc;
spin_lock_irqsave(&sys->spinlock, irq_flags);
@@ -306,7 +306,7 @@
* packet does not cross a 1KB boundary
*/
tx_pkt->bounce =
- dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool,
+ dma_pool_alloc(ipa_ctx->dma_pool,
mem_flag,
&tx_pkt->mem.phys_base);
if (!tx_pkt->bounce) {
@@ -377,7 +377,7 @@
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
- dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
+ dma_pool_free(ipa_ctx->dma_pool,
tx_pkt->bounce,
tx_pkt->mem.phys_base);
else
@@ -392,7 +392,7 @@
if (fail_dma_wrap)
kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
if (transfer.iovec_phys)
- dma_free_coherent(NULL, size, transfer.iovec,
+ dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
transfer.iovec_phys);
failure_coherent:
spin_unlock_irqrestore(&sys->spinlock, irq_flags);
@@ -433,9 +433,7 @@
struct ipa_desc *desc;
int result = 0;
- if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_enable_clks();
+ ipa_inc_client_enable_clks();
if (num_desc == 1) {
init_completion(&descr->xfer_done);
@@ -471,9 +469,7 @@
IPA_STATS_INC_IC_CNT(num_desc, descr, ipa_ctx->stats.imm_cmds);
bail:
- if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
+ ipa_dec_client_disable_clks();
return result;
}
@@ -528,6 +524,8 @@
struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
struct ipa_ep_context *ep;
int cnt = 0;
+ struct completion *compl;
+ struct ipa_tree_node *node;
unsigned int src_pipe;
while ((in_poll_state ? atomic_read(&ipa_ctx->curr_polling_state) :
@@ -582,6 +580,35 @@
IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts);
+ if (unlikely(mux_hdr->flags & IPA_A5_MUX_HDR_EXCP_FLAG_TAG)) {
+ if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) {
+ /* retrieve the compl object from tag value */
+ mux_hdr++;
+ compl = (struct completion *)
+ ntohl(*((u32 *)mux_hdr));
+ IPADBG("%x %x %p\n", *(u32 *)mux_hdr,
+ *((u32 *)mux_hdr + 1), compl);
+
+ mutex_lock(&ipa_ctx->lock);
+ node = ipa_search(&ipa_ctx->tag_tree,
+ (u32)compl);
+ if (node) {
+ complete_all(compl);
+ rb_erase(&node->node,
+ &ipa_ctx->tag_tree);
+ kmem_cache_free(
+ ipa_ctx->tree_node_cache, node);
+ } else {
+ WARN_ON(1);
+ }
+ mutex_unlock(&ipa_ctx->lock);
+ }
+ dev_kfree_skb(rx_skb);
+ ipa_replenish_rx_cache();
+ ++cnt;
+ continue;
+ }
+
/*
* Any packets arriving over AMPDU_TX should be dispatched
* to the regular WLAN RX data-path.
@@ -791,7 +818,8 @@
ipa_ctx->a5_pipe_index++;
ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index = ipa_ep_idx;
ipa_ctx->ep[ipa_ep_idx].connect.options =
- SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS;
+ SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS |
+ SPS_O_NO_DISABLE;
if (ipa_ctx->polling_mode)
ipa_ctx->ep[ipa_ep_idx].connect.options |= SPS_O_POLL;
} else {
@@ -1055,6 +1083,7 @@
int inactive_cycles = 0;
int cnt;
+ ipa_inc_client_enable_clks();
do {
cnt = ipa_handle_rx_core(true, true);
if (cnt == 0) {
@@ -1066,6 +1095,7 @@
} while (inactive_cycles <= POLLING_INACTIVITY);
ipa_rx_switch_to_intr_mode();
+ ipa_dec_client_disable_clks();
}
/**
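The ipa_dp.c hunks above fold the special-purpose one_kb_no_straddle_pool (and a couple of dma_alloc_coherent() call sites) into a single ipa_ctx->dma_pool. A minimal sketch of how such a pool could be created follows; the block size, alignment, helper name and device pointer are illustrative assumptions, not the driver's actual init parameters. The point is that dma_pool_create()'s boundary argument preserves the old no-1KB-straddle guarantee:

    /*
     * Illustrative only: the real creation site is in ipa.c, outside this
     * hunk. Passing boundary = 1024 guarantees that no allocation from the
     * pool crosses a 1KB boundary, which is the property the IPA v1.0
     * bounce buffers rely on.
     */
    static int ipa_create_dma_pool(struct device *dev)	/* assumed helper */
    {
    	ipa_ctx->dma_pool = dma_pool_create("ipa_dma_pool", dev,
    					    512,   /* block size (assumed) */
    					    4,     /* alignment (assumed)  */
    					    1024); /* no 1KB straddling    */
    	if (!ipa_ctx->dma_pool)
    		return -ENOMEM;
    	return 0;
    }

Allocations then follow the usual dma_pool_alloc()/dma_pool_free() pairing already visible in the hunks above.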
diff --git a/drivers/platform/msm/ipa/ipa_flt.c b/drivers/platform/msm/ipa/ipa_flt.c
index b63b939..edb9fb1 100644
--- a/drivers/platform/msm/ipa/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_flt.c
@@ -368,6 +368,7 @@
return 0;
proc_err:
dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+ mem->base = NULL;
error:
return -EPERM;
@@ -456,7 +457,7 @@
if (mem->size > avail) {
IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
- goto fail_hw_tbl_gen;
+ goto fail_send_cmd;
}
if (ip == IPA_IP_v4) {
diff --git a/drivers/platform/msm/ipa/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_hdr.c
index 7d0bc24..9618da2 100644
--- a/drivers/platform/msm/ipa/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_hdr.c
@@ -89,7 +89,7 @@
if (ipa_ctx->hdr_tbl_lcl && mem->size > IPA_RAM_HDR_SIZE) {
IPAERR("tbl too big, needed %d avail %d\n", mem->size,
IPA_RAM_HDR_SIZE);
- goto fail_hw_tbl_gen;
+ goto fail_send_cmd;
}
cmd->hdr_table_addr = mem->phys_base;
@@ -126,7 +126,7 @@
return 0;
fail_send_cmd:
- if (mem->phys_base)
+ if (mem->base)
dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
fail_hw_tbl_gen:
kfree(cmd);
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index ca5740d..cc3e630 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -30,7 +30,8 @@
#define IPA_COOKIE 0xfacefeed
#define IPA_NUM_PIPES 0x14
-#define IPA_SYS_DESC_FIFO_SZ (0x800)
+#define IPA_SYS_DESC_FIFO_SZ 0x800
+#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
#ifdef IPA_DEBUG
#define IPADBG(fmt, args...) \
@@ -40,10 +41,11 @@
#define IPADBG(fmt, args...)
#endif
-#define WLAN_AMPDU_TX_EP (15)
-#define WLAN_PROD_TX_EP (19)
-#define MAX_NUM_EXCP (8)
-#define MAX_NUM_IMM_CMD (17)
+#define WLAN_AMPDU_TX_EP 15
+#define WLAN_PROD_TX_EP 19
+
+#define MAX_NUM_EXCP 8
+#define MAX_NUM_IMM_CMD 17
#define IPA_STATS
@@ -531,6 +533,12 @@
u32 rx_q_len;
u32 msg_w[IPA_EVENT_MAX];
u32 msg_r[IPA_EVENT_MAX];
+ u32 a2_power_on_reqs_in;
+ u32 a2_power_on_reqs_out;
+ u32 a2_power_off_reqs_in;
+ u32 a2_power_off_reqs_out;
+ u32 a2_power_modem_acks;
+ u32 a2_power_apps_acks;
};
/**
@@ -585,7 +593,7 @@
* @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
* @empty_rt_tbl_mem: empty routing tables memory
* @pipe_mem_pool: pipe memory pool
- * @one_kb_no_straddle_pool: one kb no straddle pool
+ * @dma_pool: special purpose DMA pool
* @ipa_hw_type: type of IPA HW type (e.g. IPA 1.0, IPA 1.1 etc')
* @ipa_hw_mode: mode of IPA HW mode (e.g. Normal, Virtual or over PCIe)
*
@@ -643,8 +651,9 @@
bool ip6_flt_tbl_lcl;
struct ipa_mem_buffer empty_rt_tbl_mem;
struct gen_pool *pipe_mem_pool;
- struct dma_pool *one_kb_no_straddle_pool;
- atomic_t ipa_active_clients;
+ struct dma_pool *dma_pool;
+ struct mutex ipa_active_clients_lock;
+ int ipa_active_clients;
u32 clnt_hdl_cmd;
u32 clnt_hdl_data_in;
u32 clnt_hdl_data_out;
@@ -658,6 +667,10 @@
enum ipa_hw_mode ipa_hw_mode;
/* featurize if memory footprint becomes a concern */
struct ipa_stats stats;
+ void *smem_pipe_mem;
+ /* store HOLB configuration for WLAN TX pipes */
+ u32 hol_en;
+ u32 hol_timer;
};
/**
@@ -742,7 +755,7 @@
struct a2_mux_pipe_connection *pipe_connect);
int ipa_get_a2_mux_bam_info(u32 *a2_bam_mem_base, u32 *a2_bam_mem_size,
u32 *a2_bam_irq);
-void rmnet_bridge_get_client_handles(u32 *producer_handle,
+void teth_bridge_get_client_handles(u32 *producer_handle,
u32 *consumer_handle);
int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
bool in_atomic);
@@ -795,6 +808,8 @@
struct ipa_context *ipa_get_ctx(void);
void ipa_enable_clks(void);
void ipa_disable_clks(void);
+void ipa_inc_client_enable_clks(void);
+void ipa_dec_client_disable_clks(void);
int __ipa_del_rt_rule(u32 rule_hdl);
int __ipa_del_hdr(u32 hdr_hdl);
int __ipa_release_hdr(u32 hdr_hdl);
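The new ipa_inc_client_enable_clks()/ipa_dec_client_disable_clks() prototypes replace the open-coded atomic clock voting removed from ipa_dp.c. Their bodies live in ipa.c and are not part of this hunk; one plausible shape, inferred from the removed atomic logic and the new ipa_active_clients counter and lock, is sketched below (an assumption, not the patch's actual implementation):

    void ipa_inc_client_enable_clks(void)
    {
    	mutex_lock(&ipa_ctx->ipa_active_clients_lock);
    	ipa_ctx->ipa_active_clients++;
    	/* first voter turns the clocks on (hardware mode only) */
    	if (ipa_ctx->ipa_active_clients == 1 &&
    	    ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
    		ipa_enable_clks();
    	mutex_unlock(&ipa_ctx->ipa_active_clients_lock);
    }

    void ipa_dec_client_disable_clks(void)
    {
    	mutex_lock(&ipa_ctx->ipa_active_clients_lock);
    	ipa_ctx->ipa_active_clients--;
    	/* last voter turns the clocks off */
    	if (ipa_ctx->ipa_active_clients == 0 &&
    	    ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
    		ipa_disable_clks();
    	mutex_unlock(&ipa_ctx->ipa_active_clients_lock);
    }

The mutex-protected counter serializes the enable/disable decision itself, which the previous atomic_inc_return()/atomic_dec_return() pair could not do.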
diff --git a/drivers/platform/msm/ipa/ipa_intf.c b/drivers/platform/msm/ipa/ipa_intf.c
index 0f41d2c..5ee1929 100644
--- a/drivers/platform/msm/ipa/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_intf.c
@@ -432,6 +432,7 @@
}
IPA_STATS_INC_CNT(
ipa_ctx->stats.msg_r[msg->meta.msg_type]);
+ kfree(msg);
}
ret = -EAGAIN;
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
index 0a6771c..3615952 100644
--- a/drivers/platform/msm/ipa/ipa_rm_resource.c
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -80,7 +80,8 @@
int result = 0;
int driver_result;
unsigned long flags;
- IPADBG("IPA RM ::ipa_rm_resource_consumer_request ENTER\n");
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_request %d ENTER\n",
+ consumer->resource.name);
spin_lock_irqsave(&consumer->resource.state_lock, flags);
switch (consumer->resource.state) {
case IPA_RM_RELEASED:
@@ -114,7 +115,8 @@
consumer->usage_count++;
bail:
spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
- IPADBG("IPA RM ::ipa_rm_resource_consumer_request EXIT [%d]\n", result);
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_request %d EXIT %d\n",
+ consumer->resource.name, result);
return result;
}
@@ -125,7 +127,8 @@
int driver_result;
unsigned long flags;
enum ipa_rm_resource_state save_state;
- IPADBG("IPA RM ::ipa_rm_resource_consumer_release ENTER\n");
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_release %d ENTER\n",
+ consumer->resource.name);
spin_lock_irqsave(&consumer->resource.state_lock, flags);
switch (consumer->resource.state) {
case IPA_RM_RELEASED:
@@ -160,7 +163,8 @@
}
bail:
spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
- IPADBG("IPA RM ::ipa_rm_resource_consumer_release EXIT [%d]\n", result);
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_release %d EXIT %d\n",
+ consumer->resource.name, result);
return result;
}
@@ -564,7 +568,7 @@
unsigned long flags;
struct ipa_rm_resource *consumer;
int consumer_result;
- IPADBG("IPA RM ::ipa_rm_resource_producer_request [%d] ENTER\n",
+ IPADBG("IPA RM ::ipa_rm_resource_producer_request %d ENTER\n",
producer->resource.name);
if (ipa_rm_peers_list_is_empty(producer->resource.peers_list)) {
spin_lock_irqsave(&producer->resource.state_lock, flags);
@@ -628,7 +632,8 @@
unlock_and_bail:
spin_unlock_irqrestore(&producer->resource.state_lock, flags);
bail:
- IPADBG("IPA RM ::ipa_rm_resource_producer_request EXIT[%d]\n", result);
+ IPADBG("IPA RM ::ipa_rm_resource_producer_request %d EXIT %d\n",
+ producer->resource.name, result);
return result;
}
@@ -646,7 +651,8 @@
unsigned long flags;
struct ipa_rm_resource *consumer;
int consumer_result;
- IPADBG("IPA RM ::ipa_rm_resource_producer_release ENTER\n");
+ IPADBG("IPA RM ::ipa_rm_resource_producer_release %d ENTER\n",
+ producer->resource.name);
if (ipa_rm_peers_list_is_empty(producer->resource.peers_list)) {
spin_lock_irqsave(&producer->resource.state_lock, flags);
producer->resource.state = IPA_RM_RELEASED;
@@ -702,7 +708,8 @@
return result;
bail:
spin_unlock_irqrestore(&producer->resource.state_lock, flags);
- IPADBG("IPA RM ::ipa_rm_resource_producer_release EXIT[%d]\n", result);
+ IPADBG("IPA RM ::ipa_rm_resource_producer_release %d EXIT %d\n",
+ producer->resource.name, result);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
index 1d88280..6430c07 100644
--- a/drivers/platform/msm/ipa/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -305,6 +305,7 @@
rt_tbl_mem.base, rt_tbl_mem.phys_base);
proc_err:
dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+ mem->base = NULL;
error:
return -EPERM;
}
@@ -378,7 +379,7 @@
if (mem->size > avail) {
IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
- goto fail_hw_tbl_gen;
+ goto fail_send_cmd;
}
if (ip == IPA_IP_v4) {
@@ -413,7 +414,7 @@
return 0;
fail_send_cmd:
- if (mem->phys_base)
+ if (mem->base)
dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
fail_hw_tbl_gen:
kfree(cmd);
@@ -505,6 +506,8 @@
IPAERR("failed to add to tree\n");
WARN_ON(1);
}
+ } else {
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
}
return entry;
diff --git a/drivers/platform/msm/ipa/rmnet_bridge.c b/drivers/platform/msm/ipa/rmnet_bridge.c
deleted file mode 100644
index 696b363..0000000
--- a/drivers/platform/msm/ipa/rmnet_bridge.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <mach/bam_dmux.h>
-#include <mach/ipa.h>
-#include <mach/sps.h>
-
-static struct rmnet_bridge_cb_type {
- u32 producer_handle;
- u32 consumer_handle;
- u32 ipa_producer_handle;
- u32 ipa_consumer_handle;
- bool is_connected;
-} rmnet_bridge_cb;
-
-/**
-* rmnet_bridge_init() - Initialize RmNet bridge module
-*
-* Return codes:
-* 0: success
-*/
-int rmnet_bridge_init(void)
-{
- memset(&rmnet_bridge_cb, 0, sizeof(struct rmnet_bridge_cb_type));
-
- return 0;
-}
-EXPORT_SYMBOL(rmnet_bridge_init);
-
-/**
-* rmnet_bridge_disconnect() - Disconnect RmNet bridge module
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int rmnet_bridge_disconnect(void)
-{
- int ret = 0;
- if (false == rmnet_bridge_cb.is_connected) {
- pr_err("%s: trying to disconnect already disconnected RmNet bridge\n",
- __func__);
- goto bail;
- }
-
- rmnet_bridge_cb.is_connected = false;
-
- ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
- rmnet_bridge_cb.ipa_consumer_handle);
- ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
- rmnet_bridge_cb.ipa_producer_handle);
-bail:
- return ret;
-}
-EXPORT_SYMBOL(rmnet_bridge_disconnect);
-
-/**
-* rmnet_bridge_connect() - Connect RmNet bridge module
-* @producer_hdl: IPA producer handle
-* @consumer_hdl: IPA consumer handle
-* @wwan_logical_channel_id: WWAN logical channel ID
-*
-* Return codes:
-* 0: success
-* -EINVAL: invalid parameters
-*/
-int rmnet_bridge_connect(u32 producer_hdl,
- u32 consumer_hdl,
- int wwan_logical_channel_id)
-{
- struct ipa_sys_connect_params props;
- int ret = 0;
-
- if (true == rmnet_bridge_cb.is_connected) {
- ret = 0;
- pr_err("%s: trying to connect already connected RmNet bridge\n",
- __func__);
- goto bail;
- }
-
- rmnet_bridge_cb.consumer_handle = consumer_hdl;
- rmnet_bridge_cb.producer_handle = producer_hdl;
- rmnet_bridge_cb.is_connected = true;
-
- memset(&props, 0, sizeof(props));
- props.ipa_ep_cfg.mode.mode = IPA_DMA;
- props.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
- props.client = IPA_CLIENT_A2_TETHERED_PROD;
- props.desc_fifo_sz = 0x800;
- /* setup notification callback if needed */
-
- ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
- &props, &rmnet_bridge_cb.ipa_consumer_handle);
- if (ret) {
- pr_err("%s: IPA DL bridge setup failure\n", __func__);
- goto bail_dl;
- }
-
- memset(&props, 0, sizeof(props));
- props.client = IPA_CLIENT_A2_TETHERED_CONS;
- props.desc_fifo_sz = 0x800;
- /* setup notification callback if needed */
-
- ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
- &props, &rmnet_bridge_cb.ipa_producer_handle);
- if (ret) {
- pr_err("%s: IPA UL bridge setup failure\n", __func__);
- goto bail_ul;
- }
- return 0;
-bail_ul:
- ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
- rmnet_bridge_cb.ipa_consumer_handle);
-bail_dl:
- rmnet_bridge_cb.is_connected = false;
-bail:
- return ret;
-}
-EXPORT_SYMBOL(rmnet_bridge_connect);
-
-void rmnet_bridge_get_client_handles(u32 *producer_handle,
- u32 *consumer_handle)
-{
- if (producer_handle == NULL || consumer_handle == NULL)
- return;
-
- *producer_handle = rmnet_bridge_cb.producer_handle;
- *consumer_handle = rmnet_bridge_cb.consumer_handle;
-}
diff --git a/drivers/platform/msm/ipa/teth_bridge.c b/drivers/platform/msm/ipa/teth_bridge.c
index 5b26e41..40c8fc7 100644
--- a/drivers/platform/msm/ipa/teth_bridge.c
+++ b/drivers/platform/msm/ipa/teth_bridge.c
@@ -58,6 +58,17 @@
#define TETH_AGGR_MAX_DATAGRAMS_DEFAULT 16
#define TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT (8*1024)
+#define TETH_MTU_BYTE 1500
+
+#define TETH_INACTIVITY_TIME_MSEC (1000)
+
+#define TETH_WORKQUEUE_NAME "tethering_bridge_wq"
+
+#define TETH_TOTAL_HDR_ENTRIES 6
+#define TETH_TOTAL_RT_ENTRIES_IP 3
+#define TETH_TOTAL_FLT_ENTRIES_IP 2
+#define TETH_IP_FAMILIES 2
+
struct mac_addresses_type {
u8 host_pc_mac_addr[ETH_ALEN];
bool host_pc_mac_addr_known;
@@ -68,6 +79,7 @@
struct stats {
u64 a2_to_usb_num_sw_tx_packets;
u64 usb_to_a2_num_sw_tx_packets;
+ u64 num_sw_tx_packets_during_resource_wakeup;
};
struct teth_bridge_ctx {
@@ -92,9 +104,24 @@
bool comp_hw_bridge_in_progress;
struct teth_aggr_capabilities *aggr_caps;
struct stats stats;
+ struct workqueue_struct *teth_wq;
+ u16 a2_ipa_hdr_len;
+ struct ipa_ioc_del_hdr *hdr_del;
+ struct ipa_ioc_del_rt_rule *routing_del[TETH_IP_FAMILIES];
+ struct ipa_ioc_del_flt_rule *filtering_del[TETH_IP_FAMILIES];
+};
+static struct teth_bridge_ctx *teth_ctx;
+
+enum teth_packet_direction {
+ TETH_USB_TO_A2,
+ TETH_A2_TO_USB,
};
-static struct teth_bridge_ctx *teth_ctx;
+struct teth_work {
+ struct work_struct work;
+ struct sk_buff *skb;
+ enum teth_packet_direction dir;
+};
#ifdef CONFIG_DEBUG_FS
#define TETH_MAX_MSG_LEN 512
@@ -108,6 +135,7 @@
struct ipa_ioc_add_hdr *hdrs;
struct ethhdr hdr_ipv4;
struct ethhdr hdr_ipv6;
+ int idx1;
TETH_DBG_FUNC_ENTRY();
memcpy(hdr_ipv4.h_source, src_mac_addr, ETH_ALEN);
@@ -142,6 +170,13 @@
res = ipa_add_hdr(hdrs);
if (res || hdrs->hdr[0].status || hdrs->hdr[1].status)
TETH_ERR("Header insertion failed\n");
+
+ /* Save the header handles in order to delete them later */
+ for (idx1 = 0; idx1 < hdrs->num_hdrs; idx1++) {
+ int idx2 = teth_ctx->hdr_del->num_hdls++;
+ teth_ctx->hdr_del->hdl[idx2].hdl = hdrs->hdr[idx1].hdr_hdl;
+ }
+
kfree(hdrs);
TETH_DBG_FUNC_EXIT();
@@ -167,6 +202,7 @@
}
hdr_cfg.hdr_len = a2_ipa_hdr_len;
+ teth_ctx->a2_ipa_hdr_len = a2_ipa_hdr_len;
res = ipa_cfg_ep_hdr(teth_ctx->a2_ipa_pipe_hdl, &hdr_cfg);
if (res) {
TETH_ERR("Header removal config for A2->IPA pipe failed\n");
@@ -198,6 +234,7 @@
int res;
struct ipa_ioc_add_hdr *mbim_hdr;
u8 mbim_stream_id = 0;
+ int idx;
TETH_DBG_FUNC_ENTRY();
mbim_hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
@@ -221,6 +258,11 @@
} else {
TETH_DBG("Added MBIM stream ID header\n");
}
+
+ /* Save the header handle in order to delete it later */
+ idx = teth_ctx->hdr_del->num_hdls++;
+ teth_ctx->hdr_del->hdl[idx].hdl = mbim_hdr->hdr[0].hdr_hdl;
+
kfree(mbim_hdr);
TETH_DBG_FUNC_EXIT();
@@ -283,14 +325,7 @@
TETH_ERR("Configuration of header removal/insertion failed\n");
goto bail;
}
-
- res = ipa_commit_hdr();
- if (res) {
- TETH_ERR("Failed committing headers\n");
- goto bail;
- }
TETH_DBG_FUNC_EXIT();
-
bail:
return res;
}
@@ -304,6 +339,7 @@
struct ipa_ioc_add_rt_rule *rt_rule;
struct ipa_ioc_get_hdr hdr_info;
int res;
+ int idx;
TETH_DBG_FUNC_ENTRY();
/* Get the header handle */
@@ -330,6 +366,12 @@
res = ipa_add_rt_rule(rt_rule);
if (res || rt_rule->rules[0].status)
TETH_ERR("Failed adding routing rule\n");
+
+ /* Save the routing rule handle in order to delete it later */
+ idx = teth_ctx->routing_del[ip_address_family]->num_hdls++;
+ teth_ctx->routing_del[ip_address_family]->hdl[idx].hdl =
+ rt_rule->rules[0].rt_rule_hdl;
+
kfree(rt_rule);
TETH_DBG_FUNC_EXIT();
@@ -425,20 +467,7 @@
TETH_ERR("A2 to USB routing block configuration failed\n");
goto bail;
}
-
- /* Commit all the changes to HW in one shot */
- res = ipa_commit_rt(IPA_IP_v4);
- if (res) {
- TETH_ERR("Failed commiting IPv4 routing tables\n");
- goto bail;
- }
- res = ipa_commit_rt(IPA_IP_v6);
- if (res) {
- TETH_ERR("Failed commiting IPv6 routing tables\n");
- goto bail;
- }
TETH_DBG_FUNC_EXIT();
-
bail:
return res;
}
@@ -450,6 +479,7 @@
struct ipa_ioc_add_flt_rule *flt_tbl;
struct ipa_ioc_get_rt_tbl rt_tbl_info;
int res;
+ int idx;
TETH_DBG_FUNC_ENTRY();
/* Get the needed routing table handle */
@@ -480,6 +510,12 @@
res = ipa_add_flt_rule(flt_tbl);
if (res || flt_tbl->rules[0].status)
TETH_ERR("Failed adding filtering table\n");
+
+ /* Save the filtering rule handle in order to delete it later */
+ idx = teth_ctx->filtering_del[ip_address_family]->num_hdls++;
+ teth_ctx->filtering_del[ip_address_family]->hdl[idx].hdl =
+ flt_tbl->rules[0].flt_rule_hdl;
+
kfree(flt_tbl);
TETH_DBG_FUNC_EXIT();
@@ -533,20 +569,7 @@
TETH_ERR("A2_PROD filtering configuration failed\n");
goto bail;
}
-
- /* Commit all the changes to HW in one shot */
- res = ipa_commit_flt(IPA_IP_v4);
- if (res) {
- TETH_ERR("Failed commiting IPv4 filtering tables\n");
- goto bail;
- }
- res = ipa_commit_flt(IPA_IP_v6);
- if (res) {
- TETH_ERR("Failed commiting IPv6 filtering tables\n");
- goto bail;
- }
TETH_DBG_FUNC_EXIT();
-
bail:
return res;
}
@@ -578,8 +601,15 @@
return -EFAULT;
}
+ /*
+ * Due to a HW 'feature', the maximum aggregated packet size may be the
+ * requested aggr_byte_limit plus the MTU. Therefore, the MTU is
+ * subtracted from the requested aggr_byte_limit so that the requested
+ * byte limit is honored.
+ */
ipa_aggr_params->aggr_byte_limit =
- teth_aggr_params->max_transfer_size_byte / 1024;
+ (teth_aggr_params->max_transfer_size_byte - TETH_MTU_BYTE) /
+ 1024;
ipa_aggr_params->aggr_time_limit = TETH_DEFAULT_AGGR_TIME_LIMIT;
TETH_DBG_FUNC_EXIT();
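Worked example with illustrative numbers: for the default 8 KB cap (TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT = 8*1024) and TETH_MTU_BYTE = 1500, aggr_byte_limit = (8192 - 1500) / 1024 = 6 with integer division. Even if the HW appends one extra MTU to an aggregate, the result is at most 6*1024 + 1500 = 7644 bytes, which still fits the requested 8192-byte limit.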
@@ -592,7 +622,6 @@
u32 pipe_hdl)
{
struct ipa_ep_cfg_aggr agg_params;
- struct ipa_ep_cfg_hdr hdr_params;
int res;
TETH_DBG_FUNC_ENTRY();
@@ -609,18 +638,7 @@
TETH_ERR("ipa_cfg_ep_aggr() failed\n");
goto bail;
}
-
- if (!client_is_prod) {
- memset(&hdr_params, 0, sizeof(hdr_params));
- hdr_params.hdr_len = 1;
- res = ipa_cfg_ep_hdr(pipe_hdl, &hdr_params);
- if (res) {
- TETH_ERR("ipa_cfg_ep_hdr() failed\n");
- goto bail;
- }
- }
TETH_DBG_FUNC_EXIT();
-
bail:
return res;
}
@@ -651,6 +669,19 @@
char aggr_prot_str[20];
TETH_DBG_FUNC_ENTRY();
+ if (!teth_ctx->aggr_params_known) {
+ TETH_ERR("Aggregation parameters unknown.\n");
+ return -EINVAL;
+ }
+
+ if ((teth_ctx->usb_ipa_pipe_hdl == 0) ||
+ (teth_ctx->ipa_usb_pipe_hdl == 0))
+ return 0;
+ /*
+ * Returning 0 in case pipe handles are 0 because aggregation
+ * params will be set later
+ */
+
if (teth_ctx->aggr_params.ul.aggr_prot == TETH_AGGR_PROTOCOL_MBIM ||
teth_ctx->aggr_params.dl.aggr_prot == TETH_AGGR_PROTOCOL_MBIM) {
res = ipa_set_aggr_mode(IPA_MBIM);
@@ -696,12 +727,26 @@
return res;
}
+static int teth_request_resource(void)
+{
+ int res;
+
+ INIT_COMPLETION(teth_ctx->is_bridge_prod_up);
+ res = ipa_rm_inactivity_timer_request_resource(
+ IPA_RM_RESOURCE_BRIDGE_PROD);
+ if (res < 0) {
+ if (res == -EINPROGRESS)
+ wait_for_completion(&teth_ctx->is_bridge_prod_up);
+ else
+ return res;
+ }
+
+ return 0;
+}
+
static void complete_hw_bridge(struct work_struct *work)
{
int res;
- static DEFINE_MUTEX(f_lock);
-
- mutex_lock(&f_lock);
TETH_DBG_FUNC_ENTRY();
TETH_DBG("Completing HW bridge in %s mode\n",
@@ -709,20 +754,15 @@
"ETHERNET" :
"IP");
- res = teth_set_aggregation();
+ res = teth_request_resource();
if (res) {
- TETH_ERR("Failed setting aggregation params\n");
+ TETH_ERR("request_resource() failed.\n");
goto bail;
}
- /*
- * Reset the Header, Routing and Filtering blocks.
- * Resetting the Header block will also reset the other blocks.
- * This reset is not comitted to HW.
- */
- res = ipa_reset_hdr();
+ res = teth_set_aggregation();
if (res) {
- TETH_ERR("Failed resetting IPA\n");
+ TETH_ERR("Failed setting aggregation params\n");
goto bail;
}
@@ -744,10 +784,20 @@
goto bail;
}
+ /*
+ * Commit all the data to HW, including header, routing and filtering
+ * blocks, IPv4 and IPv6
+ */
+ res = ipa_commit_hdr();
+ if (res) {
+ TETH_ERR("Failed committing headers / routing / filtering.\n");
+ goto bail;
+ }
+
teth_ctx->is_hw_bridge_complete = true;
- teth_ctx->comp_hw_bridge_in_progress = false;
bail:
- mutex_unlock(&f_lock);
+ teth_ctx->comp_hw_bridge_in_progress = false;
+ ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
TETH_DBG_FUNC_EXIT();
return;
@@ -787,10 +837,78 @@
(teth_ctx->aggr_params_known)) {
INIT_WORK(&teth_ctx->comp_hw_bridge_work, complete_hw_bridge);
teth_ctx->comp_hw_bridge_in_progress = true;
- schedule_work(&teth_ctx->comp_hw_bridge_work);
+ queue_work(teth_ctx->teth_wq, &teth_ctx->comp_hw_bridge_work);
}
}
+static void teth_send_skb_work(struct work_struct *work)
+{
+ struct teth_work *work_data =
+ container_of(work, struct teth_work, work);
+ int res;
+
+ res = teth_request_resource();
+ if (res) {
+ TETH_ERR("Packet send failure, dropping packet !\n");
+ goto bail;
+ }
+
+ switch (work_data->dir) {
+ case TETH_USB_TO_A2:
+ res = a2_mux_write(A2_MUX_TETHERED_0, work_data->skb);
+ if (res) {
+ TETH_ERR("Packet send failure, dropping packet !\n");
+ goto bail;
+ }
+ teth_ctx->stats.usb_to_a2_num_sw_tx_packets++;
+ break;
+
+ case TETH_A2_TO_USB:
+ res = ipa_tx_dp(IPA_CLIENT_USB_CONS, work_data->skb, NULL);
+ if (res) {
+ TETH_ERR("Packet send failure, dropping packet !\n");
+ goto bail;
+ }
+ teth_ctx->stats.a2_to_usb_num_sw_tx_packets++;
+ break;
+
+ default:
+ TETH_ERR("Unsupported direction to send !\n");
+ WARN_ON(1);
+ }
+ ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+ kfree(work_data);
+ teth_ctx->stats.num_sw_tx_packets_during_resource_wakeup++;
+
+ return;
+bail:
+ ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+ dev_kfree_skb(work_data->skb);
+ kfree(work_data);
+}
+
+static void defer_skb_send(struct sk_buff *skb, enum teth_packet_direction dir)
+{
+ struct teth_work *work = kmalloc(sizeof(struct teth_work), GFP_KERNEL);
+
+ if (!work) {
+ TETH_ERR("No mem, dropping packet\n");
+ dev_kfree_skb(skb);
+ ipa_rm_inactivity_timer_release_resource
+ (IPA_RM_RESOURCE_BRIDGE_PROD);
+ return;
+ }
+
+ /*
+ * Since IPA uses a single Rx thread, we don't
+ * want to wait for completion here
+ */
+ INIT_WORK(&work->work, teth_send_skb_work);
+ work->dir = dir;
+ work->skb = skb;
+ queue_work(teth_ctx->teth_wq, &work->work);
+}
+
static void usb_notify_cb(void *priv,
enum ipa_dp_evt_type evt,
unsigned long data)
@@ -807,13 +925,36 @@
&teth_ctx->mac_addresses.host_pc_mac_addr_known,
&teth_ctx->mac_addresses.device_mac_addr_known);
- /* Send the packet to A2, using a2_service driver API */
- teth_ctx->stats.usb_to_a2_num_sw_tx_packets++;
+ /*
+ * Request the BRIDGE_PROD resource, send the packet and release
+ * the resource
+ */
+ res = ipa_rm_inactivity_timer_request_resource(
+ IPA_RM_RESOURCE_BRIDGE_PROD);
+ if (res < 0) {
+ if (res == -EINPROGRESS) {
+ /* The resource is waking up */
+ defer_skb_send(skb, TETH_USB_TO_A2);
+ } else {
+ TETH_ERR(
+ "Packet send failure, dropping packet !\n");
+ dev_kfree_skb(skb);
+ }
+ ipa_rm_inactivity_timer_release_resource(
+ IPA_RM_RESOURCE_BRIDGE_PROD);
+ return;
+ }
res = a2_mux_write(A2_MUX_TETHERED_0, skb);
if (res) {
TETH_ERR("Packet send failure, dropping packet !\n");
dev_kfree_skb(skb);
+ ipa_rm_inactivity_timer_release_resource(
+ IPA_RM_RESOURCE_BRIDGE_PROD);
+ return;
}
+ teth_ctx->stats.usb_to_a2_num_sw_tx_packets++;
+ ipa_rm_inactivity_timer_release_resource(
+ IPA_RM_RESOURCE_BRIDGE_PROD);
break;
case IPA_WRITE_DONE:
@@ -845,13 +986,37 @@
&teth_ctx->
mac_addresses.host_pc_mac_addr_known);
- /* Send the packet to USB */
- teth_ctx->stats.a2_to_usb_num_sw_tx_packets++;
+ /*
+ * Request the BRIDGE_PROD resource, send the packet and release
+ * the resource
+ */
+ res = ipa_rm_inactivity_timer_request_resource(
+ IPA_RM_RESOURCE_BRIDGE_PROD);
+ if (res < 0) {
+ if (res == -EINPROGRESS) {
+ /* The resource is waking up */
+ defer_skb_send(skb, TETH_A2_TO_USB);
+ } else {
+ TETH_ERR(
+ "Packet send failure, dropping packet !\n");
+ dev_kfree_skb(skb);
+ }
+ ipa_rm_inactivity_timer_release_resource(
+ IPA_RM_RESOURCE_BRIDGE_PROD);
+ return;
+ }
+
res = ipa_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL);
if (res) {
TETH_ERR("Packet send failure, dropping packet !\n");
dev_kfree_skb(skb);
+ ipa_rm_inactivity_timer_release_resource(
+ IPA_RM_RESOURCE_BRIDGE_PROD);
+ return;
}
+ teth_ctx->stats.a2_to_usb_num_sw_tx_packets++;
+ ipa_rm_inactivity_timer_release_resource(
+ IPA_RM_RESOURCE_BRIDGE_PROD);
break;
case A2_MUX_WRITE_DONE:
@@ -870,8 +1035,28 @@
enum ipa_rm_event event,
unsigned long data)
{
+ int res;
+ struct ipa_ep_cfg ipa_ep_cfg;
+
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
+ res = a2_mux_get_tethered_client_handles(
+ A2_MUX_TETHERED_0,
+ &teth_ctx->ipa_a2_pipe_hdl,
+ &teth_ctx->a2_ipa_pipe_hdl);
+ if (res) {
+ TETH_ERR(
+ "a2_mux_get_tethered_client_handles() failed, res = %d\n",
+ res);
+ return;
+ }
+
+ /* Reset the configuration of the relevant endpoints */
+ memset(&ipa_ep_cfg, 0, sizeof(ipa_ep_cfg));
+ ipa_cfg_ep(teth_ctx->ipa_a2_pipe_hdl, &ipa_ep_cfg);
+
+ ipa_ep_cfg.hdr.hdr_len = teth_ctx->a2_ipa_hdr_len;
+ ipa_cfg_ep(teth_ctx->a2_ipa_pipe_hdl, &ipa_ep_cfg);
complete(&teth_ctx->is_bridge_prod_up);
break;
@@ -915,32 +1100,34 @@
/* Build IPA Resource manager dependency graph */
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
IPA_RM_RESOURCE_USB_CONS);
- if (res && res != -EEXIST) {
+ if (res && res != -EINPROGRESS) {
TETH_ERR("ipa_rm_add_dependency() failed\n");
goto bail;
}
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
IPA_RM_RESOURCE_A2_CONS);
- if (res && res != -EEXIST) {
+ if (res && res != -EINPROGRESS) {
TETH_ERR("ipa_rm_add_dependency() failed\n");
goto fail_add_dependency_1;
}
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_A2_CONS);
- if (res && res != -EEXIST) {
+ if (res && res != -EINPROGRESS) {
TETH_ERR("ipa_rm_add_dependency() failed\n");
goto fail_add_dependency_2;
}
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_A2_PROD,
IPA_RM_RESOURCE_USB_CONS);
- if (res && res != -EEXIST) {
+ if (res && res != -EINPROGRESS) {
TETH_ERR("ipa_rm_add_dependency() failed\n");
goto fail_add_dependency_3;
}
+ /* Return 0 as EINPROGRESS is a valid return value at this point */
+ res = 0;
goto bail;
fail_add_dependency_3:
@@ -958,6 +1145,57 @@
}
EXPORT_SYMBOL(teth_bridge_init);
+static void initialize_context(void)
+{
+ TETH_DBG_FUNC_ENTRY();
+ /* Initialize context variables */
+ teth_ctx->usb_ipa_pipe_hdl = 0;
+ teth_ctx->ipa_a2_pipe_hdl = 0;
+ teth_ctx->a2_ipa_pipe_hdl = 0;
+ teth_ctx->ipa_usb_pipe_hdl = 0;
+ teth_ctx->is_connected = false;
+
+ /* The default link protocol is Ethernet */
+ teth_ctx->link_protocol = TETH_LINK_PROTOCOL_ETHERNET;
+
+ memset(&teth_ctx->mac_addresses, 0, sizeof(teth_ctx->mac_addresses));
+ teth_ctx->is_hw_bridge_complete = false;
+ memset(&teth_ctx->aggr_params, 0, sizeof(teth_ctx->aggr_params));
+ teth_ctx->aggr_params_known = false;
+ teth_ctx->tethering_mode = 0;
+ INIT_COMPLETION(teth_ctx->is_bridge_prod_up);
+ INIT_COMPLETION(teth_ctx->is_bridge_prod_down);
+ teth_ctx->comp_hw_bridge_in_progress = false;
+ memset(&teth_ctx->stats, 0, sizeof(teth_ctx->stats));
+ teth_ctx->a2_ipa_hdr_len = 0;
+ memset(teth_ctx->hdr_del,
+ 0,
+ sizeof(struct ipa_ioc_del_hdr) + TETH_TOTAL_HDR_ENTRIES *
+ sizeof(struct ipa_hdr_del));
+ memset(teth_ctx->routing_del[IPA_IP_v4],
+ 0,
+ sizeof(struct ipa_ioc_del_rt_rule) +
+ TETH_TOTAL_RT_ENTRIES_IP * sizeof(struct ipa_rt_rule_del));
+ teth_ctx->routing_del[IPA_IP_v4]->ip = IPA_IP_v4;
+ memset(teth_ctx->routing_del[IPA_IP_v6],
+ 0,
+ sizeof(struct ipa_ioc_del_rt_rule) +
+ TETH_TOTAL_RT_ENTRIES_IP * sizeof(struct ipa_rt_rule_del));
+ teth_ctx->routing_del[IPA_IP_v6]->ip = IPA_IP_v6;
+ memset(teth_ctx->filtering_del[IPA_IP_v4],
+ 0,
+ sizeof(struct ipa_ioc_del_flt_rule) +
+ TETH_TOTAL_FLT_ENTRIES_IP * sizeof(struct ipa_flt_rule_del));
+ teth_ctx->filtering_del[IPA_IP_v4]->ip = IPA_IP_v4;
+ memset(teth_ctx->filtering_del[IPA_IP_v6],
+ 0,
+ sizeof(struct ipa_ioc_del_flt_rule) +
+ TETH_TOTAL_FLT_ENTRIES_IP * sizeof(struct ipa_flt_rule_del));
+ teth_ctx->filtering_del[IPA_IP_v6]->ip = IPA_IP_v6;
+
+ TETH_DBG_FUNC_EXIT();
+}
+
/**
* teth_bridge_disconnect() - Disconnect tethering bridge module
*
@@ -967,38 +1205,82 @@
*/
int teth_bridge_disconnect(void)
{
- int res = -EPERM;
+ int res;
TETH_DBG_FUNC_ENTRY();
if (!teth_ctx->is_connected) {
TETH_ERR(
- "Trying to disconnect an already disconnected bridge\n");
+ "Trying to disconnect an already disconnected bridge\n");
+ goto bail;
+ }
+
+ /* Request the BRIDGE_PROD resource */
+ res = teth_request_resource();
+ if (res) {
+ TETH_ERR("request_resource() failed.\n");
goto bail;
}
teth_ctx->is_connected = false;
- res = ipa_rm_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
- if (res == -EINPROGRESS)
- wait_for_completion(&teth_ctx->is_bridge_prod_down);
+ /* Close the channel to A2 */
+ if (a2_mux_close_channel(A2_MUX_TETHERED_0))
+ TETH_ERR("a2_mux_close_channel() failed\n");
- /* Initialize statistics */
- memset(&teth_ctx->stats, 0, sizeof(teth_ctx->stats));
+ if (teth_ctx->is_hw_bridge_complete) {
+ /* Delete header entries */
+ if (ipa_del_hdr(teth_ctx->hdr_del))
+ TETH_ERR("ipa_del_hdr() failed\n");
+
+ /* Delete installed routing rules */
+ if (ipa_del_rt_rule(teth_ctx->routing_del[IPA_IP_v4]))
+ TETH_ERR("ipa_del_rt_rule() failed\n");
+ if (ipa_del_rt_rule(teth_ctx->routing_del[IPA_IP_v6]))
+ TETH_ERR("ipa_del_rt_rule() failed\n");
+
+ /* Delete installed filtering rules */
+ if (ipa_del_flt_rule(teth_ctx->filtering_del[IPA_IP_v4]))
+ TETH_ERR("ipa_del_flt_rule() failed\n");
+ if (ipa_del_flt_rule(teth_ctx->filtering_del[IPA_IP_v6]))
+ TETH_ERR("ipa_del_flt_rule() failed\n");
+
+ /*
+ * Commit all the data to HW, including header, routing and
+ * filtering blocks, IPv4 and IPv6
+ */
+ if (ipa_commit_hdr())
+ TETH_ERR("Failed committing headers\n");
+ }
+
+ initialize_context();
+
+ ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
/* Delete IPA Resource manager dependency graph */
res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
IPA_RM_RESOURCE_USB_CONS);
- res |= ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
- IPA_RM_RESOURCE_A2_CONS);
- res |= ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
- IPA_RM_RESOURCE_A2_CONS);
- res |= ipa_rm_delete_dependency(IPA_RM_RESOURCE_A2_PROD,
- IPA_RM_RESOURCE_USB_CONS);
- if (res)
- TETH_ERR("Failed deleting ipa_rm dependency.\n");
+ if ((res != 0) && (res != -EINPROGRESS))
+ TETH_ERR(
+ "Failed deleting ipa_rm dependency BRIDGE_PROD <-> USB_CONS\n");
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+ IPA_RM_RESOURCE_A2_CONS);
+ if ((res != 0) && (res != -EINPROGRESS))
+ TETH_ERR(
+ "Failed deleting ipa_rm dependency BRIDGE_PROD <-> A2_CONS\n");
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_A2_CONS);
+ if ((res != 0) && (res != -EINPROGRESS))
+ TETH_ERR(
+ "Failed deleting ipa_rm dependency USB_PROD <-> A2_CONS\n");
+ res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_A2_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+ if ((res != 0) && (res != -EINPROGRESS))
+ TETH_ERR(
+ "Failed deleting ipa_rm dependency A2_PROD <-> USB_CONS\n");
bail:
TETH_DBG_FUNC_EXIT();
- return res;
+
+ return 0;
}
EXPORT_SYMBOL(teth_bridge_disconnect);
@@ -1032,12 +1314,10 @@
teth_ctx->usb_ipa_pipe_hdl = connect_params->usb_ipa_pipe_hdl;
teth_ctx->tethering_mode = connect_params->tethering_mode;
- res = ipa_rm_request_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
- if (res < 0) {
- if (res == -EINPROGRESS)
- wait_for_completion(&teth_ctx->is_bridge_prod_up);
- else
- goto bail;
+ res = teth_request_resource();
+ if (res) {
+ TETH_ERR("request_resource() failed.\n");
+ goto bail;
}
res = a2_mux_open_channel(A2_MUX_TETHERED_0,
@@ -1068,10 +1348,28 @@
if (teth_ctx->tethering_mode == TETH_TETHERING_MODE_MBIM)
teth_ctx->link_protocol = TETH_LINK_PROTOCOL_IP;
- TETH_DBG_FUNC_EXIT();
+
+ if (teth_ctx->aggr_params_known) {
+ res = teth_set_aggregation();
+ if (res) {
+ TETH_ERR("Failed setting aggregation params\n");
+ goto bail;
+ }
+ }
+
+ /* In case of IP link protocol, complete HW bridge */
+ if ((teth_ctx->link_protocol == TETH_LINK_PROTOCOL_IP) &&
+ (!teth_ctx->comp_hw_bridge_in_progress) &&
+ (teth_ctx->aggr_params_known) &&
+ (!teth_ctx->is_hw_bridge_complete)) {
+ INIT_WORK(&teth_ctx->comp_hw_bridge_work, complete_hw_bridge);
+ teth_ctx->comp_hw_bridge_in_progress = true;
+ queue_work(teth_ctx->teth_wq, &teth_ctx->comp_hw_bridge_work);
+ }
bail:
- if (res)
- ipa_rm_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+ ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+ TETH_DBG_FUNC_EXIT();
+
return res;
}
EXPORT_SYMBOL(teth_bridge_connect);
@@ -1097,11 +1395,33 @@
{
int res;
+ TETH_DBG_FUNC_ENTRY();
if (!aggr_params) {
TETH_ERR("Invalid parameter\n");
return -EINVAL;
}
+ /*
+ * In case the requested max transfer size is larger than 8K, set it
+ * to the default 8K
+ */
+ if (aggr_params->dl.max_transfer_size_byte >
+ TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT)
+ aggr_params->dl.max_transfer_size_byte =
+ TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT;
+ if (aggr_params->ul.max_transfer_size_byte >
+ TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT)
+ aggr_params->ul.max_transfer_size_byte =
+ TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT;
+
+ /* MBIM aggregation is not supported with the Ethernet link protocol */
+ if (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET &&
+ (aggr_params->dl.aggr_prot == TETH_AGGR_PROTOCOL_MBIM ||
+ aggr_params->ul.aggr_prot == TETH_AGGR_PROTOCOL_MBIM)) {
+ TETH_ERR("Ethernet with MBIM is not supported.\n");
+ return -EINVAL;
+ }
+
memcpy(&teth_ctx->aggr_params,
aggr_params,
sizeof(struct teth_aggr_params));
@@ -1110,10 +1430,9 @@
teth_ctx->aggr_params_known = true;
res = teth_set_aggregation();
- if (res) {
+ if (res)
TETH_ERR("Failed setting aggregation params\n");
- res = -EFAULT;
- }
+ TETH_DBG_FUNC_EXIT();
return res;
}
@@ -1153,6 +1472,19 @@
}
res = teth_bridge_set_aggr_params(&aggr_params);
+ if (res)
+ break;
+
+ /* In case of IP link protocol, complete HW bridge */
+ if ((teth_ctx->link_protocol == TETH_LINK_PROTOCOL_IP) &&
+ (!teth_ctx->comp_hw_bridge_in_progress) &&
+ (!teth_ctx->is_hw_bridge_complete)) {
+ INIT_WORK(&teth_ctx->comp_hw_bridge_work,
+ complete_hw_bridge);
+ teth_ctx->comp_hw_bridge_in_progress = true;
+ queue_work(teth_ctx->teth_wq,
+ &teth_ctx->comp_hw_bridge_work);
+ }
break;
case TETH_BRIDGE_IOC_GET_AGGR_PARAMS:
@@ -1208,7 +1540,7 @@
return res;
}
-static void set_aggr_capabilities(void)
+static int set_aggr_capabilities(void)
{
u16 NUM_PROTOCOLS = 2;
@@ -1216,9 +1548,9 @@
NUM_PROTOCOLS *
sizeof(struct teth_aggr_params_link),
GFP_KERNEL);
- if (teth_ctx->aggr_caps == NULL) {
+ if (!teth_ctx->aggr_caps) {
TETH_ERR("Memory alloc failed for aggregation capabilities.\n");
- return;
+ return -ENOMEM;
}
teth_ctx->aggr_caps->num_protocols = NUM_PROTOCOLS;
@@ -1228,8 +1560,15 @@
teth_ctx->aggr_caps->prot_caps[1].aggr_prot = TETH_AGGR_PROTOCOL_TLP;
set_aggr_default_params(&teth_ctx->aggr_caps->prot_caps[1]);
+
+ return 0;
}
+/**
+* teth_bridge_get_client_handles() - Get USB <--> IPA pipe handles
+* @producer_handle: USB --> IPA pipe handle
+* @consumer_handle: IPA --> USB pipe handle
+*/
void teth_bridge_get_client_handles(u32 *producer_handle,
u32 *consumer_handle)
{
@@ -1397,6 +1736,11 @@
TETH_MAX_MSG_LEN - nbytes,
"A2 to USB SW Tx packets: %lld\n",
teth_ctx->stats.a2_to_usb_num_sw_tx_packets);
+ nbytes += scnprintf(
+ &dbg_buff[nbytes],
+ TETH_MAX_MSG_LEN - nbytes,
+ "SW Tx packets sent during resource wakeup: %lld\n",
+ teth_ctx->stats.num_sw_tx_packets_during_resource_wakeup);
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
@@ -1523,7 +1867,59 @@
return -ENOMEM;
}
- set_aggr_capabilities();
+ res = set_aggr_capabilities();
+ if (res) {
+ TETH_ERR("kzalloc err.\n");
+ goto fail_alloc_aggr_caps;
+ }
+
+ res = -ENOMEM;
+ teth_ctx->hdr_del = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
+ TETH_TOTAL_HDR_ENTRIES *
+ sizeof(struct ipa_hdr_del),
+ GFP_KERNEL);
+ if (!teth_ctx->hdr_del) {
+ TETH_ERR("kzalloc err.\n");
+ goto fail_alloc_hdr_del;
+ }
+
+ teth_ctx->routing_del[IPA_IP_v4] =
+ kzalloc(sizeof(struct ipa_ioc_del_rt_rule) +
+ TETH_TOTAL_RT_ENTRIES_IP *
+ sizeof(struct ipa_rt_rule_del),
+ GFP_KERNEL);
+ if (!teth_ctx->routing_del[IPA_IP_v4]) {
+ TETH_ERR("kzalloc err.\n");
+ goto fail_alloc_routing_del_ipv4;
+ }
+ teth_ctx->routing_del[IPA_IP_v6] =
+ kzalloc(sizeof(struct ipa_ioc_del_rt_rule) +
+ TETH_TOTAL_RT_ENTRIES_IP *
+ sizeof(struct ipa_rt_rule_del),
+ GFP_KERNEL);
+ if (!teth_ctx->routing_del[IPA_IP_v6]) {
+ TETH_ERR("kzalloc err.\n");
+ goto fail_alloc_routing_del_ipv6;
+ }
+
+ teth_ctx->filtering_del[IPA_IP_v4] =
+ kzalloc(sizeof(struct ipa_ioc_del_flt_rule) +
+ TETH_TOTAL_FLT_ENTRIES_IP *
+ sizeof(struct ipa_flt_rule_del),
+ GFP_KERNEL);
+ if (!teth_ctx->filtering_del[IPA_IP_v4]) {
+ TETH_ERR("kzalloc err.\n");
+ goto fail_alloc_filtering_del_ipv4;
+ }
+ teth_ctx->filtering_del[IPA_IP_v6] =
+ kzalloc(sizeof(struct ipa_ioc_del_flt_rule) +
+ TETH_TOTAL_FLT_ENTRIES_IP *
+ sizeof(struct ipa_flt_rule_del),
+ GFP_KERNEL);
+ if (!teth_ctx->filtering_del[IPA_IP_v6]) {
+ TETH_ERR("kzalloc err.\n");
+ goto fail_alloc_filtering_del_ipv6;
+ }
teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME);
@@ -1554,8 +1950,6 @@
goto fail_cdev_add;
}
- teth_ctx->comp_hw_bridge_in_progress = false;
-
teth_debugfs_init();
/* Create BRIDGE_PROD entity in IPA Resource Manager */
@@ -1570,9 +1964,21 @@
init_completion(&teth_ctx->is_bridge_prod_up);
init_completion(&teth_ctx->is_bridge_prod_down);
- /* The default link protocol is Ethernet */
- teth_ctx->link_protocol = TETH_LINK_PROTOCOL_ETHERNET;
+ res = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_BRIDGE_PROD,
+ TETH_INACTIVITY_TIME_MSEC);
+ if (res) {
+ TETH_ERR("ipa_rm_inactivity_timer_init() failed, res=%d\n",
+ res);
+ goto fail_cdev_add;
+ }
+ teth_ctx->teth_wq = create_workqueue(TETH_WORKQUEUE_NAME);
+ if (!teth_ctx->teth_wq) {
+ TETH_ERR("workqueue creation failed\n");
+ goto fail_cdev_add;
+ }
+
+ initialize_context();
TETH_DBG("Tethering bridge driver init OK\n");
return 0;
@@ -1581,7 +1987,18 @@
fail_device_create:
unregister_chrdev_region(teth_ctx->dev_num, 1);
fail_alloc_chrdev_region:
+ kfree(teth_ctx->filtering_del[IPA_IP_v6]);
+fail_alloc_filtering_del_ipv6:
+ kfree(teth_ctx->filtering_del[IPA_IP_v4]);
+fail_alloc_filtering_del_ipv4:
+ kfree(teth_ctx->routing_del[IPA_IP_v6]);
+fail_alloc_routing_del_ipv6:
+ kfree(teth_ctx->routing_del[IPA_IP_v4]);
+fail_alloc_routing_del_ipv4:
+ kfree(teth_ctx->hdr_del);
+fail_alloc_hdr_del:
kfree(teth_ctx->aggr_caps);
+fail_alloc_aggr_caps:
kfree(teth_ctx);
teth_ctx = NULL;
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
index 1729b49..52c523e 100644
--- a/drivers/platform/msm/qpnp-pwm.c
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -463,7 +463,7 @@
int i, pwm_size, rc = 0;
int burst_size = SPMI_MAX_BUF_LEN;
int list_len = lut->list_len << 1;
- int offset = lut->lo_index << 1;
+ int offset = (lut->lo_index << 1) - 2;
pwm_size = QPNP_GET_PWM_SIZE(
chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) &
@@ -1024,8 +1024,8 @@
raw_lut = 1;
lut_config->list_len = len;
- lut_config->lo_index = start_idx;
- lut_config->hi_index = start_idx + len - 1;
+ lut_config->lo_index = start_idx + 1;
+ lut_config->hi_index = start_idx + len;
rc = qpnp_lpg_change_table(pwm, duty_pct, raw_lut);
if (rc) {
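Worked example for the new 1-based LUT indexing, assuming start_idx = 0 and len = 4 purely for illustration: lo_index becomes 0 + 1 = 1 and hi_index becomes 0 + 4 = 4, while the SPMI write offset becomes (1 << 1) - 2 = 0, i.e. the same byte offset into the LUT register space as before. Only the index values programmed into the ramp-control registers shift by one; the duty-cycle data still lands at the same location.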
@@ -1041,13 +1041,13 @@
QPNP_SET_PAUSE_CNT(lut_config->lut_pause_lo_cnt,
lut_params.lut_pause_lo, ramp_step_ms);
- if (lut_config->lut_pause_lo_cnt > PM_PWM_LUT_PAUSE_MAX)
- lut_config->lut_pause_lo_cnt = PM_PWM_LUT_PAUSE_MAX;
+ if (lut_config->lut_pause_lo_cnt > PM_PWM_MAX_PAUSE_CNT)
+ lut_config->lut_pause_lo_cnt = PM_PWM_MAX_PAUSE_CNT;
QPNP_SET_PAUSE_CNT(lut_config->lut_pause_hi_cnt,
lut_params.lut_pause_hi, ramp_step_ms);
- if (lut_config->lut_pause_hi_cnt > PM_PWM_LUT_PAUSE_MAX)
- lut_config->lut_pause_hi_cnt = PM_PWM_LUT_PAUSE_MAX;
+ if (lut_config->lut_pause_hi_cnt > PM_PWM_MAX_PAUSE_CNT)
+ lut_config->lut_pause_hi_cnt = PM_PWM_MAX_PAUSE_CNT;
lut_config->ramp_step_ms = ramp_step_ms;
diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c
index 47108c6..6412fc0 100644
--- a/drivers/platform/msm/sps/bam.c
+++ b/drivers/platform/msm/sps/bam.c
@@ -886,6 +886,12 @@
(u32) base, status);
bam_output_register_content(base);
*cb_case = SPS_CALLBACK_BAM_HRESP_ERR_IRQ;
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+ } else if (status & IRQ_STTS_BAM_TIMER_IRQ) {
+ SPS_DBG1("sps:bam 0x%x(va);receive BAM_TIMER_IRQ\n",
+ (u32) base);
+ *cb_case = SPS_CALLBACK_BAM_TIMER_IRQ;
+#endif
} else
SPS_INFO("sps:bam 0x%x(va);bam irq status="
"0x%x.", (u32) base, status);
@@ -1126,9 +1132,25 @@
void bam_pipe_timer_config(void *base, u32 pipe, enum bam_pipe_timer_mode mode,
u32 timeout_count)
{
- bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_MODE, mode);
- bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_TRSHLD,
- timeout_count);
+ u32 for_all_pipes = 0;
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+ for_all_pipes = bam_read_reg_field(base, REVISION,
+ BAM_NUM_INACTIV_TMRS);
+#endif
+
+ if (for_all_pipes) {
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+ bam_write_reg_field(base, TIMER_CTRL, TIMER_MODE, mode);
+ bam_write_reg_field(base, TIMER_CTRL, TIMER_TRSHLD,
+ timeout_count);
+#endif
+ } else {
+ bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_MODE,
+ mode);
+ bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_TRSHLD,
+ timeout_count);
+ }
}
/**
@@ -1137,10 +1159,26 @@
*/
void bam_pipe_timer_reset(void *base, u32 pipe)
{
- /* reset */
- bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_RST, 0);
- /* active */
- bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_RST, 1);
+ u32 for_all_pipes = 0;
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+ for_all_pipes = bam_read_reg_field(base, REVISION,
+ BAM_NUM_INACTIV_TMRS);
+#endif
+
+ if (for_all_pipes) {
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+ /* reset */
+ bam_write_reg_field(base, TIMER_CTRL, TIMER_RST, 0);
+ /* active */
+ bam_write_reg_field(base, TIMER_CTRL, TIMER_RST, 1);
+#endif
+ } else {
+ /* reset */
+ bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_RST, 0);
+ /* active */
+ bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_RST, 1);
+ }
}
/**
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 6f2e2a4..23c346a 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -2232,8 +2232,7 @@
SPS_ERR("sps:%s:timer_ctrl pointer is NULL.\n", __func__);
return SPS_ERROR;
} else if (timer_result == NULL) {
- SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
- return SPS_ERROR;
+ SPS_DBG("sps:%s:no result to return.\n", __func__);
}
bam = sps_bam_lock(pipe);
@@ -2434,7 +2433,7 @@
static int __devinit msm_sps_probe(struct platform_device *pdev)
{
- int ret;
+ int ret = -ENODEV;
SPS_DBG2("sps:%s.", __func__);
@@ -2471,7 +2470,10 @@
sps->dfab_clk = clk_get(sps->dev, "dfab_clk");
if (IS_ERR(sps->dfab_clk)) {
- SPS_ERR("sps:fail to get dfab_clk.");
+ if (PTR_ERR(sps->dfab_clk) == -EPROBE_DEFER)
+ ret = -EPROBE_DEFER;
+ else
+ SPS_ERR("sps:fail to get dfab_clk.");
goto clk_err;
} else {
ret = clk_set_rate(sps->dfab_clk, 64000000);
@@ -2485,7 +2487,10 @@
if (!d_type) {
sps->pmem_clk = clk_get(sps->dev, "mem_clk");
if (IS_ERR(sps->pmem_clk)) {
- SPS_ERR("sps:fail to get pmem_clk.");
+ if (PTR_ERR(sps->pmem_clk) == -EPROBE_DEFER)
+ ret = -EPROBE_DEFER;
+ else
+ SPS_ERR("sps:fail to get pmem_clk.");
goto clk_err;
} else {
ret = clk_prepare_enable(sps->pmem_clk);
@@ -2499,7 +2504,10 @@
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
sps->bamdma_clk = clk_get(sps->dev, "dma_bam_pclk");
if (IS_ERR(sps->bamdma_clk)) {
- SPS_ERR("sps:fail to get bamdma_clk.");
+ if (PTR_ERR(sps->bamdma_clk) == -EPROBE_DEFER)
+ ret = -EPROBE_DEFER;
+ else
+ SPS_ERR("sps:fail to get bamdma_clk.");
goto clk_err;
} else {
ret = clk_prepare_enable(sps->bamdma_clk);
@@ -2539,7 +2547,7 @@
alloc_chrdev_region_err:
class_destroy(sps->dev_class);
- return -ENODEV;
+ return ret;
}
static int __devexit msm_sps_remove(struct platform_device *pdev)
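The clk_get() changes in msm_sps_probe() above are meant to propagate probe deferral instead of failing hard when a clock provider has not registered yet. A brief sketch of the canonical shape of that check, under the assumption that PTR_ERR() is used to extract the encoded error from the ERR_PTR value (the clock name is simply the one used above):

    struct clk *clk;
    int ret;

    clk = clk_get(&pdev->dev, "dfab_clk");
    if (IS_ERR(clk)) {
    	ret = PTR_ERR(clk);
    	if (ret == -EPROBE_DEFER)
    		/* provider not registered yet; let the driver core retry */
    		return ret;
    	SPS_ERR("sps:fail to get dfab_clk.");
    	return ret;
    }

Comparing IS_ERR() itself against -EPROBE_DEFER can never match, since IS_ERR() only reports whether the pointer encodes an error; the error code has to come from PTR_ERR().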
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
index 31d1a78..80056f5 100644
--- a/drivers/platform/msm/sps/sps_bam.c
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -24,7 +24,8 @@
#include "spsi.h"
/* All BAM global IRQ sources */
-#define BAM_IRQ_ALL (BAM_DEV_IRQ_HRESP_ERROR | BAM_DEV_IRQ_ERROR)
+#define BAM_IRQ_ALL (BAM_DEV_IRQ_HRESP_ERROR | BAM_DEV_IRQ_ERROR | \
+ BAM_DEV_IRQ_TIMER)
/* BAM device state flags */
#define BAM_STATE_INIT (1UL << 1)
@@ -105,7 +106,7 @@
for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
if ((u32)opt_event_table[n].option !=
(u32)opt_event_table[n].pipe_irq) {
- SPS_ERR("sps:SPS_O 0x%x != HAL IRQ 0x%x",
+ SPS_ERR("sps:SPS_O 0x%x != HAL IRQ 0x%x\n",
opt_event_table[n].option,
opt_event_table[n].pipe_irq);
return SPS_ERROR;
@@ -141,11 +142,11 @@
source = bam_check_irq_source(dev->base, dev->props.ee,
mask, &cb_case);
- SPS_DBG1("sps:bam_isr:bam=0x%x;source=0x%x;mask=0x%x.",
+ SPS_DBG1("sps:bam_isr:bam=0x%x;source=0x%x;mask=0x%x.\n",
BAM_ID(dev), source, mask);
if ((source & (1UL << 31)) && (dev->props.callback)) {
- SPS_INFO("sps:bam_isr:bam=0x%x;callback for case %d.",
+ SPS_DBG1("sps:bam_isr:bam=0x%x;callback for case %d.\n",
BAM_ID(dev), cb_case);
dev->props.callback(cb_case, dev->props.user);
}
@@ -156,7 +157,7 @@
/* If MTIs are used, must poll each active pipe */
source = dev->pipe_active_mask;
- SPS_DBG1("sps:bam_isr for MTI:bam=0x%x;source=0x%x.",
+ SPS_DBG1("sps:bam_isr for MTI:bam=0x%x;source=0x%x.\n",
BAM_ID(dev), source);
}
@@ -177,7 +178,7 @@
/* Process any inactive pipe sources */
if (source) {
- SPS_ERR("sps:IRQ from BAM 0x%x inactive pipe(s) 0x%x",
+ SPS_ERR("sps:IRQ from BAM 0x%x inactive pipe(s) 0x%x\n",
BAM_ID(dev), source);
dev->irq_from_disabled_pipe++;
}
@@ -204,7 +205,7 @@
/* Is there any access to this BAM? */
if ((dev->props.manage & SPS_BAM_MGR_ACCESS_MASK) == SPS_BAM_MGR_NONE) {
- SPS_ERR("sps:No local access to BAM 0x%x", BAM_ID(dev));
+ SPS_ERR("sps:No local access to BAM 0x%x\n", BAM_ID(dev));
return SPS_ERROR;
}
@@ -222,7 +223,7 @@
IRQF_TRIGGER_HIGH, "sps", dev);
if (result) {
- SPS_ERR("sps:Failed to enable BAM 0x%x IRQ %d",
+ SPS_ERR("sps:Failed to enable BAM 0x%x IRQ %d\n",
BAM_ID(dev), dev->props.irq);
return SPS_ERROR;
}
@@ -236,13 +237,13 @@
result = enable_irq_wake(dev->props.irq);
if (result) {
- SPS_ERR("sps:Fail to enable wakeup irq "
- "BAM 0x%x IRQ %d",
+ SPS_ERR(
+ "sps:Fail to enable wakeup irq for BAM 0x%x IRQ %d\n",
BAM_ID(dev), dev->props.irq);
return SPS_ERROR;
} else
- SPS_DBG2("sps:Enable wakeup irq for "
- "BAM 0x%x IRQ %d",
+ SPS_DBG2(
+ "sps:Enable wakeup irq for BAM 0x%x IRQ %d\n",
BAM_ID(dev), dev->props.irq);
}
}
@@ -262,7 +263,7 @@
rc = bam_check(dev->base, &dev->version, &num_pipes);
if (rc) {
- SPS_ERR("sps:Fail to init BAM 0x%x IRQ %d",
+ SPS_ERR("sps:Fail to init BAM 0x%x IRQ %d\n",
BAM_ID(dev), dev->props.irq);
return SPS_ERROR;
}
@@ -281,7 +282,7 @@
* must use MTI. Thus, force EE index to a non-zero value to
* insure that EE zero globals can't be modified.
*/
- SPS_ERR("sps:EE for satellite BAM must be set to non-zero.");
+ SPS_ERR("sps:EE for satellite BAM must be set to non-zero.\n");
return SPS_ERROR;
}
@@ -295,8 +296,9 @@
MTIenabled) {
if (dev->props.irq_gen_addr == 0 ||
dev->props.irq_gen_addr == SPS_ADDR_INVALID) {
- SPS_ERR("sps:MTI destination address not specified "
- "for BAM 0x%x", BAM_ID(dev));
+ SPS_ERR(
+ "sps:MTI destination address not specified for BAM 0x%x\n",
+ BAM_ID(dev));
return SPS_ERROR;
}
dev->state |= BAM_STATE_MTI;
@@ -304,13 +306,13 @@
if (num_pipes) {
dev->props.num_pipes = num_pipes;
- SPS_DBG1("sps:BAM 0x%x number of pipes reported by hw: %d",
+ SPS_DBG1("sps:BAM 0x%x number of pipes reported by hw: %d\n",
BAM_ID(dev), dev->props.num_pipes);
}
/* Check EE index */
if (!MTIenabled && dev->props.ee >= SPS_BAM_NUM_EES) {
- SPS_ERR("sps:Invalid EE BAM 0x%x: %d", BAM_ID(dev),
+ SPS_ERR("sps:Invalid EE BAM 0x%x: %d\n", BAM_ID(dev),
dev->props.ee);
return SPS_ERROR;
}
@@ -323,8 +325,9 @@
struct sps_bam_sec_config_props *p_sec =
dev->props.p_sec_config_props;
if (p_sec == NULL) {
- SPS_ERR("sps:EE config table is not specified for "
- "BAM 0x%x", BAM_ID(dev));
+ SPS_ERR(
+ "sps:EE config table is not specified for BAM 0x%x\n",
+ BAM_ID(dev));
return SPS_ERROR;
}
@@ -351,9 +354,8 @@
for (i = n + 1; i < SPS_BAM_NUM_EES; i++) {
if ((p_sec->ees[n].pipe_mask &
p_sec->ees[i].pipe_mask) != 0) {
- SPS_ERR("sps:Overlapping pipe "
- "assignments for BAM "
- "0x%x: EEs %d and %d",
+ SPS_ERR(
+ "sps:Overlapping pipe assignments for BAM 0x%x: EEs %d and %d\n",
BAM_ID(dev), n, i);
return SPS_ERROR;
}
@@ -403,9 +405,21 @@
}
dev->state |= BAM_STATE_ENABLED;
- SPS_INFO("sps:BAM 0x%x (va:0x%x) enabled: ver:0x%x, number of pipes:%d",
- BAM_ID(dev), (u32) dev->base, dev->version,
- dev->props.num_pipes);
+
+ if (!dev->props.constrained_logging ||
+ (dev->props.constrained_logging && dev->props.logging_number)) {
+ if (dev->props.logging_number > 0)
+ dev->props.logging_number--;
+ SPS_INFO(
+ "sps:BAM 0x%x (va:0x%x) enabled: ver:0x%x, number of pipes:%d\n",
+ BAM_ID(dev), (u32) dev->base, dev->version,
+ dev->props.num_pipes);
+ } else
+ SPS_DBG2(
+ "sps:BAM 0x%x (va:0x%x) enabled: ver:0x%x, number of pipes:%d\n",
+ BAM_ID(dev), (u32) dev->base, dev->version,
+ dev->props.num_pipes);
+
return 0;
}
@@ -420,7 +434,7 @@
/* Is there any access to this BAM? */
if ((dev->props.manage & SPS_BAM_MGR_ACCESS_MASK) == SPS_BAM_MGR_NONE) {
- SPS_ERR("sps:No local access to BAM 0x%x", BAM_ID(dev));
+ SPS_ERR("sps:No local access to BAM 0x%x\n", BAM_ID(dev));
return SPS_ERROR;
}
@@ -444,7 +458,7 @@
dev->state &= ~BAM_STATE_ENABLED;
- SPS_DBG2("sps:BAM 0x%x disabled", BAM_ID(dev));
+ SPS_DBG2("sps:BAM 0x%x disabled\n", BAM_ID(dev));
return 0;
}
@@ -455,7 +469,7 @@
int sps_bam_device_init(struct sps_bam *dev)
{
if (dev->props.virt_addr == NULL) {
- SPS_ERR("sps:NULL BAM virtual address");
+ SPS_ERR("sps:NULL BAM virtual address\n");
return SPS_ERROR;
}
dev->base = (void *) dev->props.virt_addr;
@@ -463,7 +477,7 @@
if (dev->props.num_pipes == 0) {
/* Assume max number of pipes until BAM registers can be read */
dev->props.num_pipes = BAM_MAX_PIPES;
- SPS_DBG2("sps:BAM 0x%x: assuming max number of pipes: %d",
+ SPS_DBG2("sps:BAM 0x%x: assuming max number of pipes: %d\n",
BAM_ID(dev), dev->props.num_pipes);
}
@@ -479,11 +493,11 @@
if ((dev->props.options & SPS_BAM_OPT_ENABLE_AT_BOOT))
if (sps_bam_enable(dev)) {
- SPS_ERR("sps:Fail to enable bam device");
+ SPS_ERR("sps:Fail to enable bam device\n");
return SPS_ERROR;
}
- SPS_DBG2("sps:BAM device: phys 0x%x IRQ %d",
+ SPS_DBG2("sps:BAM device: phys 0x%x IRQ %d\n",
BAM_ID(dev), dev->props.irq);
return 0;
@@ -497,7 +511,7 @@
{
int result;
- SPS_DBG2("sps:BAM device DEINIT: phys 0x%x IRQ %d",
+ SPS_DBG2("sps:BAM device DEINIT: phys 0x%x IRQ %d\n",
BAM_ID(dev), dev->props.irq);
result = sps_bam_disable(dev);
@@ -515,7 +529,7 @@
u32 pipe_index;
int result;
- SPS_DBG2("sps:BAM device RESET: phys 0x%x IRQ %d",
+ SPS_DBG2("sps:BAM device RESET: phys 0x%x IRQ %d\n",
BAM_ID(dev), dev->props.irq);
/* If BAM is enabled, then disable */
@@ -526,8 +540,8 @@
pipe_index++) {
pipe = dev->pipes[pipe_index];
if (BAM_PIPE_IS_ASSIGNED(pipe)) {
- SPS_ERR("sps:BAM device 0x%x RESET failed: "
- "pipe %d in use",
+ SPS_ERR(
+ "sps:BAM device 0x%x RESET failed: pipe %d in use\n",
BAM_ID(dev), pipe_index);
result = SPS_ERROR;
break;
@@ -579,8 +593,9 @@
if (pipe_index == SPS_BAM_PIPE_INVALID) {
/* Allocate a pipe from the BAM */
if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_ALLOC)) {
- SPS_ERR("sps:Restricted from allocating pipes "
- "on BAM 0x%x", BAM_ID(dev));
+ SPS_ERR(
+ "sps:Restricted from allocating pipes on BAM 0x%x\n",
+ BAM_ID(dev));
return SPS_BAM_PIPE_INVALID;
}
for (pipe_index = 0, pipe_mask = 1;
@@ -593,24 +608,25 @@
break; /* Found an available pipe */
}
if (pipe_index >= dev->props.num_pipes) {
- SPS_ERR("sps:Fail to allocate pipe on BAM 0x%x",
+ SPS_ERR("sps:Fail to allocate pipe on BAM 0x%x\n",
BAM_ID(dev));
return SPS_BAM_PIPE_INVALID;
}
} else {
/* Check that client-specified pipe is available */
if (pipe_index >= dev->props.num_pipes) {
- SPS_ERR("sps:Invalid pipe %d for allocate on BAM 0x%x",
+ SPS_ERR(
+ "sps:Invalid pipe %d for allocate on BAM 0x%x\n",
pipe_index, BAM_ID(dev));
return SPS_BAM_PIPE_INVALID;
}
if ((dev->props.restricted_pipes & (1UL << pipe_index))) {
- SPS_ERR("sps:BAM 0x%x pipe %d is not local",
+ SPS_ERR("sps:BAM 0x%x pipe %d is not local\n",
BAM_ID(dev), pipe_index);
return SPS_BAM_PIPE_INVALID;
}
if (dev->pipes[pipe_index] != NULL) {
- SPS_ERR("sps:Pipe %d already allocated on BAM 0x%x",
+ SPS_ERR("sps:Pipe %d already allocated on BAM 0x%x\n",
pipe_index, BAM_ID(dev));
return SPS_BAM_PIPE_INVALID;
}
@@ -631,7 +647,7 @@
struct sps_pipe *pipe;
if (pipe_index >= dev->props.num_pipes) {
- SPS_ERR("sps:Invalid BAM 0x%x pipe: %d", BAM_ID(dev),
+ SPS_ERR("sps:Invalid BAM 0x%x pipe: %d\n", BAM_ID(dev),
pipe_index);
return;
}
@@ -642,8 +658,8 @@
/* Is the pipe currently allocated? */
if (pipe == NULL) {
- SPS_ERR("sps:Attempt to free unallocated pipe %d on "
- "BAM 0x%x", pipe_index, BAM_ID(dev));
+ SPS_ERR("sps:Attempt to free unallocated pipe %d on BAM 0x%x\n",
+ pipe_index, BAM_ID(dev));
return;
}
@@ -654,7 +670,7 @@
if (!list_empty(&pipe->sys.events_q)) {
struct sps_q_event *sps_event;
- SPS_ERR("sps:Disconnect BAM 0x%x pipe %d with events pending",
+ SPS_ERR("sps:Disconnect BAM 0x%x pipe %d with events pending\n",
BAM_ID(dev), pipe_index);
sps_event = list_entry((&pipe->sys.events_q)->next,
@@ -718,7 +734,7 @@
dev = map_pipe->bam;
pipe_index = map_pipe->pipe_index;
if (pipe_index >= dev->props.num_pipes) {
- SPS_ERR("sps:Invalid BAM 0x%x pipe: %d", BAM_ID(dev),
+ SPS_ERR("sps:Invalid BAM 0x%x pipe: %d\n", BAM_ID(dev),
pipe_index);
return SPS_ERROR;
}
@@ -729,14 +745,14 @@
/* Verify that control of this pipe is allowed */
if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_CTRL) ||
(dev->props.restricted_pipes & (1UL << pipe_index))) {
- SPS_ERR("sps:BAM 0x%x pipe %d is not local",
+ SPS_ERR("sps:BAM 0x%x pipe %d is not local\n",
BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
/* Control without configuration permission is not supported yet */
if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_CONFIG)) {
- SPS_ERR("sps:BAM 0x%x pipe %d remote config is not supported",
+ SPS_ERR("sps:BAM 0x%x pipe %d remote config is not supported\n",
BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
@@ -754,8 +770,9 @@
if (map->desc.phys_base == SPS_ADDR_INVALID ||
map->data.phys_base == SPS_ADDR_INVALID ||
map->desc.size == 0 || map->data.size == 0) {
- SPS_ERR("sps:FIFO buffers are not allocated for BAM "
- "0x%x pipe %d.", BAM_ID(dev), pipe_index);
+ SPS_ERR(
+ "sps:FIFO buffers are not allocated for BAM 0x%x pipe %d.\n",
+ BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
hw_params.data_base = map->data.phys_base;
@@ -787,8 +804,8 @@
/* Get virtual address for descriptor FIFO */
if (map->desc.phys_base != SPS_ADDR_INVALID) {
if (map->desc.size < (2 * sizeof(struct sps_iovec))) {
- SPS_ERR("sps:Invalid descriptor FIFO size "
- "for BAM 0x%x pipe %d: %d",
+ SPS_ERR(
+ "sps:Invalid descriptor FIFO size for BAM 0x%x pipe %d: %d\n",
BAM_ID(dev), pipe_index, map->desc.size);
return SPS_ERROR;
}
@@ -821,19 +838,24 @@
/* Check pipe allocation */
if (dev->pipes[pipe_index] != BAM_PIPE_UNASSIGNED) {
- SPS_ERR("sps:Invalid pipe %d on BAM 0x%x for connect",
+ SPS_ERR("sps:Invalid pipe %d on BAM 0x%x for connect\n",
pipe_index, BAM_ID(dev));
return SPS_ERROR;
}
if (bam_pipe_is_enabled(dev->base, pipe_index)) {
- SPS_ERR("sps:BAM 0x%x pipe %d sharing violation",
- BAM_ID(dev), pipe_index);
- return SPS_ERROR;
+ if (params->options & SPS_O_NO_DISABLE)
+ SPS_DBG("sps:BAM 0x%x pipe %d is already enabled\n",
+ BAM_ID(dev), pipe_index);
+ else {
+ SPS_ERR("sps:BAM 0x%x pipe %d sharing violation\n",
+ BAM_ID(dev), pipe_index);
+ return SPS_ERROR;
+ }
}
if (bam_pipe_init(dev->base, pipe_index, &hw_params, dev->props.ee)) {
- SPS_ERR("sps:BAM 0x%x pipe %d init error",
+ SPS_ERR("sps:BAM 0x%x pipe %d init error\n",
BAM_ID(dev), pipe_index);
goto exit_err;
}
@@ -882,8 +904,13 @@
bam_pipe->state |= BAM_STATE_INIT;
result = 0;
exit_err:
- if (result)
- bam_pipe_exit(dev->base, pipe_index, dev->props.ee);
+ if (result) {
+ if (params->options & SPS_O_NO_DISABLE)
+ SPS_DBG("sps:BAM 0x%x pipe %d connection exits\n",
+ BAM_ID(dev), pipe_index);
+ else
+ bam_pipe_exit(dev->base, pipe_index, dev->props.ee);
+ }
exit_init_err:
if (result) {
/* Clear the client pipe state */
@@ -903,7 +930,7 @@
int result;
if (pipe_index >= dev->props.num_pipes) {
- SPS_ERR("sps:Invalid BAM 0x%x pipe: %d", BAM_ID(dev),
+ SPS_ERR("sps:Invalid BAM 0x%x pipe: %d\n", BAM_ID(dev),
pipe_index);
return SPS_ERROR;
}
@@ -916,7 +943,11 @@
dev->pipe_active_mask &= ~(1UL << pipe_index);
}
dev->pipe_remote_mask &= ~(1UL << pipe_index);
- bam_pipe_exit(dev->base, pipe_index, dev->props.ee);
+ if (pipe->connect.options & SPS_O_NO_DISABLE)
+ SPS_DBG("sps:BAM 0x%x pipe %d exits\n", BAM_ID(dev),
+ pipe_index);
+ else
+ bam_pipe_exit(dev->base, pipe_index, dev->props.ee);
if (pipe->sys.desc_cache != NULL) {
u32 size = pipe->num_descs * sizeof(void *);
if (pipe->desc_size + size <= PAGE_SIZE)
@@ -933,7 +964,7 @@
}
if (result)
- SPS_ERR("sps:BAM 0x%x pipe %d already disconnected",
+ SPS_ERR("sps:BAM 0x%x pipe %d already disconnected\n",
BAM_ID(dev), pipe_index);
return result;
@@ -976,7 +1007,7 @@
irq_enable = BAM_DISABLE;
pipe->polled = true;
if (poll == 0 && pipe->irq_mask)
- SPS_DBG2("sps:BAM 0x%x pipe %d forced to use polling",
+ SPS_DBG2("sps:BAM 0x%x pipe %d forced to use polling\n",
BAM_ID(dev), pipe_index);
}
if ((pipe->state & BAM_STATE_MTI) == 0)
@@ -1024,8 +1055,8 @@
if (pipe->sys.desc_wr_count > 0 &&
(no_queue != pipe->sys.no_queue
|| ack_xfers != pipe->sys.ack_xfers)) {
- SPS_ERR("sps:Queue/ack mode change after transfer: "
- "BAM 0x%x pipe %d opt 0x%x",
+ SPS_ERR(
+ "sps:Queue/ack mode change after transfer: BAM 0x%x pipe %d opt 0x%x\n",
BAM_ID(dev), pipe_index, options);
return SPS_ERROR;
}
@@ -1034,8 +1065,9 @@
/* Is client setting invalid options for a BAM-to-BAM connection? */
if ((pipe->state & BAM_STATE_BAM2BAM) &&
(options & BAM2BAM_O_INVALID)) {
- SPS_ERR("sps:Invalid option for BAM-to-BAM: BAM 0x%x pipe %d "
- "opt 0x%x", BAM_ID(dev), pipe_index, options);
+ SPS_ERR(
+ "sps:Invalid option for BAM-to-BAM: BAM 0x%x pipe %d opt 0x%x\n",
+ BAM_ID(dev), pipe_index, options);
return SPS_ERROR;
}
@@ -1053,7 +1085,8 @@
vmalloc(pipe->desc_size + size);
if (pipe->sys.desc_cache == NULL) {
- SPS_ERR("sps:No memory for pipe %d of BAM 0x%x",
+ SPS_ERR(
+ "sps:No memory for pipe %d of BAM 0x%x\n",
pipe_index, BAM_ID(dev));
return -ENOMEM;
}
@@ -1063,7 +1096,7 @@
if (pipe->sys.desc_cache == NULL) {
/*** MUST BE LAST POINT OF FAILURE (see below) *****/
- SPS_ERR("sps:Desc cache error: BAM 0x%x pipe %d: %d",
+ SPS_ERR("sps:Desc cache error: BAM 0x%x pipe %d: %d\n",
BAM_ID(dev), pipe_index,
pipe->desc_size + size);
return SPS_ERROR;
@@ -1114,7 +1147,12 @@
struct sps_pipe *pipe = dev->pipes[pipe_index];
/* Disable the BAM pipe */
- bam_pipe_disable(dev->base, pipe_index);
+ if (pipe->connect.options & SPS_O_NO_DISABLE)
+ SPS_DBG("sps:BAM 0x%x pipe %d enters disable state\n",
+ BAM_ID(dev), pipe_index);
+ else
+ bam_pipe_disable(dev->base, pipe_index);
+
pipe->state &= ~BAM_STATE_ENABLED;
return 0;
@@ -1134,8 +1172,8 @@
if (pipe->sys.no_queue && reg->xfer_done != NULL &&
reg->mode != SPS_TRIGGER_CALLBACK) {
- SPS_ERR("sps:Only callback events support for NO_Q: "
- "BAM 0x%x pipe %d mode %d",
+ SPS_ERR(
+ "sps:Only callback events support for NO_Q: BAM 0x%x pipe %d mode %d\n",
BAM_ID(dev), pipe_index, reg->mode);
return SPS_ERROR;
}
@@ -1149,9 +1187,9 @@
index = SPS_EVENT_INDEX(opt_event_table[n].event_id);
if (index < 0)
- SPS_ERR("sps:Negative event index: "
- "BAM 0x%x pipe %d mode %d",
- BAM_ID(dev), pipe_index, reg->mode);
+ SPS_ERR(
+ "sps:Negative event index: BAM 0x%x pipe %d mode %d\n",
+ BAM_ID(dev), pipe_index, reg->mode);
else {
event_reg = &pipe->sys.event_regs[index];
event_reg->xfer_done = reg->xfer_done;
@@ -1180,7 +1218,7 @@
/* Is this a BAM-to-BAM or satellite connection? */
if ((pipe->state & (BAM_STATE_BAM2BAM | BAM_STATE_REMOTE))) {
- SPS_ERR("sps:Transfer on BAM-to-BAM: BAM 0x%x pipe %d",
+ SPS_ERR("sps:Transfer on BAM-to-BAM: BAM 0x%x pipe %d\n",
BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
@@ -1190,7 +1228,7 @@
* SPS_O_NO_Q option.
*/
if (pipe->sys.no_queue && user != NULL) {
- SPS_ERR("sps:User pointer arg non-NULL: BAM 0x%x pipe %d",
+ SPS_ERR("sps:User pointer arg non-NULL: BAM 0x%x pipe %d\n",
BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
@@ -1211,24 +1249,27 @@
if (next_write == pipe->sys.acked_offset) {
if (!show_recom) {
show_recom = true;
- SPS_ERR("sps:Client of BAM 0x%x pipe %d is recommended to have flow control",
+ SPS_ERR(
+ "sps:Client of BAM 0x%x pipe %d is recommended to have flow control\n",
BAM_ID(dev), pipe_index);
}
- SPS_DBG2("sps:Descriptor FIFO is full for BAM "
- "0x%x pipe %d after pipe_handler_eot",
+ SPS_DBG2(
+ "sps:Descriptor FIFO is full for BAM 0x%x pipe %d after pipe_handler_eot\n",
BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
} else {
if (!show_recom) {
show_recom = true;
- SPS_ERR("sps:Client of BAM 0x%x pipe %d is recommended to have flow control.",
+ SPS_ERR(
+ "sps:Client of BAM 0x%x pipe %d is recommended to have flow control.\n",
BAM_ID(dev), pipe_index);
}
- SPS_DBG2("sps:Descriptor FIFO is full for "
- "BAM 0x%x pipe %d", BAM_ID(dev), pipe_index);
+ SPS_DBG2(
+ "sps:Descriptor FIFO is full for BAM 0x%x pipe %d\n",
+ BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
}
@@ -1304,14 +1345,14 @@
int result;
if (transfer->iovec_count == 0) {
- SPS_ERR("sps:iovec count zero: BAM 0x%x pipe %d",
+ SPS_ERR("sps:iovec count zero: BAM 0x%x pipe %d\n",
BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
sps_bam_get_free_count(dev, pipe_index, &count);
if (count < transfer->iovec_count) {
- SPS_ERR("sps:Insufficient free desc: BAM 0x%x pipe %d: %d",
+ SPS_ERR("sps:Insufficient free desc: BAM 0x%x pipe %d: %d\n",
BAM_ID(dev), pipe_index, count);
return SPS_ERROR;
}
@@ -1382,18 +1423,18 @@
struct sps_q_event *sps_event)
{
if (sps_event == NULL) {
- SPS_DBG("sps:trigger_event.sps_event is NULL.");
+ SPS_DBG("sps:trigger_event.sps_event is NULL.\n");
return;
}
if (event_reg->xfer_done) {
complete(event_reg->xfer_done);
- SPS_DBG("sps:trigger_event.done=%d.",
+ SPS_DBG("sps:trigger_event.done=%d.\n",
event_reg->xfer_done->done);
}
if (event_reg->callback) {
- SPS_DBG("sps:trigger_event.using callback.");
+ SPS_DBG("sps:trigger_event.using callback.\n");
event_reg->callback(&sps_event->notify);
}
@@ -1665,7 +1706,7 @@
pipe_index = pipe->pipe_index;
status = bam_pipe_get_and_clear_irq_status(dev->base, pipe_index);
- SPS_DBG("sps:pipe_handler.bam 0x%x.pipe %d.status=0x%x.",
+ SPS_DBG("sps:pipe_handler.bam 0x%x.pipe %d.status=0x%x.\n",
BAM_ID(dev), pipe_index, status);
/* Check for enabled interrupt sources */
@@ -1737,8 +1778,8 @@
struct sps_q_event *event_queue;
if (pipe->sys.no_queue) {
- SPS_ERR("sps:Invalid connection for event: "
- "BAM 0x%x pipe %d context 0x%x",
+ SPS_ERR(
+ "sps:Invalid connection for event: BAM 0x%x pipe %d context 0x%x\n",
BAM_ID(dev), pipe_index, (u32) pipe);
notify->event_id = SPS_EVENT_INVALID;
return SPS_ERROR;
@@ -1751,9 +1792,10 @@
/* Pull an event off the synchronous event queue */
if (list_empty(&pipe->sys.events_q)) {
event_queue = NULL;
- SPS_DBG("sps:events_q of bam 0x%x is empty.", BAM_ID(dev));
+ SPS_DBG("sps:events_q of bam 0x%x is empty.\n", BAM_ID(dev));
} else {
- SPS_DBG("sps:events_q of bam 0x%x is not empty.", BAM_ID(dev));
+ SPS_DBG("sps:events_q of bam 0x%x is not empty.\n",
+ BAM_ID(dev));
event_queue =
list_first_entry(&pipe->sys.events_q, struct sps_q_event,
list);
@@ -1842,7 +1884,7 @@
/* Is this a satellite connection? */
if ((pipe->state & BAM_STATE_REMOTE)) {
- SPS_ERR("sps:Is empty on remote: BAM 0x%x pipe %d",
+ SPS_ERR("sps:Is empty on remote: BAM 0x%x pipe %d\n",
BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
@@ -1881,8 +1923,9 @@
/* Is this a BAM-to-BAM or satellite connection? */
if ((pipe->state & (BAM_STATE_BAM2BAM | BAM_STATE_REMOTE))) {
- SPS_ERR("sps:Free count on BAM-to-BAM or remote: BAM "
- "0x%x pipe %d", BAM_ID(dev), pipe_index);
+ SPS_ERR(
+ "sps:Free count on BAM-to-BAM or remote: BAM 0x%x pipe %d\n",
+ BAM_ID(dev), pipe_index);
*count = 0;
return SPS_ERROR;
}
@@ -1917,14 +1960,15 @@
*/
if ((dev->props.manage & SPS_BAM_MGR_MULTI_EE) == 0 ||
(dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE)) {
- SPS_ERR("sps:Cannot grant satellite control to BAM 0x%x "
- "pipe %d", BAM_ID(dev), pipe_index);
+ SPS_ERR(
+ "sps:Cannot grant satellite control to BAM 0x%x pipe %d\n",
+ BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
/* Is this pipe locally controlled? */
if ((dev->pipe_active_mask & (1UL << pipe_index)) == 0) {
- SPS_ERR("sps:BAM 0x%x pipe %d not local and active",
+ SPS_ERR("sps:BAM 0x%x pipe %d not local and active\n",
BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
@@ -1972,7 +2016,7 @@
/* Is this pipe locally controlled? */
if ((dev->pipe_active_mask & (1UL << pipe_index)) == 0) {
- SPS_ERR("sps:BAM 0x%x pipe %d not local and active",
+ SPS_ERR("sps:BAM 0x%x pipe %d not local and active\n",
BAM_ID(dev), pipe_index);
return SPS_ERROR;
}
@@ -1984,7 +2028,7 @@
BAM_PIPE_TIMER_ONESHOT :
BAM_PIPE_TIMER_PERIODIC;
bam_pipe_timer_config(dev->base, pipe_index, mode,
- timer_ctrl->timeout_msec * 10);
+ timer_ctrl->timeout_msec * 8);
break;
case SPS_TIMER_OP_RESET:
bam_pipe_timer_reset(dev->base, pipe_index);
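The SPS_O_NO_DISABLE handling introduced above repeats one guard across the connect-error, disconnect, and disable paths: when the client set SPS_O_NO_DISABLE, the pipe hardware is left untouched and only a debug message is logged. A minimal sketch of that guard using names visible in the hunks above; the helper itself and the ownership wording in its comment are illustrative assumptions, not driver code.

/* Sketch only: skip hardware teardown for pipes flagged with
 * SPS_O_NO_DISABLE (assumed to mean the pipe is owned or still used
 * by another entity), mirroring the checks added above. */
static void sps_bam_pipe_hw_teardown_sketch(struct sps_bam *dev,
					    u32 pipe_index, u32 options)
{
	if (options & SPS_O_NO_DISABLE) {
		SPS_DBG("sps:BAM 0x%x pipe %d left enabled\n",
			BAM_ID(dev), pipe_index);
		return;
	}
	bam_pipe_disable(dev->base, pipe_index);
}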
diff --git a/drivers/platform/msm/sps/sps_bam.h b/drivers/platform/msm/sps/sps_bam.h
index bbc0373..dede487 100644
--- a/drivers/platform/msm/sps/sps_bam.h
+++ b/drivers/platform/msm/sps/sps_bam.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,7 @@
BAM_DEV_IRQ_RDY_TO_SLEEP = 0x00000001,
BAM_DEV_IRQ_HRESP_ERROR = 0x00000002,
BAM_DEV_IRQ_ERROR = 0x00000004,
+ BAM_DEV_IRQ_TIMER = 0x00000010,
};
/* Pipe interrupt mask */
diff --git a/drivers/power/battery_current_limit.c b/drivers/power/battery_current_limit.c
index ecda153..69fa4a8 100644
--- a/drivers/power/battery_current_limit.c
+++ b/drivers/power/battery_current_limit.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -120,8 +120,10 @@
if (psy == NULL) {
psy = power_supply_get_by_name("battery");
- if (psy == NULL)
+ if (psy == NULL) {
+ pr_err("failed to get ps battery\n");
return;
+ }
}
if (psy->get_property(psy, POWER_SUPPLY_PROP_CURRENT_NOW, &ret))
@@ -143,6 +145,7 @@
gbcl->bcl_imax_ma = imax_ma;
gbcl->bcl_vbat_mv = vbatt_mv;
+ pr_debug("ibatt %d, imax %d, vbatt %d\n", ibatt_ma, imax_ma, vbatt_mv);
if (gbcl->bcl_threshold_mode[BCL_IBAT_IMAX_THRESHOLD_TYPE_HIGH]
== BCL_IBAT_IMAX_THRESHOLD_ENABLED) {
imax_high_threshold =
@@ -179,8 +182,7 @@
bcl_calculate_imax_trigger();
/* restart the delayed work for calculating imax */
schedule_delayed_work(&bcl->bcl_imax_work,
- round_jiffies_relative(msecs_to_jiffies
- (bcl->bcl_poll_interval_msec)));
+ msecs_to_jiffies(bcl->bcl_poll_interval_msec));
}
}
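The battery_current_limit change above reschedules the imax poll with a plain msecs_to_jiffies() delay instead of wrapping it in round_jiffies_relative(), so the poll fires after the exact interval rather than on a rounded second boundary. A minimal sketch of the two scheduling forms side by side; the helper and its 'exact' flag are illustrative.

/* Sketch: exact vs. second-aligned delayed-work scheduling.
 * round_jiffies_relative() rounds the delay so timers tend to expire
 * on whole-second boundaries (batching wakeups) at the cost of a
 * slightly skewed poll period. */
static void schedule_poll_sketch(struct delayed_work *work,
				 unsigned int interval_msec, bool exact)
{
	unsigned long delay = msecs_to_jiffies(interval_msec);

	if (exact)
		schedule_delayed_work(work, delay);
	else
		schedule_delayed_work(work, round_jiffies_relative(delay));
}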
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index e9cf973..1ad7f21 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -83,6 +83,7 @@
#define CHG_COMP_OVR 0x20A
#define IUSB_FINE_RES 0x2B6
#define OVP_USB_UVD 0x2B7
+#define PM8921_USB_TRIM_SEL 0x339
/* check EOC every 10 seconds */
#define EOC_CHECK_PERIOD_MS 10000
@@ -213,6 +214,8 @@
* @alarm_high_mv: the battery alarm voltage high
* @cool_temp_dc: the cool temp threshold in deciCelcius
* @warm_temp_dc: the warm temp threshold in deciCelcius
+ * @hysteresis_temp_dc: the hysteresis between temp thresholds in
+ * deciCelsius
* @resume_voltage_delta: the voltage delta from vdd max at which the
* battery should resume charging
* @term_current: The charging based term current
@@ -235,6 +238,7 @@
unsigned int alarm_high_mv;
int cool_temp_dc;
int warm_temp_dc;
+ int hysteresis_temp_dc;
unsigned int temp_check_period;
unsigned int cool_bat_chg_current;
unsigned int warm_bat_chg_current;
@@ -308,6 +312,7 @@
static int thermal_mitigation;
static struct pm8921_chg_chip *the_chip;
+static void check_temp_thresholds(struct pm8921_chg_chip *chip);
#define LPM_ENABLE_BIT BIT(2)
static int pm8921_chg_set_lpm(struct pm8921_chg_chip *chip, int enable)
@@ -773,6 +778,44 @@
};
/* USB Trim tables */
+static int usb_trim_pm8921_table_1[USB_TRIM_ENTRIES] = {
+ 0x0,
+ 0x0,
+ -0x5,
+ 0x0,
+ -0x7,
+ 0x0,
+ -0x9,
+ -0xA,
+ 0x0,
+ 0x0,
+ -0xE,
+ 0x0,
+ -0xF,
+ 0x0,
+ -0x10,
+ 0x0
+};
+
+static int usb_trim_pm8921_table_2[USB_TRIM_ENTRIES] = {
+ 0x0,
+ 0x0,
+ -0x2,
+ 0x0,
+ -0x4,
+ 0x0,
+ -0x4,
+ -0x5,
+ 0x0,
+ 0x0,
+ -0x6,
+ 0x0,
+ -0x6,
+ 0x0,
+ -0x6,
+ 0x0
+};
+
static int usb_trim_8038_table[USB_TRIM_ENTRIES] = {
0x0,
0x0,
@@ -840,6 +883,8 @@
#define REG_USB_OVP_TRIM_ORIG_MSB 0x09C
#define REG_USB_OVP_TRIM_PM8917 0x2B5
#define REG_USB_OVP_TRIM_PM8917_BIT BIT(0)
+#define USB_TRIM_MAX_DATA_PM8917 0x3F
+#define USB_TRIM_POLARITY_PM8917_BIT BIT(6)
static int pm_chg_usb_trim(struct pm8921_chg_chip *chip, int index)
{
u8 temp, sbi_config, msb, lsb, mask;
@@ -3162,6 +3207,22 @@
struct delayed_work *dwork = to_delayed_work(work);
struct pm8921_chg_chip *chip = container_of(dwork,
struct pm8921_chg_chip, update_heartbeat_work);
+ bool chg_present = chip->usb_present || chip->dc_present;
+
+ /* for battery health when charger is not connected */
+ if (chip->btc_override && !chg_present)
+ schedule_delayed_work(&chip->btc_override_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (chip->btc_delay_ms)));
+
+ /*
+ * check temp thresholds when charger is present and
+ * battery is FULL. The temperature here can impact
+ * the charging restart conditions.
+ */
+ if (chip->btc_override && chg_present &&
+ !wake_lock_active(&chip->eoc_wake_lock))
+ check_temp_thresholds(chip);
power_supply_changed(&chip->batt_psy);
if (chip->recent_reported_soc <= 20)
@@ -3317,7 +3378,7 @@
if (chip->warm_temp_dc != INT_MIN) {
if (chip->is_bat_warm
- && temp < chip->warm_temp_dc - TEMP_HYSTERISIS_DECIDEGC)
+ && temp < chip->warm_temp_dc - chip->hysteresis_temp_dc)
battery_warm(false);
else if (!chip->is_bat_warm && temp >= chip->warm_temp_dc)
battery_warm(true);
@@ -3325,7 +3386,7 @@
if (chip->cool_temp_dc != INT_MIN) {
if (chip->is_bat_cool
- && temp > chip->cool_temp_dc + TEMP_HYSTERISIS_DECIDEGC)
+ && temp > chip->cool_temp_dc + chip->hysteresis_temp_dc)
battery_cool(false);
else if (!chip->is_bat_cool && temp <= chip->cool_temp_dc)
battery_cool(true);
@@ -3543,7 +3604,8 @@
temp = pm_chg_get_rt_status(chip, BATTTEMP_HOT_IRQ);
if (temp) {
- if (decidegc < chip->btc_override_hot_decidegc)
+ if (decidegc < chip->btc_override_hot_decidegc -
+ chip->hysteresis_temp_dc)
/* stop forcing batt hot */
rc = pm_chg_override_hot(chip, 0);
if (rc)
@@ -3558,7 +3620,8 @@
temp = pm_chg_get_rt_status(chip, BATTTEMP_COLD_IRQ);
if (temp) {
- if (decidegc > chip->btc_override_cold_decidegc)
+ if (decidegc > chip->btc_override_cold_decidegc +
+ chip->hysteresis_temp_dc)
/* stop forcing batt cold */
rc = pm_chg_override_cold(chip, 0);
if (rc)
@@ -3622,7 +3685,8 @@
end = is_charging_finished(chip, vbat_batt_terminal_uv, ichg_meas_ma);
- if (end == CHG_NOT_IN_PROGRESS) {
+ if (end == CHG_NOT_IN_PROGRESS && (!chip->btc_override ||
+ !(chip->usb_present || chip->dc_present))) {
count = 0;
goto eoc_worker_stop;
}
@@ -3650,7 +3714,8 @@
chgdone_irq_handler(chip->pmic_chg_irq[CHGDONE_IRQ], chip);
} else {
check_temp_thresholds(chip);
- adjust_vdd_max_for_fastchg(chip, vbat_batt_terminal_uv);
+ if (end != CHG_NOT_IN_PROGRESS)
+ adjust_vdd_max_for_fastchg(chip, vbat_batt_terminal_uv);
pr_debug("EOC count = %d\n", count);
schedule_delayed_work(&chip->eoc_work,
round_jiffies_relative(msecs_to_jiffies
@@ -3659,9 +3724,9 @@
}
eoc_worker_stop:
- wake_unlock(&chip->eoc_wake_lock);
/* set the vbatdet back, in case it was changed to trigger charging */
set_appropriate_vbatdet(chip);
+ wake_unlock(&chip->eoc_wake_lock);
}
/**
@@ -3781,11 +3846,14 @@
}
}
+#define PM8921_USB_TRIM_SEL_BIT BIT(6)
/* determines the initial present states */
static void __devinit determine_initial_state(struct pm8921_chg_chip *chip)
{
int fsm_state;
int is_fast_chg;
+ int rc = 0;
+ u8 trim_sel_reg = 0, regsbi;
chip->dc_present = !!is_dc_chg_plugged_in(chip);
chip->usb_present = !!is_usb_chg_plugged_in(chip);
@@ -3795,6 +3863,11 @@
schedule_delayed_work(&chip->unplug_check_work,
msecs_to_jiffies(UNPLUG_CHECK_WAIT_PERIOD_MS));
pm8921_chg_enable_irq(chip, CHG_GONE_IRQ);
+
+ if (chip->btc_override)
+ schedule_delayed_work(&chip->btc_override_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (chip->btc_delay_ms)));
}
pm8921_chg_enable_irq(chip, DCIN_VALID_IRQ);
@@ -3843,10 +3916,26 @@
fsm_state);
/* Determine which USB trim column to use */
- if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8917)
+ if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8917) {
chip->usb_trim_table = usb_trim_8917_table;
- else if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8038)
+ } else if (pm8xxx_get_version(chip->dev->parent) ==
+ PM8XXX_VERSION_8038) {
chip->usb_trim_table = usb_trim_8038_table;
+ } else if (pm8xxx_get_version(chip->dev->parent) ==
+ PM8XXX_VERSION_8921) {
+ rc = pm8xxx_readb(chip->dev->parent, REG_SBI_CONFIG, &regsbi);
+ rc |= pm8xxx_writeb(chip->dev->parent, REG_SBI_CONFIG, 0x5E);
+ rc |= pm8xxx_readb(chip->dev->parent, PM8921_USB_TRIM_SEL,
+ &trim_sel_reg);
+ rc |= pm8xxx_writeb(chip->dev->parent, REG_SBI_CONFIG, regsbi);
+ if (rc)
+ pr_err("Failed to read trim sel register rc=%d\n", rc);
+
+ if (trim_sel_reg & PM8921_USB_TRIM_SEL_BIT)
+ chip->usb_trim_table = usb_trim_pm8921_table_1;
+ else
+ chip->usb_trim_table = usb_trim_pm8921_table_2;
+ }
}
struct pm_chg_irq_init_data {
@@ -4574,12 +4663,12 @@
int rc;
struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
- pm8921_chg_force_19p2mhz_clk(chip);
-
rc = pm8921_chg_set_lpm(chip, 0);
if (rc)
pr_err("Failed to set lpm rc=%d\n", rc);
+ pm8921_chg_force_19p2mhz_clk(chip);
+
rc = pm_chg_masked_write(chip, CHG_CNTRL, VREF_BATT_THERM_FORCE_ON,
VREF_BATT_THERM_FORCE_ON);
if (rc)
@@ -4600,6 +4689,8 @@
is_usb_chg_plugged_in(the_chip)))
schedule_delayed_work(&chip->btc_override_work, 0);
+ schedule_delayed_work(&chip->update_heartbeat_work, 0);
+
return 0;
}
@@ -4607,6 +4698,8 @@
{
struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
+ cancel_delayed_work_sync(&chip->update_heartbeat_work);
+
if (chip->btc_override)
cancel_delayed_work_sync(&chip->btc_override_work);
@@ -4663,6 +4756,11 @@
else
chip->warm_temp_dc = INT_MIN;
+ if (pdata->hysteresis_temp)
+ chip->hysteresis_temp_dc = pdata->hysteresis_temp * 10;
+ else
+ chip->hysteresis_temp_dc = TEMP_HYSTERISIS_DECIDEGC;
+
chip->temp_check_period = pdata->temp_check_period;
chip->max_bat_chg_current = pdata->max_bat_chg_current;
/* Assign to corresponding module parameter */
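The hysteresis rework above replaces the fixed TEMP_HYSTERISIS_DECIDEGC with the platform-supplied chip->hysteresis_temp_dc, applied when leaving the warm, cool, hot, and cold states. A self-contained sketch of the warm-side state update, assuming temperatures as plain ints in deci-degrees Celsius; the helper is hypothetical, not the driver's check_temp_thresholds().

/* Sketch: enter the warm state at warm_temp_dc and leave it only once
 * the temperature falls hysteresis_temp_dc below that threshold, so
 * small fluctuations around the limit do not toggle charging settings. */
static bool batt_warm_update_sketch(bool is_warm, int temp_dc,
				    int warm_temp_dc, int hysteresis_temp_dc)
{
	if (!is_warm && temp_dc >= warm_temp_dc)
		return true;	/* normal -> warm */
	if (is_warm && temp_dc < warm_temp_dc - hysteresis_temp_dc)
		return false;	/* warm -> normal */
	return is_warm;		/* unchanged */
}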
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 37ac7b5..fd42c47 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -125,7 +125,6 @@
int r_conn_mohm;
int shutdown_soc_valid_limit;
int adjust_soc_low_threshold;
- int adjust_soc_high_threshold;
int chg_term_ua;
enum battery_type batt_type;
unsigned int fcc;
@@ -176,6 +175,7 @@
struct timespec t_soc_queried;
int last_soc;
int last_soc_est;
+ int last_soc_unbound;
int charge_time_us;
int catch_up_time_us;
@@ -194,6 +194,12 @@
int prev_voltage_based_soc;
bool use_voltage_soc;
+ int prev_batt_terminal_uv;
+ int high_ocv_correction_limit_uv;
+ int low_ocv_correction_limit_uv;
+ int flat_ocv_threshold_uv;
+ int hold_soc_est;
+
int ocv_high_threshold_uv;
int ocv_low_threshold_uv;
unsigned long last_recalc_time;
@@ -651,6 +657,7 @@
chip->last_cc_uah = INT_MIN;
chip->last_ocv_temp = batt_temp;
chip->last_soc_invalid = true;
+ chip->prev_batt_terminal_uv = 0;
}
#define OCV_RAW_UNINITIALIZED 0xFFFF
@@ -1296,11 +1303,13 @@
pr_debug("CC_TO_CV ibat_ua = %d CHG SOC %d\n",
ibat_ua, soc);
}
+
+ chip->prev_batt_terminal_uv = batt_terminal_uv;
return soc;
}
/*
- * battery is in CV phase - begin liner inerpolation of soc based on
+ * battery is in CV phase - begin linear interpolation of soc based on
* battery charge current
*/
@@ -1308,10 +1317,11 @@
* if voltage lessened (possibly because of a system load)
* keep reporting the prev chg soc
*/
- if (batt_terminal_uv <= chip->max_voltage_uv - 10000) {
+ if (batt_terminal_uv <= chip->prev_batt_terminal_uv) {
pr_debug("batt_terminal_uv %d < (max = %d - 10000); CC CHG SOC %d\n",
- batt_terminal_uv,
- chip->max_voltage_uv, chip->prev_chg_soc);
+ batt_terminal_uv, chip->prev_batt_terminal_uv,
+ chip->prev_chg_soc);
+ chip->prev_batt_terminal_uv = batt_terminal_uv;
return chip->prev_chg_soc;
}
@@ -1334,6 +1344,7 @@
}
pr_debug("Reporting CHG SOC %d\n", chip->prev_chg_soc);
+ chip->prev_batt_terminal_uv = batt_terminal_uv;
return chip->prev_chg_soc;
}
@@ -1356,6 +1367,7 @@
}
}
+#define NO_ADJUST_HIGH_SOC_THRESHOLD 90
static int adjust_soc(struct qpnp_bms_chip *chip, struct soc_params *params,
int soc, int batt_temp)
{
@@ -1369,6 +1381,7 @@
int slope = 0;
int rc = 0;
int delta_ocv_uv_limit = 0;
+ int correction_limit_uv = 0;
rc = get_simultaneous_batt_v_and_i(chip, &ibat_ua, &vbat_uv);
if (rc < 0) {
@@ -1404,18 +1417,15 @@
/*
* do not adjust
- * if soc is same as what bms calculated
- * if soc_est is between 45 and 25, this is the flat portion of the
- * curve where soc_est is not so accurate. We generally don't want to
- * adjust when soc_est is inaccurate except for the cases when soc is
- * way far off (higher than 50 or lesser than 20).
- * Also don't adjust soc if it is above 90 becuase it might be pulled
- * low and cause a bad user experience
+ * if soc_est is same as what bms calculated
+ * OR if soc_est > adjust_soc_low_threshold
+ * OR if soc is above 90
+ * because we might pull it low
+ * and cause a bad user experience
*/
if (soc_est == soc
- || (is_between(45, chip->adjust_soc_low_threshold, soc_est)
- && is_between(50, chip->adjust_soc_low_threshold - 5, soc))
- || soc >= 90)
+ || soc_est > chip->adjust_soc_low_threshold
+ || soc >= NO_ADJUST_HIGH_SOC_THRESHOLD)
goto out;
if (chip->last_soc_est == -EINVAL)
@@ -1460,6 +1470,21 @@
pr_debug("new delta ocv = %d\n", delta_ocv_uv);
}
+ if (chip->last_ocv_uv > chip->flat_ocv_threshold_uv)
+ correction_limit_uv = chip->high_ocv_correction_limit_uv;
+ else
+ correction_limit_uv = chip->low_ocv_correction_limit_uv;
+
+ if (abs(delta_ocv_uv) > correction_limit_uv) {
+ pr_debug("limiting delta ocv %d limit = %d\n",
+ delta_ocv_uv, correction_limit_uv);
+ if (delta_ocv_uv > 0)
+ delta_ocv_uv = correction_limit_uv;
+ else
+ delta_ocv_uv = -correction_limit_uv;
+ pr_debug("new delta ocv = %d\n", delta_ocv_uv);
+ }
+
chip->last_ocv_uv -= delta_ocv_uv;
if (chip->last_ocv_uv >= chip->max_voltage_uv)
@@ -1474,9 +1499,9 @@
/*
* if soc_new is ZERO force it higher so that phone doesn't report soc=0
- * soc = 0 should happen only when soc_est == 0
+ * soc = 0 should happen only when soc_est is above a set value
*/
- if (soc_new == 0 && soc_est != 0)
+ if (soc_new == 0 && soc_est >= chip->hold_soc_est)
soc_new = 1;
soc = soc_new;
@@ -1874,9 +1899,18 @@
soc = scale_soc_while_chg(chip, delta_time_us,
soc, chip->last_soc);
+ if (chip->last_soc_unbound)
+ chip->last_soc_unbound = false;
+ else if (chip->last_soc != -EINVAL) {
+ if (soc < chip->last_soc && soc != 0)
+ soc = chip->last_soc - 1;
+ if (soc > chip->last_soc && soc != 100)
+ soc = chip->last_soc + 1;
+ }
+
pr_debug("last_soc = %d, calculated_soc = %d, soc = %d\n",
chip->last_soc, chip->calculated_soc, soc);
- chip->last_soc = soc;
+ chip->last_soc = bound_soc(soc);
backup_soc_and_iavg(chip, batt_temp, chip->last_soc);
pr_debug("Reported SOC = %d\n", chip->last_soc);
chip->t_soc_queried = now;
@@ -2143,6 +2177,7 @@
chip->rbatt_sf_lut = batt_data->rbatt_sf_lut;
chip->default_rbatt_mohm = batt_data->default_rbatt_mohm;
chip->rbatt_capacitive_mohm = batt_data->rbatt_capacitive_mohm;
+ chip->flat_ocv_threshold_uv = batt_data->flat_ocv_threshold_uv;
if (chip->pc_temp_ocv_lut == NULL) {
pr_err("temp ocv lut table is NULL\n");
@@ -2174,8 +2209,6 @@
SPMI_PROP_READ(chg_term_ua, "chg-term-ua", rc);
SPMI_PROP_READ(shutdown_soc_valid_limit,
"shutdown-soc-valid-limit", rc);
- SPMI_PROP_READ(adjust_soc_high_threshold,
- "adjust-soc-high-threshold", rc);
SPMI_PROP_READ(adjust_soc_low_threshold,
"adjust-soc-low-threshold", rc);
SPMI_PROP_READ(batt_type, "batt-type", rc);
@@ -2195,6 +2228,12 @@
chip->use_ocv_thresholds = of_property_read_bool(
chip->spmi->dev.of_node,
"qcom,use-ocv-thresholds");
+ SPMI_PROP_READ(high_ocv_correction_limit_uv,
+ "high-ocv-correction-limit-uv", rc);
+ SPMI_PROP_READ(low_ocv_correction_limit_uv,
+ "low-ocv-correction-limit-uv", rc);
+ SPMI_PROP_READ(hold_soc_est,
+ "hold-soc-est", rc);
SPMI_PROP_READ(ocv_high_threshold_uv,
"ocv-voltage-high-threshold-uv", rc);
SPMI_PROP_READ(ocv_low_threshold_uv,
@@ -2210,8 +2249,8 @@
pr_debug("r_conn:%d, shutdown_soc: %d, adjust_soc_low:%d\n",
chip->r_conn_mohm, chip->shutdown_soc_valid_limit,
chip->adjust_soc_low_threshold);
- pr_debug("adjust_soc_high:%d, chg_term_ua:%d, batt_type:%d\n",
- chip->adjust_soc_high_threshold, chip->chg_term_ua,
+ pr_debug("chg_term_ua:%d, batt_type:%d\n",
+ chip->chg_term_ua,
chip->batt_type);
pr_debug("ignore_shutdown_soc:%d, use_voltage_soc:%d\n",
chip->ignore_shutdown_soc, chip->use_voltage_soc);
@@ -2542,6 +2581,11 @@
if (rc) {
pr_err("Could not read current time: %d\n", rc);
} else if (tm_now_sec > chip->last_recalc_time) {
+ /*
+ * unbind the last soc so that the next
+ * recalculation is not limited to changing by 1%
+ */
+ chip->last_soc_unbound = true;
time_since_last_recalc = tm_now_sec - chip->last_recalc_time;
pr_debug("Time since last recalc: %lu\n",
time_since_last_recalc);
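The last_soc handling added above limits each reported state-of-charge step to one percentage point of the previous report, except right after a recalculation sets last_soc_unbound. A self-contained sketch of that clamp; the helper is hypothetical, with last_soc < 0 standing in for the -EINVAL "no previous value" case.

/* Sketch: move the reported SOC at most 1% toward the newly calculated
 * value per query, but let it jump freely on the first report or when
 * the previous value has been explicitly unbound. */
static int report_soc_sketch(int calculated_soc, int last_soc, bool unbound)
{
	if (unbound || last_soc < 0)
		return calculated_soc;
	if (calculated_soc < last_soc && calculated_soc != 0)
		return last_soc - 1;
	if (calculated_soc > last_soc && calculated_soc != 100)
		return last_soc + 1;
	return calculated_soc;
}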
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index f4efa756..6a2ce8d 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -85,7 +85,11 @@
#define CHGR_BUCK_BCK_VBAT_REG_MODE 0x74
#define MISC_REVISION2 0x01
#define USB_OVP_CTL 0x42
+#define USB_CHG_GONE_REV_BST 0xED
+#define BUCK_VCHG_OV 0x77
+#define BUCK_TEST_SMBC_MODES 0xE6
#define SEC_ACCESS 0xD0
+#define BAT_IF_VREF_BAT_THM_CTRL 0x4A
#define REG_OFFSET_PERP_SUBTYPE 0x05
/* SMBB peripheral subtype values */
@@ -105,6 +109,13 @@
#define SMBBP_BOOST_SUBTYPE 0x36
#define SMBBP_MISC_SUBTYPE 0x37
+/* SMBCL peripheral subtype values */
+#define SMBCL_CHGR_SUBTYPE 0x41
+#define SMBCL_BUCK_SUBTYPE 0x42
+#define SMBCL_BAT_IF_SUBTYPE 0x43
+#define SMBCL_USB_CHGPTH_SUBTYPE 0x44
+#define SMBCL_MISC_SUBTYPE 0x47
+
#define QPNP_CHARGER_DEV_NAME "qcom,qpnp-charger"
/* Status bits and masks */
@@ -113,6 +124,8 @@
#define CHGR_ON_BAT_FORCE_BIT BIT(0)
#define USB_VALID_DEB_20MS 0x03
#define BUCK_VBAT_REG_NODE_SEL_BIT BIT(0)
+#define VREF_BATT_THERM_FORCE_ON 0xC0
+#define VREF_BAT_THM_ENABLED_FSM 0x80
/* Interrupt definitions */
/* smbb_chg_interrupts */
@@ -157,6 +170,11 @@
/* smbb_misc_interrupts */
#define TFTWDOG_IRQ BIT(0)
+/* SMBB types */
+#define SMBB BIT(1)
+#define SMBBP BIT(2)
+#define SMBCL BIT(3)
+
/* Workaround flags */
#define CHG_FLAGS_VCP_WA BIT(0)
@@ -194,6 +212,8 @@
* @warm_bat_decidegc Warm battery temperature in degree Celsius
* @cool_bat_decidegc Cool battery temperature in degree Celsius
* @revision: PMIC revision
+ * @type: SMBB type
+ * @tchg_mins: maximum allowed software initiated charge time
* @thermal_levels amount of thermal mitigation levels
* @thermal_mitigation thermal mitigation level values
* @therm_lvl_sel thermal mitigation level selection
@@ -218,7 +238,7 @@
u16 freq_base;
unsigned int usbin_valid_irq;
unsigned int dcin_valid_irq;
- unsigned int chg_done_irq;
+ unsigned int chg_gone_irq;
unsigned int chg_fastchg_irq;
unsigned int chg_trklchg_irq;
unsigned int chg_failed_irq;
@@ -247,6 +267,8 @@
unsigned int cool_bat_decidegc;
unsigned int safe_current;
unsigned int revision;
+ unsigned int type;
+ unsigned int tchg_mins;
unsigned int thermal_levels;
unsigned int therm_lvl_sel;
unsigned int *thermal_mitigation;
@@ -256,6 +278,8 @@
struct power_supply batt_psy;
uint32_t flags;
struct qpnp_adc_tm_btm_param adc_param;
+ struct work_struct adc_measure_work;
+ struct delayed_work arb_stop_work;
};
static struct of_device_id qpnp_charger_match_table[] = {
@@ -508,6 +532,66 @@
enable ? USB_SUSPEND_BIT : 0, 1);
}
+static int
+qpnp_chg_charge_en(struct qpnp_chg_chip *chip, int enable)
+{
+ return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
+ CHGR_CHG_EN,
+ enable ? CHGR_CHG_EN : 0, 1);
+}
+
+static int
+qpnp_chg_force_run_on_batt(struct qpnp_chg_chip *chip, int disable)
+{
+ /* Don't run on battery for batteryless hardware */
+ if (chip->use_default_batt_values)
+ return 0;
+
+ /* This bit forces the charger to run off of the battery rather
+ * than a connected charger */
+ return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
+ CHGR_ON_BAT_FORCE_BIT,
+ disable ? CHGR_ON_BAT_FORCE_BIT : 0, 1);
+}
+
+static void
+qpnp_arb_stop_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct qpnp_chg_chip *chip = container_of(dwork,
+ struct qpnp_chg_chip, arb_stop_work);
+
+ qpnp_chg_charge_en(chip, !chip->charging_disabled);
+ qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
+}
+
+static void
+qpnp_bat_if_adc_measure_work(struct work_struct *work)
+{
+ struct qpnp_chg_chip *chip = container_of(work,
+ struct qpnp_chg_chip, adc_measure_work);
+
+ if (qpnp_adc_tm_channel_measure(&chip->adc_param))
+ pr_err("request ADC error\n");
+}
+
+#define ARB_STOP_WORK_MS 1000
+static irqreturn_t
+qpnp_chg_usb_chg_gone_irq_handler(int irq, void *_chip)
+{
+ struct qpnp_chg_chip *chip = _chip;
+
+ pr_debug("chg_gone triggered\n");
+ if (qpnp_chg_is_usb_chg_plugged_in(chip)) {
+ qpnp_chg_charge_en(chip, 0);
+ qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
+ schedule_delayed_work(&chip->arb_stop_work,
+ msecs_to_jiffies(ARB_STOP_WORK_MS));
+ }
+
+ return IRQ_HANDLED;
+}
+
#define ENUM_T_STOP_BIT BIT(0)
static irqreturn_t
qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
@@ -527,7 +611,8 @@
if (chip->usb_present ^ usb_present) {
chip->usb_present = usb_present;
if (!usb_present)
- qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
+ qpnp_chg_usb_suspend_enable(chip, 1);
+
power_supply_set_present(chip->usb_psy,
chip->usb_present);
}
@@ -550,8 +635,7 @@
if (chip->cool_bat_decidegc && chip->warm_bat_decidegc
&& batt_present) {
- if (qpnp_adc_tm_channel_measure(&chip->adc_param))
- pr_err("request ADC error\n");
+ schedule_work(&chip->adc_measure_work);
}
}
@@ -621,26 +705,6 @@
return IRQ_HANDLED;
}
-static irqreturn_t
-qpnp_chg_chgr_chg_done_irq_handler(int irq, void *_chip)
-{
- struct qpnp_chg_chip *chip = _chip;
- u8 chgr_sts;
- int rc;
-
- pr_debug("CHG_DONE IRQ triggered\n");
-
- rc = qpnp_chg_read(chip, &chgr_sts,
- INT_RT_STS(chip->chgr_base), 1);
- if (rc)
- pr_err("failed to read interrupt sts %d\n", rc);
-
- chip->chg_done = true;
- power_supply_changed(&chip->batt_psy);
-
- return IRQ_HANDLED;
-}
-
static int
qpnp_batt_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
@@ -657,28 +721,6 @@
}
static int
-qpnp_chg_charge_en(struct qpnp_chg_chip *chip, int enable)
-{
- return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
- CHGR_CHG_EN,
- enable ? CHGR_CHG_EN : 0, 1);
-}
-
-static int
-qpnp_chg_force_run_on_batt(struct qpnp_chg_chip *chip, int disable)
-{
- /* Don't run on battery for batteryless hardware */
- if (chip->use_default_batt_values)
- return 0;
-
- /* This bit forces the charger to run off of the battery rather
- * than a connected charger */
- return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
- CHGR_ON_BAT_FORCE_BIT,
- disable ? CHGR_ON_BAT_FORCE_BIT : 0, 1);
-}
-
-static int
qpnp_chg_buck_control(struct qpnp_chg_chip *chip, int enable)
{
int rc;
@@ -773,6 +815,7 @@
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_TEMP,
@@ -813,16 +856,16 @@
int rc = 0;
struct qpnp_vadc_result results;
- if (chip->revision > 0) {
+ if (chip->revision == 0 && chip->type == SMBB) {
+ pr_err("vbat reading not supported for 1.0 rc=%d\n", rc);
+ return 0;
+ } else {
rc = qpnp_vadc_read(VBAT_SNS, &results);
if (rc) {
pr_err("Unable to read vbat rc=%d\n", rc);
return 0;
}
return results.physical;
- } else {
- pr_err("vbat reading not supported for 1.0 rc=%d\n", rc);
- return 0;
}
}
@@ -912,6 +955,21 @@
return POWER_SUPPLY_STATUS_DISCHARGING;
}
+static int
+get_prop_current_max(struct qpnp_chg_chip *chip)
+{
+ union power_supply_propval ret = {0,};
+
+ if (chip->bms_psy) {
+ chip->bms_psy->get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &ret);
+ return ret.intval;
+ } else {
+ pr_debug("No BMS supply registered return 0\n");
+ }
+
+ return 0;
+}
static int
get_prop_current_now(struct qpnp_chg_chip *chip)
@@ -1039,8 +1097,8 @@
POWER_SUPPLY_PROP_CURRENT_MAX, &ret);
if (ret.intval <= 2 && !chip->use_default_batt_values &&
get_prop_batt_present(chip)) {
- qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
qpnp_chg_usb_suspend_enable(chip, 1);
+ qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
} else {
qpnp_chg_usb_suspend_enable(chip, 0);
qpnp_chg_iusbmax_set(chip, ret.intval / 1000);
@@ -1090,6 +1148,9 @@
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = get_prop_capacity(chip);
break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = get_prop_current_max(chip);
+ break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
val->intval = get_prop_current_now(chip);
break;
@@ -1186,7 +1247,7 @@
QPNP_CHG_ITERM_MASK, temp, 1);
}
-#define QPNP_CHG_IBATMAX_MIN 100
+#define QPNP_CHG_IBATMAX_MIN 50
#define QPNP_CHG_IBATMAX_MAX 3250
static int
qpnp_chg_ibatmax_set(struct qpnp_chg_chip *chip, int chg_current)
@@ -1198,11 +1259,28 @@
pr_err("bad mA=%d asked to set\n", chg_current);
return -EINVAL;
}
- temp = (chg_current - QPNP_CHG_I_MIN_MA) / QPNP_CHG_I_STEP_MA;
+ temp = chg_current / QPNP_CHG_I_STEP_MA;
return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_IBAT_MAX,
QPNP_CHG_I_MASK, temp, 1);
}
+#define QPNP_CHG_TCHG_MASK 0x7F
+#define QPNP_CHG_TCHG_MIN 4
+#define QPNP_CHG_TCHG_MAX 512
+#define QPNP_CHG_TCHG_STEP 4
+static int qpnp_chg_tchg_max_set(struct qpnp_chg_chip *chip, int minutes)
+{
+ u8 temp;
+
+ if (minutes < QPNP_CHG_TCHG_MIN || minutes > QPNP_CHG_TCHG_MAX) {
+ pr_err("bad max minutes =%d asked to set\n", minutes);
+ return -EINVAL;
+ }
+
+ temp = (minutes - 1)/QPNP_CHG_TCHG_STEP;
+ return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_TCHG_MAX,
+ QPNP_CHG_I_MASK, temp, 1);
+}
#define QPNP_CHG_VBATDET_MIN_MV 3240
#define QPNP_CHG_VBATDET_MAX_MV 5780
#define QPNP_CHG_VBATDET_STEP_MV 20
@@ -1343,6 +1421,7 @@
if (state == ADC_TM_WARM_STATE) {
if (temp > chip->warm_bat_decidegc) {
+ /* Normal to warm */
bat_warm = true;
bat_cool = false;
chip->adc_param.low_temp =
@@ -1351,6 +1430,7 @@
ADC_TM_COOL_THR_ENABLE;
} else if (temp >
chip->cool_bat_decidegc + HYSTERISIS_DECIDEGC){
+ /* Cool to normal */
bat_warm = false;
bat_cool = false;
@@ -1361,14 +1441,16 @@
}
} else {
if (temp < chip->cool_bat_decidegc) {
+ /* Normal to cool */
bat_warm = false;
bat_cool = true;
chip->adc_param.high_temp =
chip->cool_bat_decidegc + HYSTERISIS_DECIDEGC;
chip->adc_param.state_request =
ADC_TM_WARM_THR_ENABLE;
- } else if (temp >
+ } else if (temp <
chip->warm_bat_decidegc - HYSTERISIS_DECIDEGC){
+ /* Warm to normal */
bat_warm = false;
bat_cool = false;
@@ -1421,10 +1503,181 @@
static void
qpnp_chg_setup_flags(struct qpnp_chg_chip *chip)
{
- if (chip->revision > 0)
+ if (chip->revision > 0 && chip->type == SMBB)
chip->flags |= CHG_FLAGS_VCP_WA;
}
+static int
+qpnp_chg_request_irqs(struct qpnp_chg_chip *chip)
+{
+ int rc = 0;
+ struct resource *resource;
+ struct spmi_resource *spmi_resource;
+ u8 subtype;
+ struct spmi_device *spmi = chip->spmi;
+
+ spmi_for_each_container_dev(spmi_resource, chip->spmi) {
+ if (!spmi_resource) {
+ pr_err("qpnp_chg: spmi resource absent\n");
+ return rc;
+ }
+
+ resource = spmi_get_resource(spmi, spmi_resource,
+ IORESOURCE_MEM, 0);
+ if (!(resource && resource->start)) {
+ pr_err("node %s IO resource absent!\n",
+ spmi->dev.of_node->full_name);
+ return rc;
+ }
+
+ rc = qpnp_chg_read(chip, &subtype,
+ resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ pr_err("Peripheral subtype read failed rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case SMBB_CHGR_SUBTYPE:
+ case SMBBP_CHGR_SUBTYPE:
+ case SMBCL_CHGR_SUBTYPE:
+ chip->chg_fastchg_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "fast-chg-on");
+ if (chip->chg_fastchg_irq < 0) {
+ pr_err("Unable to get fast-chg-on irq\n");
+ return rc;
+ }
+
+ chip->chg_trklchg_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "trkl-chg-on");
+ if (chip->chg_trklchg_irq < 0) {
+ pr_err("Unable to get trkl-chg-on irq\n");
+ return rc;
+ }
+
+ chip->chg_failed_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "chg-failed");
+ if (chip->chg_failed_irq < 0) {
+ pr_err("Unable to get chg_failed irq\n");
+ return rc;
+ }
+
+ rc |= devm_request_irq(chip->dev, chip->chg_failed_irq,
+ qpnp_chg_chgr_chg_failed_irq_handler,
+ IRQF_TRIGGER_RISING, "chg_failed", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d chg_failed chg: %d\n",
+ chip->chg_failed_irq, rc);
+ return rc;
+ }
+
+ rc |= devm_request_irq(chip->dev, chip->chg_fastchg_irq,
+ qpnp_chg_chgr_chg_fastchg_irq_handler,
+ IRQF_TRIGGER_RISING,
+ "fast-chg-on", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d fast-chg-on: %d\n",
+ chip->chg_fastchg_irq, rc);
+ return rc;
+ }
+
+ rc |= devm_request_irq(chip->dev, chip->chg_trklchg_irq,
+ qpnp_chg_chgr_chg_trklchg_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "fast-chg-on", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d trkl-chg-on: %d\n",
+ chip->chg_trklchg_irq, rc);
+ return rc;
+ }
+ enable_irq_wake(chip->chg_fastchg_irq);
+ enable_irq_wake(chip->chg_trklchg_irq);
+ enable_irq_wake(chip->chg_failed_irq);
+
+ break;
+ case SMBB_BAT_IF_SUBTYPE:
+ case SMBBP_BAT_IF_SUBTYPE:
+ case SMBCL_BAT_IF_SUBTYPE:
+ chip->batt_pres_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "batt-pres");
+ if (chip->batt_pres_irq < 0) {
+ pr_err("Unable to get batt-pres irq\n");
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev, chip->batt_pres_irq,
+ qpnp_chg_bat_if_batt_pres_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "bat_if_batt_pres", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d batt-pres irq: %d\n",
+ chip->batt_pres_irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->batt_pres_irq);
+ break;
+ case SMBB_USB_CHGPTH_SUBTYPE:
+ case SMBBP_USB_CHGPTH_SUBTYPE:
+ case SMBCL_USB_CHGPTH_SUBTYPE:
+ chip->usbin_valid_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "usbin-valid");
+ if (chip->usbin_valid_irq < 0) {
+ pr_err("Unable to get usbin irq\n");
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev, chip->usbin_valid_irq,
+ qpnp_chg_usb_usbin_valid_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "chg_usbin_valid", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d usbinvalid: %d\n",
+ chip->usbin_valid_irq, rc);
+ return rc;
+ }
+
+ chip->chg_gone_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "chg-gone");
+ if (chip->chg_gone_irq < 0) {
+ pr_err("Unable to get chg-gone irq\n");
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev, chip->chg_gone_irq,
+ qpnp_chg_usb_chg_gone_irq_handler,
+ IRQF_TRIGGER_RISING,
+ "chg_gone_irq", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d chg_gone: %d\n",
+ chip->chg_gone_irq, rc);
+ return rc;
+ }
+ enable_irq_wake(chip->usbin_valid_irq);
+ enable_irq_wake(chip->chg_gone_irq);
+ break;
+ case SMBB_DC_CHGPTH_SUBTYPE:
+ chip->dcin_valid_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "dcin-valid");
+ if (chip->dcin_valid_irq < 0) {
+ pr_err("Unable to get dcin irq\n");
+ return -rc;
+ }
+ rc = devm_request_irq(chip->dev, chip->dcin_valid_irq,
+ qpnp_chg_dc_dcin_valid_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "chg_dcin_valid", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d dcinvalid: %d\n",
+ chip->dcin_valid_irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->dcin_valid_irq);
+ break;
+ }
+ }
+
+ return rc;
+}
+
#define WDOG_EN_BIT BIT(7)
static int
qpnp_chg_hwinit(struct qpnp_chg_chip *chip, u8 subtype,
@@ -1436,73 +1689,7 @@
switch (subtype) {
case SMBB_CHGR_SUBTYPE:
case SMBBP_CHGR_SUBTYPE:
- chip->chg_done_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "chg-done");
- if (chip->chg_done_irq < 0) {
- pr_err("Unable to get chg_done irq\n");
- return -ENXIO;
- }
-
- chip->chg_fastchg_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "fast-chg-on");
- if (chip->chg_fastchg_irq < 0) {
- pr_err("Unable to get fast-chg-on irq\n");
- return -ENXIO;
- }
-
- chip->chg_trklchg_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "trkl-chg-on");
- if (chip->chg_trklchg_irq < 0) {
- pr_err("Unable to get trkl-chg-on irq\n");
- return -ENXIO;
- }
-
- chip->chg_failed_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "chg-failed");
- if (chip->chg_failed_irq < 0) {
- pr_err("Unable to get chg_failed irq\n");
- return -ENXIO;
- }
-
- rc |= devm_request_irq(chip->dev, chip->chg_done_irq,
- qpnp_chg_chgr_chg_done_irq_handler,
- IRQF_TRIGGER_RISING,
- "chg_done", chip);
- if (rc < 0) {
- pr_err("Can't request %d chg_done for chg: %d\n",
- chip->chg_done_irq, rc);
- return -ENXIO;
- }
-
- rc |= devm_request_irq(chip->dev, chip->chg_failed_irq,
- qpnp_chg_chgr_chg_failed_irq_handler,
- IRQF_TRIGGER_RISING, "chg_failed", chip);
- if (rc < 0) {
- pr_err("Can't request %d chg_failed chg: %d\n",
- chip->chg_failed_irq, rc);
- return -ENXIO;
- }
-
- rc |= devm_request_irq(chip->dev, chip->chg_fastchg_irq,
- qpnp_chg_chgr_chg_fastchg_irq_handler,
- IRQF_TRIGGER_RISING,
- "fast-chg-on", chip);
- if (rc < 0) {
- pr_err("Can't request %d fast-chg-on for chg: %d\n",
- chip->chg_fastchg_irq, rc);
- return -ENXIO;
- }
-
- rc |= devm_request_irq(chip->dev, chip->chg_trklchg_irq,
- qpnp_chg_chgr_chg_trklchg_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "fast-chg-on", chip);
- if (rc < 0) {
- pr_err("Can't request %d trkl-chg-on for chg: %d\n",
- chip->chg_trklchg_irq, rc);
- return -ENXIO;
- }
-
+ case SMBCL_CHGR_SUBTYPE:
rc = qpnp_chg_vinmin_set(chip, chip->min_voltage_mv);
if (rc) {
pr_debug("failed setting min_voltage rc=%d\n", rc);
@@ -1541,22 +1728,25 @@
pr_debug("failed setting ibat_Safe rc=%d\n", rc);
return rc;
}
+ rc = qpnp_chg_tchg_max_set(chip, chip->tchg_mins);
+ if (rc) {
+ pr_debug("failed setting tchg_mins rc=%d\n", rc);
+ return rc;
+ }
+
/* HACK: Disable wdog */
rc = qpnp_chg_masked_write(chip, chip->chgr_base + 0x62,
0xFF, 0xA0, 1);
- /* HACK: use digital EOC */
+ /* HACK: use analog EOC */
rc = qpnp_chg_masked_write(chip, chip->chgr_base +
CHGR_IBAT_TERM_CHGR,
- 0x88, 0x80, 1);
+ 0x80, 0x00, 1);
- enable_irq_wake(chip->chg_fastchg_irq);
- enable_irq_wake(chip->chg_trklchg_irq);
- enable_irq_wake(chip->chg_failed_irq);
- enable_irq_wake(chip->chg_done_irq);
break;
case SMBB_BUCK_SUBTYPE:
case SMBBP_BUCK_SUBTYPE:
+ case SMBCL_BUCK_SUBTYPE:
rc = qpnp_chg_masked_write(chip,
chip->chgr_base + CHGR_BUCK_BCK_VBAT_REG_MODE,
BUCK_VBAT_REG_NODE_SEL_BIT,
@@ -1568,43 +1758,20 @@
break;
case SMBB_BAT_IF_SUBTYPE:
case SMBBP_BAT_IF_SUBTYPE:
- chip->batt_pres_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "batt-pres");
- if (chip->batt_pres_irq < 0) {
- pr_err("Unable to get batt-pres irq\n");
- return -ENXIO;
+ case SMBCL_BAT_IF_SUBTYPE:
+ /* Force on VREF_BAT_THM */
+ rc = qpnp_chg_masked_write(chip,
+ chip->bat_if_base + BAT_IF_VREF_BAT_THM_CTRL,
+ VREF_BATT_THERM_FORCE_ON,
+ VREF_BATT_THERM_FORCE_ON, 1);
+ if (rc) {
+ pr_debug("failed to force on VREF_BAT_THM rc=%d\n", rc);
+ return rc;
}
- rc = devm_request_irq(chip->dev, chip->batt_pres_irq,
- qpnp_chg_bat_if_batt_pres_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "bat_if_batt_pres", chip);
- if (rc < 0) {
- pr_err("Can't request %d batt-pres irq for chg: %d\n",
- chip->batt_pres_irq, rc);
- return -ENXIO;
- }
-
- enable_irq_wake(chip->batt_pres_irq);
break;
case SMBB_USB_CHGPTH_SUBTYPE:
case SMBBP_USB_CHGPTH_SUBTYPE:
- chip->usbin_valid_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "usbin-valid");
- if (chip->usbin_valid_irq < 0) {
- pr_err("Unable to get usbin irq\n");
- return -ENXIO;
- }
- rc = devm_request_irq(chip->dev, chip->usbin_valid_irq,
- qpnp_chg_usb_usbin_valid_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "chg_usbin_valid", chip);
- if (rc < 0) {
- pr_err("Can't request %d usbinvalid for chg: %d\n",
- chip->usbin_valid_irq, rc);
- return -ENXIO;
- }
-
- enable_irq_wake(chip->usbin_valid_irq);
+ case SMBCL_USB_CHGPTH_SUBTYPE:
chip->usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
if (chip->usb_present) {
rc = qpnp_chg_masked_write(chip,
@@ -1627,31 +1794,28 @@
ENUM_T_STOP_BIT,
ENUM_T_STOP_BIT, 1);
+ rc = qpnp_chg_masked_write(chip,
+ chip->usb_chgpth_base + SEC_ACCESS,
+ 0xFF,
+ 0xA5, 1);
+
+ rc = qpnp_chg_masked_write(chip,
+ chip->usb_chgpth_base + USB_CHG_GONE_REV_BST,
+ 0xFF,
+ 0x80, 1);
+
break;
case SMBB_DC_CHGPTH_SUBTYPE:
- chip->dcin_valid_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "dcin-valid");
- if (chip->dcin_valid_irq < 0) {
- pr_err("Unable to get dcin irq\n");
- return -ENXIO;
- }
- rc = devm_request_irq(chip->dev, chip->dcin_valid_irq,
- qpnp_chg_dc_dcin_valid_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "chg_dcin_valid", chip);
- if (rc < 0) {
- pr_err("Can't request %d dcinvalid for chg: %d\n",
- chip->dcin_valid_irq, rc);
- return -ENXIO;
- }
-
- enable_irq_wake(chip->dcin_valid_irq);
break;
case SMBB_BOOST_SUBTYPE:
case SMBBP_BOOST_SUBTYPE:
break;
case SMBB_MISC_SUBTYPE:
+ chip->type = SMBB;
case SMBBP_MISC_SUBTYPE:
+ chip->type = SMBBP;
+ case SMBCL_MISC_SUBTYPE:
+ chip->type = SMBCL;
pr_debug("Setting BOOT_DONE\n");
rc = qpnp_chg_masked_write(chip,
chip->misc_base + CHGR_MISC_BOOT_DONE,
@@ -1671,6 +1835,100 @@
return rc;
}
+#define OF_PROP_READ(chip, prop, qpnp_dt_property, retval, optional) \
+do { \
+ if (retval) \
+ break; \
+ \
+ retval = of_property_read_u32(chip->spmi->dev.of_node, \
+ "qcom," qpnp_dt_property, \
+ &chip->prop); \
+ \
+ if ((retval == -EINVAL) && optional) \
+ retval = 0; \
+ else if (retval) \
+ pr_err("Error reading " #qpnp_dt_property \
+ " property rc = %d\n", rc); \
+} while (0)
+
+static int
+qpnp_charger_read_dt_props(struct qpnp_chg_chip *chip)
+{
+ int rc = 0;
+
+ OF_PROP_READ(chip, max_voltage_mv, "vddmax-mv", rc, 0);
+ OF_PROP_READ(chip, min_voltage_mv, "vinmin-mv", rc, 0);
+ OF_PROP_READ(chip, safe_voltage_mv, "vddsafe-mv", rc, 0);
+ OF_PROP_READ(chip, resume_delta_mv, "vbatdet-delta-mv", rc, 0);
+ OF_PROP_READ(chip, safe_current, "ibatsafe-ma", rc, 0);
+ OF_PROP_READ(chip, max_bat_chg_current, "ibatmax-ma", rc, 0);
+ if (rc)
+ pr_err("failed to read required dt parameters %d\n", rc);
+
+ OF_PROP_READ(chip, term_current, "ibatterm-ma", rc, 1);
+ OF_PROP_READ(chip, maxinput_dc_ma, "maxinput-dc-ma", rc, 1);
+ OF_PROP_READ(chip, maxinput_usb_ma, "maxinput-usb-ma", rc, 1);
+ OF_PROP_READ(chip, warm_bat_decidegc, "warm-bat-decidegc", rc, 1);
+ OF_PROP_READ(chip, cool_bat_decidegc, "cool-bat-decidegc", rc, 1);
+ OF_PROP_READ(chip, tchg_mins, "tchg-mins", rc, 1);
+ if (rc)
+ return rc;
+
+ /* Look up JEITA compliance parameters if cool and warm temp provided */
+ if (chip->cool_bat_decidegc && chip->warm_bat_decidegc) {
+ rc = qpnp_adc_tm_is_ready();
+ if (rc) {
+ pr_err("tm not ready %d\n", rc);
+ return rc;
+ }
+
+ OF_PROP_READ(chip, warm_bat_chg_ma, "ibatmax-warm-ma", rc, 1);
+ OF_PROP_READ(chip, cool_bat_chg_ma, "ibatmax-cool-ma", rc, 1);
+ OF_PROP_READ(chip, warm_bat_mv, "warm-bat-mv", rc, 1);
+ OF_PROP_READ(chip, cool_bat_mv, "cool-bat-mv", rc, 1);
+ if (rc)
+ return rc;
+ }
+
+ /* Get the charging-disabled property */
+ chip->charging_disabled = of_property_read_bool(chip->spmi->dev.of_node,
+ "qcom,charging-disabled");
+
+ /* Get the fake-batt-values property */
+ chip->use_default_batt_values =
+ of_property_read_bool(chip->spmi->dev.of_node,
+ "qcom,use-default-batt-values");
+
+ /* Disable charging when faking battery values */
+ if (chip->use_default_batt_values)
+ chip->charging_disabled = true;
+
+ of_get_property(chip->spmi->dev.of_node, "qcom,thermal-mitigation",
+ &(chip->thermal_levels));
+
+ if (chip->thermal_levels > sizeof(int)) {
+ chip->thermal_mitigation = kzalloc(
+ chip->thermal_levels,
+ GFP_KERNEL);
+
+ if (chip->thermal_mitigation == NULL) {
+ pr_err("thermal mitigation kzalloc() failed.\n");
+ return rc;
+ }
+
+ chip->thermal_levels /= sizeof(int);
+ rc = of_property_read_u32_array(chip->spmi->dev.of_node,
+ "qcom,thermal-mitigation",
+ chip->thermal_mitigation, chip->thermal_levels);
+ if (rc) {
+ pr_err("qcom,thermal-mitigation missing in dt\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
static int __devinit
qpnp_charger_probe(struct spmi_device *spmi)
{
@@ -1697,178 +1955,10 @@
goto fail_chg_enable;
}
- /* Get the vddmax property */
- rc = of_property_read_u32(spmi->dev.of_node, "qcom,chg-vddmax-mv",
- &chip->max_voltage_mv);
- if (rc) {
- pr_err("Error reading vddmax property %d\n", rc);
+ /* Get all device tree properties */
+ rc = qpnp_charger_read_dt_props(chip);
+ if (rc)
goto fail_chg_enable;
- }
-
- /* Get the vinmin property */
- rc = of_property_read_u32(spmi->dev.of_node, "qcom,chg-vinmin-mv",
- &chip->min_voltage_mv);
- if (rc) {
- pr_err("Error reading vddmax property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the vddmax property */
- rc = of_property_read_u32(spmi->dev.of_node, "qcom,chg-vddsafe-mv",
- &chip->safe_voltage_mv);
- if (rc) {
- pr_err("Error reading vddsave property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the vbatdet-delta property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-vbatdet-delta-mv",
- &chip->resume_delta_mv);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading vbatdet-delta property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the ibatsafe property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-ibatsafe-ma",
- &chip->safe_current);
- if (rc) {
- pr_err("Error reading ibatsafe property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the ibatterm property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-ibatterm-ma",
- &chip->term_current);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading ibatterm property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the ibatmax property */
- rc = of_property_read_u32(spmi->dev.of_node, "qcom,chg-ibatmax-ma",
- &chip->max_bat_chg_current);
- if (rc) {
- pr_err("Error reading ibatmax property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the maxinput-dc-ma property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-maxinput-dc-ma",
- &chip->maxinput_dc_ma);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading maxinput-dc-ma property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the maxinput-usb-ma property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-maxinput-usb-ma",
- &chip->maxinput_usb_ma);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading maxinput-usb-ma property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the charging-disabled property */
- chip->charging_disabled = of_property_read_bool(spmi->dev.of_node,
- "qcom,chg-charging-disabled");
-
- /* Get the warm-bat-degc property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-warm-bat-decidegc",
- &chip->warm_bat_decidegc);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading warm-bat-degc property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the cool-bat-degc property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-cool-bat-decidegc",
- &chip->cool_bat_decidegc);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading cool-bat-degc property %d\n", rc);
- goto fail_chg_enable;
- }
-
- if (chip->cool_bat_decidegc && chip->warm_bat_decidegc) {
- rc = qpnp_adc_tm_is_ready();
- if (rc) {
- pr_err("tm not ready %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the ibatmax-warm property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-ibatmax-warm-ma",
- &chip->warm_bat_chg_ma);
- if (rc) {
- pr_err("Error reading ibatmax-warm-ma %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the ibatmax-cool property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-ibatmax-cool-ma",
- &chip->cool_bat_chg_ma);
- if (rc) {
- pr_err("Error reading ibatmax-cool-ma %d\n", rc);
- goto fail_chg_enable;
- }
- /* Get the cool-bat-mv property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-cool-bat-mv",
- &chip->cool_bat_mv);
- if (rc) {
- pr_err("Error reading cool-bat-mv property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the warm-bat-mv property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-warm-bat-mv",
- &chip->warm_bat_mv);
- if (rc) {
- pr_err("Error reading warm-bat-mv property %d\n", rc);
- goto fail_chg_enable;
- }
- }
-
- /* Get the fake-batt-values property */
- chip->use_default_batt_values = of_property_read_bool(spmi->dev.of_node,
- "qcom,chg-use-default-batt-values");
-
- of_get_property(spmi->dev.of_node, "qcom,chg-thermal-mitigation",
- &(chip->thermal_levels));
-
- if (chip->thermal_levels > sizeof(int)) {
- chip->thermal_mitigation = kzalloc(
- chip->thermal_levels,
- GFP_KERNEL);
-
- if (chip->thermal_mitigation == NULL) {
- pr_err("thermal mitigation kzalloc() failed.\n");
- goto fail_chg_enable;
- }
-
- chip->thermal_levels /= sizeof(int);
- rc = of_property_read_u32_array(spmi->dev.of_node,
- "qcom,chg-thermal-mitigation",
- chip->thermal_mitigation, chip->thermal_levels);
- if (rc) {
- pr_err("qcom,chg-thermal-mitigation missing in dt\n");
- goto fail_chg_enable;
- }
- }
-
- /* Disable charging when faking battery values */
- if (chip->use_default_batt_values)
- chip->charging_disabled = true;
spmi_for_each_container_dev(spmi_resource, spmi) {
if (!spmi_resource) {
@@ -1896,6 +1986,7 @@
switch (subtype) {
case SMBB_CHGR_SUBTYPE:
case SMBBP_CHGR_SUBTYPE:
+ case SMBCL_CHGR_SUBTYPE:
chip->chgr_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1906,6 +1997,7 @@
break;
case SMBB_BUCK_SUBTYPE:
case SMBBP_BUCK_SUBTYPE:
+ case SMBCL_BUCK_SUBTYPE:
chip->buck_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1913,9 +2005,31 @@
subtype, rc);
goto fail_chg_enable;
}
+
+ rc = qpnp_chg_masked_write(chip,
+ chip->buck_base + SEC_ACCESS,
+ 0xFF,
+ 0xA5, 1);
+
+ rc = qpnp_chg_masked_write(chip,
+ chip->buck_base + BUCK_VCHG_OV,
+ 0xff,
+ 0x00, 1);
+
+ rc = qpnp_chg_masked_write(chip,
+ chip->buck_base + SEC_ACCESS,
+ 0xFF,
+ 0xA5, 1);
+
+ rc = qpnp_chg_masked_write(chip,
+ chip->buck_base + BUCK_TEST_SMBC_MODES,
+ 0xFF,
+ 0x80, 1);
+
break;
case SMBB_BAT_IF_SUBTYPE:
case SMBBP_BAT_IF_SUBTYPE:
+ case SMBCL_BAT_IF_SUBTYPE:
chip->bat_if_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1926,6 +2040,7 @@
break;
case SMBB_USB_CHGPTH_SUBTYPE:
case SMBBP_USB_CHGPTH_SUBTYPE:
+ case SMBCL_USB_CHGPTH_SUBTYPE:
chip->usb_chgpth_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1955,6 +2070,7 @@
break;
case SMBB_MISC_SUBTYPE:
case SMBBP_MISC_SUBTYPE:
+ case SMBCL_MISC_SUBTYPE:
chip->misc_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -2002,6 +2118,9 @@
pr_err("batt failed to register rc = %d\n", rc);
goto fail_chg_enable;
}
+ INIT_WORK(&chip->adc_measure_work,
+ qpnp_bat_if_adc_measure_work);
+ INIT_DELAYED_WORK(&chip->arb_stop_work, qpnp_arb_stop_work);
}
if (chip->dc_chgpth_base) {
@@ -2056,6 +2175,12 @@
qpnp_chg_charge_en(chip, !chip->charging_disabled);
qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
+ rc = qpnp_chg_request_irqs(chip);
+ if (rc) {
+ pr_err("failed to request interrupts %d\n", rc);
+ goto unregister_batt;
+ }
+
pr_info("success chg_dis = %d, usb = %d, dc = %d b_health = %d batt_present = %d\n",
chip->charging_disabled,
qpnp_chg_is_usb_chg_plugged_in(chip),
@@ -2082,12 +2207,49 @@
&& chip->batt_present) {
qpnp_adc_tm_disable_chan_meas(&chip->adc_param);
}
+ cancel_work_sync(&chip->adc_measure_work);
+
dev_set_drvdata(&spmi->dev, NULL);
kfree(chip);
return 0;
}
+static int qpnp_chg_resume(struct device *dev)
+{
+ struct qpnp_chg_chip *chip = dev_get_drvdata(dev);
+ int rc = 0;
+
+ rc = qpnp_chg_masked_write(chip,
+ chip->bat_if_base + BAT_IF_VREF_BAT_THM_CTRL,
+ VREF_BATT_THERM_FORCE_ON,
+ VREF_BATT_THERM_FORCE_ON, 1);
+ if (rc)
+ pr_debug("failed to force on VREF_BAT_THM rc=%d\n", rc);
+
+ return rc;
+}
+
+static int qpnp_chg_suspend(struct device *dev)
+{
+ struct qpnp_chg_chip *chip = dev_get_drvdata(dev);
+ int rc = 0;
+
+ rc = qpnp_chg_masked_write(chip,
+ chip->bat_if_base + BAT_IF_VREF_BAT_THM_CTRL,
+ VREF_BATT_THERM_FORCE_ON,
+ VREF_BAT_THM_ENABLED_FSM, 1);
+ if (rc)
+ pr_debug("failed to enable FSM ctrl VREF_BAT_THM rc=%d\n", rc);
+
+ return rc;
+}
+
+static const struct dev_pm_ops qpnp_bms_pm_ops = {
+ .resume = qpnp_chg_resume,
+ .suspend = qpnp_chg_suspend,
+};
+
static struct spmi_driver qpnp_charger_driver = {
.probe = qpnp_charger_probe,
.remove = __devexit_p(qpnp_charger_remove),
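The qpnp_bms_pm_ops table added above is not attached to the driver in any hunk shown here; a minimal sketch of the usual hookup, assuming the spmi_driver embeds a standard device_driver with a .pm field (the .name string below is illustrative, the real one is not shown):

static struct spmi_driver qpnp_charger_driver = {
	.probe		= qpnp_charger_probe,
	.remove		= __devexit_p(qpnp_charger_remove),
	.driver		= {
		.name	= "qcom,qpnp-charger",	/* illustrative name */
		.owner	= THIS_MODULE,
		.pm	= &qpnp_bms_pm_ops,
	},
};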
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
index 4cdfaeb..2d10f89 100644
--- a/drivers/regulator/qpnp-regulator.c
+++ b/drivers/regulator/qpnp-regulator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,12 +19,14 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/spmi.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/ktime.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/qpnp-regulator.h>
@@ -36,6 +38,7 @@
QPNP_VREG_DEBUG_INIT = BIT(2), /* Show state after probe */
QPNP_VREG_DEBUG_WRITES = BIT(3), /* Show SPMI writes */
QPNP_VREG_DEBUG_READS = BIT(4), /* Show SPMI reads */
+ QPNP_VREG_DEBUG_OCP = BIT(5), /* Show VS OCP IRQ events */
};
static int qpnp_vreg_debug_mask;
@@ -156,9 +159,8 @@
#define QPNP_LDO_SOFT_START_ENABLE_MASK 0x80
/* VS regulator over current protection control register layout */
-#define QPNP_VS_OCP_ENABLE_MASK 0x80
-#define QPNP_VS_OCP_OVERRIDE_MASK 0x01
-#define QPNP_VS_OCP_DISABLE 0x00
+#define QPNP_VS_OCP_OVERRIDE 0x01
+#define QPNP_VS_OCP_NO_OVERRIDE 0x00
/* VS regulator soft start control register layout */
#define QPNP_VS_SOFT_START_ENABLE_MASK 0x80
@@ -168,6 +170,11 @@
#define QPNP_BOOST_CURRENT_LIMIT_ENABLE_MASK 0x80
#define QPNP_BOOST_CURRENT_LIMIT_MASK 0x07
+#define QPNP_VS_OCP_DEFAULT_MAX_RETRIES 10
+#define QPNP_VS_OCP_DEFAULT_RETRY_DELAY_MS 30
+#define QPNP_VS_OCP_FALL_DELAY_US 90
+#define QPNP_VS_OCP_FAULT_DELAY_US 20000
+
/*
* This voltage in uV is returned by get_voltage functions when there is no way
* to determine the current voltage level. It is needed because the regulator
@@ -203,17 +210,22 @@
struct qpnp_regulator {
struct regulator_desc rdesc;
+ struct delayed_work ocp_work;
struct spmi_device *spmi_dev;
struct regulator_dev *rdev;
struct qpnp_voltage_set_points *set_points;
enum qpnp_regulator_logical_type logical_type;
int enable_time;
- int ocp_enable_time;
int ocp_enable;
+ int ocp_irq;
+ int ocp_count;
+ int ocp_max_retries;
+ int ocp_retry_delay_ms;
int system_load;
int hpm_min_load;
u32 write_count;
u32 prev_write_count;
+ ktime_t vs_enable_time;
u16 base_addr;
/* ctrl_reg provides a shadow copy of register values 0x40 to 0x47. */
u8 ctrl_reg[8];
@@ -501,35 +513,11 @@
static int qpnp_regulator_vs_enable(struct regulator_dev *rdev)
{
struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
- int rc;
- u8 reg;
- if (vreg->ocp_enable == QPNP_REGULATOR_ENABLE) {
- /* Disable OCP */
- reg = QPNP_VS_OCP_DISABLE;
- rc = qpnp_vreg_write(vreg, QPNP_VS_REG_OCP, ®, 1);
- if (rc)
- goto fail;
- }
+ if (vreg->ocp_irq)
+ vreg->vs_enable_time = ktime_get();
- rc = qpnp_regulator_common_enable(rdev);
- if (rc)
- goto fail;
-
- if (vreg->ocp_enable == QPNP_REGULATOR_ENABLE) {
- /* Wait for inrush current to subsided, then enable OCP. */
- udelay(vreg->ocp_enable_time);
- reg = QPNP_VS_OCP_ENABLE_MASK;
- rc = qpnp_vreg_write(vreg, QPNP_VS_REG_OCP, ®, 1);
- if (rc)
- goto fail;
- }
-
- return rc;
-fail:
- vreg_err(vreg, "qpnp_vreg_write failed, rc=%d\n", rc);
-
- return rc;
+ return qpnp_regulator_common_enable(rdev);
}
static int qpnp_regulator_common_disable(struct regulator_dev *rdev)
@@ -785,6 +773,88 @@
return vreg->enable_time;
}
+static int qpnp_regulator_vs_clear_ocp(struct qpnp_regulator *vreg)
+{
+ int rc;
+
+ rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+ QPNP_COMMON_DISABLE, QPNP_COMMON_ENABLE_MASK,
+ &vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+ if (rc)
+ vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+
+ vreg->vs_enable_time = ktime_get();
+
+ rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+ QPNP_COMMON_ENABLE, QPNP_COMMON_ENABLE_MASK,
+ &vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+ if (rc)
+ vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+
+ if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_OCP) {
+ pr_info("%s: switch state toggled after OCP event\n",
+ vreg->rdesc.name);
+ }
+
+ return rc;
+}
+
+static void qpnp_regulator_vs_ocp_work(struct work_struct *work)
+{
+ struct delayed_work *dwork
+ = container_of(work, struct delayed_work, work);
+ struct qpnp_regulator *vreg
+ = container_of(dwork, struct qpnp_regulator, ocp_work);
+
+ qpnp_regulator_vs_clear_ocp(vreg);
+
+ return;
+}
+
+static irqreturn_t qpnp_regulator_vs_ocp_isr(int irq, void *data)
+{
+ struct qpnp_regulator *vreg = data;
+ ktime_t ocp_irq_time;
+ s64 ocp_trigger_delay_us;
+
+ ocp_irq_time = ktime_get();
+ ocp_trigger_delay_us = ktime_us_delta(ocp_irq_time,
+ vreg->vs_enable_time);
+
+ /*
+ * Reset the OCP count if there is a large delay between switch enable
+ * and when OCP triggers. This is indicative of a hotplug event as
+ * opposed to a fault.
+ */
+ if (ocp_trigger_delay_us > QPNP_VS_OCP_FAULT_DELAY_US)
+ vreg->ocp_count = 0;
+
+ /* Wait for switch output to settle back to 0 V after OCP triggered. */
+ udelay(QPNP_VS_OCP_FALL_DELAY_US);
+
+ vreg->ocp_count++;
+
+ if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_OCP) {
+ pr_info("%s: VS OCP triggered, count = %d, delay = %lld us\n",
+ vreg->rdesc.name, vreg->ocp_count,
+ ocp_trigger_delay_us);
+ }
+
+ if (vreg->ocp_count == 1) {
+ /* Immediately clear the over current condition. */
+ qpnp_regulator_vs_clear_ocp(vreg);
+ } else if (vreg->ocp_count <= vreg->ocp_max_retries) {
+ /* Schedule the over current clear task to run later. */
+ schedule_delayed_work(&vreg->ocp_work,
+ msecs_to_jiffies(vreg->ocp_retry_delay_ms) + 1);
+ } else {
+ vreg_err(vreg, "OCP triggered %d times; no further retries\n",
+ vreg->ocp_count);
+ }
+
+ return IRQ_HANDLED;
+}
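The timing check in the ISR above is what separates a hot-plugged load from a persistent short. A minimal sketch of that classification on its own (ocp_is_hotplug is a hypothetical helper, not part of the patch, reusing the constants defined earlier):

static bool ocp_is_hotplug(ktime_t enable_time, ktime_t irq_time)
{
	/* An OCP trigger more than 20 ms after enable looks like a
	 * hotplug event rather than a fault, so the retry count is reset. */
	return ktime_us_delta(irq_time, enable_time) > QPNP_VS_OCP_FAULT_DELAY_US;
}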
+
static const char const *qpnp_print_actions[] = {
[QPNP_REGULATOR_ACTION_INIT] = "initial ",
[QPNP_REGULATOR_ACTION_ENABLE] = "enable ",
@@ -834,7 +904,8 @@
if (type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
|| type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
- || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS) {
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS) {
mode = qpnp_regulator_common_get_mode(rdev);
mode_label = mode == REGULATOR_MODE_NORMAL ? "HPM" : "LPM";
}
@@ -903,9 +974,9 @@
pc_mode_label[1] =
mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
- pr_info("%s %-11s: %s, pc_en=%s, alt_mode=%s\n",
+ pr_info("%s %-11s: %s, mode=%s, pc_en=%s, alt_mode=%s\n",
action_label, vreg->rdesc.name, enable_label,
- pc_enable_label, pc_mode_label);
+ mode_label, pc_enable_label, pc_mode_label);
break;
case QPNP_REGULATOR_LOGICAL_TYPE_BOOST:
pr_info("%s %-11s: %s, v=%7d uV\n",
@@ -1099,6 +1170,17 @@
pdata->pin_ctrl_enable & QPNP_COMMON_ENABLE_FOLLOW_ALL_MASK;
}
+ /* Set up HPM control. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS)
+ && (pdata->hpm_enable != QPNP_REGULATOR_USE_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &= ~QPNP_COMMON_MODE_HPM_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ (pdata->hpm_enable ? QPNP_COMMON_MODE_HPM_MASK : 0);
+ }
+
/* Set up auto mode control. */
if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
|| type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
@@ -1224,7 +1306,8 @@
}
if (pdata->ocp_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
- reg = pdata->ocp_enable ? QPNP_VS_OCP_ENABLE_MASK : 0;
+ reg = pdata->ocp_enable ? QPNP_VS_OCP_NO_OVERRIDE
+ : QPNP_VS_OCP_OVERRIDE;
rc = qpnp_vreg_write(vreg, QPNP_VS_REG_OCP, ®, 1);
if (rc) {
vreg_err(vreg, "spmi write failed, rc=%d\n",
@@ -1256,6 +1339,11 @@
}
pdata->base_addr = res->start;
+ /* OCP IRQ is optional so ignore get errors. */
+ pdata->ocp_irq = spmi_get_irq_byname(spmi, NULL, "ocp");
+ if (pdata->ocp_irq < 0)
+ pdata->ocp_irq = 0;
+
/*
* Initialize configuration parameters to use hardware default in case
* no value is specified via device tree.
@@ -1269,6 +1357,7 @@
pdata->pin_ctrl_enable = QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT;
pdata->pin_ctrl_hpm = QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT;
pdata->vs_soft_start_strength = QPNP_VS_SOFT_START_STR_HW_DEFAULT;
+ pdata->hpm_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
/* These bindings are optional, so it is okay if they are not found. */
of_property_read_u32(node, "qcom,auto-mode-enable",
@@ -1276,6 +1365,10 @@
of_property_read_u32(node, "qcom,bypass-mode-enable",
&pdata->bypass_mode_enable);
of_property_read_u32(node, "qcom,ocp-enable", &pdata->ocp_enable);
+ of_property_read_u32(node, "qcom,ocp-max-retries",
+ &pdata->ocp_max_retries);
+ of_property_read_u32(node, "qcom,ocp-retry-delay",
+ &pdata->ocp_retry_delay_ms);
of_property_read_u32(node, "qcom,pull-down-enable",
&pdata->pull_down_enable);
of_property_read_u32(node, "qcom,soft-start-enable",
@@ -1285,12 +1378,11 @@
of_property_read_u32(node, "qcom,pin-ctrl-enable",
&pdata->pin_ctrl_enable);
of_property_read_u32(node, "qcom,pin-ctrl-hpm", &pdata->pin_ctrl_hpm);
+ of_property_read_u32(node, "qcom,hpm-enable", &pdata->hpm_enable);
of_property_read_u32(node, "qcom,vs-soft-start-strength",
&pdata->vs_soft_start_strength);
of_property_read_u32(node, "qcom,system-load", &pdata->system_load);
of_property_read_u32(node, "qcom,enable-time", &pdata->enable_time);
- of_property_read_u32(node, "qcom,ocp-enable-time",
- &pdata->ocp_enable_time);
return rc;
}
@@ -1364,7 +1456,14 @@
vreg->enable_time = pdata->enable_time;
vreg->system_load = pdata->system_load;
vreg->ocp_enable = pdata->ocp_enable;
- vreg->ocp_enable_time = pdata->ocp_enable_time;
+ vreg->ocp_irq = pdata->ocp_irq;
+ vreg->ocp_max_retries = pdata->ocp_max_retries;
+ vreg->ocp_retry_delay_ms = pdata->ocp_retry_delay_ms;
+
+ if (vreg->ocp_max_retries == 0)
+ vreg->ocp_max_retries = QPNP_VS_OCP_DEFAULT_MAX_RETRIES;
+ if (vreg->ocp_retry_delay_ms == 0)
+ vreg->ocp_retry_delay_ms = QPNP_VS_OCP_DEFAULT_RETRY_DELAY_MS;
rdesc = &vreg->rdesc;
rdesc->id = spmi->ctrl->nr;
@@ -1414,18 +1513,37 @@
goto bail;
}
+ if (vreg->logical_type != QPNP_REGULATOR_LOGICAL_TYPE_VS)
+ vreg->ocp_irq = 0;
+
+ if (vreg->ocp_irq) {
+ rc = devm_request_irq(&spmi->dev, vreg->ocp_irq,
+ qpnp_regulator_vs_ocp_isr, IRQF_TRIGGER_RISING, "ocp",
+ vreg);
+ if (rc < 0) {
+ vreg_err(vreg, "failed to request irq %d, rc=%d\n",
+ vreg->ocp_irq, rc);
+ goto bail;
+ }
+
+ INIT_DELAYED_WORK(&vreg->ocp_work, qpnp_regulator_vs_ocp_work);
+ }
+
vreg->rdev = regulator_register(rdesc, &spmi->dev,
&(pdata->init_data), vreg, spmi->dev.of_node);
if (IS_ERR(vreg->rdev)) {
rc = PTR_ERR(vreg->rdev);
vreg_err(vreg, "regulator_register failed, rc=%d\n", rc);
- goto bail;
+ goto cancel_ocp_work;
}
qpnp_vreg_show_state(vreg->rdev, QPNP_REGULATOR_ACTION_INIT);
return 0;
+cancel_ocp_work:
+ if (vreg->ocp_irq)
+ cancel_delayed_work_sync(&vreg->ocp_work);
bail:
if (rc)
vreg_err(vreg, "probe failed, rc=%d\n", rc);
@@ -1445,6 +1563,8 @@
if (vreg) {
regulator_unregister(vreg->rdev);
+ if (vreg->ocp_irq)
+ cancel_delayed_work_sync(&vreg->ocp_work);
kfree(vreg->rdesc.name);
kfree(vreg);
}
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 706fba7..9a4ea63 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -58,6 +58,17 @@
If unsure, say N.
+config SCSI_UFSHCD_PLATFORM
+ tristate "Platform bus based UFS Controller support"
+ depends on SCSI_UFSHCD
+ ---help---
+	  This selects the UFS host controller support. Select this if
+	  you have a UFS controller on the platform bus.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config SCSI_UFS_TEST
tristate "Universal Flash Storage host controller driver unit-tests"
depends on SCSI_UFSHCD && IOSCHED_TEST
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index bbcc202..8d6665b 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,4 +1,5 @@
# UFSHCD makefile
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
+obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
new file mode 100644
index 0000000..03319ac
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -0,0 +1,217 @@
+/*
+ * Universal Flash Storage Host controller Platform bus based glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/platform_device.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pltfrm_suspend - suspend power management function
+ * @dev: pointer to device handle
+ *
+ *
+ */
+static int ufshcd_pltfrm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ /*
+ * TODO:
+ * 1. Call ufshcd_suspend
+ * 2. Do bus specific power management
+ */
+
+ disable_irq(hba->irq);
+
+ return 0;
+}
+
+/**
+ * ufshcd_pltfrm_resume - resume power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ /*
+ * TODO:
+ * 1. Call ufshcd_resume.
+ * 2. Do bus specific wake up
+ */
+
+ enable_irq(hba->irq);
+
+ return 0;
+}
+#else
+#define ufshcd_pltfrm_suspend NULL
+#define ufshcd_pltfrm_resume NULL
+#endif
+
+/**
+ * ufshcd_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_probe(struct platform_device *pdev)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ struct resource *mem_res;
+ struct resource *irq_res;
+ resource_size_t mem_size;
+ int err;
+ struct device *dev = &pdev->dev;
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res) {
+ dev_err(&pdev->dev,
+ "Memory resource not available\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ mem_size = resource_size(mem_res);
+ if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) {
+ dev_err(&pdev->dev,
+ "Cannot reserve the memory resource\n");
+ err = -EBUSY;
+ goto out_error;
+ }
+
+ mmio_base = ioremap_nocache(mem_res->start, mem_size);
+ if (!mmio_base) {
+ dev_err(&pdev->dev, "memory map failed\n");
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ dev_err(&pdev->dev, "IRQ resource not available\n");
+ err = -ENODEV;
+ goto out_iounmap;
+ }
+
+ err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
+ if (err) {
+ dev_err(&pdev->dev, "set dma mask failed\n");
+ goto out_iounmap;
+ }
+
+ err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
+ if (err) {
+ dev_err(&pdev->dev, "Intialization failed\n");
+ goto out_iounmap;
+ }
+
+ platform_set_drvdata(pdev, hba);
+
+ return 0;
+
+out_iounmap:
+ iounmap(mmio_base);
+out_release_regions:
+ release_mem_region(mem_res->start, mem_size);
+out_error:
+ return err;
+}
+
+/**
+ * ufshcd_pltfrm_remove - remove platform driver routine
+ * @pdev: pointer to platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_remove(struct platform_device *pdev)
+{
+ struct resource *mem_res;
+ resource_size_t mem_size;
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ disable_irq(hba->irq);
+
+	/* Some buggy controllers raise an interrupt after the
+	 * resources are removed, so first unregister the irq
+	 * handler and then release the resources used by the driver.
+	 */
+
+ free_irq(hba->irq, hba);
+ ufshcd_remove(hba);
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res)
+ dev_err(&pdev->dev, "ufshcd: Memory resource not available\n");
+ else {
+ mem_size = resource_size(mem_res);
+ release_mem_region(mem_res->start, mem_size);
+ }
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static const struct of_device_id ufs_of_match[] = {
+	{ .compatible = "jedec,ufs-1.1"},
+	{ },
+};
+
+static const struct dev_pm_ops ufshcd_dev_pm_ops = {
+ .suspend = ufshcd_pltfrm_suspend,
+ .resume = ufshcd_pltfrm_resume,
+};
+
+static struct platform_driver ufshcd_pltfrm_driver = {
+ .probe = ufshcd_pltfrm_probe,
+ .remove = ufshcd_pltfrm_remove,
+ .driver = {
+ .name = "ufshcd",
+ .owner = THIS_MODULE,
+ .pm = &ufshcd_dev_pm_ops,
+ .of_match_table = ufs_of_match,
+ },
+};
+
+module_platform_driver(ufshcd_pltfrm_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller Platform bus-based glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 60fd40c..c32a478 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -478,7 +478,7 @@
ucd_cmd_ptr->header.dword_2 = 0;
ucd_cmd_ptr->exp_data_transfer_len =
- cpu_to_be32(lrbp->cmd->transfersize);
+ cpu_to_be32(lrbp->cmd->sdb.length);
memcpy(ucd_cmd_ptr->cdb,
lrbp->cmd->cmnd,
diff --git a/drivers/slimbus/slim-msm-ctrl.c b/drivers/slimbus/slim-msm-ctrl.c
index 9a864aa..4a3ea76 100644
--- a/drivers/slimbus/slim-msm-ctrl.c
+++ b/drivers/slimbus/slim-msm-ctrl.c
@@ -37,8 +37,6 @@
#define QC_DEVID_SAT2 0x4
#define QC_DEVID_PGD 0x5
#define QC_MSM_DEVS 5
-#define INIT_MX_RETRIES 10
-#define DEF_RETRY_MS 10
/* Manager registers */
enum mgr_reg {
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 10c69c3..a0179cb 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -631,6 +631,7 @@
if (mc == SLIM_USR_MC_MASTER_CAPABILITY &&
mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
struct slim_msg_txn txn;
+ int retries = 0;
u8 wbuf[8];
txn.dt = SLIM_MSG_DEST_LOGICALADDR;
txn.ec = 0;
@@ -638,7 +639,6 @@
txn.mc = SLIM_USR_MC_REPORT_SATELLITE;
txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
txn.la = SLIM_LA_MGR;
- txn.rl = 8;
wbuf[0] = SAT_MAGIC_LSB;
wbuf[1] = SAT_MAGIC_MSB;
wbuf[2] = SAT_MSG_VER;
@@ -655,7 +655,8 @@
/* make sure NGD MSG-Q config goes through */
mb();
}
-
+capability_retry:
+ txn.rl = 8;
ret = ngd_xfer_msg(&dev->ctrl, &txn);
if (!ret) {
enum msm_ctrl_state prev_state = dev->state;
@@ -668,6 +669,13 @@
/* ADSP SSR, send device_up notifications */
if (prev_state == MSM_CTRL_DOWN)
schedule_work(&dev->slave_notify);
+ } else if (ret == -EIO) {
+ pr_info("capability message NACKed, retrying");
+ if (retries < INIT_MX_RETRIES) {
+ msleep(DEF_RETRY_MS);
+ retries++;
+ goto capability_retry;
+ }
}
}
if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
@@ -761,7 +769,13 @@
pr_info("ADSP P.C. CTRL state:%d NGD not enumerated:0x%x",
dev->state, laddr);
}
-
+ /* ADSP SSR scenario, need to disconnect pipe before connecting */
+ if (dev->use_rx_msgqs == MSM_MSGQ_DOWN) {
+ struct msm_slim_endp *endpoint = &dev->rx_msgq;
+ sps_disconnect(endpoint->sps);
+ sps_free_endpoint(endpoint->sps);
+ dev->use_rx_msgqs = MSM_MSGQ_RESET;
+ }
/*
* ADSP power collapse case (OR SSR), where HW was reset
* BAM programming will happen when capability message is received
@@ -911,6 +925,8 @@
ngd_slim_enable(dev, false);
/* disconnect BAM pipes */
+ if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+ dev->use_rx_msgqs = MSM_MSGQ_DOWN;
msm_slim_sps_exit(dev, false);
mutex_lock(&ctrl->m_ctrl);
/* device up should be called again after SSR */
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index 3e19f9b..30341e2 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -581,7 +581,7 @@
void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
{
- if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED) {
+ if (dev->use_rx_msgqs >= MSM_MSGQ_ENABLED) {
struct msm_slim_endp *endpoint = &dev->rx_msgq;
struct sps_connect *config = &endpoint->config;
struct sps_mem_buffer *descr = &config->desc;
@@ -590,10 +590,12 @@
memset(&sps_event, 0x00, sizeof(sps_event));
msm_slim_sps_mem_free(dev, mem);
sps_register_event(endpoint->sps, &sps_event);
- sps_disconnect(endpoint->sps);
+ if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED) {
+ sps_disconnect(endpoint->sps);
+ msm_slim_free_endpoint(endpoint);
+ dev->use_rx_msgqs = MSM_MSGQ_RESET;
+ }
msm_slim_sps_mem_free(dev, descr);
- msm_slim_free_endpoint(endpoint);
- dev->use_rx_msgqs = MSM_MSGQ_RESET;
}
if (dereg) {
sps_deregister_bam_device(dev->bam.hdl);
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 6e329b3..cf2d26f 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -50,6 +50,8 @@
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
+#define INIT_MX_RETRIES 10
+#define DEF_RETRY_MS 10
#define MSM_CONCUR_MSG 8
#define SAT_CONCUR_MSG 8
#define DEF_WATERMARK (8 << 1)
@@ -159,6 +161,7 @@
MSM_MSGQ_DISABLED,
MSM_MSGQ_RESET,
MSM_MSGQ_ENABLED,
+ MSM_MSGQ_DOWN,
};
struct msm_slim_sps_bam {
diff --git a/drivers/thermal/msm8960_tsens.c b/drivers/thermal/msm8960_tsens.c
index 67e0181..837ac21 100644
--- a/drivers/thermal/msm8960_tsens.c
+++ b/drivers/thermal/msm8960_tsens.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -243,6 +243,17 @@
}
EXPORT_SYMBOL(tsens_get_temp);
+int tsens_get_max_sensor_num(uint32_t *tsens_num_sensors)
+{
+ if (!tmdev)
+ return -ENODEV;
+
+ *tsens_num_sensors = tmdev->tsens_num_sensor;
+
+ return 0;
+}
+EXPORT_SYMBOL(tsens_get_max_sensor_num);
+
static int tsens_tz_get_mode(struct thermal_zone_device *thermal,
enum thermal_device_mode *mode)
{
diff --git a/drivers/thermal/msm8974-tsens.c b/drivers/thermal/msm8974-tsens.c
index f7e5eee..991cf2e 100644
--- a/drivers/thermal/msm8974-tsens.c
+++ b/drivers/thermal/msm8974-tsens.c
@@ -244,7 +244,11 @@
struct tsens_tm_device_sensor {
struct thermal_zone_device *tz_dev;
enum thermal_device_mode mode;
- unsigned int sensor_num;
+ /* Physical HW sensor number */
+ unsigned int sensor_hw_num;
+	/* Software index, used to keep track of the HW/SW
+	 * sensor ID mapping */
+ unsigned int sensor_sw_id;
struct work_struct work;
int offset;
int calib_data_point1;
@@ -273,36 +277,85 @@
struct tsens_tm_device *tmdev;
-static int tsens_tz_code_to_degc(int adc_code, int sensor_num)
+int tsens_get_sw_id_mapping(int sensor_hw_num, int *sensor_sw_idx)
{
- int degc, num, den;
+ int i = 0;
+ bool id_found = false;
+ while (i < tmdev->tsens_num_sensor && !id_found) {
+ if (sensor_hw_num == tmdev->sensor[i].sensor_hw_num) {
+ *sensor_sw_idx = tmdev->sensor[i].sensor_sw_id;
+ id_found = true;
+ }
+ i++;
+ }
+
+ if (!id_found)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(tsens_get_sw_id_mapping);
+
+int tsens_get_hw_id_mapping(int sensor_sw_id, int *sensor_hw_num)
+{
+ int i = 0;
+ bool id_found = false;
+
+ while (i < tmdev->tsens_num_sensor && !id_found) {
+ if (sensor_sw_id == tmdev->sensor[i].sensor_sw_id) {
+ *sensor_hw_num = tmdev->sensor[i].sensor_hw_num;
+ id_found = true;
+ }
+ i++;
+ }
+
+ if (!id_found)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(tsens_get_hw_id_mapping);
+
+static int tsens_tz_code_to_degc(int adc_code, int sensor_sw_id)
+{
+ int degc, num, den, idx;
+
+ idx = sensor_sw_id;
num = ((adc_code * tmdev->tsens_factor) -
- tmdev->sensor[sensor_num].offset);
- den = (int) tmdev->sensor[sensor_num].slope_mul_tsens_factor;
- degc = num/den;
+ tmdev->sensor[idx].offset);
+ den = (int) tmdev->sensor[idx].slope_mul_tsens_factor;
- if ((degc >= 0) && (num % den != 0))
- degc++;
+ if (num > 0)
+ degc = ((num + (den/2))/den);
+ else if (num < 0)
+ degc = ((num - (den/2))/den);
+ else
+ degc = num/den;
+ pr_debug("raw_code:0x%x, sensor_num:%d, degc:%d\n",
+ adc_code, idx, degc);
return degc;
}
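The change above moves from biased rounding to round-to-nearest. As a quick worked example with illustrative numbers (den = 1000): for num = 1499 the old code computed 1499/1000 = 1 and then incremented to 2 because of the nonzero remainder, while the new expression (1499 + 500)/1000 yields 1; for num = -1501 the old truncation toward zero left -1, while the new (-1501 - 500)/1000 gives -2, the nearest degree in both cases.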
-static int tsens_tz_degc_to_code(int degc, int sensor_num)
+static int tsens_tz_degc_to_code(int degc, int idx)
{
- int code = ((degc * tmdev->sensor[sensor_num].slope_mul_tsens_factor)
- + tmdev->sensor[sensor_num].offset)/tmdev->tsens_factor;
+ int code = ((degc * tmdev->sensor[idx].slope_mul_tsens_factor)
+ + tmdev->sensor[idx].offset)/tmdev->tsens_factor;
if (code > TSENS_THRESHOLD_MAX_CODE)
code = TSENS_THRESHOLD_MAX_CODE;
else if (code < TSENS_THRESHOLD_MIN_CODE)
code = TSENS_THRESHOLD_MIN_CODE;
+ pr_debug("raw_code:0x%x, sensor_num:%d, degc:%d\n",
+ code, idx, degc);
return code;
}
-static void msm_tsens_get_temp(int sensor_num, unsigned long *temp)
+static void msm_tsens_get_temp(int sensor_hw_num, unsigned long *temp)
{
unsigned int code, sensor_addr;
+ int sensor_sw_id = -EINVAL, rc = 0;
if (!tmdev->prev_reading_avail) {
while (!(readl_relaxed(TSENS_TRDY_ADDR(tmdev->tsens_addr))
@@ -315,9 +368,17 @@
sensor_addr =
(unsigned int)TSENS_S0_STATUS_ADDR(tmdev->tsens_addr);
code = readl_relaxed(sensor_addr +
- (sensor_num << TSENS_STATUS_ADDR_OFFSET));
+ (sensor_hw_num << TSENS_STATUS_ADDR_OFFSET));
+ /* Obtain SW index to map the corresponding thermal zone's
+ * offset and slope for code to degc conversion. */
+ rc = tsens_get_sw_id_mapping(sensor_hw_num, &sensor_sw_id);
+ if (rc < 0) {
+ pr_err("tsens mapping index not found\n");
+ return;
+ }
+
*temp = tsens_tz_code_to_degc((code & TSENS_SN_STATUS_TEMP_MASK),
- sensor_num);
+ sensor_sw_id);
}
static int tsens_tz_get_temp(struct thermal_zone_device *thermal,
@@ -328,7 +389,7 @@
if (!tm_sensor || tm_sensor->mode != THERMAL_DEVICE_ENABLED || !temp)
return -EINVAL;
- msm_tsens_get_temp(tm_sensor->sensor_num, temp);
+ msm_tsens_get_temp(tm_sensor->sensor_hw_num, temp);
return 0;
}
@@ -344,6 +405,17 @@
}
EXPORT_SYMBOL(tsens_get_temp);
+int tsens_get_max_sensor_num(uint32_t *tsens_num_sensors)
+{
+ if (!tmdev)
+ return -ENODEV;
+
+ *tsens_num_sensors = tmdev->tsens_num_sensor;
+
+ return 0;
+}
+EXPORT_SYMBOL(tsens_get_max_sensor_num);
+
static int tsens_tz_get_mode(struct thermal_zone_device *thermal,
enum thermal_device_mode *mode)
{
@@ -392,8 +464,9 @@
hi_code = TSENS_THRESHOLD_MAX_CODE;
reg_cntl = readl_relaxed((TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
- (tmdev->tsens_addr) +
- (tm_sensor->sensor_num * 4)));
+ (tmdev->tsens_addr) +
+ (tm_sensor->sensor_hw_num *
+ TSENS_SN_ADDR_OFFSET)));
switch (trip) {
case TSENS_TRIP_WARM:
code = (reg_cntl & TSENS_UPPER_THRESHOLD_MASK)
@@ -418,8 +491,8 @@
if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
writel_relaxed(reg_cntl | mask,
(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
- (tmdev->tsens_addr) +
- (tm_sensor->sensor_num * 4)));
+ (tmdev->tsens_addr) +
+ (tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET)));
else {
if (code < lo_code || code > hi_code) {
pr_err("%s with invalid code %x\n", __func__, code);
@@ -427,7 +500,7 @@
}
writel_relaxed(reg_cntl & ~mask,
(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(tmdev->tsens_addr) +
- (tm_sensor->sensor_num * 4)));
+ (tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET)));
}
mb();
return 0;
@@ -438,13 +511,14 @@
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
unsigned int reg;
+ int sensor_sw_id = -EINVAL, rc = 0;
if (!tm_sensor || trip < 0 || !temp)
return -EINVAL;
reg = readl_relaxed(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
(tmdev->tsens_addr) +
- (tm_sensor->sensor_num * TSENS_SN_ADDR_OFFSET));
+ (tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET));
switch (trip) {
case TSENS_TRIP_WARM:
reg = (reg & TSENS_UPPER_THRESHOLD_MASK) >>
@@ -457,7 +531,12 @@
return -EINVAL;
}
- *temp = tsens_tz_code_to_degc(reg, tm_sensor->sensor_num);
+ rc = tsens_get_sw_id_mapping(tm_sensor->sensor_hw_num, &sensor_sw_id);
+ if (rc < 0) {
+ pr_err("tsens mapping index not found\n");
+ return rc;
+ }
+ *temp = tsens_tz_code_to_degc(reg, sensor_sw_id);
return 0;
}
@@ -465,9 +544,8 @@
static int tsens_tz_notify(struct thermal_zone_device *thermal,
int count, enum thermal_trip_type type)
{
- /* TSENS driver does not shutdown the device.
- All Thermal notification are sent to the
- thermal daemon to take appropriate action */
+	/* Critical temperature thresholds are enabled and will
+	 * shut down the device once they are crossed. */
pr_debug("%s debug\n", __func__);
return 1;
}
@@ -477,10 +555,14 @@
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
unsigned int reg_cntl;
- int code, hi_code, lo_code, code_err_chk;
+ int code, hi_code, lo_code, code_err_chk, sensor_sw_id = 0, rc = 0;
- code_err_chk = code = tsens_tz_degc_to_code(temp,
- tm_sensor->sensor_num);
+ rc = tsens_get_sw_id_mapping(tm_sensor->sensor_hw_num, &sensor_sw_id);
+ if (rc < 0) {
+ pr_err("tsens mapping index not found\n");
+ return rc;
+ }
+ code_err_chk = code = tsens_tz_degc_to_code(temp, sensor_sw_id);
if (!tm_sensor || trip < 0)
return -EINVAL;
@@ -488,8 +570,8 @@
hi_code = TSENS_THRESHOLD_MAX_CODE;
reg_cntl = readl_relaxed(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
- (tmdev->tsens_addr) +
- (tm_sensor->sensor_num * TSENS_SN_ADDR_OFFSET));
+ (tmdev->tsens_addr) + (tm_sensor->sensor_hw_num *
+ TSENS_SN_ADDR_OFFSET));
switch (trip) {
case TSENS_TRIP_WARM:
code <<= TSENS_UPPER_THRESHOLD_SHIFT;
@@ -512,7 +594,7 @@
writel_relaxed(reg_cntl | code, (TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
(tmdev->tsens_addr) +
- (tm_sensor->sensor_num *
+ (tm_sensor->sensor_hw_num *
TSENS_SN_ADDR_OFFSET)));
mb();
return 0;
@@ -543,35 +625,48 @@
tsens_work);
unsigned int i, status, threshold;
unsigned int sensor_status_addr, sensor_status_ctrl_addr;
+ int sensor_sw_id = -EINVAL, rc = 0;
sensor_status_addr =
(unsigned int)TSENS_S0_STATUS_ADDR(tmdev->tsens_addr);
sensor_status_ctrl_addr =
(unsigned int)TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
(tmdev->tsens_addr);
- for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+ for (i = 0; i < tm->tsens_num_sensor; i++) {
bool upper_thr = false, lower_thr = false;
- status = readl_relaxed(sensor_status_addr);
- threshold = readl_relaxed(sensor_status_ctrl_addr);
+ uint32_t addr_offset;
+
+ addr_offset = tm->sensor[i].sensor_hw_num *
+ TSENS_SN_ADDR_OFFSET;
+ status = readl_relaxed(sensor_status_addr + addr_offset);
+ threshold = readl_relaxed(sensor_status_ctrl_addr +
+ addr_offset);
if (status & TSENS_SN_STATUS_UPPER_STATUS) {
writel_relaxed(threshold | TSENS_UPPER_STATUS_CLR,
- sensor_status_ctrl_addr);
+ TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(
+ tmdev->tsens_addr + addr_offset));
upper_thr = true;
}
if (status & TSENS_SN_STATUS_LOWER_STATUS) {
writel_relaxed(threshold | TSENS_LOWER_STATUS_CLR,
- sensor_status_ctrl_addr);
+ TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(
+ tmdev->tsens_addr + addr_offset));
lower_thr = true;
}
if (upper_thr || lower_thr) {
/* Notify user space */
schedule_work(&tm->sensor[i].work);
- pr_debug("sensor:%d trigger temp (%d degC)\n", i,
+ rc = tsens_get_sw_id_mapping(
+ tm->sensor[i].sensor_hw_num,
+ &sensor_sw_id);
+ if (rc < 0)
+ pr_err("tsens mapping index not found\n");
+ pr_debug("sensor:%d trigger temp (%d degC)\n",
+ tm->sensor[i].sensor_hw_num,
tsens_tz_code_to_degc((status &
- TSENS_SN_STATUS_TEMP_MASK), i));
+ TSENS_SN_STATUS_TEMP_MASK),
+ sensor_sw_id));
}
- sensor_status_addr += TSENS_SN_ADDR_OFFSET;
- sensor_status_ctrl_addr += TSENS_SN_ADDR_OFFSET;
}
mb();
}
@@ -585,17 +680,19 @@
static void tsens_hw_init(void)
{
- unsigned int reg_cntl = 0;
+ unsigned int reg_cntl = 0, sensor_en = 0;
unsigned int i;
if (tmdev->tsens_local_init) {
writel_relaxed(reg_cntl, TSENS_CTRL_ADDR(tmdev->tsens_addr));
writel_relaxed(reg_cntl | TSENS_SW_RST,
TSENS_CTRL_ADDR(tmdev->tsens_addr));
- reg_cntl |= ((TSENS_62_5_MS_MEAS_PERIOD <<
- TSENS_MEAS_PERIOD_SHIFT) |
- (((1 << tmdev->tsens_num_sensor) - 1) << TSENS_SENSOR0_SHIFT) |
- TSENS_EN);
+ reg_cntl |= (TSENS_62_5_MS_MEAS_PERIOD <<
+ TSENS_MEAS_PERIOD_SHIFT);
+ for (i = 0; i < tmdev->tsens_num_sensor; i++)
+ sensor_en |= (1 << tmdev->sensor[i].sensor_hw_num);
+ sensor_en <<= TSENS_SENSOR0_SHIFT;
+ reg_cntl |= (sensor_en | TSENS_EN);
writel_relaxed(reg_cntl, TSENS_CTRL_ADDR(tmdev->tsens_addr));
writel_relaxed(TSENS_GLOBAL_INIT_DATA,
TSENS_GLOBAL_CONFIG(tmdev->tsens_addr));
@@ -604,10 +701,12 @@
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
writel_relaxed(TSENS_SN_MIN_MAX_STATUS_CTRL_DATA,
TSENS_SN_MIN_MAX_STATUS_CTRL(tmdev->tsens_addr)
- + (i * TSENS_SN_ADDR_OFFSET));
+ + (tmdev->sensor[i].sensor_hw_num *
+ TSENS_SN_ADDR_OFFSET));
writel_relaxed(TSENS_SN_REMOTE_CFG_DATA,
TSENS_SN_REMOTE_CONFIG(tmdev->tsens_addr)
- + (i * TSENS_SN_ADDR_OFFSET));
+ + (tmdev->sensor[i].sensor_hw_num *
+ TSENS_SN_ADDR_OFFSET));
}
pr_debug("Local TSENS control initialization\n");
}
@@ -634,6 +733,7 @@
tsens_calibration_mode = (calib_data[0] & TSENS_8X10_TSENS_CAL_SEL)
>> TSENS_8X10_CAL_SEL_SHIFT;
+ pr_debug("calib mode scheme:%x\n", tsens_calibration_mode);
if ((tsens_calibration_mode == TSENS_TWO_POINT_CALIB) ||
(tsens_calibration_mode == TSENS_ONE_POINT_CALIB_OPTION_2)) {
@@ -688,6 +788,9 @@
int32_t num = 0, den = 0;
tmdev->sensor[i].calib_data_point2 = calib_tsens_point2_data[i];
tmdev->sensor[i].calib_data_point1 = calib_tsens_point1_data[i];
+ pr_debug("sensor:%d - calib_data_point1:0x%x, calib_data_point2:0x%x\n",
+ i, tmdev->sensor[i].calib_data_point1,
+ tmdev->sensor[i].calib_data_point2);
if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
/* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
temp_120_degc - temp_30_degc (x2 - x1) */
@@ -732,6 +835,7 @@
tsens_calibration_mode = (calib_data[5] & TSENS_8X26_TSENS_CAL_SEL)
>> TSENS_8X26_CAL_SEL_SHIFT;
+ pr_debug("calib mode scheme:%x\n", tsens_calibration_mode);
if ((tsens_calibration_mode == TSENS_TWO_POINT_CALIB) ||
(tsens_calibration_mode == TSENS_ONE_POINT_CALIB_OPTION_2)) {
@@ -841,6 +945,9 @@
int32_t num = 0, den = 0;
tmdev->sensor[i].calib_data_point2 = calib_tsens_point2_data[i];
tmdev->sensor[i].calib_data_point1 = calib_tsens_point1_data[i];
+ pr_debug("sensor:%d - calib_data_point1:0x%x, calib_data_point2:0x%x\n",
+ i, tmdev->sensor[i].calib_data_point1,
+ tmdev->sensor[i].calib_data_point2);
if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
/* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
temp_120_degc - temp_30_degc (x2 - x1) */
@@ -881,11 +988,14 @@
TSENS_EEPROM_REDUNDANCY_SEL(tmdev->tsens_calib_addr));
calib_redun_sel = calib_redun_sel & TSENS_QFPROM_BACKUP_REDUN_SEL;
calib_redun_sel >>= TSENS_QFPROM_BACKUP_REDUN_SHIFT;
+ pr_debug("calib_redun_sel:%x\n", calib_redun_sel);
- for (i = 0; i < TSENS_MAIN_CALIB_ADDR_RANGE; i++)
+ for (i = 0; i < TSENS_MAIN_CALIB_ADDR_RANGE; i++) {
calib_data[i] = readl_relaxed(
(TSENS_EEPROM(tmdev->tsens_calib_addr))
+ (i * TSENS_SN_ADDR_OFFSET));
+ pr_debug("calib raw data row%d:0x%x\n", i, calib_data[i]);
+ }
if (calib_redun_sel == TSENS_QFPROM_BACKUP_SEL) {
tsens_calibration_mode = (calib_data[4] & TSENS_CAL_SEL_0_1)
@@ -893,6 +1003,7 @@
temp = (calib_data[5] & TSENS_CAL_SEL_2)
>> TSENS_CAL_SEL_SHIFT_2;
tsens_calibration_mode |= temp;
+ pr_debug("backup calib mode:%x\n", calib_redun_sel);
for (i = 0; i < TSENS_BACKUP_CALIB_ADDR_RANGE; i++)
calib_data_backup[i] = readl_relaxed(
@@ -978,6 +1089,7 @@
temp = (calib_data[3] & TSENS_CAL_SEL_2)
>> TSENS_CAL_SEL_SHIFT_2;
tsens_calibration_mode |= temp;
+ pr_debug("calib mode scheme:%x\n", tsens_calibration_mode);
if ((tsens_calibration_mode == TSENS_ONE_POINT_CALIB) ||
(tsens_calibration_mode ==
TSENS_ONE_POINT_CALIB_OPTION_2) ||
@@ -1089,6 +1201,7 @@
if ((tsens_calibration_mode == TSENS_ONE_POINT_CALIB_OPTION_2) ||
(tsens_calibration_mode == TSENS_TWO_POINT_CALIB)) {
+ pr_debug("one point calibration calculation\n");
calib_tsens_point1_data[0] =
((((tsens_base1_data) + tsens0_point1) << 2) |
TSENS_BIT_APPEND);
@@ -1166,6 +1279,9 @@
int32_t num = 0, den = 0;
tmdev->sensor[i].calib_data_point2 = calib_tsens_point2_data[i];
tmdev->sensor[i].calib_data_point1 = calib_tsens_point1_data[i];
+ pr_debug("sensor:%d - calib_data_point1:0x%x, calib_data_point2:0x%x\n",
+ i, tmdev->sensor[i].calib_data_point1,
+ tmdev->sensor[i].calib_data_point2);
if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
/* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
temp_120_degc - temp_30_degc (x2 - x1) */
@@ -1178,6 +1294,7 @@
tmdev->sensor[i].offset = (tmdev->sensor[i].calib_data_point1 *
tmdev->tsens_factor) - (TSENS_CAL_DEGC_POINT1 *
tmdev->sensor[i].slope_mul_tsens_factor);
+ pr_debug("offset:%d\n", tmdev->sensor[i].offset);
INIT_WORK(&tmdev->sensor[i].work, notify_uspace_tsens_fn);
tmdev->prev_reading_avail = false;
}
@@ -1209,6 +1326,7 @@
const struct device_node *of_node = pdev->dev.of_node;
struct resource *res_mem = NULL;
u32 *tsens_slope_data;
+ u32 *sensor_id;
u32 rc = 0, i, tsens_num_sensors, calib_type;
const char *tsens_calib_mode;
@@ -1266,6 +1384,29 @@
tmdev->tsens_local_init = of_property_read_bool(of_node,
"qcom,tsens-local-init");
+ sensor_id = devm_kzalloc(&pdev->dev,
+ tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+ if (!sensor_id) {
+ dev_err(&pdev->dev, "can not allocate sensor id\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,sensor-id", sensor_id, tsens_num_sensors);
+ if (rc) {
+ pr_debug("Default sensor id mapping\n");
+ for (i = 0; i < tsens_num_sensors; i++) {
+ tmdev->sensor[i].sensor_hw_num = i;
+ tmdev->sensor[i].sensor_sw_id = i;
+ }
+ } else {
+ pr_debug("Use specified sensor id mapping\n");
+ for (i = 0; i < tsens_num_sensors; i++) {
+ tmdev->sensor[i].sensor_hw_num = sensor_id[i];
+ tmdev->sensor[i].sensor_sw_id = i;
+ }
+ }
+
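A short illustration of the mapping set up above, using hypothetical values: if the devicetree supplied qcom,sensor-id = <0 1 2 7 8> for five sensors, the loop would produce sensor_hw_num = {0, 1, 2, 7, 8} with sensor_sw_id = {0, 1, 2, 3, 4}, so software index 3 drives physical sensor 7; without the property, both fields default to the identity mapping 0..4.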
tmdev->tsens_irq = platform_get_irq(pdev, 0);
if (tmdev->tsens_irq < 0) {
pr_err("Invalid get irq\n");
@@ -1408,9 +1549,9 @@
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
char name[18];
- snprintf(name, sizeof(name), "tsens_tz_sensor%d", i);
+ snprintf(name, sizeof(name), "tsens_tz_sensor%d",
+ tmdev->sensor[i].sensor_hw_num);
tmdev->sensor[i].mode = THERMAL_DEVICE_ENABLED;
- tmdev->sensor[i].sensor_num = i;
tmdev->sensor[i].tz_dev = thermal_zone_device_register(name,
TSENS_TRIP_NUM, &tmdev->sensor[i],
&tsens_thermal_zone_ops, 0, 0, 0, 0);
@@ -1490,11 +1631,10 @@
},
};
-static int __init tsens_tm_init_driver(void)
+int __init tsens_tm_init_driver(void)
{
return platform_driver_register(&tsens_tm_driver);
}
-arch_initcall(tsens_tm_init_driver);
static int __init tsens_thermal_register(void)
{
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 5aca48d..12ac3bc 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -23,17 +23,447 @@
#include <linux/msm_thermal.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include <mach/cpufreq.h>
+#include <mach/rpm-regulator.h>
+#include <mach/rpm-regulator-smd.h>
+#include <linux/regulator/consumer.h>
-static int enabled;
+#define MAX_RAILS 5
+
static struct msm_thermal_data msm_thermal_info;
static uint32_t limited_max_freq = MSM_CPUFREQ_NO_LIMIT;
static struct delayed_work check_temp_work;
+static bool core_control_enabled;
+static uint32_t cpus_offlined;
+static DEFINE_MUTEX(core_control_mutex);
+static int enabled;
+static int rails_cnt;
+static int psm_rails_cnt;
static int limit_idx;
static int limit_idx_low;
static int limit_idx_high;
+static int max_tsens_num;
static struct cpufreq_frequency_table *table;
+static uint32_t usefreq;
+static int freq_table_get;
+static bool vdd_rstr_enabled;
+static bool vdd_rstr_nodes_called;
+static bool vdd_rstr_probed;
+static bool psm_enabled;
+static bool psm_nodes_called;
+static bool psm_probed;
+static DEFINE_MUTEX(vdd_rstr_mutex);
+static DEFINE_MUTEX(psm_mutex);
+
+struct rail {
+ const char *name;
+ uint32_t freq_req;
+ uint32_t min_level;
+ uint32_t num_levels;
+ uint32_t curr_level;
+ uint32_t levels[3];
+ struct kobj_attribute value_attr;
+ struct kobj_attribute level_attr;
+ struct regulator *reg;
+ struct attribute_group attr_gp;
+};
+
+struct psm_rail {
+ const char *name;
+ uint8_t init;
+ uint8_t mode;
+ struct kobj_attribute mode_attr;
+ struct rpm_regulator *reg;
+ struct attribute_group attr_gp;
+};
+
+static struct psm_rail *psm_rails;
+static struct rail *rails;
+
+struct vdd_rstr_enable {
+ struct kobj_attribute ko_attr;
+ uint32_t enabled;
+};
+
+/* For SMPS only */
+enum PMIC_SW_MODE {
+ PMIC_AUTO_MODE = RPM_REGULATOR_MODE_AUTO,
+ PMIC_IPEAK_MODE = RPM_REGULATOR_MODE_IPEAK,
+ PMIC_PWM_MODE = RPM_REGULATOR_MODE_HPM,
+};
+
+#define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \
+ ko_attr.attr.name = __stringify(_name); \
+	ko_attr.attr.mode = 0444; \
+ ko_attr.show = vdd_rstr_reg_##_name##_show; \
+ ko_attr.store = NULL; \
+ _rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \
+ ko_attr.attr.name = __stringify(_name); \
+	ko_attr.attr.mode = 0644; \
+ ko_attr.show = vdd_rstr_reg_##_name##_show; \
+ ko_attr.store = vdd_rstr_reg_##_name##_store; \
+ _rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define VDD_RSTR_ENABLE_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct vdd_rstr_enable, ko_attr));
+
+#define VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct rail, value_attr));
+
+#define VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct rail, level_attr));
+
+#define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \
+ ko_attr.attr.name = __stringify(_name); \
+	ko_attr.attr.mode = 0644; \
+ ko_attr.show = psm_reg_##_name##_show; \
+ ko_attr.store = psm_reg_##_name##_store; \
+ _rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define PSM_REG_MODE_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct psm_rail, mode_attr));
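The attribute macros above rely on token pasting and __stringify; since the call sites are not part of these hunks, the arguments below are assumed for illustration. A call such as PSM_RW_ATTRIB(psm_rails[i], psm_rails[i].mode_attr, 0, mode) expands to roughly:

	psm_rails[i].mode_attr.attr.name = "mode";
	psm_rails[i].mode_attr.attr.mode = 0644;
	psm_rails[i].mode_attr.show = psm_reg_mode_show;
	psm_rails[i].mode_attr.store = psm_reg_mode_store;
	psm_rails[i].attr_gp.attrs[0] = &psm_rails[i].mode_attr.attr;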
+/* If freq table exists, then we can send freq request */
+static int check_freq_table(void)
+{
+ int ret = 0;
+ struct cpufreq_frequency_table *table = NULL;
+
+ table = cpufreq_frequency_get_table(0);
+ if (!table) {
+ pr_debug("%s: error reading cpufreq table\n", __func__);
+ return -EINVAL;
+ }
+ freq_table_get = 1;
+
+ return ret;
+}
+
+static int update_cpu_min_freq_all(uint32_t min)
+{
+ int cpu = 0;
+ int ret = 0;
+
+ if (!freq_table_get) {
+ ret = check_freq_table();
+ if (ret) {
+ pr_err("%s:Fail to get freq table\n", __func__);
+ return ret;
+ }
+ }
+ /* If min is larger than allowed max */
+ if (min != MSM_CPUFREQ_NO_LIMIT &&
+ min > table[limit_idx_high].frequency)
+ min = table[limit_idx_high].frequency;
+
+ for_each_possible_cpu(cpu) {
+ ret = msm_cpufreq_set_freq_limits(cpu, min, limited_max_freq);
+
+ if (ret) {
+ pr_err("%s:Fail to set limits for cpu%d\n",
+ __func__, cpu);
+ return ret;
+ }
+
+ if (cpufreq_update_policy(cpu))
+ pr_debug("%s: Cannot update policy for cpu%d\n",
+ __func__, cpu);
+ }
+
+ return ret;
+}
+
+static int vdd_restriction_apply_freq(struct rail *r, int level)
+{
+ int ret = 0;
+
+ /* level = -1: disable, level = 0,1,2..n: enable */
+ if (level == -1) {
+ ret = update_cpu_min_freq_all(r->min_level);
+ if (ret)
+ return ret;
+ else
+ r->curr_level = -1;
+ } else if (level >= 0 && level < (r->num_levels)) {
+ ret = update_cpu_min_freq_all(r->levels[level]);
+ if (ret)
+ return ret;
+ else
+ r->curr_level = level;
+ } else {
+ pr_err("level input:%d is not within range\n", level);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vdd_restriction_apply_voltage(struct rail *r, int level)
+{
+ int ret = 0;
+
+ if (r->reg == NULL) {
+ pr_info("Do not have regulator handle:%s, can't apply vdd\n",
+ r->name);
+ return -EFAULT;
+ }
+ /* level = -1: disable, level = 0,1,2..n: enable */
+ if (level == -1) {
+ ret = regulator_set_voltage(r->reg, r->min_level,
+ r->levels[r->num_levels - 1]);
+ if (!ret)
+ r->curr_level = -1;
+ } else if (level >= 0 && level < (r->num_levels)) {
+ ret = regulator_set_voltage(r->reg, r->levels[level],
+ r->levels[r->num_levels - 1]);
+ if (!ret)
+ r->curr_level = level;
+ } else {
+ pr_err("level input:%d is not within range\n", level);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+/* 1:enable, 0:disable */
+static int vdd_restriction_apply_all(int en)
+{
+ int i = 0;
+ int fail_cnt = 0;
+ int ret = 0;
+
+ for (i = 0; i < rails_cnt; i++) {
+ if (rails[i].freq_req == 1 && freq_table_get)
+ ret = vdd_restriction_apply_freq(&rails[i],
+ en ? 0 : -1);
+ else
+ ret = vdd_restriction_apply_voltage(&rails[i],
+ en ? 0 : -1);
+ if (ret) {
+ pr_err("Cannot set voltage for %s", rails[i].name);
+ fail_cnt++;
+ }
+ }
+	/* Check fail_cnt to determine whether the restriction was
+	 * applied successfully on all rails. */
+ if (fail_cnt)
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Setting all rails the same mode */
+static int psm_set_mode_all(int mode)
+{
+ int i = 0;
+ int fail_cnt = 0;
+ int ret = 0;
+
+ for (i = 0; i < psm_rails_cnt; i++) {
+ if (psm_rails[i].mode != mode) {
+ ret = rpm_regulator_set_mode(psm_rails[i].reg, mode);
+ if (ret) {
+ pr_err("Cannot set mode:%d for %s",
+ mode, psm_rails[i].name);
+ fail_cnt++;
+ } else
+ psm_rails[i].mode = mode;
+ }
+ }
+
+ return fail_cnt ? (-EFAULT) : ret;
+}
+
+static ssize_t vdd_rstr_en_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", en->enabled);
+}
+
+static ssize_t vdd_rstr_en_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int i = 0;
+ uint8_t en_cnt = 0;
+ uint8_t dis_cnt = 0;
+ uint32_t val = 0;
+ struct kernel_param kp;
+ struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
+
+ mutex_lock(&vdd_rstr_mutex);
+ kp.arg = &val;
+ ret = param_set_bool(buf, &kp);
+ if (ret) {
+ pr_err("Invalid input %s for enabled\n", buf);
+ goto done_vdd_rstr_en;
+ }
+
+ if ((val == 0) && (en->enabled == 0))
+ goto done_vdd_rstr_en;
+
+ for (i = 0; i < rails_cnt; i++) {
+ if (rails[i].freq_req == 1 && freq_table_get)
+ ret = vdd_restriction_apply_freq(&rails[i],
+ (val) ? 0 : -1);
+ else
+ ret = vdd_restriction_apply_voltage(&rails[i],
+ (val) ? 0 : -1);
+
+		/* Even if setting one rail fails, keep trying the
+		 * others; continue the loop. */
+ if (ret)
+ pr_err("Set vdd restriction for %s failed\n",
+ rails[i].name);
+ else {
+ if (val)
+ en_cnt++;
+ else
+ dis_cnt++;
+ }
+ }
+ /* As long as one rail is enabled, vdd rstr is enabled */
+ if (val && en_cnt)
+ en->enabled = 1;
+ else if (!val && (dis_cnt == rails_cnt))
+ en->enabled = 0;
+
+done_vdd_rstr_en:
+ mutex_unlock(&vdd_rstr_mutex);
+ return count;
+}
+
+static struct vdd_rstr_enable vdd_rstr_en = {
+ .ko_attr.attr.name = __stringify(enabled),
+	.ko_attr.attr.mode = 0644,
+ .ko_attr.show = vdd_rstr_en_show,
+ .ko_attr.store = vdd_rstr_en_store,
+ .enabled = 1,
+};
+
+static struct attribute *vdd_rstr_en_attribs[] = {
+ &vdd_rstr_en.ko_attr.attr,
+ NULL,
+};
+
+static struct attribute_group vdd_rstr_en_attribs_gp = {
+ .attrs = vdd_rstr_en_attribs,
+};
+
+static ssize_t vdd_rstr_reg_value_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	int val = 0;
+	struct rail *reg = VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr);
+	/* -1:disabled, -2:failed to get regulator handle */
+	if (reg->curr_level < 0)
+		val = reg->curr_level;
+	else
+		val = reg->levels[reg->curr_level];
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t vdd_rstr_reg_level_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reg->curr_level);
+}
+
+static ssize_t vdd_rstr_reg_level_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+
+ struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
+
+ mutex_lock(&vdd_rstr_mutex);
+ if (vdd_rstr_en.enabled == 0)
+ goto done_store_level;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s for level\n", buf);
+ goto done_store_level;
+ }
+
+ if (val < 0 || val > reg->num_levels - 1) {
+ pr_err(" Invalid number %d for level\n", val);
+ goto done_store_level;
+ }
+
+ if (val != reg->curr_level) {
+ if (reg->freq_req == 1 && freq_table_get)
+ update_cpu_min_freq_all(reg->levels[val]);
+ else {
+ ret = vdd_restriction_apply_voltage(reg, val);
+ if (ret) {
+ pr_err( \
+ "Set vdd restriction for regulator %s failed\n",
+ reg->name);
+ goto done_store_level;
+ }
+ }
+ reg->curr_level = val;
+ }
+
+done_store_level:
+ mutex_unlock(&vdd_rstr_mutex);
+ return count;
+}
+
+static ssize_t psm_reg_mode_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
+}
+
+static ssize_t psm_reg_mode_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+ struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+
+ mutex_lock(&psm_mutex);
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s for mode\n", buf);
+ goto done_psm_store;
+ }
+
+ if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) {
+ pr_err(" Invalid number %d for mode\n", val);
+ goto done_psm_store;
+ }
+
+ if (val != reg->mode) {
+ ret = rpm_regulator_set_mode(reg->reg, val);
+ if (ret) {
+ pr_err( \
+ "Fail to set PMIC SW Mode:%d for %s\n",
+ val, reg->name);
+ goto done_psm_store;
+ }
+ reg->mode = val;
+ }
+
+done_psm_store:
+ mutex_unlock(&psm_mutex);
+ return count;
+}
static int msm_thermal_get_freq_table(void)
{
@@ -42,7 +472,7 @@
table = cpufreq_frequency_get_table(0);
if (table == NULL) {
- pr_debug("%s: error reading cpufreq table\n", __func__);
+ pr_debug("%s: error reading cpufreq table\n", KBUILD_MODNAME);
ret = -EINVAL;
goto fail;
}
@@ -67,17 +497,166 @@
limited_max_freq = max_freq;
if (max_freq != MSM_CPUFREQ_NO_LIMIT)
- pr_info("msm_thermal: Limiting cpu%d max frequency to %d\n",
- cpu, max_freq);
+ pr_info("%s: Limiting cpu%d max frequency to %d\n",
+ KBUILD_MODNAME, cpu, max_freq);
else
- pr_info("msm_thermal: Max frequency reset for cpu%d\n", cpu);
+ pr_info("%s: Max frequency reset for cpu%d\n",
+ KBUILD_MODNAME, cpu);
ret = cpufreq_update_policy(cpu);
return ret;
}
-static void check_temp(struct work_struct *work)
+static void __cpuinit do_core_control(long temp)
+{
+ int i = 0;
+ int ret = 0;
+
+ if (!core_control_enabled)
+ return;
+
+ mutex_lock(&core_control_mutex);
+ if (msm_thermal_info.core_control_mask &&
+ temp >= msm_thermal_info.core_limit_temp_degC) {
+ for (i = num_possible_cpus(); i > 0; i--) {
+ if (!(msm_thermal_info.core_control_mask & BIT(i)))
+ continue;
+ if (cpus_offlined & BIT(i) && !cpu_online(i))
+ continue;
+ pr_info("%s: Set Offline: CPU%d Temp: %ld\n",
+ KBUILD_MODNAME, i, temp);
+ ret = cpu_down(i);
+ if (ret)
+ pr_err("%s: Error %d offline core %d\n",
+ KBUILD_MODNAME, ret, i);
+ cpus_offlined |= BIT(i);
+ break;
+ }
+ } else if (msm_thermal_info.core_control_mask && cpus_offlined &&
+ temp <= (msm_thermal_info.core_limit_temp_degC -
+ msm_thermal_info.core_temp_hysteresis_degC)) {
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (!(cpus_offlined & BIT(i)))
+ continue;
+ cpus_offlined &= ~BIT(i);
+ pr_info("%s: Allow Online CPU%d Temp: %ld\n",
+ KBUILD_MODNAME, i, temp);
+ /* If this core is already online, then bring up the
+ * next offlined core.
+ */
+ if (cpu_online(i))
+ continue;
+ ret = cpu_up(i);
+ if (ret)
+ pr_err("%s: Error %d online core %d\n",
+ KBUILD_MODNAME, ret, i);
+ break;
+ }
+ }
+ mutex_unlock(&core_control_mutex);
+}
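As a hypothetical illustration of the mask handling above: with core_control_mask = 0xE (CPUs 1-3 eligible, CPU0 never touched) and the core limit temperature exceeded, each poll offlines the highest eligible core that is still online, so successive passes take CPU3, then CPU2, then CPU1 down; once the temperature drops below core_limit_temp_degC minus core_temp_hysteresis_degC, cores are released one per pass starting from the lowest offlined CPU.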
+
+static int do_vdd_restriction(void)
+{
+ struct tsens_device tsens_dev;
+ long temp = 0;
+ int ret = 0;
+ int i = 0;
+ int dis_cnt = 0;
+
+ if (!vdd_rstr_enabled)
+ return ret;
+
+ if (usefreq && !freq_table_get) {
+ if (check_freq_table())
+ return ret;
+ }
+
+ mutex_lock(&vdd_rstr_mutex);
+ for (i = 0; i < max_tsens_num; i++) {
+ tsens_dev.sensor_num = i;
+ ret = tsens_get_temp(&tsens_dev, &temp);
+ if (ret) {
+ pr_debug("%s: Unable to read TSENS sensor %d\n",
+ __func__, tsens_dev.sensor_num);
+ dis_cnt++;
+ continue;
+ }
+ if (temp <= msm_thermal_info.vdd_rstr_temp_hyst_degC &&
+ vdd_rstr_en.enabled == 0) {
+ ret = vdd_restriction_apply_all(1);
+ if (ret) {
+ pr_err( \
+ "Enable vdd rstr votlage for all failed\n");
+ goto exit;
+ }
+ vdd_rstr_en.enabled = 1;
+ goto exit;
+ } else if (temp > msm_thermal_info.vdd_rstr_temp_degC &&
+ vdd_rstr_en.enabled == 1)
+ dis_cnt++;
+ }
+ if (dis_cnt == max_tsens_num) {
+ ret = vdd_restriction_apply_all(0);
+ if (ret) {
+ pr_err("Disable vdd rstr votlage for all failed\n");
+ goto exit;
+ }
+ vdd_rstr_en.enabled = 0;
+ }
+exit:
+ mutex_unlock(&vdd_rstr_mutex);
+ return ret;
+}
+
+static int do_psm(void)
+{
+ struct tsens_device tsens_dev;
+ long temp = 0;
+ int ret = 0;
+ int i = 0;
+ int auto_cnt = 0;
+
+ mutex_lock(&psm_mutex);
+ for (i = 0; i < max_tsens_num; i++) {
+ tsens_dev.sensor_num = i;
+ ret = tsens_get_temp(&tsens_dev, &temp);
+ if (ret) {
+ pr_debug("%s: Unable to read TSENS sensor %d\n",
+ __func__, tsens_dev.sensor_num);
+ auto_cnt++;
+ continue;
+ }
+
+		/* As long as one sensor is above the threshold, set PWM
+		 * mode on all rails and stop the loop. Set auto mode when
+		 * all rails are below the threshold. */
+ if (temp > msm_thermal_info.psm_temp_degC) {
+ ret = psm_set_mode_all(PMIC_PWM_MODE);
+ if (ret) {
+ pr_err("Set pwm mode for all failed\n");
+ goto exit;
+ }
+ break;
+ } else if (temp <= msm_thermal_info.psm_temp_hyst_degC)
+ auto_cnt++;
+ }
+
+ if (auto_cnt == max_tsens_num) {
+ ret = psm_set_mode_all(PMIC_AUTO_MODE);
+ if (ret) {
+ pr_err("Set auto mode for all failed\n");
+ goto exit;
+ }
+ }
+
+exit:
+ mutex_unlock(&psm_mutex);
+ return ret;
+}
+
+static void __cpuinit check_temp(struct work_struct *work)
{
static int limit_init;
struct tsens_device tsens_dev;
@@ -85,12 +664,11 @@
uint32_t max_freq = limited_max_freq;
int cpu = 0;
int ret = 0;
-
tsens_dev.sensor_num = msm_thermal_info.sensor_id;
ret = tsens_get_temp(&tsens_dev, &temp);
if (ret) {
- pr_debug("msm_thermal: Unable to read TSENS sensor %d\n",
- tsens_dev.sensor_num);
+ pr_debug("%s: Unable to read TSENS sensor %d\n",
+ KBUILD_MODNAME, tsens_dev.sensor_num);
goto reschedule;
}
@@ -102,6 +680,10 @@
limit_init = 1;
}
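+	/* Run the extra mitigation paths: core hotplug, vdd restriction, PMIC mode */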
+ do_core_control(temp);
+ do_vdd_restriction();
+ do_psm();
+
if (temp >= msm_thermal_info.limit_temp_degC) {
if (limit_idx == limit_idx_low)
goto reschedule;
@@ -129,8 +711,9 @@
for_each_possible_cpu(cpu) {
ret = update_cpu_max_freq(cpu, max_freq);
if (ret)
- pr_debug("Unable to limit cpu%d max freq to %d\n",
- cpu, max_freq);
+ pr_debug(
+ "%s: Unable to limit cpu%d max freq to %d\n",
+ KBUILD_MODNAME, cpu, max_freq);
}
reschedule:
@@ -139,7 +722,36 @@
msecs_to_jiffies(msm_thermal_info.poll_ms));
}
-static void disable_msm_thermal(void)
+static int __cpuinit msm_thermal_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
+ if (core_control_enabled &&
+ (msm_thermal_info.core_control_mask & BIT(cpu)) &&
+ (cpus_offlined & BIT(cpu))) {
+ pr_info(
+ "%s: Preventing cpu%d from coming online.\n",
+ KBUILD_MODNAME, cpu);
+ return NOTIFY_BAD;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata msm_thermal_cpu_notifier = {
+ .notifier_call = msm_thermal_cpu_callback,
+};
+
+/*
+ * Reset the CPU frequency limits here. The core online/offline status is
+ * carried over to the process stopping msm_thermal, since we don't want to
+ * online a core and bring back the thermal issues.
+ */
+static void __cpuinit disable_msm_thermal(void)
{
int cpu = 0;
@@ -155,7 +767,7 @@
}
}
-static int set_enabled(const char *val, const struct kernel_param *kp)
+static int __cpuinit set_enabled(const char *val, const struct kernel_param *kp)
{
int ret = 0;
@@ -163,9 +775,10 @@
if (!enabled)
disable_msm_thermal();
else
- pr_info("msm_thermal: no action for enabled = %d\n", enabled);
+ pr_info("%s: no action for enabled = %d\n",
+ KBUILD_MODNAME, enabled);
- pr_info("msm_thermal: enabled = %d\n", enabled);
+ pr_info("%s: enabled = %d\n", KBUILD_MODNAME, enabled);
return ret;
}
@@ -178,18 +791,561 @@
module_param_cb(enabled, &module_ops, &enabled, 0644);
MODULE_PARM_DESC(enabled, "enforce thermal limit on cpu");
+
+/* Call with core_control_mutex locked */
+static int __cpuinit update_offline_cores(int val)
+{
+ int cpu = 0;
+ int ret = 0;
+
+ cpus_offlined = msm_thermal_info.core_control_mask & val;
+ if (!core_control_enabled)
+ return 0;
+
+ for_each_possible_cpu(cpu) {
+ if (!(cpus_offlined & BIT(cpu)))
+ continue;
+ if (!cpu_online(cpu))
+ continue;
+ ret = cpu_down(cpu);
+ if (ret)
+ pr_err("%s: Unable to offline cpu%d\n",
+ KBUILD_MODNAME, cpu);
+ }
+ return ret;
+}
+
+static ssize_t show_cc_enabled(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", core_control_enabled);
+}
+
+static ssize_t __cpuinit store_cc_enabled(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+
+ mutex_lock(&core_control_mutex);
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
+ goto done_store_cc;
+ }
+
+ if (core_control_enabled == !!val)
+ goto done_store_cc;
+
+ core_control_enabled = !!val;
+ if (core_control_enabled) {
+ pr_info("%s: Core control enabled\n", KBUILD_MODNAME);
+ register_cpu_notifier(&msm_thermal_cpu_notifier);
+ update_offline_cores(cpus_offlined);
+ } else {
+ pr_info("%s: Core control disabled\n", KBUILD_MODNAME);
+ unregister_cpu_notifier(&msm_thermal_cpu_notifier);
+ }
+
+done_store_cc:
+ mutex_unlock(&core_control_mutex);
+ return count;
+}
+
+static ssize_t show_cpus_offlined(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", cpus_offlined);
+}
+
+static ssize_t __cpuinit store_cpus_offlined(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ uint32_t val = 0;
+
+ mutex_lock(&core_control_mutex);
+ ret = kstrtouint(buf, 10, &val);
+ if (ret) {
+ pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
+ goto done_cc;
+ }
+
+ if (enabled) {
+ pr_err("%s: Ignoring request; polling thread is enabled.\n",
+ KBUILD_MODNAME);
+ goto done_cc;
+ }
+
+ if (cpus_offlined == val)
+ goto done_cc;
+
+ update_offline_cores(val);
+done_cc:
+ mutex_unlock(&core_control_mutex);
+ return count;
+}
+
+static __cpuinitdata struct kobj_attribute cc_enabled_attr =
+__ATTR(enabled, 0644, show_cc_enabled, store_cc_enabled);
+
+static __cpuinitdata struct kobj_attribute cpus_offlined_attr =
+__ATTR(cpus_offlined, 0644, show_cpus_offlined, store_cpus_offlined);
+
+static __cpuinitdata struct attribute *cc_attrs[] = {
+ &cc_enabled_attr.attr,
+ &cpus_offlined_attr.attr,
+ NULL,
+};
+
+static __cpuinitdata struct attribute_group cc_attr_group = {
+ .attrs = cc_attrs,
+};
+
+static __init int msm_thermal_add_cc_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *cc_kobj = NULL;
+ int ret = 0;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("%s: cannot find kobject for module\n",
+ KBUILD_MODNAME);
+ ret = -ENOENT;
+ goto done_cc_nodes;
+ }
+
+ cc_kobj = kobject_create_and_add("core_control", module_kobj);
+ if (!cc_kobj) {
+ pr_err("%s: cannot create core control kobj\n",
+ KBUILD_MODNAME);
+ ret = -ENOMEM;
+ goto done_cc_nodes;
+ }
+
+ ret = sysfs_create_group(cc_kobj, &cc_attr_group);
+ if (ret) {
+ pr_err("%s: cannot create group\n", KBUILD_MODNAME);
+ goto done_cc_nodes;
+ }
+
+ return 0;
+
+done_cc_nodes:
+ if (cc_kobj)
+ kobject_del(cc_kobj);
+ return ret;
+}
+
int __devinit msm_thermal_init(struct msm_thermal_data *pdata)
{
int ret = 0;
BUG_ON(!pdata);
- BUG_ON(pdata->sensor_id >= TSENS_MAX_SENSORS);
+ tsens_get_max_sensor_num(&max_tsens_num);
+	BUG_ON(pdata->sensor_id >= max_tsens_num);
memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
enabled = 1;
+ core_control_enabled = 1;
INIT_DELAYED_WORK(&check_temp_work, check_temp);
schedule_delayed_work(&check_temp_work, 0);
+ register_cpu_notifier(&msm_thermal_cpu_notifier);
+
+ return ret;
+}
+
+static int vdd_restriction_reg_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < rails_cnt; i++) {
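+		/* A rail with freq_req set is mitigated via a CPU frequency
+		 * cap; otherwise its regulator voltage is restricted. */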
+ if (rails[i].freq_req == 1) {
+ usefreq |= BIT(i);
+ check_freq_table();
+ /* Restrict frequency by default until we have made
+ * our first temp reading */
+ if (freq_table_get)
+ ret = vdd_restriction_apply_freq(&rails[i], 0);
+ else
+				pr_info("%s: Defer vdd rstr freq init\n",
+						__func__);
+ } else {
+ rails[i].reg = devm_regulator_get(&pdev->dev,
+ rails[i].name);
+ if (IS_ERR_OR_NULL(rails[i].reg)) {
+ ret = PTR_ERR(rails[i].reg);
+ if (ret != -EPROBE_DEFER) {
+				pr_err(
+					"%s: could not get regulator %s\n",
+					__func__, rails[i].name);
+ rails[i].reg = NULL;
+ rails[i].curr_level = -2;
+ return ret;
+ }
+ return ret;
+ }
+			/* Restrict voltage by default until we have made
+			 * our first temp reading */
+ ret = vdd_restriction_apply_voltage(&rails[i], 0);
+ }
+ }
+
+ return ret;
+}
+
+static int psm_reg_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+
+ for (i = 0; i < psm_rails_cnt; i++) {
+ psm_rails[i].reg = rpm_regulator_get(&pdev->dev,
+ psm_rails[i].name);
+ if (IS_ERR_OR_NULL(psm_rails[i].reg)) {
+ ret = PTR_ERR(psm_rails[i].reg);
+ if (ret != -EPROBE_DEFER) {
+				pr_err("%s: could not get rpm regulator %s\n",
+					__func__, psm_rails[i].name);
+ psm_rails[i].reg = NULL;
+ goto psm_reg_exit;
+ }
+ return ret;
+ }
+ /* Apps default vote for PWM mode */
+ psm_rails[i].init = PMIC_PWM_MODE;
+ ret = rpm_regulator_set_mode(psm_rails[i].reg,
+ psm_rails[i].init);
+ if (ret) {
+ pr_err("%s: Cannot set PMIC PWM mode\n", __func__);
+ return ret;
+		}
+		psm_rails[i].mode = PMIC_PWM_MODE;
+ }
+
+ return ret;
+
+psm_reg_exit:
+ if (ret) {
+ for (j = 0; j < i; j++) {
+ if (psm_rails[j].reg != NULL)
+ rpm_regulator_put(psm_rails[j].reg);
+ }
+ }
+
+ return ret;
+}
+
+static int msm_thermal_add_vdd_rstr_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *vdd_rstr_kobj = NULL;
+ struct kobject *vdd_rstr_reg_kobj[MAX_RAILS] = {0};
+ int rc = 0;
+ int i = 0;
+
+ if (!vdd_rstr_probed) {
+ vdd_rstr_nodes_called = true;
+ return rc;
+ }
+
+ if (vdd_rstr_probed && rails_cnt == 0)
+ return rc;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("%s: cannot find kobject for module %s\n",
+ __func__, KBUILD_MODNAME);
+ rc = -ENOENT;
+ goto thermal_sysfs_add_exit;
+ }
+
+ vdd_rstr_kobj = kobject_create_and_add("vdd_restriction", module_kobj);
+ if (!vdd_rstr_kobj) {
+ pr_err("%s: cannot create vdd_restriction kobject\n", __func__);
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ rc = sysfs_create_group(vdd_rstr_kobj, &vdd_rstr_en_attribs_gp);
+ if (rc) {
+ pr_err("%s: cannot create kobject attribute group\n", __func__);
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ for (i = 0; i < rails_cnt; i++) {
+ vdd_rstr_reg_kobj[i] = kobject_create_and_add(rails[i].name,
+ vdd_rstr_kobj);
+ if (!vdd_rstr_reg_kobj[i]) {
+			pr_err("%s: cannot create kobject for %s\n",
+ __func__, rails[i].name);
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ rails[i].attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 3,
+ GFP_KERNEL);
+ if (!rails[i].attr_gp.attrs) {
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ VDD_RES_RW_ATTRIB(rails[i], rails[i].level_attr, 0, level);
+ VDD_RES_RO_ATTRIB(rails[i], rails[i].value_attr, 1, value);
+ rails[i].attr_gp.attrs[2] = NULL;
+
+ rc = sysfs_create_group(vdd_rstr_reg_kobj[i],
+ &rails[i].attr_gp);
+ if (rc) {
+ pr_err("%s: cannot create attribute group for %s\n",
+ __func__, rails[i].name);
+ goto thermal_sysfs_add_exit;
+ }
+ }
+
+ return rc;
+
+thermal_sysfs_add_exit:
+ if (rc) {
+ for (i = 0; i < rails_cnt; i++) {
+ kobject_del(vdd_rstr_reg_kobj[i]);
+ kfree(rails[i].attr_gp.attrs);
+ }
+ if (vdd_rstr_kobj)
+ kobject_del(vdd_rstr_kobj);
+ }
+ return rc;
+}
+
+static int msm_thermal_add_psm_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *psm_kobj = NULL;
+ struct kobject *psm_reg_kobj[MAX_RAILS] = {0};
+ int rc = 0;
+ int i = 0;
+
+ if (!psm_probed) {
+ psm_nodes_called = true;
+ return rc;
+ }
+
+ if (psm_probed && psm_rails_cnt == 0)
+ return rc;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("%s: cannot find kobject for module %s\n",
+ __func__, KBUILD_MODNAME);
+ rc = -ENOENT;
+ goto psm_node_exit;
+ }
+
+ psm_kobj = kobject_create_and_add("pmic_sw_mode", module_kobj);
+ if (!psm_kobj) {
+ pr_err("%s: cannot create psm kobject\n", KBUILD_MODNAME);
+ rc = -ENOMEM;
+ goto psm_node_exit;
+ }
+
+ for (i = 0; i < psm_rails_cnt; i++) {
+ psm_reg_kobj[i] = kobject_create_and_add(psm_rails[i].name,
+ psm_kobj);
+ if (!psm_reg_kobj[i]) {
+			pr_err("%s: cannot create kobject for %s\n",
+ KBUILD_MODNAME, psm_rails[i].name);
+ rc = -ENOMEM;
+ goto psm_node_exit;
+ }
+		psm_rails[i].attr_gp.attrs = kzalloc(
+ sizeof(struct attribute *) * 2, GFP_KERNEL);
+ if (!psm_rails[i].attr_gp.attrs) {
+ rc = -ENOMEM;
+ goto psm_node_exit;
+ }
+
+ PSM_RW_ATTRIB(psm_rails[i], psm_rails[i].mode_attr, 0, mode);
+ psm_rails[i].attr_gp.attrs[1] = NULL;
+
+ rc = sysfs_create_group(psm_reg_kobj[i], &psm_rails[i].attr_gp);
+ if (rc) {
+ pr_err("%s: cannot create attribute group for %s\n",
+ KBUILD_MODNAME, psm_rails[i].name);
+ goto psm_node_exit;
+ }
+ }
+
+ return rc;
+
+psm_node_exit:
+ if (rc) {
+ for (i = 0; i < psm_rails_cnt; i++) {
+ kobject_del(psm_reg_kobj[i]);
+ kfree(psm_rails[i].attr_gp.attrs);
+ }
+ if (psm_kobj)
+ kobject_del(psm_kobj);
+ }
+ return rc;
+}
+
+static int probe_vdd_rstr(struct device_node *node,
+ struct msm_thermal_data *data, struct platform_device *pdev)
+{
+ int ret = 0;
+ int i = 0;
+ int arr_size;
+ char *key = NULL;
+ struct device_node *child_node = NULL;
+
+ key = "qcom,vdd-restriction-temp";
+ ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_degC);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,vdd-restriction-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_hyst_degC);
+ if (ret)
+ goto read_node_fail;
+
+ for_each_child_of_node(node, child_node) {
+ rails_cnt++;
+ }
+
+ if (rails_cnt == 0)
+ goto read_node_fail;
+ if (rails_cnt >= MAX_RAILS) {
+ pr_err("%s: Too many rails.\n", __func__);
+ return -EFAULT;
+ }
+
+ rails = kzalloc(sizeof(struct rail) * rails_cnt,
+ GFP_KERNEL);
+ if (!rails) {
+ pr_err("%s: Fail to allocate memory for rails.\n", __func__);
+ return -ENOMEM;
+ }
+
+ i = 0;
+ for_each_child_of_node(node, child_node) {
+ key = "qcom,vdd-rstr-reg";
+ ret = of_property_read_string(child_node, key, &rails[i].name);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,levels";
+ if (!of_get_property(child_node, key, &arr_size))
+ goto read_node_fail;
+ rails[i].num_levels = arr_size/sizeof(__be32);
+ if (rails[i].num_levels >
+ sizeof(rails[i].levels)/sizeof(uint32_t)) {
+ pr_err("%s: Array size too large\n", __func__);
+ return -EFAULT;
+ }
+ ret = of_property_read_u32_array(child_node, key,
+ rails[i].levels, rails[i].num_levels);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,min-level";
+ ret = of_property_read_u32(child_node, key,
+ &rails[i].min_level);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,freq-req";
+ rails[i].freq_req = of_property_read_bool(child_node, key);
+
+ rails[i].curr_level = 0;
+ rails[i].reg = NULL;
+ i++;
+ }
+
+ if (rails_cnt) {
+ ret = vdd_restriction_reg_init(pdev);
+ if (ret) {
+			pr_info("%s: Failed to get regulators. KTM continues.\n",
+ __func__);
+ goto read_node_fail;
+ }
+ vdd_rstr_enabled = true;
+ }
+read_node_fail:
+ vdd_rstr_probed = true;
+ if (ret) {
+ dev_info(&pdev->dev,
+			"%s: Failed reading node=%s, key=%s. KTM continues\n",
+ __func__, node->full_name, key);
+ kfree(rails);
+ rails_cnt = 0;
+ }
+ if (ret == -EPROBE_DEFER)
+ vdd_rstr_probed = false;
+ return ret;
+}
+
+static int probe_psm(struct device_node *node, struct msm_thermal_data *data,
+ struct platform_device *pdev)
+{
+ int ret = 0;
+ int j = 0;
+ char *key = NULL;
+
+ key = "qcom,pmic-sw-mode-temp";
+ ret = of_property_read_u32(node, key, &data->psm_temp_degC);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,pmic-sw-mode-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data->psm_temp_hyst_degC);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,pmic-sw-mode-regs";
+ psm_rails_cnt = of_property_count_strings(node, key);
+ psm_rails = kzalloc(sizeof(struct psm_rail) * psm_rails_cnt,
+ GFP_KERNEL);
+ if (!psm_rails) {
+ pr_err("%s: Fail to allocate memory for psm rails\n", __func__);
+ psm_rails_cnt = 0;
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < psm_rails_cnt; j++) {
+ ret = of_property_read_string_index(node, key, j,
+ &psm_rails[j].name);
+ if (ret)
+ goto read_node_fail;
+ }
+
+ if (psm_rails_cnt) {
+ ret = psm_reg_init(pdev);
+ if (ret) {
+			pr_info("%s: Failed to get regulators. KTM continues.\n",
+ __func__);
+ goto read_node_fail;
+ }
+ psm_enabled = true;
+ }
+
+read_node_fail:
+ psm_probed = true;
+ if (ret) {
+ dev_info(&pdev->dev,
+			"%s: Failed reading node=%s, key=%s. KTM continues\n",
+ __func__, node->full_name, key);
+ kfree(psm_rails);
+ psm_rails_cnt = 0;
+ }
+ if (ret == -EPROBE_DEFER)
+ psm_probed = false;
return ret;
}
@@ -198,14 +1354,15 @@
int ret = 0;
char *key = NULL;
struct device_node *node = pdev->dev.of_node;
+
struct msm_thermal_data data;
memset(&data, 0, sizeof(struct msm_thermal_data));
+
key = "qcom,sensor-id";
ret = of_property_read_u32(node, key, &data.sensor_id);
if (ret)
goto fail;
- WARN_ON(data.sensor_id >= TSENS_MAX_SENSORS);
key = "qcom,poll-ms";
ret = of_property_read_u32(node, key, &data.poll_ms);
@@ -224,17 +1381,50 @@
key = "qcom,freq-step";
ret = of_property_read_u32(node, key, &data.freq_step);
+ if (ret)
+ goto fail;
+ key = "qcom,core-limit-temp";
+ ret = of_property_read_u32(node, key, &data.core_limit_temp_degC);
+
+ key = "qcom,core-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data.core_temp_hysteresis_degC);
+
+ key = "qcom,core-control-mask";
+ ret = of_property_read_u32(node, key, &data.core_control_mask);
+
+	/* Probe optional properties below. Call probe_psm before
+	 * probe_vdd_rstr because rpm_regulator_get has to be called
+	 * before devm_regulator_get. */
+ ret = probe_psm(node, &data, pdev);
+ if (ret == -EPROBE_DEFER)
+ goto fail;
+ ret = probe_vdd_rstr(node, &data, pdev);
+ if (ret == -EPROBE_DEFER)
+ goto fail;
+
+	/* If the sysfs add-node calls ran before this probe function,
+	 * make sure the sysfs nodes are created now. */
+ if (psm_nodes_called) {
+ msm_thermal_add_psm_nodes();
+ psm_nodes_called = false;
+ }
+ if (vdd_rstr_nodes_called) {
+ msm_thermal_add_vdd_rstr_nodes();
+ vdd_rstr_nodes_called = false;
+ }
+ ret = msm_thermal_init(&data);
+
+ return ret;
fail:
if (ret)
pr_err("%s: Failed reading node=%s, key=%s\n",
- __func__, node->full_name, key);
- else
- ret = msm_thermal_init(&data);
+ __func__, node->full_name, key);
return ret;
}
+
static struct of_device_id msm_thermal_match_table[] = {
{.compatible = "qcom,msm-thermal"},
{},
@@ -253,3 +1443,14 @@
{
return platform_driver_register(&msm_thermal_device_driver);
}
+
+int __init msm_thermal_late_init(void)
+{
+ msm_thermal_add_cc_nodes();
+ msm_thermal_add_psm_nodes();
+ msm_thermal_add_vdd_rstr_nodes();
+
+ return 0;
+}
+late_initcall(msm_thermal_late_init);
+
diff --git a/drivers/thermal/pm8xxx-tm.c b/drivers/thermal/pm8xxx-tm.c
index 4568933..99a9454 100644
--- a/drivers/thermal/pm8xxx-tm.c
+++ b/drivers/thermal/pm8xxx-tm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -682,6 +682,13 @@
return 0;
}
+static void pm8xxx_tm_shutdown(struct platform_device *pdev)
+{
+ struct pm8xxx_tm_chip *chip = platform_get_drvdata(pdev);
+
+ pm8xxx_tm_write_pwm(chip, TEMP_ALARM_PWM_EN_NEVER);
+}
+
#ifdef CONFIG_PM
static int pm8xxx_tm_suspend(struct device *dev)
{
@@ -719,6 +726,7 @@
static struct platform_driver pm8xxx_tm_driver = {
.probe = pm8xxx_tm_probe,
.remove = __devexit_p(pm8xxx_tm_remove),
+ .shutdown = pm8xxx_tm_shutdown,
.driver = {
.name = PM8XXX_TM_DEV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index e7d2e0f..d848a18 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -36,6 +36,8 @@
/* QPNP VADC TM register definition */
#define QPNP_REVISION3 0x2
+#define QPNP_PERPH_SUBTYPE 0x5
+#define QPNP_PERPH_TYPE2 0x2
#define QPNP_REVISION_EIGHT_CHANNEL_SUPPORT 2
#define QPNP_STATUS1 0x8
#define QPNP_STATUS1_OP_MODE 4
@@ -366,7 +368,7 @@
static int32_t qpnp_adc_tm_check_revision(uint32_t btm_chan_num)
{
- u8 rev;
+ u8 rev, perph_subtype;
int rc = 0;
rc = qpnp_adc_tm_read_reg(QPNP_REVISION3, &rev);
@@ -375,10 +377,18 @@
return rc;
}
- if ((rev < QPNP_REVISION_EIGHT_CHANNEL_SUPPORT) &&
- (btm_chan_num > QPNP_ADC_TM_M4_ADC_CH_SEL_CTL)) {
- pr_debug("Version does not support more than 5 channels\n");
- return -EINVAL;
+ rc = qpnp_adc_tm_read_reg(QPNP_PERPH_SUBTYPE, &perph_subtype);
+ if (rc) {
+ pr_err("adc-tm perph_subtype read failed\n");
+ return rc;
+ }
+
+ if (perph_subtype == QPNP_PERPH_TYPE2) {
+ if ((rev < QPNP_REVISION_EIGHT_CHANNEL_SUPPORT) &&
+ (btm_chan_num > QPNP_ADC_TM_M4_ADC_CH_SEL_CTL)) {
+ pr_debug("Version does not support more than 5 channels\n");
+ return -EINVAL;
+ }
}
return rc;
@@ -1584,6 +1594,8 @@
adc_tm->sensor[sen_idx].sensor_num = sen_idx;
pr_debug("btm_chan:%x, vadc_chan:%x\n", btm_channel_num,
adc_tm->adc->adc_channels[sen_idx].channel_num);
+ thermal_node = of_property_read_bool(child,
+ "qcom,thermal-node");
if (thermal_node) {
/* Register with the thermal zone */
pr_debug("thermal node%x\n", btm_channel_num);
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 8806004..f695870 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -631,7 +631,7 @@
"pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
return ret;
}
- /* Register callback event for EOT (End of transfer) event. */
+	/* Register callback for the DESC_DONE event. */
ret = sps_register_event(sps_pipe_handle, sps_event);
if (ret) {
pr_err("msm_serial_hs: sps_connect() failed for rx!!\n"
@@ -964,7 +964,10 @@
*/
mb();
if (is_blsp_uart(msm_uport)) {
- sps_disconnect(sps_pipe_handle);
+ ret = sps_disconnect(sps_pipe_handle);
+ if (ret)
+ pr_err("%s(): sps_disconnect failed\n",
+ __func__);
msm_hs_spsconnect_rx(uport);
msm_serial_hs_rx_tlet((unsigned long) &rx->tlet);
} else {
@@ -1023,8 +1026,12 @@
disconnect_rx_endpoint);
struct msm_hs_rx *rx = &msm_uport->rx;
struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+ int ret = 0;
- sps_disconnect(sps_pipe_handle);
+ ret = sps_disconnect(sps_pipe_handle);
+ if (ret)
+ pr_err("%s(): sps_disconnect failed\n", __func__);
+
wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
msm_uport->rx.flush = FLUSH_SHUTDOWN;
wake_up(&msm_uport->rx.wait);
@@ -1158,7 +1165,7 @@
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
struct msm_hs_rx *rx = &msm_uport->rx;
struct sps_pipe *sps_pipe_handle;
- u32 flags = SPS_IOVEC_FLAG_EOT;
+ u32 flags = SPS_IOVEC_FLAG_INT;
unsigned int buffer_pending = msm_uport->rx.buffer_pending;
unsigned int data;
@@ -1284,7 +1291,7 @@
struct sps_event_notify *notify;
struct msm_hs_rx *rx;
struct sps_pipe *sps_pipe_handle;
- u32 sps_flags = SPS_IOVEC_FLAG_EOT;
+ u32 sps_flags = SPS_IOVEC_FLAG_INT;
msm_uport = container_of((struct tasklet_struct *)tlet_ptr,
struct msm_hs_port, rx.tlet);
@@ -1699,8 +1706,6 @@
int ret;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
struct circ_buf *tx_buf = &uport->state->xmit;
- struct msm_hs_rx *rx = &msm_uport->rx;
- struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
mutex_lock(&msm_uport->clk_mutex);
spin_lock_irqsave(&uport->lock, flags);
@@ -1745,7 +1750,6 @@
if (is_blsp_uart(msm_uport)) {
msm_uport->clk_req_off_state =
CLK_REQ_OFF_RXSTALE_FLUSHED;
- sps_disconnect(sps_pipe_handle);
}
mutex_unlock(&msm_uport->clk_mutex);
return 0; /* RXSTALE flush not complete - retry */
@@ -1920,6 +1924,22 @@
return IRQ_HANDLED;
}
+/*
+ * Find UART device port using its port index value.
+ */
+struct uart_port *msm_hs_get_uart_port(int port_index)
+{
+ int i;
+
+ for (i = 0; i < UARTDM_NR; i++) {
+ if (q_uart_port[i].uport.line == port_index)
+ return &q_uart_port[i].uport;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(msm_hs_get_uart_port);
+
/* request to turn off uart clock once pending TX is flushed */
void msm_hs_request_clock_off(struct uart_port *uport) {
unsigned long flags;
@@ -2320,9 +2340,7 @@
msm_hs_start_rx_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
- ret = pm_runtime_set_active(uport->dev);
- if (ret)
- dev_err(uport->dev, "set active error:%d\n", ret);
+
pm_runtime_enable(uport->dev);
return 0;
@@ -2638,7 +2656,7 @@
sps_config->mode = SPS_MODE_SRC;
sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
sps_config->dest_pipe_index = 0;
- sps_config->options = SPS_O_EOT;
+ sps_config->options = SPS_O_DESC_DONE;
} else {
/* For UART consumer transfer, source is system memory
where as destination is UART peripheral */
@@ -2666,11 +2684,14 @@
memset(sps_config->desc.base, 0x00, sps_config->desc.size);
sps_event->mode = SPS_TRIGGER_CALLBACK;
- sps_event->options = SPS_O_EOT;
- if (is_producer)
+
+ if (is_producer) {
sps_event->callback = msm_hs_sps_rx_callback;
- else
+ sps_event->options = SPS_O_DESC_DONE;
+ } else {
sps_event->callback = msm_hs_sps_tx_callback;
+ sps_event->options = SPS_O_EOT;
+ }
sps_event->user = (void *)msm_uport;
@@ -3144,7 +3165,10 @@
pr_err("%s():HSUART TX Stalls.\n", __func__);
} else {
/* BAM Disconnect for TX */
- sps_disconnect(sps_pipe_handle);
+ ret = sps_disconnect(sps_pipe_handle);
+ if (ret)
+ pr_err("%s(): sps_disconnect failed\n",
+ __func__);
}
}
tasklet_kill(&msm_uport->tx.tlet);
@@ -3154,7 +3178,6 @@
cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
flush_workqueue(msm_uport->hsuart_wq);
pm_runtime_disable(uport->dev);
- pm_runtime_set_suspended(uport->dev);
/* Disable the transmitter */
msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 954848e..7aa14de 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -1706,6 +1706,30 @@
port->uartclk = 7372800;
msm_hsl_port = UART_TO_MSM(port);
+ msm_hsl_port->clk = clk_get(&pdev->dev, "core_clk");
+ if (unlikely(IS_ERR(msm_hsl_port->clk))) {
+ ret = PTR_ERR(msm_hsl_port->clk);
+ if (ret != -EPROBE_DEFER)
+ pr_err("Error getting clk\n");
+ return ret;
+ }
+
+	/* The interface clock is not required by all UART configurations.
+	 * GSBI and BLSP UARTs need the interface clock, but legacy UARTs
+	 * do not. Hence, do not fail the probe on an iface clk_get failure.
+	 */
+ msm_hsl_port->pclk = clk_get(&pdev->dev, "iface_clk");
+ if (unlikely(IS_ERR(msm_hsl_port->pclk))) {
+ ret = PTR_ERR(msm_hsl_port->pclk);
+ if (ret == -EPROBE_DEFER) {
+ clk_put(msm_hsl_port->clk);
+ return ret;
+ } else {
+ msm_hsl_port->pclk = NULL;
+ }
+ }
+
/* Identify UART functional mode as 2-wire or 4-wire. */
if (pdata && pdata->config_gpio == 4)
msm_hsl_port->func_mode = UART_FOUR_WIRE;
@@ -1743,22 +1767,12 @@
"gsbi_resource");
if (!gsbi_resource)
gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- msm_hsl_port->clk = clk_get(&pdev->dev, "core_clk");
- msm_hsl_port->pclk = clk_get(&pdev->dev, "iface_clk");
if (gsbi_resource)
msm_hsl_port->uart_type = GSBI_HSUART;
else
msm_hsl_port->uart_type = LEGACY_HSUART;
- if (unlikely(IS_ERR(msm_hsl_port->clk))) {
- pr_err("Error getting clk\n");
- return PTR_ERR(msm_hsl_port->clk);
- }
- if (unlikely(IS_ERR(msm_hsl_port->pclk))) {
- pr_err("Error getting pclk\n");
- return PTR_ERR(msm_hsl_port->pclk);
- }
uart_resource = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
index 2e091cc..1b3a7abe 100644
--- a/drivers/tty/smux_ctl.c
+++ b/drivers/tty/smux_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -940,6 +940,7 @@
static int smux_ctl_remove(struct platform_device *pdev)
{
int i;
+ int ret;
SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
@@ -950,6 +951,13 @@
devp->abort_wait = 1;
wake_up(&devp->write_wait_queue);
wake_up(&devp->read_wait_queue);
+
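+	/* Channel still held open by a client; force it closed before removal */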
+ if (atomic_read(&devp->ref_count)) {
+ ret = msm_smux_close(devp->id);
+ if (ret)
+ pr_err("%s: unable to close ch %d, ret %d\n",
+ __func__, devp->id, ret);
+ }
mutex_unlock(&devp->dev_lock);
/* Empty RX queue */
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 6619e96..fab5219 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -475,6 +475,7 @@
void *mem;
u8 mode;
+ bool host_only_mode;
mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
if (!mem) {
@@ -487,7 +488,7 @@
if (!dev->dma_mask)
dev->dma_mask = &dwc3_dma_mask;
if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ dev->coherent_dma_mask = DMA_BIT_MASK(64);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -548,6 +549,7 @@
dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
+ host_only_mode = of_property_read_bool(node, "host-only-mode");
pm_runtime_no_callbacks(dev);
pm_runtime_set_active(dev);
@@ -561,6 +563,12 @@
mode = DWC3_MODE(dwc->hwparams.hwparams0);
+ /* Override mode if user selects host-only config with DRD core */
+ if (host_only_mode && (mode == DWC3_MODE_DRD)) {
+ dev_dbg(dev, "host only mode selected\n");
+ mode = DWC3_MODE_HOST;
+ }
+
switch (mode) {
case DWC3_MODE_DEVICE:
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 93504eb..df95646 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -693,9 +693,10 @@
list_for_each(ptr, &dep->request_list) {
req = list_entry(ptr, struct dwc3_request, list);
- seq_printf(s, "req:0x%p len: %d sts: %d dma:0x%x num_sgs: %d\n",
+ seq_printf(s,
+			"req:0x%p len: %d sts: %d dma:%pa num_sgs: %d\n",
req, req->request.length, req->request.status,
- req->request.dma, req->request.num_sgs);
+ &req->request.dma, req->request.num_sgs);
}
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -731,9 +732,10 @@
list_for_each(ptr, &dep->req_queued) {
req = list_entry(ptr, struct dwc3_request, list);
- seq_printf(s, "req:0x%p len:%d sts:%d dma:%x nsg:%d trb:0x%p\n",
+ seq_printf(s,
+ "req:0x%p len:%d sts:%d dma:%pa nsg:%d trb:0x%p\n",
req, req->request.length, req->request.status,
- req->request.dma, req->request.num_sgs, req->trb);
+ &req->request.dma, req->request.num_sgs, req->trb);
}
spin_unlock_irqrestore(&dwc->lock, flags);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 435ef3b..924e8f4 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -127,6 +127,8 @@
#define DBM_TRB_DMA 0x20000000
#define DBM_TRB_EP_NUM(ep) (ep<<24)
+#define USB3_PORTSC (0x430)
+#define PORT_PE (0x1 << 1)
/**
* USB QSCRATCH Hardware registers
*
@@ -177,6 +179,9 @@
struct regulator *hsusb_vddcx;
struct regulator *ssusb_1p8;
struct regulator *ssusb_vddcx;
+
+ /* VBUS regulator if no OTG and running in host only mode */
+ struct regulator *vbus_otg;
struct dwc3_ext_xceiv ext_xceiv;
bool resume_pending;
atomic_t pm_suspended;
@@ -210,6 +215,9 @@
bool vbus_active;
bool ext_inuse;
enum dwc3_id_state id_state;
+ unsigned long lpm_flags;
+#define MDWC3_CORECLK_OFF BIT(0)
+#define MDWC3_TCXO_SHUTDOWN BIT(1)
};
#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
@@ -1254,50 +1262,11 @@
return ret;
}
-/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
-static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *msm)
+/* Reinitialize SSPHY parameters by overriding using QSCRATCH CR interface */
+static void dwc3_msm_ss_phy_reg_init(struct dwc3_msm *msm)
{
u32 data = 0;
- /* SSPHY Initialization: Use ref_clk from pads and set its parameters */
- dwc3_msm_write_reg(msm->base, SS_PHY_CTRL_REG, 0x10210002);
- msleep(30);
- /* Assert SSPHY reset */
- dwc3_msm_write_reg(msm->base, SS_PHY_CTRL_REG, 0x10210082);
- usleep_range(2000, 2200);
- /* De-assert SSPHY reset - power and ref_clock must be ON */
- dwc3_msm_write_reg(msm->base, SS_PHY_CTRL_REG, 0x10210002);
- usleep_range(2000, 2200);
- /* Ref clock must be stable now, enable ref clock for HS mode */
- dwc3_msm_write_reg(msm->base, SS_PHY_CTRL_REG, 0x10210102);
- usleep_range(2000, 2200);
- /*
- * HSPHY Initialization: Enable UTMI clock and clamp enable HVINTs,
- * and disable RETENTION (power-on default is ENABLED)
- */
- dwc3_msm_write_reg(msm->base, HS_PHY_CTRL_REG, 0x5220bb2);
- usleep_range(2000, 2200);
- /* Disable (bypass) VBUS and ID filters */
- dwc3_msm_write_reg(msm->base, QSCRATCH_GENERAL_CFG, 0x78);
- /*
- * write HSPHY init value to QSCRATCH reg to set HSPHY parameters like
- * VBUS valid threshold, disconnect valid threshold, DC voltage level,
- * preempasis and rise/fall time.
- */
- if (override_phy_init)
- msm->hsphy_init_seq = override_phy_init;
- if (msm->hsphy_init_seq)
- dwc3_msm_write_readback(msm->base,
- PARAMETER_OVERRIDE_X_REG, 0x03FFFFFF,
- msm->hsphy_init_seq & 0x03FFFFFF);
-
- /* Enable master clock for RAMs to allow BAM to access RAMs when
- * RAM clock gating is enabled via DWC3's GCTL. Otherwise, issues
- * are seen where RAM clocks get turned OFF in SS mode
- */
- dwc3_msm_write_reg(msm->base, CGCTL_REG,
- dwc3_msm_read_reg(msm->base, CGCTL_REG) | 0x18);
-
/*
* WORKAROUND: There is SSPHY suspend bug due to which USB enumerates
* in HS mode instead of SS mode. Workaround it by asserting
@@ -1344,6 +1313,51 @@
dwc3_msm_write_readback(msm->base, SS_PHY_PARAM_CTRL_1, 0x07, 0x5);
}
+/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
+static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *msm)
+{
+ /* SSPHY Initialization: Use ref_clk from pads and set its parameters */
+ dwc3_msm_write_reg(msm->base, SS_PHY_CTRL_REG, 0x10210002);
+ msleep(30);
+ /* Assert SSPHY reset */
+ dwc3_msm_write_reg(msm->base, SS_PHY_CTRL_REG, 0x10210082);
+ usleep_range(2000, 2200);
+ /* De-assert SSPHY reset - power and ref_clock must be ON */
+ dwc3_msm_write_reg(msm->base, SS_PHY_CTRL_REG, 0x10210002);
+ usleep_range(2000, 2200);
+ /* Ref clock must be stable now, enable ref clock for HS mode */
+ dwc3_msm_write_reg(msm->base, SS_PHY_CTRL_REG, 0x10210102);
+ usleep_range(2000, 2200);
+ /*
+ * HSPHY Initialization: Enable UTMI clock and clamp enable HVINTs,
+ * and disable RETENTION (power-on default is ENABLED)
+ */
+ dwc3_msm_write_reg(msm->base, HS_PHY_CTRL_REG, 0x5220bb2);
+ usleep_range(2000, 2200);
+ /* Disable (bypass) VBUS and ID filters */
+ dwc3_msm_write_reg(msm->base, QSCRATCH_GENERAL_CFG, 0x78);
+ /*
+	 * Write the HSPHY init value to the QSCRATCH reg to set HSPHY
+	 * parameters like VBUS valid threshold, disconnect valid threshold,
+	 * DC voltage level, pre-emphasis and rise/fall time.
+ */
+ if (override_phy_init)
+ msm->hsphy_init_seq = override_phy_init;
+ if (msm->hsphy_init_seq)
+ dwc3_msm_write_readback(msm->base,
+ PARAMETER_OVERRIDE_X_REG, 0x03FFFFFF,
+ msm->hsphy_init_seq & 0x03FFFFFF);
+
+ /* Enable master clock for RAMs to allow BAM to access RAMs when
+ * RAM clock gating is enabled via DWC3's GCTL. Otherwise, issues
+ * are seen where RAM clocks get turned OFF in SS mode
+ */
+ dwc3_msm_write_reg(msm->base, CGCTL_REG,
+ dwc3_msm_read_reg(msm->base, CGCTL_REG) | 0x18);
+
+ dwc3_msm_ss_phy_reg_init(msm);
+}
+
static void dwc3_msm_block_reset(bool core_reset)
{
@@ -1579,6 +1593,7 @@
int ret;
bool dcp;
bool host_bus_suspend;
+ bool host_ss_active;
dev_dbg(mdwc->dev, "%s: entering lpm\n", __func__);
@@ -1587,6 +1602,7 @@
return 0;
}
+ host_ss_active = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC) & PORT_PE;
if (mdwc->hs_phy_irq)
disable_irq(mdwc->hs_phy_irq);
@@ -1600,7 +1616,8 @@
0x37, 0x0);
}
- dcp = mdwc->charger.chg_type == DWC3_DCP_CHARGER;
+ dcp = ((mdwc->charger.chg_type == DWC3_DCP_CHARGER) ||
+ (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER));
host_bus_suspend = mdwc->host_mode == 1;
/* Sequence to put SSPHY in low power state:
@@ -1655,14 +1672,19 @@
/* make sure above writes are completed before turning off clocks */
wmb();
- clk_disable_unprepare(mdwc->core_clk);
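+	/* Keep core_clk on only for host bus suspend with an active SS port */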
+ if (!host_bus_suspend || !host_ss_active) {
+ clk_disable_unprepare(mdwc->core_clk);
+ mdwc->lpm_flags |= MDWC3_CORECLK_OFF;
+ }
clk_disable_unprepare(mdwc->iface_clk);
- if (!host_bus_suspend) {
+ if (!host_bus_suspend)
clk_disable_unprepare(mdwc->utmi_clk);
+ if (!host_bus_suspend) {
/* USB PHY no more requires TCXO */
clk_disable_unprepare(mdwc->xo_clk);
+ mdwc->lpm_flags |= MDWC3_TCXO_SHUTDOWN;
}
if (mdwc->bus_perf_client) {
@@ -1678,15 +1700,19 @@
dwc3_ssusb_ldo_enable(0);
dwc3_ssusb_config_vddcx(0);
- if (!host_bus_suspend)
+ if (!host_bus_suspend && !dcp)
dwc3_hsusb_config_vddcx(0);
wake_unlock(&mdwc->wlock);
atomic_set(&mdwc->in_lpm, 1);
dev_info(mdwc->dev, "DWC3 in low power mode\n");
- if (mdwc->hs_phy_irq)
+ if (mdwc->hs_phy_irq) {
enable_irq(mdwc->hs_phy_irq);
+		/* with DCP we don't require wakeup using HS_PHY_IRQ */
+ if (dcp)
+ disable_irq_wake(mdwc->hs_phy_irq);
+ }
return 0;
}
@@ -1713,17 +1739,22 @@
dev_err(mdwc->dev, "Failed to vote for bus scaling\n");
}
- dcp = mdwc->charger.chg_type == DWC3_DCP_CHARGER;
+ dcp = ((mdwc->charger.chg_type == DWC3_DCP_CHARGER) ||
+ (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER));
host_bus_suspend = mdwc->host_mode == 1;
- if (!host_bus_suspend) {
+ if (mdwc->lpm_flags & MDWC3_TCXO_SHUTDOWN) {
/* Vote for TCXO while waking up USB HSPHY */
ret = clk_prepare_enable(mdwc->xo_clk);
if (ret)
dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
__func__, ret);
+ mdwc->lpm_flags &= ~MDWC3_TCXO_SHUTDOWN;
}
+ if (!host_bus_suspend)
+ clk_prepare_enable(mdwc->utmi_clk);
+
if (mdwc->otg_xceiv && mdwc->ext_xceiv.otg_capability && !dcp &&
!host_bus_suspend)
dwc3_hsusb_ldo_enable(1);
@@ -1731,16 +1762,17 @@
dwc3_ssusb_ldo_enable(1);
dwc3_ssusb_config_vddcx(1);
- if (!host_bus_suspend) {
+ if (!host_bus_suspend && !dcp)
dwc3_hsusb_config_vddcx(1);
- clk_prepare_enable(mdwc->utmi_clk);
- }
clk_prepare_enable(mdwc->ref_clk);
usleep_range(1000, 1200);
clk_prepare_enable(mdwc->iface_clk);
- clk_prepare_enable(mdwc->core_clk);
+ if (mdwc->lpm_flags & MDWC3_CORECLK_OFF) {
+ clk_prepare_enable(mdwc->core_clk);
+ mdwc->lpm_flags &= ~MDWC3_CORECLK_OFF;
+ }
if (host_bus_suspend) {
/* Disable HV interrupt */
@@ -1791,6 +1823,11 @@
udelay(10);
dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG, (1 << 7), 0x0);
+ /*
+	 * Reinitialize SSPHY parameters as SS_PHY RESET will reset
+ * the internal registers to default values.
+ */
+ dwc3_msm_ss_phy_reg_init(mdwc);
atomic_set(&mdwc->in_lpm, 0);
/* match disable_irq call from isr */
@@ -1798,6 +1835,9 @@
enable_irq(mdwc->hs_phy_irq);
mdwc->lpm_irq_seen = false;
}
+	/* It must be a DCP disconnect; re-enable the HS_PHY wakeup IRQ */
+ if (mdwc->hs_phy_irq && dcp)
+ enable_irq_wake(mdwc->hs_phy_irq);
dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
@@ -1827,7 +1867,7 @@
if (mdwc->otg_xceiv)
mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
DWC3_EVENT_PHY_RESUME);
- pm_runtime_put_sync(mdwc->dev);
+ pm_runtime_put_noidle(mdwc->dev);
if (mdwc->otg_xceiv && (mdwc->ext_xceiv.otg_capability))
mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
DWC3_EVENT_XCEIV_STATE);
@@ -2434,10 +2474,14 @@
dev_err(&pdev->dev, "irqreq IDINT failed\n");
goto disable_hs_ldo;
}
+
local_irq_save(flags);
/* Update initial ID state */
- msm->id_state = msm->ext_xceiv.id =
+ msm->id_state =
!!irq_read_line(msm->pmic_id_irq);
+ if (msm->id_state == DWC3_ID_GROUND)
+ queue_work(system_nrt_wq,
+ &msm->id_work);
local_irq_restore(flags);
enable_irq_wake(msm->pmic_id_irq);
}
@@ -2514,24 +2558,28 @@
goto disable_hs_ldo;
}
- msm->usb_psy.name = "usb";
- msm->usb_psy.type = POWER_SUPPLY_TYPE_USB;
- msm->usb_psy.supplied_to = dwc3_msm_pm_power_supplied_to;
- msm->usb_psy.num_supplicants = ARRAY_SIZE(
- dwc3_msm_pm_power_supplied_to);
- msm->usb_psy.properties = dwc3_msm_pm_power_props_usb;
- msm->usb_psy.num_properties = ARRAY_SIZE(dwc3_msm_pm_power_props_usb);
- msm->usb_psy.get_property = dwc3_msm_power_get_property_usb;
- msm->usb_psy.set_property = dwc3_msm_power_set_property_usb;
- msm->usb_psy.external_power_changed =
- dwc3_msm_external_power_changed;
+ /* usb_psy required only for vbus_notifications or charging support */
+ if (msm->ext_xceiv.otg_capability || !msm->charger.charging_disabled) {
+ msm->usb_psy.name = "usb";
+ msm->usb_psy.type = POWER_SUPPLY_TYPE_USB;
+ msm->usb_psy.supplied_to = dwc3_msm_pm_power_supplied_to;
+ msm->usb_psy.num_supplicants = ARRAY_SIZE(
+ dwc3_msm_pm_power_supplied_to);
+ msm->usb_psy.properties = dwc3_msm_pm_power_props_usb;
+ msm->usb_psy.num_properties =
+ ARRAY_SIZE(dwc3_msm_pm_power_props_usb);
+ msm->usb_psy.get_property = dwc3_msm_power_get_property_usb;
+ msm->usb_psy.set_property = dwc3_msm_power_set_property_usb;
+ msm->usb_psy.external_power_changed =
+ dwc3_msm_external_power_changed;
- ret = power_supply_register(&pdev->dev, &msm->usb_psy);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "%s:power_supply_register usb failed\n",
- __func__);
- goto disable_hs_ldo;
+ ret = power_supply_register(&pdev->dev, &msm->usb_psy);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "%s:power_supply_register usb failed\n",
+ __func__);
+ goto disable_hs_ldo;
+ }
}
if (node) {
@@ -2556,7 +2604,8 @@
}
msm->otg_xceiv = usb_get_transceiver();
- if (msm->otg_xceiv) {
+ /* Register with OTG if present, ignore USB2 OTG using other PHY */
+ if (msm->otg_xceiv && !(msm->otg_xceiv->flags & ENABLE_SECONDARY_PHY)) {
msm->charger.start_detection = dwc3_start_chg_det;
ret = dwc3_set_charger(msm->otg_xceiv->otg, &msm->charger);
if (ret || !msm->charger.notify_detection_complete) {
@@ -2574,7 +2623,20 @@
goto put_xcvr;
}
} else {
- dev_err(&pdev->dev, "%s: No OTG transceiver found\n", __func__);
+ dev_dbg(&pdev->dev, "No OTG, DWC3 running in host only mode\n");
+ msm->host_mode = 1;
+ msm->vbus_otg = devm_regulator_get(&pdev->dev, "vbus_dwc3");
+ if (IS_ERR(msm->vbus_otg)) {
+ dev_dbg(&pdev->dev, "Failed to get vbus regulator\n");
+			msm->vbus_otg = NULL;
+ } else {
+ ret = regulator_enable(msm->vbus_otg);
+ if (ret) {
+				msm->vbus_otg = NULL;
+ dev_err(&pdev->dev, "Failed to enable vbus_otg\n");
+ }
+ }
+ msm->otg_xceiv = NULL;
}
wake_lock_init(&msm->wlock, WAKE_LOCK_SUSPEND, "msm_dwc3");
@@ -2586,7 +2648,8 @@
put_xcvr:
usb_put_transceiver(msm->otg_xceiv);
put_psupply:
- power_supply_unregister(&msm->usb_psy);
+ if (msm->usb_psy.dev)
+ power_supply_unregister(&msm->usb_psy);
disable_hs_ldo:
dwc3_hsusb_ldo_enable(0);
free_hs_ldo_init:
@@ -2635,6 +2698,10 @@
dwc3_start_chg_det(&msm->charger, false);
usb_put_transceiver(msm->otg_xceiv);
}
+ if (msm->usb_psy.dev)
+ power_supply_unregister(&msm->usb_psy);
+ if (msm->vbus_otg)
+ regulator_disable(msm->vbus_otg);
pm_runtime_disable(msm->dev);
wake_lock_destroy(&msm->wlock);
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 1d67cee..a3b2617 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -198,8 +198,6 @@
} else {
dev_dbg(otg->phy->dev, "%s: turn off host\n", __func__);
- platform_device_del(dwc->xhci);
-
ret = regulator_disable(dotg->vbus_otg);
if (ret) {
dev_err(otg->phy->dev, "unable to disable vbus_otg\n");
@@ -207,6 +205,7 @@
}
dwc3_otg_notify_host_mode(otg, on);
+ platform_device_del(dwc->xhci);
/*
* Perform USB hardware RESET (both core reset and DBM reset)
* when moving from host to peripheral. This is required for
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 66854b2..8d2ec97 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -393,7 +393,6 @@
u32 recip;
u32 wValue;
u32 wIndex;
- u32 reg;
int ret;
wValue = le16_to_cpu(ctrl->wValue);
@@ -414,13 +413,6 @@
return -EINVAL;
if (dwc->speed != DWC3_DSTS_SUPERSPEED)
return -EINVAL;
-
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (set)
- reg |= DWC3_DCTL_INITU1ENA;
- else
- reg &= ~DWC3_DCTL_INITU1ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
break;
case USB_DEVICE_U2_ENABLE:
@@ -428,13 +420,6 @@
return -EINVAL;
if (dwc->speed != DWC3_DSTS_SUPERSPEED)
return -EINVAL;
-
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (set)
- reg |= DWC3_DCTL_INITU2ENA;
- else
- reg &= ~DWC3_DCTL_INITU2ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
break;
case USB_DEVICE_LTM_ENABLE:
@@ -539,7 +524,6 @@
{
u32 cfg;
int ret;
- u32 reg;
dwc->start_config_issued = false;
cfg = le16_to_cpu(ctrl->wValue);
@@ -554,14 +538,6 @@
/* if the cfg matches and the cfg is non zero */
if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
dwc->dev_state = DWC3_CONFIGURED_STATE;
- /*
- * Enable transition to U1/U2 state when
- * nothing is pending from application.
- */
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-
dwc->resize_fifos = true;
dev_dbg(dwc->dev, "resize fifos flag SET\n");
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f060718..c08a259 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2169,7 +2169,7 @@
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms);
WARN_ON_ONCE(ret);
dep->resource_index = 0;
-
+ dep->flags &= ~DWC3_EP_BUSY;
udelay(100);
}
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index d6d8a76..420d030 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -79,6 +79,7 @@
/* Add XHCI device if !OTG, otherwise OTG takes care of this */
if (!dwc->dotg) {
+ xhci->dev.parent = dwc->dev;
ret = platform_device_add(xhci);
if (ret) {
dev_err(dwc->dev, "failed to register xHCI device\n");
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index d4bdf99..705600d 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -53,6 +53,7 @@
#include "f_rmnet_sdio.c"
#include "f_rmnet_smd_sdio.c"
#include "f_rmnet.c"
+#include "f_gps.c"
#ifdef CONFIG_SND_PCM
#include "f_audio_source.c"
#endif
@@ -731,6 +732,47 @@
.attributes = rmnet_function_attributes,
};
+static void gps_function_cleanup(struct android_usb_function *f)
+{
+ gps_cleanup();
+}
+
+static int gps_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ int err;
+ static int gps_initialized;
+
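+	/* Initialize the GPS port layer only once, on the first bind */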
+ if (!gps_initialized) {
+ gps_initialized = 1;
+ err = gps_init_port();
+ if (err) {
+			pr_err("gps: Cannot init gps port\n");
+ return err;
+ }
+ }
+
+ err = gps_gport_setup();
+ if (err) {
+		pr_err("gps: Cannot setup transports\n");
+ return err;
+ }
+ err = gps_bind_config(c);
+ if (err) {
+ pr_err("Could not bind gps config\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static struct android_usb_function gps_function = {
+ .name = "gps",
+ .cleanup = gps_function_cleanup,
+ .bind_config = gps_function_bind_config,
+};
+
+
/* ecm transport string */
static char ecm_transports[MAX_XPORT_STR_LEN];
@@ -1783,6 +1825,7 @@
&rmnet_sdio_function,
&rmnet_smd_sdio_function,
&rmnet_function,
+ &gps_function,
&diag_function,
&qdss_function,
&serial_function,
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 569f200..3cad3ce 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -98,6 +98,32 @@
}
}
+static void ci13xxx_msm_reset(void)
+{
+ struct ci13xxx *udc = _udc;
+ struct usb_phy *phy = udc->transceiver;
+ struct device *dev = udc->gadget.dev.parent;
+
+ writel_relaxed(0, USB_AHBBURST);
+ writel_relaxed(0x08, USB_AHBMODE);
+
+ if (phy && (phy->flags & ENABLE_SECONDARY_PHY)) {
+ int temp;
+
+ dev_dbg(dev, "using secondary hsphy\n");
+ temp = readl_relaxed(USB_PHY_CTRL2);
+ temp |= (1<<16);
+ writel_relaxed(temp, USB_PHY_CTRL2);
+
+ /*
+ * Add memory barrier to make sure above LINK writes are
+ * complete before moving ahead with USB peripheral mode
+ * enumeration.
+ */
+ mb();
+ }
+}
+
static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event)
{
struct device *dev = udc->gadget.dev.parent;
@@ -105,8 +131,7 @@
switch (event) {
case CI13XXX_CONTROLLER_RESET_EVENT:
dev_info(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
- writel(0, USB_AHBBURST);
- writel_relaxed(0x08, USB_AHBMODE);
+ ci13xxx_msm_reset();
break;
case CI13XXX_CONTROLLER_DISCONNECT_EVENT:
dev_info(dev, "CI13XXX_CONTROLLER_DISCONNECT_EVENT received\n");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index e0255ce..d0ebda1 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -66,7 +66,8 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/msm_hsusb.h>
-
+#include <linux/tracepoint.h>
+#include <mach/usb_trace.h>
#include "ci13xxx_udc.h"
/* Turns on streaming. overrides CI13XXX_DISABLE_STREAMING */
@@ -139,6 +140,21 @@
return n ? n-1 : 32;
}
+struct ci13xxx_ebi_err_entry {
+ u32 *usb_req_buf;
+ u32 usb_req_length;
+ u32 ep_info;
+ struct ci13xxx_ebi_err_entry *next;
+};
+
+struct ci13xxx_ebi_err_data {
+ u32 ebi_err_addr;
+ u32 apkt0;
+ u32 apkt1;
+ struct ci13xxx_ebi_err_entry *ebi_err_entry;
+};
+static struct ci13xxx_ebi_err_data *ebi_err_data;
+
/******************************************************************************
* HW block
*****************************************************************************/
@@ -429,7 +445,8 @@
int n = hw_ep_bit(num, dir);
struct ci13xxx_ep *mEp = &_udc->ci13xxx_ep[n];
- if (_udc->skip_flush || list_empty(&mEp->qh.queue))
+ /* Flush ep0 even when queue is empty */
+ if (_udc->skip_flush || (num && list_empty(&mEp->qh.queue)))
return 0;
start = ktime_get();
@@ -1738,6 +1755,72 @@
return 0;
}
+static void dump_usb_info(void *ignore, unsigned int ebi_addr,
+ unsigned int ebi_apacket0, unsigned int ebi_apacket1)
+{
+ struct ci13xxx *udc = _udc;
+ unsigned long flags;
+ struct list_head *ptr = NULL;
+ struct ci13xxx_req *req = NULL;
+ struct ci13xxx_ep *mEp;
+ unsigned i;
+ struct ci13xxx_ebi_err_entry *temp_dump;
+ static int count;
+ u32 epdir = 0;
+
+ if (count)
+ return;
+ count++;
+
+ pr_info("%s: USB EBI error detected\n", __func__);
+
+ ebi_err_data = kmalloc(sizeof(struct ci13xxx_ebi_err_data),
+ GFP_ATOMIC);
+ if (!ebi_err_data) {
+ pr_err("%s: memory alloc failed for ebi_err_data\n", __func__);
+ return;
+ }
+
+ ebi_err_data->ebi_err_entry = kmalloc(
+ sizeof(struct ci13xxx_ebi_err_entry),
+ GFP_ATOMIC);
+ if (!ebi_err_data->ebi_err_entry) {
+ kfree(ebi_err_data);
+ pr_err("%s: memory alloc failed for ebi_err_entry\n", __func__);
+ return;
+ }
+
+ ebi_err_data->ebi_err_addr = ebi_addr;
+ ebi_err_data->apkt0 = ebi_apacket0;
+ ebi_err_data->apkt1 = ebi_apacket1;
+
+ temp_dump = ebi_err_data->ebi_err_entry;
+	pr_info("Dumping USB request information:\n");
+ spin_lock_irqsave(udc->lock, flags);
+ for (i = 0; i < hw_ep_max; i++) {
+ list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue) {
+ mEp = &udc->ci13xxx_ep[i];
+ req = list_entry(ptr, struct ci13xxx_req, queue);
+
+ temp_dump->usb_req_buf = req->req.buf;
+ temp_dump->usb_req_length = req->req.length;
+ epdir = mEp->dir;
+ temp_dump->ep_info = mEp->num | (epdir << 15);
+
+ temp_dump->next = kmalloc(
+ sizeof(struct ci13xxx_ebi_err_entry),
+ GFP_ATOMIC);
+ if (!temp_dump->next) {
+ pr_err("%s: memory alloc failed\n", __func__);
+ spin_unlock_irqrestore(udc->lock, flags);
+ return;
+ }
+ temp_dump = temp_dump->next;
+ }
+ }
+ spin_unlock_irqrestore(udc->lock, flags);
+}
+
/******************************************************************************
* UTIL block
*****************************************************************************/
@@ -2025,7 +2108,18 @@
if (mReq->zptr) {
if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
return -EBUSY;
- dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
+
+ /* The controller may access this dTD one more time.
+		 * Defer freeing it until the next zero-length dTD completes.
+		 * It is safe to assume that the controller will no longer
+		 * access the previous dTD after the next dTD completion.
+ */
+ if (mEp->last_zptr)
+ dma_pool_free(mEp->td_pool, mEp->last_zptr,
+ mEp->last_zdma);
+ mEp->last_zptr = mReq->zptr;
+ mEp->last_zdma = mReq->zdma;
+
mReq->zptr = NULL;
}
@@ -2177,9 +2271,10 @@
usb_ep_fifo_flush(&udc->ep0out.ep);
usb_ep_fifo_flush(&udc->ep0in.ep);
- if (udc->status != NULL) {
- usb_ep_free_request(&udc->ep0in.ep, udc->status);
- udc->status = NULL;
+ if (udc->ep0in.last_zptr) {
+ dma_pool_free(udc->ep0in.td_pool, udc->ep0in.last_zptr,
+ udc->ep0in.last_zdma);
+ udc->ep0in.last_zptr = NULL;
}
return 0;
@@ -2234,10 +2329,6 @@
if (retval)
goto done;
- udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC);
- if (udc->status == NULL)
- retval = -ENOMEM;
-
spin_lock(udc->lock);
done:
@@ -2306,8 +2397,8 @@
return;
}
- kfree(req->buf);
- usb_ep_free_request(ep, req);
+ if (req->status)
+ err("GET_STATUS failed");
}
/**
@@ -2323,8 +2414,7 @@
__acquires(mEp->lock)
{
struct ci13xxx_ep *mEp = &udc->ep0in;
- struct usb_request *req = NULL;
- gfp_t gfp_flags = GFP_ATOMIC;
+ struct usb_request *req = udc->status;
int dir, num, retval;
trace("%p, %p", mEp, setup);
@@ -2332,19 +2422,9 @@
if (mEp == NULL || setup == NULL)
return -EINVAL;
- spin_unlock(mEp->lock);
- req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
- spin_lock(mEp->lock);
- if (req == NULL)
- return -ENOMEM;
-
req->complete = isr_get_status_complete;
req->length = 2;
- req->buf = kzalloc(req->length, gfp_flags);
- if (req->buf == NULL) {
- retval = -ENOMEM;
- goto err_free_req;
- }
+ req->buf = udc->status_buf;
if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
if (setup->wIndex == OTG_STATUS_SELECTOR) {
@@ -2367,18 +2447,7 @@
/* else do nothing; reserved for future use */
spin_unlock(mEp->lock);
- retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
- spin_lock(mEp->lock);
- if (retval)
- goto err_free_buf;
-
- return 0;
-
- err_free_buf:
- kfree(req->buf);
- err_free_req:
- spin_unlock(mEp->lock);
- usb_ep_free_request(&mEp->ep, req);
+ retval = usb_ep_queue(&mEp->ep, req, GFP_ATOMIC);
spin_lock(mEp->lock);
return retval;
}
@@ -2421,11 +2490,9 @@
trace("%p", udc);
mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
- if (udc->status) {
- udc->status->context = udc;
- udc->status->complete = isr_setup_status_complete;
- } else
- return -EINVAL;
+ udc->status->context = udc;
+ udc->status->complete = isr_setup_status_complete;
+ udc->status->length = 0;
spin_unlock(mEp->lock);
retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
@@ -2859,6 +2926,12 @@
} while (mEp->dir != direction);
+ if (mEp->last_zptr) {
+ dma_pool_free(mEp->td_pool, mEp->last_zptr,
+ mEp->last_zdma);
+ mEp->last_zptr = NULL;
+ }
+
mEp->desc = NULL;
mEp->ep.desc = NULL;
mEp->ep.maxpacket = USHRT_MAX;
@@ -3393,6 +3466,14 @@
retval = usb_ep_enable(&udc->ep0in.ep);
if (retval)
return retval;
+ udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_KERNEL);
+ if (!udc->status)
+ return -ENOMEM;
+ udc->status_buf = kzalloc(2, GFP_KERNEL); /* for GET_STATUS */
+ if (!udc->status_buf) {
+ usb_ep_free_request(&udc->ep0in.ep, udc->status);
+ return -ENOMEM;
+ }
spin_lock_irqsave(udc->lock, flags);
udc->gadget.ep0 = &udc->ep0in.ep;
@@ -3476,6 +3557,9 @@
driver->unbind(&udc->gadget); /* MAY SLEEP */
spin_lock_irqsave(udc->lock, flags);
+ usb_ep_free_request(&udc->ep0in.ep, udc->status);
+ kfree(udc->status_buf);
+
udc->gadget.dev.driver = NULL;
/* free resources */
@@ -3692,6 +3776,11 @@
pm_runtime_no_callbacks(&udc->gadget.dev);
pm_runtime_enable(&udc->gadget.dev);
+ retval = register_trace_usb_daytona_invalid_access(dump_usb_info,
+ NULL);
+ if (retval)
+ pr_err("Registering trace failed\n");
+
_udc = udc;
return retval;
@@ -3725,11 +3814,17 @@
static void udc_remove(void)
{
struct ci13xxx *udc = _udc;
+ int retval;
if (udc == NULL) {
err("EINVAL");
return;
}
+ retval = unregister_trace_usb_daytona_invalid_access(dump_usb_info,
+ NULL);
+ if (retval)
+ pr_err("Unregistering trace failed\n");
+
usb_del_gadget_udc(&udc->gadget);
if (udc->transceiver) {
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 3145418..1530474 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -115,6 +115,8 @@
spinlock_t *lock;
struct device *device;
struct dma_pool *td_pool;
+ struct ci13xxx_td *last_zptr;
+ dma_addr_t last_zdma;
unsigned long dTD_update_fail_count;
unsigned long prime_fail_count;
int prime_timer_count;
@@ -153,6 +155,7 @@
struct dma_pool *qh_pool; /* DMA pool for queue heads */
struct dma_pool *td_pool; /* DMA pool for transfer descs */
struct usb_request *status; /* ep0 status request */
+ void *status_buf;/* GET_STATUS buffer */
struct usb_gadget gadget; /* USB slave device */
struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
index 3b1843b..3355e19 100644
--- a/drivers/usb/gadget/f_diag.c
+++ b/drivers/usb/gadget/f_diag.c
@@ -139,9 +139,6 @@
* @out_desc: USB OUT endpoint descriptor struct
* @read_pool: List of requests used for Rx (OUT ep)
* @write_pool: List of requests used for Tx (IN ep)
- * @config_work: Work item schedule after interface is configured to notify
- * CONNECT event to diag char driver and updating product id
- * and serial number to MODEM/IMEM.
* @lock: Spinlock to protect read_pool, write_pool lists
* @cdev: USB composite device struct
* @ch: USB diag channel
@@ -153,7 +150,6 @@
struct usb_ep *in;
struct list_head read_pool;
struct list_head write_pool;
- struct work_struct config_work;
spinlock_t lock;
unsigned configured;
struct usb_composite_dev *cdev;
@@ -176,21 +172,20 @@
return container_of(f, struct diag_context, function);
}
-static void usb_config_work_func(struct work_struct *work)
+static void diag_update_pid_and_serial_num(struct diag_context *ctxt)
{
- struct diag_context *ctxt = container_of(work,
- struct diag_context, config_work);
struct usb_composite_dev *cdev = ctxt->cdev;
struct usb_gadget_strings *table;
struct usb_string *s;
- if (!ctxt->ch)
+ if (!ctxt->update_pid_and_serial_num)
return;
- if (ctxt->ch->notify)
- ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_CONNECT, NULL);
-
- if (!ctxt->update_pid_and_serial_num)
+ /*
+ * update pid and serial number to dload only if the diag
+ * interface is the zeroth interface.
+ */
+ if (intf_desc.bInterfaceNumber)
return;
/* pass on product id and serial number to dload */
@@ -612,7 +607,6 @@
usb_ep_disable(dev->in);
return rc;
}
- schedule_work(&dev->config_work);
dev->dpkts_tolaptop = 0;
dev->dpkts_tomodem = 0;
@@ -622,6 +616,9 @@
dev->configured = 1;
spin_unlock_irqrestore(&dev->lock, flags);
+ if (dev->ch->notify)
+ dev->ch->notify(dev->ch->priv, USB_DIAG_CONNECT, NULL);
+
return rc;
}
@@ -699,6 +696,7 @@
if (!f->ss_descriptors)
goto fail;
}
+ diag_update_pid_and_serial_num(ctxt);
return 0;
fail:
if (f->ss_descriptors)
@@ -761,7 +759,6 @@
spin_lock_init(&dev->lock);
INIT_LIST_HEAD(&dev->read_pool);
INIT_LIST_HEAD(&dev->write_pool);
- INIT_WORK(&dev->config_work, usb_config_work_func);
ret = usb_add_function(c, &dev->function);
if (ret) {
diff --git a/drivers/usb/gadget/f_gps.c b/drivers/usb/gadget/f_gps.c
new file mode 100644
index 0000000..ef08fc5
--- /dev/null
+++ b/drivers/usb/gadget/f_gps.c
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+
+#include <mach/usb_gadget_xport.h>
+
+#include "u_rmnet.h"
+#include "gadget_chips.h"
+
+#define GPS_NOTIFY_INTERVAL 5
+#define GPS_MAX_NOTIFY_SIZE 64
+
+
+#define ACM_CTRL_DTR (1 << 0)
+
+/* TODO: use separate structures for data and
+ * control paths
+ */
+struct f_gps {
+ struct grmnet port;
+ u8 port_num;
+ int ifc_id;
+ atomic_t online;
+ atomic_t ctrl_online;
+ struct usb_composite_dev *cdev;
+
+ spinlock_t lock;
+
+ /* usb eps */
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+
+ /* control info */
+ struct list_head cpkt_resp_q;
+ atomic_t notify_count;
+ unsigned long cpkts_len;
+};
+
+static struct gps_ports {
+ enum transport_type ctrl_xport;
+ struct f_gps *port;
+} gps_port;
+
+static struct usb_interface_descriptor gps_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
+ /* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor gps_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+ .bInterval = 1 << GPS_NOTIFY_INTERVAL,
+};
+
+static struct usb_descriptor_header *gps_fs_function[] = {
+ (struct usb_descriptor_header *) &gps_interface_desc,
+ (struct usb_descriptor_header *) &gps_fs_notify_desc,
+ NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor gps_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+ .bInterval = GPS_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_descriptor_header *gps_hs_function[] = {
+ (struct usb_descriptor_header *) &gps_interface_desc,
+ (struct usb_descriptor_header *) &gps_hs_notify_desc,
+ NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor gps_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+ .bInterval = GPS_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor gps_ss_notify_comp_desc = {
+ .bLength = sizeof gps_ss_notify_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+};
+
+static struct usb_descriptor_header *gps_ss_function[] = {
+ (struct usb_descriptor_header *) &gps_interface_desc,
+ (struct usb_descriptor_header *) &gps_ss_notify_desc,
+ (struct usb_descriptor_header *) &gps_ss_notify_comp_desc,
+ NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string gps_string_defs[] = {
+ [0].s = "GPS",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings gps_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = gps_string_defs,
+};
+
+static struct usb_gadget_strings *gps_strings[] = {
+ &gps_string_table,
+ NULL,
+};
+
+static void gps_ctrl_response_available(struct f_gps *dev);
+
+/* ------- misc functions --------------------*/
+
+static inline struct f_gps *func_to_gps(struct usb_function *f)
+{
+ return container_of(f, struct f_gps, port.func);
+}
+
+static inline struct f_gps *port_to_gps(struct grmnet *r)
+{
+ return container_of(r, struct f_gps, port);
+}
+
+static struct usb_request *
+gps_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->buf = kmalloc(len, flags);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ req->length = len;
+
+ return req;
+}
+
+void gps_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+static struct rmnet_ctrl_pkt *gps_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+ struct rmnet_ctrl_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(len, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->len = len;
+
+ return pkt;
+}
+
+static void gps_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+
+/* -------------------------------------------*/
+
+static int gps_gport_setup(void)
+{
+ u8 base;
+ int res;
+
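+ /* request one control port for the GPS client; base = first allocated port index */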
+ res = gsmd_ctrl_setup(GPS_CTRL_CLIENT, 1, &base);
+ gps_port.port->port_num += base;
+ return res;
+}
+
+static int gport_ctrl_connect(struct f_gps *dev)
+{
+ return gsmd_ctrl_connect(&dev->port, dev->port_num);
+}
+
+static int gport_gps_disconnect(struct f_gps *dev)
+{
+ gsmd_ctrl_disconnect(&dev->port, dev->port_num);
+ return 0;
+}
+
+static void gps_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_gps *dev = func_to_gps(f);
+
+ pr_debug("%s: portno:%d\n", __func__, dev->port_num);
+
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->descriptors);
+
+ gps_free_req(dev->notify, dev->notify_req);
+
+ kfree(f->name);
+}
+
+static void gps_purge_responses(struct f_gps *dev)
+{
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s: port#%d\n", __func__, dev->port_num);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (!list_empty(&dev->cpkt_resp_q)) {
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+
+ list_del(&cpkt->list);
+ gps_free_ctrl_pkt(cpkt);
+ }
+ atomic_set(&dev->notify_count, 0);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void gps_suspend(struct usb_function *f)
+{
+ struct f_gps *dev = func_to_gps(f);
+
+ gps_purge_responses(dev);
+}
+
+static void gps_disable(struct usb_function *f)
+{
+ struct f_gps *dev = func_to_gps(f);
+
+ usb_ep_disable(dev->notify);
+ dev->notify->driver_data = NULL;
+
+ atomic_set(&dev->online, 0);
+
+ gps_purge_responses(dev);
+
+ gport_gps_disconnect(dev);
+}
+
+static int
+gps_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_gps *dev = func_to_gps(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+ int ret;
+ struct list_head *cpkt;
+
+ pr_debug("%s:dev:%p\n", __func__, dev);
+
+ if (dev->notify->driver_data)
+ usb_ep_disable(dev->notify);
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
+ if (ret) {
+ dev->notify->desc = NULL;
+ ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
+ dev->notify->name, ret);
+ return ret;
+ }
+ ret = usb_ep_enable(dev->notify);
+
+ if (ret) {
+ pr_err("%s: usb ep#%s enable failed, err#%d\n",
+ __func__, dev->notify->name, ret);
+ return ret;
+ }
+ dev->notify->driver_data = dev;
+
+ ret = gport_ctrl_connect(dev);
+
+ atomic_set(&dev->online, 1);
+
+ /*
+ * In case notifications were aborted, but there are pending control
+ * packets in the response queue, re-add the notifications.
+ */
+ list_for_each(cpkt, &dev->cpkt_resp_q)
+ gps_ctrl_response_available(dev);
+
+ return ret;
+}
+
+static void gps_ctrl_response_available(struct f_gps *dev)
+{
+ struct usb_request *req = dev->notify_req;
+ struct usb_cdc_notification *event;
+ unsigned long flags;
+ int ret;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s:dev:%p\n", __func__, dev);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (!atomic_read(&dev->online) || !req || !req->buf) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+
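+ /* only the first queued response raises a notification; later ones are coalesced */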
+ if (atomic_inc_return(&dev->notify_count) != 1) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+
+ event = req->buf;
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(dev->ifc_id);
+ event->wLength = cpu_to_le16(0);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+ if (ret) {
+ spin_lock_irqsave(&dev->lock, flags);
+ if (!list_empty(&dev->cpkt_resp_q)) {
+ atomic_dec(&dev->notify_count);
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ gps_free_ctrl_pkt(cpkt);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ pr_debug("ep enqueue error %d\n", ret);
+ }
+}
+
+static void gps_connect(struct grmnet *gr)
+{
+ struct f_gps *dev;
+
+ if (!gr) {
+ pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
+ return;
+ }
+
+ dev = port_to_gps(gr);
+
+ atomic_set(&dev->ctrl_online, 1);
+}
+
+static void gps_disconnect(struct grmnet *gr)
+{
+ struct f_gps *dev;
+ struct usb_cdc_notification *event;
+ int status;
+
+ if (!gr) {
+ pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
+ return;
+ }
+
+ dev = port_to_gps(gr);
+
+ atomic_set(&dev->ctrl_online, 0);
+
+ if (!atomic_read(&dev->online)) {
+ pr_debug("%s: nothing to do\n", __func__);
+ return;
+ }
+
+ usb_ep_fifo_flush(dev->notify);
+
+ event = dev->notify_req->buf;
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(dev->ifc_id);
+ event->wLength = cpu_to_le16(0);
+
+ status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+ if (status < 0) {
+ if (!atomic_read(&dev->online))
+ return;
+ pr_err("%s: gps notify ep enqueue error %d\n",
+ __func__, status);
+ }
+
+ gps_purge_responses(dev);
+}
+
+static int
+gps_send_cpkt_response(void *gr, void *buf, size_t len)
+{
+ struct f_gps *dev;
+ struct rmnet_ctrl_pkt *cpkt;
+ unsigned long flags;
+
+ if (!gr || !buf) {
+ pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
+ __func__, gr, buf);
+ return -ENODEV;
+ }
+ cpkt = gps_alloc_ctrl_pkt(len, GFP_ATOMIC);
+ if (IS_ERR(cpkt)) {
+ pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+ return -ENOMEM;
+ }
+ memcpy(cpkt->buf, buf, len);
+ cpkt->len = len;
+
+ dev = port_to_gps(gr);
+
+ pr_debug("%s: dev:%p\n", __func__, dev);
+
+ if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
+ gps_free_ctrl_pkt(cpkt);
+ return 0;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ gps_ctrl_response_available(dev);
+
+ return 0;
+}
+
+static void
+gps_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_gps *dev = req->context;
+ struct usb_composite_dev *cdev;
+
+ if (!dev) {
+ pr_err("%s: dev is null\n", __func__);
+ return;
+ }
+
+ pr_debug("%s: dev:%p\n", __func__, dev);
+
+ cdev = dev->cdev;
+
+ if (dev->port.send_encap_cmd)
+ dev->port.send_encap_cmd(dev->port_num, req->buf, req->actual);
+}
+
+static void gps_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_gps *dev = req->context;
+ int status = req->status;
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ atomic_set(&dev->notify_count, 0);
+ break;
+ default:
+ pr_err("gps notify ep error %d\n", status);
+ /* FALLTHROUGH */
+ case 0:
+ if (!atomic_read(&dev->ctrl_online))
+ break;
+
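+ /* keep re-queueing the notify request while responses remain pending */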
+ if (atomic_dec_and_test(&dev->notify_count))
+ break;
+
+ status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
+ if (status) {
+ spin_lock_irqsave(&dev->lock, flags);
+ if (!list_empty(&dev->cpkt_resp_q)) {
+ atomic_dec(&dev->notify_count);
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ gps_free_ctrl_pkt(cpkt);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ pr_debug("ep enqueue error %d\n", status);
+ }
+ break;
+ }
+}
+
+static int
+gps_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_gps *dev = func_to_gps(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = cdev->req;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ int ret = -EOPNOTSUPP;
+
+ pr_debug("%s:dev:%p\n", __func__, dev);
+
+ if (!atomic_read(&dev->online)) {
+ pr_debug("%s: usb cable is not connected\n", __func__);
+ return -ENOTCONN;
+ }
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ ret = w_length;
+ req->complete = gps_cmd_complete;
+ req->context = dev;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ if (w_value)
+ goto invalid;
+ else {
+ unsigned len;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ spin_lock(&dev->lock);
+ if (list_empty(&dev->cpkt_resp_q)) {
+ pr_err("%s: ctrl resp queue empty", __func__);
+ spin_unlock(&dev->lock);
+ goto invalid;
+ }
+
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ spin_unlock(&dev->lock);
+
+ len = min_t(unsigned, w_length, cpkt->len);
+ memcpy(req->buf, cpkt->buf, len);
+ ret = len;
+
+ gps_free_ctrl_pkt(cpkt);
+ }
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+ if (dev->port.notify_modem)
+ dev->port.notify_modem(&dev->port,
+ dev->port_num, w_value);
+ ret = 0;
+
+ break;
+ default:
+
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (ret >= 0) {
+ VDBG(cdev, "gps req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (ret < w_length);
+ req->length = ret;
+ ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0)
+ ERROR(cdev, "gps ep0 enqueue err %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int gps_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_gps *dev = func_to_gps(f);
+ struct usb_ep *ep;
+ struct usb_composite_dev *cdev = c->cdev;
+ int ret = -ENODEV;
+
+ dev->ifc_id = usb_interface_id(c, f);
+ if (dev->ifc_id < 0) {
+ pr_err("%s: unable to allocate ifc id, err:%d",
+ __func__, dev->ifc_id);
+ return dev->ifc_id;
+ }
+ gps_interface_desc.bInterfaceNumber = dev->ifc_id;
+
+ dev->port.in = NULL;
+ dev->port.out = NULL;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &gps_fs_notify_desc);
+ if (!ep) {
+ pr_err("%s: usb epnotify autoconfig failed\n", __func__);
+ ret = -ENODEV;
+ goto ep_auto_notify_fail;
+ }
+ dev->notify = ep;
+ ep->driver_data = cdev;
+
+ dev->notify_req = gps_alloc_req(ep,
+ sizeof(struct usb_cdc_notification),
+ GFP_KERNEL);
+ if (IS_ERR(dev->notify_req)) {
+ pr_err("%s: unable to allocate memory for notify req\n",
+ __func__);
+ ret = -ENOMEM;
+ goto ep_notify_alloc_fail;
+ }
+
+ dev->notify_req->complete = gps_notify_complete;
+ dev->notify_req->context = dev;
+
+ ret = -ENOMEM;
+ f->descriptors = usb_copy_descriptors(gps_fs_function);
+
+ if (!f->descriptors)
+ goto fail;
+
+ if (gadget_is_dualspeed(cdev->gadget)) {
+ gps_hs_notify_desc.bEndpointAddress =
+ gps_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(gps_hs_function);
+
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(cdev->gadget)) {
+ gps_ss_notify_desc.bEndpointAddress =
+ gps_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(gps_ss_function);
+
+ if (!f->ss_descriptors)
+ goto fail;
+ }
+
+ pr_info("%s: GPS(%d) %s Speed\n",
+ __func__, dev->port_num,
+ gadget_is_dualspeed(cdev->gadget) ? "dual" : "full");
+
+ return 0;
+
+fail:
+ if (f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->descriptors)
+ usb_free_descriptors(f->descriptors);
+ if (dev->notify_req)
+ gps_free_req(dev->notify, dev->notify_req);
+ep_notify_alloc_fail:
+ dev->notify->driver_data = NULL;
+ dev->notify = NULL;
+ep_auto_notify_fail:
+ return ret;
+}
+
+static int gps_bind_config(struct usb_configuration *c)
+{
+ int status;
+ struct f_gps *dev;
+ struct usb_function *f;
+ unsigned long flags;
+
+ pr_debug("%s: usb config:%p\n", __func__, c);
+
+ if (gps_string_defs[0].id == 0) {
+ status = usb_string_id(c->cdev);
+ if (status < 0) {
+ pr_err("%s: failed to get string id, err:%d\n",
+ __func__, status);
+ return status;
+ }
+ gps_string_defs[0].id = status;
+ }
+
+ dev = gps_port.port;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->cdev = c->cdev;
+ f = &dev->port.func;
+ f->name = kasprintf(GFP_ATOMIC, "gps");
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!f->name) {
+ pr_err("%s: cannot allocate memory for name\n", __func__);
+ return -ENOMEM;
+ }
+
+ f->strings = gps_strings;
+ f->bind = gps_bind;
+ f->unbind = gps_unbind;
+ f->disable = gps_disable;
+ f->set_alt = gps_set_alt;
+ f->setup = gps_setup;
+ f->suspend = gps_suspend;
+ dev->port.send_cpkt_response = gps_send_cpkt_response;
+ dev->port.disconnect = gps_disconnect;
+ dev->port.connect = gps_connect;
+
+ status = usb_add_function(c, f);
+ if (status) {
+ pr_err("%s: usb add function failed: %d\n",
+ __func__, status);
+ kfree(f->name);
+ return status;
+ }
+
+ pr_debug("%s: complete\n", __func__);
+
+ return status;
+}
+
+static void gps_cleanup(void)
+{
+ kfree(gps_port.port);
+}
+
+static int gps_init_port(void)
+{
+ struct f_gps *dev;
+
+ dev = kzalloc(sizeof(struct f_gps), GFP_KERNEL);
+ if (!dev) {
+ pr_err("%s: Unable to allocate gps device\n", __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&dev->lock);
+ INIT_LIST_HEAD(&dev->cpkt_resp_q);
+ dev->port_num = 0;
+
+ gps_port.port = dev;
+ gps_port.ctrl_xport = USB_GADGET_XPORT_SMD;
+
+ return 0;
+}
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index 5a3d753..22f8dc9 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -1703,6 +1703,7 @@
ntb_parameters.dwNtbInMaxSize =
cpu_to_le32(NTB_DEFAULT_IN_SIZE_IPA);
ntb_parameters.dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE_IPA);
+ ntb_parameters.wNdpInDivisor = 1;
}
INIT_LIST_HEAD(&mbim->cpkt_req_q);
diff --git a/drivers/usb/gadget/f_qc_ecm.c b/drivers/usb/gadget/f_qc_ecm.c
index 51f0e50..8e7cbb2 100644
--- a/drivers/usb/gadget/f_qc_ecm.c
+++ b/drivers/usb/gadget/f_qc_ecm.c
@@ -430,8 +430,6 @@
bam_data_disconnect(&ecm_qc_bam_port, 0);
- ecm_ipa_cleanup(ipa_params.ipa_priv);
-
return 0;
}
@@ -849,6 +847,10 @@
usb_ep_free_request(ecm->notify, ecm->notify_req);
ecm_qc_string_defs[1].s = NULL;
+
+ if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA)
+ ecm_ipa_cleanup(ipa_params.ipa_priv);
+
kfree(ecm);
}
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index 2dccca8..f095efb 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -301,6 +301,7 @@
int ret;
int port_idx;
int i;
+ u8 base;
pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u data hsuart ports: %u"
" smd ports: %u ctrl hsic ports: %u ctrl hsuart ports: %u"
@@ -317,9 +318,13 @@
}
if (no_ctrl_smd_ports) {
- ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
+ ret = gsmd_ctrl_setup(FRMNET_CTRL_CLIENT,
+ no_ctrl_smd_ports, &base);
if (ret)
return ret;
+ for (i = 0; i < nr_rmnet_ports; i++)
+ if (rmnet_ports[i].port)
+ rmnet_ports[i].port->port_num += base;
}
if (no_data_hsic_ports) {
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index 67c9a1a..c601000 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -717,6 +717,7 @@
ipa_notify_cb usb_notify_cb;
void *priv;
int ret;
+ unsigned long flags;
if (d->trans == USB_GADGET_XPORT_BAM2BAM) {
ret = usb_bam_connect(d->src_connection_idx, &d->src_pipe_idx);
@@ -769,9 +770,21 @@
}
}
- d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
- if (!d->rx_req)
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ spin_lock(&port->port_lock_dl);
+ if (!port->port_usb) {
+ pr_debug("%s: usb cable is disconnected, exiting\n", __func__);
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
return;
+ }
+ d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_ATOMIC);
+ if (!d->rx_req) {
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_err("%s: out of memory\n", __func__);
+ return;
+ }
d->rx_req->context = port;
d->rx_req->complete = gbam_endless_rx_complete;
@@ -779,9 +792,14 @@
sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
d->rx_req->udc_priv = sps_params;
- d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL);
- if (!d->tx_req)
+
+ d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_ATOMIC);
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ if (!d->tx_req) {
+ pr_err("%s: out of memory\n", __func__);
return;
+ }
d->tx_req->context = port;
d->tx_req->complete = gbam_endless_tx_complete;
diff --git a/drivers/usb/gadget/u_rmnet.h b/drivers/usb/gadget/u_rmnet.h
index a9cca50..06471a4 100644
--- a/drivers/usb/gadget/u_rmnet.h
+++ b/drivers/usb/gadget/u_rmnet.h
@@ -46,6 +46,13 @@
void (*connect)(struct grmnet *g);
};
+enum ctrl_client {
+ FRMNET_CTRL_CLIENT,
+ GPS_CTRL_CLIENT,
+
+ NR_CTRL_CLIENTS
+};
+
int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port);
int gbam_connect(struct grmnet *gr, u8 port_num,
enum transport_type trans, u8 src_connection_idx,
@@ -56,7 +63,8 @@
void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans);
int gsmd_ctrl_connect(struct grmnet *gr, int port_num);
void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num);
-int gsmd_ctrl_setup(unsigned int count);
+int gsmd_ctrl_setup(enum ctrl_client client_num, unsigned int count,
+ u8 *first_port_idx);
int gqti_ctrl_connect(struct grmnet *gr);
void gqti_ctrl_disconnect(struct grmnet *gr);
diff --git a/drivers/usb/gadget/u_rmnet_ctrl_qti.c b/drivers/usb/gadget/u_rmnet_ctrl_qti.c
index e92978f..182cd40 100644
--- a/drivers/usb/gadget/u_rmnet_ctrl_qti.c
+++ b/drivers/usb/gadget/u_rmnet_ctrl_qti.c
@@ -259,20 +259,6 @@
return -EBUSY;
}
- /* block until online */
- while (!(atomic_read(&port->connected))) {
- pr_debug("Not connected. Wait.\n");
- ret = wait_event_interruptible(port->read_wq,
- atomic_read(&port->connected));
- if (ret < 0) {
- rmnet_ctrl_unlock(&port->read_excl);
- if (ret == -ERESTARTSYS)
- return -ERESTARTSYS;
- else
- return -EINTR;
- }
- }
-
/* block until a new packet is available */
do {
spin_lock_irqsave(&port->lock, flags);
diff --git a/drivers/usb/gadget/u_rmnet_ctrl_smd.c b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
index f60aa6a..caea4ef 100644
--- a/drivers/usb/gadget/u_rmnet_ctrl_smd.c
+++ b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
@@ -24,11 +24,16 @@
#include "u_rmnet.h"
-#define NR_CTRL_SMD_PORTS 3
-static int n_rmnet_ctrl_ports;
-static char *rmnet_ctrl_names[] = {"DATA40_CNTL", "DATA39_CNTL", "DATA38_CNTL"};
+#define MAX_CTRL_PER_CLIENT 3
+#define MAX_CTRL_PORT (MAX_CTRL_PER_CLIENT * NR_CTRL_CLIENTS)
+static char *ctrl_names[NR_CTRL_CLIENTS][MAX_CTRL_PER_CLIENT] = {
+ {"DATA40_CNTL", "DATA39_CNTL", "DATA38_CNTL"},
+ {"DATA39_CNTL"},
+};
static struct workqueue_struct *grmnet_ctrl_wq;
+u8 online_clients;
+
#define SMD_CH_MAX_LEN 20
#define CH_OPENED 0
#define CH_READY 1
@@ -68,7 +73,7 @@
static struct rmnet_ctrl_ports {
struct rmnet_ctrl_port *port;
struct platform_driver pdrv;
-} ctrl_smd_ports[NR_CTRL_SMD_PORTS];
+} ctrl_smd_ports[MAX_CTRL_PORT];
/*---------------misc functions---------------- */
@@ -172,6 +177,15 @@
}
spin_unlock_irqrestore(&port->port_lock, flags);
}
+static int is_legal_port_num(u8 portno)
+{
+ if (portno >= MAX_CTRL_PORT)
+ return false;
+ if (ctrl_smd_ports[portno].port == NULL)
+ return false;
+
+ return true;
+}
static int
grmnet_ctrl_smd_send_cpkt_tomodem(u8 portno,
@@ -182,7 +196,7 @@
struct smd_ch_info *c;
struct rmnet_ctrl_pkt *cpkt;
- if (portno >= n_rmnet_ctrl_ports) {
+ if (!is_legal_port_num(portno)) {
pr_err("%s: Invalid portno#%d\n", __func__, portno);
return -ENODEV;
}
@@ -225,7 +239,7 @@
int clear_bits = 0;
int temp = 0;
- if (portno >= n_rmnet_ctrl_ports) {
+ if (!is_legal_port_num(portno)) {
pr_err("%s: Invalid portno#%d\n", __func__, portno);
return;
}
@@ -375,8 +389,8 @@
pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
- if (port_num >= n_rmnet_ctrl_ports) {
- pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ if (!is_legal_port_num(port_num)) {
+ pr_err("%s: Invalid port_num#%d\n", __func__, port_num);
return -ENODEV;
}
@@ -408,6 +422,11 @@
struct platform_driver *pdrv;
c = &port->ctrl_ch;
+ if (c->ch) {
+ smd_close(c->ch);
+ c->ch = NULL;
+ }
+
if (test_bit(CH_READY, &c->flags) ||
test_bit(CH_PREPARE_READY, &c->flags)) {
clear_bit(CH_PREPARE_READY, &c->flags);
@@ -426,8 +445,8 @@
pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
- if (port_num >= n_rmnet_ctrl_ports) {
- pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ if (!is_legal_port_num(port_num)) {
+ pr_err("%s: Invalid port_num#%d\n", __func__, port_num);
return;
}
@@ -460,11 +479,6 @@
smd_tiocmset(c->ch, c->cbits_tomodem, clear_bits);
}
- if (c->ch) {
- smd_close(c->ch);
- c->ch = NULL;
- }
-
queue_delayed_work(grmnet_ctrl_wq, &port->disconnect_w, 0);
}
@@ -478,7 +492,10 @@
pr_debug("%s: name:%s\n", __func__, pdev->name);
- for (i = 0; i < n_rmnet_ctrl_ports; i++) {
+ for (i = 0; i < MAX_CTRL_PORT; i++) {
+ if (!ctrl_smd_ports[i].port)
+ continue;
+
port = ctrl_smd_ports[i].port;
c = &port->ctrl_ch;
@@ -508,7 +525,10 @@
pr_debug("%s: name:%s\n", __func__, pdev->name);
- for (i = 0; i < n_rmnet_ctrl_ports; i++) {
+ for (i = 0; i < MAX_CTRL_PORT; i++) {
+ if (!ctrl_smd_ports[i].port)
+ continue;
+
port = ctrl_smd_ports[i].port;
c = &port->ctrl_ch;
@@ -555,7 +575,8 @@
INIT_DELAYED_WORK(&port->disconnect_w, grmnet_ctrl_smd_disconnect_w);
c = &port->ctrl_ch;
- c->name = rmnet_ctrl_names[portno];
+ c->name = ctrl_names[portno / MAX_CTRL_PER_CLIENT]
+ [portno % MAX_CTRL_PER_CLIENT];
c->port = port;
init_waitqueue_head(&c->wait);
INIT_LIST_HEAD(&c->tx_q);
@@ -575,44 +596,54 @@
return 0;
}
-int gsmd_ctrl_setup(unsigned int count)
+int gsmd_ctrl_setup(enum ctrl_client client_num, unsigned int count,
+ u8 *first_port_idx)
{
- int i;
+ int i, start_port, allocated_ports;
int ret;
pr_debug("%s: requested ports:%d\n", __func__, count);
- if (!count || count > NR_CTRL_SMD_PORTS) {
+ if (!count || count > MAX_CTRL_PER_CLIENT) {
pr_err("%s: Invalid num of ports count:%d\n",
__func__, count);
return -EINVAL;
}
- grmnet_ctrl_wq = alloc_workqueue("gsmd_ctrl",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
- if (!grmnet_ctrl_wq) {
- pr_err("%s: Unable to create workqueue grmnet_ctrl\n",
- __func__);
- return -ENOMEM;
+ if (!online_clients) {
+ grmnet_ctrl_wq = alloc_workqueue("gsmd_ctrl",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!grmnet_ctrl_wq) {
+ pr_err("%s: Unable to create workqueue grmnet_ctrl\n",
+ __func__);
+ return -ENOMEM;
+ }
}
+ online_clients++;
- for (i = 0; i < count; i++) {
- n_rmnet_ctrl_ports++;
+ start_port = MAX_CTRL_PER_CLIENT * client_num;
+ allocated_ports = 0;
+ for (i = start_port; i < count + start_port; i++) {
+ allocated_ports++;
ret = grmnet_ctrl_smd_port_alloc(i);
if (ret) {
pr_err("%s: Unable to alloc port:%d\n", __func__, i);
- n_rmnet_ctrl_ports--;
+ allocated_ports--;
goto free_ctrl_smd_ports;
}
}
-
+ if (first_port_idx)
+ *first_port_idx = start_port;
return 0;
free_ctrl_smd_ports:
- for (i = 0; i < n_rmnet_ctrl_ports; i++)
- grmnet_ctrl_smd_port_free(i);
+ for (i = 0; i < allocated_ports; i++)
+ grmnet_ctrl_smd_port_free(start_port + i);
- destroy_workqueue(grmnet_ctrl_wq);
+
+ online_clients--;
+ if (!online_clients)
+ destroy_workqueue(grmnet_ctrl_wq);
return ret;
}
@@ -634,10 +665,11 @@
if (!buf)
return -ENOMEM;
- for (i = 0; i < n_rmnet_ctrl_ports; i++) {
- port = ctrl_smd_ports[i].port;
- if (!port)
+ for (i = 0; i < MAX_CTRL_PORT; i++) {
+ if (!ctrl_smd_ports[i].port)
continue;
+ port = ctrl_smd_ports[i].port;
+
spin_lock_irqsave(&port->port_lock, flags);
c = &port->ctrl_ch;
@@ -677,10 +709,10 @@
int i;
unsigned long flags;
- for (i = 0; i < n_rmnet_ctrl_ports; i++) {
- port = ctrl_smd_ports[i].port;
- if (!port)
+ for (i = 0; i < MAX_CTRL_PORT; i++) {
+ if (!ctrl_smd_ports[i].port)
continue;
+ port = ctrl_smd_ports[i].port;
spin_lock_irqsave(&port->port_lock, flags);
@@ -727,6 +759,7 @@
static int __init gsmd_ctrl_init(void)
{
gsmd_ctrl_debugfs_init();
+ online_clients = 0;
return 0;
}
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 9879122..ede8bdb 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -78,6 +78,7 @@
struct clk *alt_core_clk;
struct clk *phy_clk;
struct clk *cal_clk;
+ struct clk *inactivity_clk;
struct regulator *hsic_vddcx;
struct regulator *hsic_gdsc;
atomic_t async_int;
@@ -575,6 +576,8 @@
clk_disable_unprepare(mehci->phy_clk);
clk_disable_unprepare(mehci->cal_clk);
clk_disable_unprepare(mehci->ahb_clk);
+ if (!IS_ERR(mehci->inactivity_clk))
+ clk_disable_unprepare(mehci->inactivity_clk);
ret = clk_reset(mehci->core_clk, CLK_RESET_ASSERT);
if (ret) {
@@ -596,6 +599,8 @@
clk_prepare_enable(mehci->phy_clk);
clk_prepare_enable(mehci->cal_clk);
clk_prepare_enable(mehci->ahb_clk);
+ if (!IS_ERR(mehci->inactivity_clk))
+ clk_prepare_enable(mehci->inactivity_clk);
}
}
@@ -794,6 +799,8 @@
clk_disable_unprepare(mehci->phy_clk);
clk_disable_unprepare(mehci->cal_clk);
clk_disable_unprepare(mehci->ahb_clk);
+ if (!IS_ERR(mehci->inactivity_clk))
+ clk_disable_unprepare(mehci->inactivity_clk);
none_vol = vdd_val[mehci->vdd_type][VDD_NONE];
max_vol = vdd_val[mehci->vdd_type][VDD_MAX];
@@ -876,6 +883,8 @@
clk_prepare_enable(mehci->phy_clk);
clk_prepare_enable(mehci->cal_clk);
clk_prepare_enable(mehci->ahb_clk);
+ if (!IS_ERR(mehci->inactivity_clk))
+ clk_prepare_enable(mehci->inactivity_clk);
temp = readl_relaxed(USB_USBCMD);
temp &= ~ASYNC_INTR_CTRL;
@@ -1332,44 +1341,48 @@
if (pdata->resume_gpio)
gpio_direction_output(pdata->resume_gpio, 1);
- mehci->resume_status = 0;
- resume_thread = kthread_run(msm_hsic_resume_thread,
- mehci, "hsic_resume_thread");
- if (IS_ERR(resume_thread)) {
- pr_err("Error creating resume thread:%lu\n",
- PTR_ERR(resume_thread));
- return PTR_ERR(resume_thread);
+ if (!mehci->ehci.resume_sof_bug) {
+ ehci_bus_resume(hcd);
+ } else {
+ mehci->resume_status = 0;
+ resume_thread = kthread_run(msm_hsic_resume_thread,
+ mehci, "hsic_resume_thread");
+ if (IS_ERR(resume_thread)) {
+ pr_err("Error creating resume thread:%lu\n",
+ PTR_ERR(resume_thread));
+ return PTR_ERR(resume_thread);
+ }
+
+ wait_for_completion(&mehci->rt_completion);
+
+ if (mehci->resume_status < 0)
+ return mehci->resume_status;
+
+ dbg_log_event(NULL, "FPR: Wokeup", 0);
+ spin_lock_irq(&ehci->lock);
+ (void) ehci_readl(ehci, &ehci->regs->command);
+
+ temp = 0;
+ if (ehci->async->qh_next.qh)
+ temp |= CMD_ASE;
+ if (ehci->periodic_sched)
+ temp |= CMD_PSE;
+ if (temp) {
+ ehci->command |= temp;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ }
+
+ ehci->next_statechange = jiffies + msecs_to_jiffies(5);
+ hcd->state = HC_STATE_RUNNING;
+ ehci->rh_state = EHCI_RH_RUNNING;
+ ehci->command |= CMD_RUN;
+
+ /* Now we can safely re-enable irqs */
+ ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
+
+ spin_unlock_irq(&ehci->lock);
}
- wait_for_completion(&mehci->rt_completion);
-
- if (mehci->resume_status < 0)
- return mehci->resume_status;
-
- dbg_log_event(NULL, "FPR: Wokeup", 0);
- spin_lock_irq(&ehci->lock);
- (void) ehci_readl(ehci, &ehci->regs->command);
-
- temp = 0;
- if (ehci->async->qh_next.qh)
- temp |= CMD_ASE;
- if (ehci->periodic_sched)
- temp |= CMD_PSE;
- if (temp) {
- ehci->command |= temp;
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- }
-
- ehci->next_statechange = jiffies + msecs_to_jiffies(5);
- hcd->state = HC_STATE_RUNNING;
- ehci->rh_state = EHCI_RH_RUNNING;
- ehci->command |= CMD_RUN;
-
- /* Now we can safely re-enable irqs */
- ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
-
- spin_unlock_irq(&ehci->lock);
-
if (pdata->resume_gpio)
gpio_direction_output(pdata->resume_gpio, 0);
@@ -1503,10 +1516,21 @@
goto put_cal_clk;
}
+ /*
+ * The inactivity_clk is required for the HSIC BAM inactivity timer.
+ * This clock is optional and is defined in the clock lookup table
+ * only for targets that use the inactivity timer feature.
+ */
+ mehci->inactivity_clk = clk_get(mehci->dev, "inactivity_clk");
+ if (IS_ERR(mehci->inactivity_clk))
+ dev_dbg(mehci->dev, "failed to get inactivity_clk\n");
+
clk_prepare_enable(mehci->core_clk);
clk_prepare_enable(mehci->phy_clk);
clk_prepare_enable(mehci->cal_clk);
clk_prepare_enable(mehci->ahb_clk);
+ if (!IS_ERR(mehci->inactivity_clk))
+ clk_prepare_enable(mehci->inactivity_clk);
return 0;
@@ -1516,7 +1540,11 @@
clk_disable_unprepare(mehci->phy_clk);
clk_disable_unprepare(mehci->cal_clk);
clk_disable_unprepare(mehci->ahb_clk);
+ if (!IS_ERR(mehci->inactivity_clk))
+ clk_disable_unprepare(mehci->inactivity_clk);
}
+ if (!IS_ERR(mehci->inactivity_clk))
+ clk_put(mehci->inactivity_clk);
clk_put(mehci->ahb_clk);
put_cal_clk:
clk_put(mehci->cal_clk);
@@ -1808,6 +1836,8 @@
res_gpio = 0;
pdata->resume_gpio = res_gpio;
+ pdata->phy_sof_workaround = of_property_read_bool(node,
+ "qcom,phy-sof-workaround");
pdata->ignore_cal_pad_config = of_property_read_bool(node,
"hsic,ignore-cal-pad-config");
of_property_read_u32(node, "hsic,strobe-pad-offset",
@@ -1821,6 +1851,8 @@
"qcom,pool-64-bit-align");
pdata->enable_hbm = of_property_read_bool(node,
"qcom,enable-hbm");
+ pdata->disable_park_mode = (of_property_read_bool(node,
+ "qcom,disable-park-mode"));
return pdata;
}
@@ -1899,10 +1931,12 @@
spin_lock_init(&mehci->wakeup_lock);
- mehci->ehci.susp_sof_bug = 1;
- mehci->ehci.reset_sof_bug = 1;
+ if (pdata->phy_sof_workaround) {
+ mehci->ehci.susp_sof_bug = 1;
+ mehci->ehci.reset_sof_bug = 1;
+ mehci->ehci.resume_sof_bug = 1;
+ }
- mehci->ehci.resume_sof_bug = 1;
mehci->ehci.pool_64_bit_align = pdata->pool_64_bit_align;
mehci->enable_hbm = pdata->enable_hbm;
@@ -2056,7 +2090,7 @@
pm_runtime_put_sync(pdev->dev.parent);
if (mehci->enable_hbm)
- hbm_init(hcd);
+ hbm_init(hcd, pdata->disable_park_mode);
return 0;
diff --git a/drivers/usb/host/hbm.c b/drivers/usb/host/hbm.c
index 1a0c0aa..d34301d 100644
--- a/drivers/usb/host/hbm.c
+++ b/drivers/usb/host/hbm.c
@@ -44,6 +44,7 @@
struct hbm_msm {
u32 *base;
struct usb_hcd *hcd;
+ bool disable_park_mode;
};
static struct hbm_msm *hbm_ctx;
@@ -173,8 +174,8 @@
USB_OTG_HS_HBM_PIPE_PRODUCER, 1 << pipe_num,
(is_consumer ? 0 : 1));
- /* disable park mode as default */
- set_disable_park_mode(pipe_num, true);
+ /* set park mode */
+ set_disable_park_mode(pipe_num, hbm_ctx->disable_park_mode);
/* enable zlt as default*/
set_disable_zlt(pipe_num, false);
@@ -186,7 +187,7 @@
return 0;
}
-void hbm_init(struct usb_hcd *hcd)
+void hbm_init(struct usb_hcd *hcd, bool disable_park_mode)
{
pr_info("%s\n", __func__);
@@ -198,6 +199,7 @@
hbm_ctx->base = hcd->regs;
hbm_ctx->hcd = hcd;
+ hbm_ctx->disable_park_mode = disable_park_mode;
/* reset hbm */
hbm_reset(true);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 79dcf2f..46b5ce4 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb/otg.h>
+#include <linux/usb/msm_hsusb.h>
#include "xhci.h"
@@ -140,6 +141,10 @@
goto release_mem_region;
}
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto unmap_registers;
@@ -166,7 +171,8 @@
goto put_usb3_hcd;
phy = usb_get_transceiver();
- if (phy && phy->otg) {
+ /* Register with OTG if present; skip when USB2 OTG is using the other PHY */
+ if (phy && phy->otg && !(phy->flags & ENABLE_SECONDARY_PHY)) {
dev_dbg(&pdev->dev, "%s otg support available\n", __func__);
ret = otg_set_host(phy->otg, &hcd->self);
if (ret) {
@@ -175,15 +181,12 @@
usb_put_transceiver(phy);
goto put_usb3_hcd;
}
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
} else {
pm_runtime_no_callbacks(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get(&pdev->dev);
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
put_usb3_hcd:
@@ -222,9 +225,6 @@
if (phy && phy->otg) {
otg_set_host(phy->otg, NULL);
usb_put_transceiver(phy);
- } else {
- pm_runtime_put(&dev->dev);
- pm_runtime_disable(&dev->dev);
}
return 0;
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index cae2c17..4865b03 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -478,6 +478,7 @@
u32 val;
int ret;
int retries;
+ struct msm_otg_platform_data *pdata = motg->pdata;
ret = msm_otg_link_clk_reset(motg, 1);
if (ret)
@@ -496,6 +497,9 @@
if (ret)
return ret;
+ if (pdata && pdata->enable_sec_phy)
+ writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
+ USB_PHY_CTRL2);
val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK;
writel(val | PORTSC_PTS_ULPI, USB_PORTSC);
@@ -535,6 +539,7 @@
static int msm_otg_link_reset(struct msm_otg *motg)
{
int cnt = 0;
+ struct msm_otg_platform_data *pdata = motg->pdata;
writel_relaxed(USBCMD_RESET, USB_USBCMD);
while (cnt < LINK_RESET_TIMEOUT_USEC) {
@@ -551,6 +556,9 @@
writel_relaxed(0x0, USB_AHBBURST);
writel_relaxed(0x08, USB_AHBMODE);
+ if (pdata && pdata->enable_sec_phy)
+ writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
+ USB_PHY_CTRL2);
return 0;
}
@@ -3784,8 +3792,6 @@
&pdata->default_mode);
of_property_read_u32(node, "qcom,hsusb-otg-phy-type",
&pdata->phy_type);
- of_property_read_u32(node, "qcom,hsusb-otg-pmic-id-irq",
- &pdata->pmic_id_irq);
pdata->disable_reset_on_disconnect = of_property_read_bool(node,
"qcom,hsusb-otg-disable-reset");
pdata->pnoc_errata_fix = of_property_read_bool(node,
@@ -3798,6 +3804,12 @@
"qcom,hsusb-otg-delay-lpm");
pdata->dp_manual_pullup = of_property_read_bool(node,
"qcom,dp-manual-pullup");
+ pdata->enable_sec_phy = of_property_read_bool(node,
+ "qcom,usb2-enable-hsphy2");
+
+ pdata->pmic_id_irq = platform_get_irq_byname(pdev, "pmic_id_irq");
+ if (pdata->pmic_id_irq < 0)
+ pdata->pmic_id_irq = 0;
return pdata;
}
@@ -4093,6 +4105,9 @@
if (pdata->dp_manual_pullup)
phy->flags |= ENABLE_DP_MANUAL_PULLUP;
+ if (pdata->enable_sec_phy)
+ phy->flags |= ENABLE_SECONDARY_PHY;
+
ret = usb_set_transceiver(&motg->phy);
if (ret) {
dev_err(&pdev->dev, "usb_set_transceiver failed\n");
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index a3d8d7e..b57d0a4 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -945,6 +945,7 @@
void mdp4_writeback_dma_stop(struct msm_fb_data_type *mfd);
int mdp4_writeback_init(struct fb_info *info);
int mdp4_writeback_terminate(struct fb_info *info);
+int mdp4_writeback_set_mirroring_hint(struct fb_info *info, int hint);
uint32_t mdp_block2base(uint32_t block);
int mdp_hist_lut_config(struct mdp_hist_lut_data *data);
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index e415a95..afa7b97 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -245,7 +245,7 @@
return PTR_ERR(*srcp_ihdl);
}
pr_debug("%s(): ion_hdl %p, ion_buf %d\n", __func__, *srcp_ihdl,
- ion_share_dma_buf(display_iclient, *srcp_ihdl));
+ mem_id);
pr_debug("mixer %u, pipe %u, plane %u\n", pipe->mixer_num,
pipe->pipe_ndx, plane);
if (ion_map_iommu(display_iclient, *srcp_ihdl,
@@ -3273,10 +3273,10 @@
pr_debug("pipe->flags 0x%x\n", pipe->flags);
if (pipe->flags & MDP_SECURE_OVERLAY_SESSION) {
mfd->mem_hid &= ~BIT(ION_IOMMU_HEAP_ID);
- mfd->mem_hid |= ION_SECURE;
+ mfd->mem_hid |= ION_FLAG_SECURE;
} else {
mfd->mem_hid |= BIT(ION_IOMMU_HEAP_ID);
- mfd->mem_hid &= ~ION_SECURE;
+ mfd->mem_hid &= ~ION_FLAG_SECURE;
}
}
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 7caf0ad..62e89d3 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -807,3 +807,23 @@
mutex_unlock(&mfd->writeback_mutex);
wake_up(&mfd->wait_q);
}
+
+int mdp4_writeback_set_mirroring_hint(struct fb_info *info, int hint)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+ if (mfd->panel.type != WRITEBACK_PANEL)
+ return -ENOTSUPP;
+
+ switch (hint) {
+ case MDP_WRITEBACK_MIRROR_ON:
+ case MDP_WRITEBACK_MIRROR_PAUSE:
+ case MDP_WRITEBACK_MIRROR_RESUME:
+ case MDP_WRITEBACK_MIRROR_OFF:
+ pr_info("wfd state switched to %d\n", hint);
+ switch_set_state(&mfd->writeback_sdev, hint);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index 2423de5..f8b7f2f 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -2314,7 +2314,7 @@
pr_err("ion_map_iommu() read failed\n");
return -ENOMEM;
}
- if (mfd->mem_hid & ION_SECURE) {
+ if (mfd->mem_hid & ION_FLAG_SECURE) {
if (ion_phys(mfd->iclient, buf->ihdl,
&addr, (size_t *)&len)) {
pr_err("%s:%d: ion_phys map failed\n",
@@ -2377,7 +2377,7 @@
if (!IS_ERR_OR_NULL(mfd->iclient)) {
if (!IS_ERR_OR_NULL(buf->ihdl)) {
if (mdp_iommu_split_domain) {
- if (!(mfd->mem_hid & ION_SECURE))
+ if (!(mfd->mem_hid & ION_FLAG_SECURE))
ion_unmap_iommu(mfd->iclient, buf->ihdl,
DISPLAY_WRITE_DOMAIN, GEN_POOL);
ion_unmap_iommu(mfd->iclient, buf->ihdl,
diff --git a/drivers/video/msm/mdp4_wfd_writeback.c b/drivers/video/msm/mdp4_wfd_writeback.c
index d96fc7d..ba6c78b 100644
--- a/drivers/video/msm/mdp4_wfd_writeback.c
+++ b/drivers/video/msm/mdp4_wfd_writeback.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,6 +75,13 @@
platform_set_drvdata(mdp_dev, mfd);
+ mfd->writeback_sdev.name = "wfd";
+ rc = switch_dev_register(&mfd->writeback_sdev);
+ if (rc) {
+ pr_err("Failed to setup switch dev for writeback panel");
+ return rc;
+ }
+
rc = platform_device_add(mdp_dev);
if (rc) {
WRITEBACK_MSG_ERR("failed to add device");
@@ -84,8 +91,16 @@
return rc;
}
+static int writeback_remove(struct platform_device *pdev)
+{
+ struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+ switch_dev_unregister(&mfd->writeback_sdev);
+ return 0;
+}
+
static struct platform_driver writeback_driver = {
.probe = writeback_probe,
+ .remove = writeback_remove,
.driver = {
.name = "writeback",
},
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 7fafbc64..2c58e49 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -1,3 +1,6 @@
+mdss-mdp3-objs = mdp3.o mdp3_dma.o mdp3_ctrl.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp3.o
+
mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o
mdss-mdp-objs += mdss_mdp_pp.o
mdss-mdp-objs += mdss_mdp_intf_video.o
@@ -7,12 +10,14 @@
mdss-mdp-objs += mdss_mdp_overlay.o
mdss-mdp-objs += mdss_mdp_wb.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
-obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
ifeq ($(CONFIG_FB_MSM_MDSS),y)
obj-$(CONFIG_DEBUG_FS) += mdss_debug.o
endif
+dsi-v2-objs = dsi_v2.o dsi_host_v2.o dsi_io_v2.o dsi_panel_v2.o
+obj-$(CONFIG_FB_MSM_MDSS) += dsi-v2.o
+
mdss-dsi-objs := mdss_dsi.o mdss_dsi_host.o
mdss-dsi-objs += mdss_dsi_panel.o
mdss-dsi-objs += msm_mdss_io_8974.o
@@ -23,11 +28,14 @@
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_util.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_edid.o
-obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334) += mhl_sii8334.o mhl_msc.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_cec.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334) += mhl_sii8334.o mhl_msc.o
obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
mdss-qpic-objs := mdss_qpic.o mdss_fb.o mdss_qpic_panel.o
obj-$(CONFIG_FB_MSM_QPIC) += mdss-qpic.o
obj-$(CONFIG_FB_MSM_QPIC_ILI_QVGA_PANEL) += qpic_panel_ili_qvga.o
+
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
diff --git a/drivers/video/msm/mdss/dsi_host_v2.c b/drivers/video/msm/mdss/dsi_host_v2.c
new file mode 100644
index 0000000..887dde7
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_host_v2.c
@@ -0,0 +1,1040 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include "dsi_v2.h"
+#include "dsi_io_v2.h"
+#include "dsi_host_v2.h"
+
+#define DSI_POLL_SLEEP_US 1000
+#define DSI_POLL_TIMEOUT_US 16000
+#define DSI_ESC_CLK_RATE 19200000
+#define DSI_DMA_CMD_TIMEOUT_MS 200
+
+struct dsi_host_v2_private {
+ struct completion dma_comp;
+ int irq_enabled;
+ spinlock_t irq_lock;
+ spinlock_t mdp_lock;
+ int mdp_busy;
+ int irq_no;
+ unsigned char *dsi_base;
+ struct device dis_dev;
+};
+
+static struct dsi_host_v2_private *dsi_host_private;
+
+int msm_dsi_init(void)
+{
+ if (!dsi_host_private) {
+ dsi_host_private = kzalloc(sizeof(struct dsi_host_v2_private),
+ GFP_KERNEL);
+ if (!dsi_host_private) {
+ pr_err("failed to allocate dsi host private data\n");
+ return -ENOMEM;
+ }
+ }
+
+ init_completion(&dsi_host_private->dma_comp);
+ spin_lock_init(&dsi_host_private->irq_lock);
+ spin_lock_init(&dsi_host_private->mdp_lock);
+ return 0;
+}
+
+void msm_dsi_deinit(void)
+{
+ kfree(dsi_host_private);
+ dsi_host_private = NULL;
+}
+
+void msm_dsi_ack_err_status(unsigned char *ctrl_base)
+{
+ u32 status;
+
+ status = MIPI_INP(ctrl_base + DSI_ACK_ERR_STATUS);
+
+ if (status) {
+ MIPI_OUTP(ctrl_base + DSI_ACK_ERR_STATUS, status);
+ pr_debug("%s: status=%x\n", __func__, status);
+ }
+}
+
+void msm_dsi_timeout_status(unsigned char *ctrl_base)
+{
+ u32 status;
+
+ status = MIPI_INP(ctrl_base + DSI_TIMEOUT_STATUS);
+ if (status & 0x0111) {
+ MIPI_OUTP(ctrl_base + DSI_TIMEOUT_STATUS, status);
+ pr_debug("%s: status=%x\n", __func__, status);
+ }
+}
+
+void msm_dsi_dln0_phy_err(unsigned char *ctrl_base)
+{
+ u32 status;
+
+ status = MIPI_INP(ctrl_base + DSI_DLN0_PHY_ERR);
+
+ if (status & 0x011111) {
+ MIPI_OUTP(ctrl_base + DSI_DLN0_PHY_ERR, status);
+ pr_debug("%s: status=%x\n", __func__, status);
+ }
+}
+
+void msm_dsi_fifo_status(unsigned char *ctrl_base)
+{
+ u32 status;
+
+ status = MIPI_INP(ctrl_base + DSI_FIFO_STATUS);
+
+ if (status & 0x44444489) {
+ MIPI_OUTP(ctrl_base + DSI_FIFO_STATUS, status);
+ pr_debug("%s: status=%x\n", __func__, status);
+ }
+}
+
+void msm_dsi_status(unsigned char *ctrl_base)
+{
+ u32 status;
+
+ status = MIPI_INP(ctrl_base + DSI_STATUS);
+
+ if (status & 0x80000000) {
+ MIPI_OUTP(ctrl_base + DSI_STATUS, status);
+ pr_debug("%s: status=%x\n", __func__, status);
+ }
+}
+
+void msm_dsi_error(unsigned char *ctrl_base)
+{
+ msm_dsi_ack_err_status(ctrl_base);
+ msm_dsi_timeout_status(ctrl_base);
+ msm_dsi_fifo_status(ctrl_base);
+ msm_dsi_status(ctrl_base);
+ msm_dsi_dln0_phy_err(ctrl_base);
+}
+
+void msm_dsi_enable_irq(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dsi_host_private->irq_lock, flags);
+ if (dsi_host_private->irq_enabled) {
+ pr_debug("%s: IRQ already enabled\n", __func__);
+ spin_unlock_irqrestore(&dsi_host_private->irq_lock, flags);
+ return;
+ }
+
+ enable_irq(dsi_host_private->irq_no);
+ dsi_host_private->irq_enabled = 1;
+ spin_unlock_irqrestore(&dsi_host_private->irq_lock, flags);
+}
+
+void msm_dsi_disable_irq(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dsi_host_private->irq_lock, flags);
+ if (dsi_host_private->irq_enabled == 0) {
+ pr_debug("%s: IRQ already disabled\n", __func__);
+ spin_unlock_irqrestore(&dsi_host_private->irq_lock, flags);
+ return;
+ }
+ disable_irq(dsi_host_private->irq_no);
+ dsi_host_private->irq_enabled = 0;
+ spin_unlock_irqrestore(&dsi_host_private->irq_lock, flags);
+}
+
+void msm_dsi_disable_irq_nosync(void)
+{
+ spin_lock(&dsi_host_private->irq_lock);
+ if (dsi_host_private->irq_enabled == 0) {
+ pr_debug("%s: IRQ already disabled\n", __func__);
+ spin_unlock(&dsi_host_private->irq_lock);
+ return;
+ }
+ disable_irq_nosync(dsi_host_private->irq_no);
+ dsi_host_private->irq_enabled = 0;
+ spin_unlock(&dsi_host_private->irq_lock);
+}
+
+irqreturn_t msm_dsi_isr(int irq, void *ptr)
+{
+ u32 isr;
+
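+ /* read the pending interrupt status and write it back to clear it */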
+ isr = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+ MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, isr);
+
+ if (isr & DSI_INTR_ERROR)
+ msm_dsi_error(dsi_host_private->dsi_base);
+
+ if (isr & DSI_INTR_CMD_DMA_DONE)
+ complete(&dsi_host_private->dma_comp);
+
+ if (isr & DSI_INTR_CMD_MDP_DONE) {
+ spin_lock(&dsi_host_private->mdp_lock);
+ dsi_host_private->mdp_busy = false;
+ msm_dsi_disable_irq_nosync();
+ spin_unlock(&dsi_host_private->mdp_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int msm_dsi_irq_init(struct device *dev, int irq_no)
+{
+ int ret;
+
+ ret = devm_request_irq(dev, irq_no, msm_dsi_isr,
+ IRQF_DISABLED, "DSI", NULL);
+ if (ret) {
+ pr_err("msm_dsi_irq_init request_irq() failed!\n");
+ return ret;
+ }
+ dsi_host_private->irq_no = irq_no;
+ disable_irq(irq_no);
+ return 0;
+}
+
+void msm_dsi_host_init(struct mipi_panel_info *pinfo)
+{
+ u32 dsi_ctrl, intr_ctrl, data;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ pr_debug("msm_dsi_host_init\n");
+ pinfo->rgb_swap = DSI_RGB_SWAP_RGB;
+
+ if (pinfo->mode == DSI_VIDEO_MODE) {
+ data = 0;
+ if (pinfo->pulse_mode_hsa_he)
+ data |= BIT(28);
+ if (pinfo->hfp_power_stop)
+ data |= BIT(24);
+ if (pinfo->hbp_power_stop)
+ data |= BIT(20);
+ if (pinfo->hsa_power_stop)
+ data |= BIT(16);
+ if (pinfo->eof_bllp_power_stop)
+ data |= BIT(15);
+ if (pinfo->bllp_power_stop)
+ data |= BIT(12);
+ data |= ((pinfo->traffic_mode & 0x03) << 8);
+ data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
+ data |= (pinfo->vc & 0x03);
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_CTRL, data);
+
+ data = 0;
+ data |= ((pinfo->rgb_swap & 0x07) << 12);
+ if (pinfo->b_sel)
+ data |= BIT(8);
+ if (pinfo->g_sel)
+ data |= BIT(4);
+ if (pinfo->r_sel)
+ data |= BIT(0);
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_DATA_CTRL, data);
+ } else if (pinfo->mode == DSI_CMD_MODE) {
+ data = 0;
+ data |= ((pinfo->interleave_max & 0x0f) << 20);
+ data |= ((pinfo->rgb_swap & 0x07) << 16);
+ if (pinfo->b_sel)
+ data |= BIT(12);
+ if (pinfo->g_sel)
+ data |= BIT(8);
+ if (pinfo->r_sel)
+ data |= BIT(4);
+ data |= (pinfo->dst_format & 0x0f); /* 4 bits */
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_CTRL, data);
+
+ /* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
+ data = pinfo->wr_mem_continue & 0x0ff;
+ data <<= 8;
+ data |= (pinfo->wr_mem_start & 0x0ff);
+ if (pinfo->insert_dcs_cmd)
+ data |= BIT(16);
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL,
+ data);
+ } else
+ pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);
+
+ dsi_ctrl = BIT(8) | BIT(2); /* clock enable & cmd mode */
+ intr_ctrl = 0;
+ intr_ctrl = (DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_CMD_MDP_DONE_MASK);
+
+ if (pinfo->crc_check)
+ dsi_ctrl |= BIT(24);
+ if (pinfo->ecc_check)
+ dsi_ctrl |= BIT(20);
+ if (pinfo->data_lane3)
+ dsi_ctrl |= BIT(7);
+ if (pinfo->data_lane2)
+ dsi_ctrl |= BIT(6);
+ if (pinfo->data_lane1)
+ dsi_ctrl |= BIT(5);
+ if (pinfo->data_lane0)
+ dsi_ctrl |= BIT(4);
+
+ /* from frame buffer, low power mode */
+ /* DSI_COMMAND_MODE_DMA_CTRL */
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, 0x14000000);
+
+ data = 0;
+ if (pinfo->te_sel)
+ data |= BIT(31);
+ data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
+ data |= pinfo->dma_trigger; /* cmd dma trigger */
+ data |= (pinfo->stream & 0x01) << 8;
+ MIPI_OUTP(ctrl_base + DSI_TRIG_CTRL, data);
+
+ /* DSI_LANE_SWAP_CTRL */
+ MIPI_OUTP(ctrl_base + DSI_LANE_SWAP_CTRL, pinfo->dlane_swap);
+
+ /* clock out ctrl */
+ data = pinfo->t_clk_post & 0x3f; /* 6 bits */
+ data <<= 8;
+ data |= pinfo->t_clk_pre & 0x3f; /* 6 bits */
+ /* DSI_CLKOUT_TIMING_CTRL */
+ MIPI_OUTP(ctrl_base + DSI_CLKOUT_TIMING_CTRL, data);
+
+ data = 0;
+ if (pinfo->rx_eot_ignore)
+ data |= BIT(4);
+ if (pinfo->tx_eot_append)
+ data |= BIT(0);
+ MIPI_OUTP(ctrl_base + DSI_EOT_PACKET_CTRL, data);
+
+
+ /* allow only ack-err-status to generate interrupt */
+ /* DSI_ERR_INT_MASK0 */
+ MIPI_OUTP(ctrl_base + DSI_ERR_INT_MASK0, 0x13ff3fe0);
+
+ intr_ctrl |= DSI_INTR_ERROR_MASK;
+ MIPI_OUTP(ctrl_base + DSI_INT_CTRL, intr_ctrl);
+
+ /* turn esc, byte, dsi, pclk, sclk, hclk on */
+ MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0x23f);
+
+ dsi_ctrl |= BIT(0); /* enable dsi */
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+
+ wmb();
+}
+
+void msm_dsi_set_tx_power_mode(int mode)
+{
+ u32 data;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ data = MIPI_INP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL);
+
+ if (mode == 0)
+ data &= ~BIT(26);
+ else
+ data |= BIT(26);
+
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, data);
+}
+
+void msm_dsi_sw_reset(void)
+{
+ u32 dsi_ctrl;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ pr_debug("msm_dsi_sw_reset\n");
+
+ dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+ dsi_ctrl &= ~0x01;
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+ wmb();
+
+ /* turn esc, byte, dsi, pclk, sclk, hclk on */
+ MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0x23f);
+ wmb();
+
+ MIPI_OUTP(ctrl_base + DSI_SOFT_RESET, 0x01);
+ wmb();
+ MIPI_OUTP(ctrl_base + DSI_SOFT_RESET, 0x00);
+ wmb();
+}
+
+void msm_dsi_controller_cfg(int enable)
+{
+ u32 dsi_ctrl, status;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ pr_debug("msm_dsi_controller_cfg\n");
+
+ /* Check for CMD_MODE_DMA_BUSY */
+ if (readl_poll_timeout((ctrl_base + DSI_STATUS),
+ status,
+ ((status & 0x02) == 0),
+ DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US))
+ pr_err("%s: DSI status=%x failed\n", __func__, status);
+
+ /* Check for x_HS_FIFO_EMPTY */
+ if (readl_poll_timeout((ctrl_base + DSI_FIFO_STATUS),
+ status,
+ ((status & 0x11111000) == 0x11111000),
+ DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US))
+ pr_err("%s: FIFO status=%x failed\n", __func__, status);
+
+ /* Check for VIDEO_MODE_ENGINE_BUSY */
+ if (readl_poll_timeout((ctrl_base + DSI_STATUS),
+ status,
+ ((status & 0x08) == 0),
+ DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US)) {
+ pr_err("%s: DSI status=%x\n", __func__, status);
+ pr_err("%s: Doing sw reset\n", __func__);
+ msm_dsi_sw_reset();
+ }
+
+ dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+ if (enable)
+ dsi_ctrl |= 0x01;
+ else
+ dsi_ctrl &= ~0x01;
+
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+ wmb();
+}
+
+void msm_dsi_op_mode_config(int mode, struct mdss_panel_data *pdata)
+{
+ u32 dsi_ctrl, intr_ctrl;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ pr_debug("msm_dsi_op_mode_config\n");
+
+ dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+ /* If Video enabled, keep Video and Cmd mode ON */
+ dsi_ctrl &= ~0x06;
+
+ if (mode == DSI_VIDEO_MODE) {
+ dsi_ctrl |= 0x02;
+ intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK;
+ } else { /* command mode */
+ dsi_ctrl |= 0x04;
+
+ intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
+ DSI_INTR_CMD_MDP_DONE_MASK;
+ }
+
+ pr_debug("%s: dsi_ctrl=%x intr=%x\n", __func__, dsi_ctrl, intr_ctrl);
+
+ MIPI_OUTP(ctrl_base + DSI_INT_CTRL, intr_ctrl);
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+ wmb();
+}
+
+void msm_dsi_cmd_mdp_start(void)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&dsi_host_private->mdp_lock, flag);
+ msm_dsi_enable_irq();
+ dsi_host_private->mdp_busy = true;
+ spin_unlock_irqrestore(&dsi_host_private->mdp_lock, flag);
+}
+
+int msm_dsi_cmd_reg_tx(u32 data)
+{
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ MIPI_OUTP(ctrl_base + DSI_TRIG_CTRL, 0x04);/* sw trigger */
+ MIPI_OUTP(ctrl_base + DSI_CTRL, 0x135);
+ wmb();
+
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, data);
+ wmb();
+ MIPI_OUTP(ctrl_base + DSI_CMD_MODE_DMA_SW_TRIGGER, 0x01);
+ wmb();
+
+ udelay(300); /*per spec*/
+
+ return 0;
+}
+
+int msm_dsi_cmd_dma_tx(struct dsi_buf *tp)
+{
+ int len, rc;
+ unsigned long size, addr;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ len = ALIGN(tp->len, 4);
+ size = ALIGN(tp->len, SZ_4K);
+
+ tp->dmap = dma_map_single(&dsi_host_private->dis_dev, tp->data, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dsi_host_private->dis_dev, tp->dmap)) {
+ pr_err("%s: dmap mapp failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ addr = tp->dmap;
+
+ INIT_COMPLETION(dsi_host_private->dma_comp);
+
+ MIPI_OUTP(ctrl_base + DSI_DMA_CMD_OFFSET, addr);
+ MIPI_OUTP(ctrl_base + DSI_DMA_CMD_LENGTH, len);
+ wmb();
+
+ MIPI_OUTP(ctrl_base + DSI_CMD_MODE_DMA_SW_TRIGGER, 0x01);
+ wmb();
+
+ rc = wait_for_completion_timeout(&dsi_host_private->dma_comp,
+ msecs_to_jiffies(DSI_DMA_CMD_TIMEOUT_MS));
+ if (rc == 0) {
+ pr_err("DSI command transaction time out\n");
+ rc = -ETIME;
+ }
+
+ dma_unmap_single(&dsi_host_private->dis_dev, tp->dmap, size,
+ DMA_TO_DEVICE);
+ tp->dmap = 0;
+ return rc;
+}
+
+int msm_dsi_cmd_dma_rx(struct dsi_buf *rp, int rlen)
+{
+ u32 *lp, data;
+ int i, off, cnt;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ lp = (u32 *)rp->data;
+ cnt = rlen;
+ cnt += 3;
+ cnt >>= 2;
+
+ if (cnt > 4)
+ cnt = 4; /* 4 x 32 bits registers only */
+
+ off = DSI_RDBK_DATA0;
+ off += ((cnt - 1) * 4);
+
+ for (i = 0; i < cnt; i++) {
+ data = (u32)MIPI_INP(ctrl_base + off);
+ *lp++ = ntohl(data); /* to host byte order */
+ pr_debug("%s: data = 0x%x and ntohl(data) = 0x%x\n",
+ __func__, data, ntohl(data));
+ off -= 4;
+ rp->len += sizeof(*lp);
+ }
+
+ return 0;
+}
+
+int msm_dsi_cmds_tx(struct mdss_panel_data *pdata,
+ struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
+{
+ struct dsi_cmd_desc *cm;
+ u32 dsi_ctrl, ctrl;
+ int i, video_mode;
+ unsigned long flag;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ /* Turn on cmd mode.
+ * In video mode, do not send cmds longer than
+ * one pixel line, since they are only transmitted
+ * during BLLP.
+ */
+ dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+ video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
+ if (video_mode) {
+ ctrl = dsi_ctrl | 0x04; /* CMD_MODE_EN */
+ MIPI_OUTP(ctrl_base + DSI_CTRL, ctrl);
+ }
+
+ spin_lock_irqsave(&dsi_host_private->mdp_lock, flag);
+ msm_dsi_enable_irq();
+ dsi_host_private->mdp_busy = true;
+ spin_unlock_irqrestore(&dsi_host_private->mdp_lock, flag);
+
+ cm = cmds;
+ dsi_buf_init(tp);
+ for (i = 0; i < cnt; i++) {
+ dsi_buf_init(tp);
+ dsi_cmd_dma_add(tp, cm);
+ msm_dsi_cmd_dma_tx(tp);
+ if (cm->wait)
+ msleep(cm->wait);
+ cm++;
+ }
+
+ spin_lock_irqsave(&dsi_host_private->mdp_lock, flag);
+ dsi_host_private->mdp_busy = false;
+ msm_dsi_disable_irq();
+ spin_unlock_irqrestore(&dsi_host_private->mdp_lock, flag);
+
+ if (video_mode)
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+ return 0;
+}
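+
+/*
+ * Usage sketch for msm_dsi_cmds_tx() (illustrative only; the command
+ * byte and delay below are hypothetical, not a real panel sequence;
+ * the descriptor field order follows pkt_size_cmd below):
+ *
+ *	static char exit_sleep[1] = {0x11};  (DCS exit_sleep_mode)
+ *	static struct dsi_cmd_desc demo_cmd = {
+ *		DTYPE_DCS_LWRITE, 1, 0, 0, 120,
+ *		sizeof(exit_sleep), exit_sleep};
+ *	msm_dsi_cmds_tx(pdata, &tx_buf, &demo_cmd, 1);
+ *
+ * Each descriptor is packed by dsi_cmd_dma_add(), sent through
+ * msm_dsi_cmd_dma_tx(), and the per-command wait (here 120 ms) is
+ * honoured via msleep() before the next command is sent.
+ */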
+
+/* MDSS_DSI_MRPS, Maximum Return Packet Size */
+static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first; [0] is set at run time */
+
+static struct dsi_cmd_desc pkt_size_cmd[] = {
+ {DTYPE_MAX_PKTSIZE, 1, 0, 0, 0,
+ sizeof(max_pktsize), max_pktsize}
+};
+
+/*
+ * The DSI panel replies with up to MAX_RETURN_PACKET_SIZE bytes of data,
+ * plus the DCS header, ECC and CRC for a DCS long read response.
+ * The mdss_dsi controller only has 4 x 32-bit registers (16 bytes) to
+ * hold the data per transaction.
+ * MDSS_DSI_LEN is equal to 8.
+ * len should be either 4 or 8.
+ * Any return data longer than MDSS_DSI_LEN needs to be broken down
+ * into multiple transactions.
+ *
+ * ov_mutex needs to be acquired before calling this function.
+ */
+int msm_dsi_cmds_rx(struct mdss_panel_data *pdata,
+ struct dsi_buf *tp, struct dsi_buf *rp,
+ struct dsi_cmd_desc *cmds, int rlen)
+{
+ int cnt, len, diff, pkt_size;
+ unsigned long flag;
+ char cmd;
+
+ if (pdata->panel_info.mipi.no_max_pkt_size)
+ rlen = ALIGN(rlen, 4); /* Only support rlen = 4*n */
+
+ len = rlen;
+ diff = 0;
+
+ if (len <= 2) {
+ cnt = 4; /* short read */
+ } else {
+ if (len > DSI_LEN)
+ len = DSI_LEN; /* 8 bytes at most */
+
+ len = ALIGN(len, 4); /* len 4 bytes align */
+ diff = len - rlen;
+ /*
+ * Add an extra 2 bytes to len so that the overall
+ * packet size is a multiple of 4. This also makes
+ * sure the 4-byte DCS header lands within a single
+ * 32-bit register after shift-in.
+ * After this, len should be either 6 or 10.
+ */
+ len += 2;
+ cnt = len + 6; /* 4 bytes header + 2 bytes crc */
+ }
+
+ spin_lock_irqsave(&dsi_host_private->mdp_lock, flag);
+ msm_dsi_enable_irq();
+ dsi_host_private->mdp_busy = true;
+ spin_unlock_irqrestore(&dsi_host_private->mdp_lock, flag);
+
+ if (!pdata->panel_info.mipi.no_max_pkt_size) {
+ /* packet size needs to be set for every read */
+ pkt_size = len;
+ max_pktsize[0] = pkt_size;
+ dsi_buf_init(tp);
+ dsi_cmd_dma_add(tp, pkt_size_cmd);
+ msm_dsi_cmd_dma_tx(tp);
+ pr_debug("%s: Max packet size sent\n", __func__);
+ }
+
+ dsi_buf_init(tp);
+ dsi_cmd_dma_add(tp, cmds);
+
+ /* transmit read command to client */
+ msm_dsi_cmd_dma_tx(tp);
+ /*
+ * Once the cmd_dma_done interrupt is received, the return
+ * data from the client is ready and already stored in the
+ * RDBK_DATA registers.
+ */
+ dsi_buf_init(rp);
+ if (pdata->panel_info.mipi.no_max_pkt_size) {
+ /*
+ * expect rlen = n * 4
+ * short alignment for start addr
+ */
+ rp->data += 2;
+ }
+
+ msm_dsi_cmd_dma_rx(rp, cnt);
+
+ spin_lock_irqsave(&dsi_host_private->mdp_lock, flag);
+ dsi_host_private->mdp_busy = false;
+ msm_dsi_disable_irq();
+ spin_unlock_irqrestore(&dsi_host_private->mdp_lock, flag);
+
+ if (pdata->panel_info.mipi.no_max_pkt_size) {
+ /*
+ * Remove the extra 2 bytes from the previous rx
+ * transaction in the shift register, which were
+ * inserted while copying the shift registers to
+ * the rx buffer; the rx payload starts at a
+ * long-aligned address.
+ */
+ rp->data += 2;
+ }
+
+ cmd = rp->data[0];
+ switch (cmd) {
+ case DTYPE_ACK_ERR_RESP:
+ pr_debug("%s: rx ACK_ERR_PACLAGE\n", __func__);
+ break;
+ case DTYPE_GEN_READ1_RESP:
+ case DTYPE_DCS_READ1_RESP:
+ dsi_short_read1_resp(rp);
+ break;
+ case DTYPE_GEN_READ2_RESP:
+ case DTYPE_DCS_READ2_RESP:
+ dsi_short_read2_resp(rp);
+ break;
+ case DTYPE_GEN_LREAD_RESP:
+ case DTYPE_DCS_LREAD_RESP:
+ dsi_long_read_resp(rp);
+ rp->len -= 2; /* extra 2 bytes added */
+ rp->len -= diff; /* align bytes */
+ break;
+ default:
+ pr_debug("%s: Unknown cmd received\n", __func__);
+ break;
+ }
+
+ return rp->len;
+}
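+
+/*
+ * Worked example of the length bookkeeping above (hypothetical 4-byte
+ * register read with no_max_pkt_size == 0): rlen = 4 gives len = 4
+ * (<= DSI_LEN), diff = 0, then len += 2 -> 6 is sent to the panel as
+ * the maximum return packet size, and cnt = 6 + 6 = 12 bytes (4-byte
+ * header + 6-byte payload + 2-byte CRC) are read back from the
+ * RDBK_DATA registers. After dsi_long_read_resp() the extra 2 bytes
+ * and the diff alignment bytes are subtracted from rp->len, leaving
+ * only the requested payload.
+ */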
+
+static int msm_dsi_cal_clk_rate(struct mdss_panel_data *pdata,
+ u32 *bitclk_rate,
+ u32 *dsiclk_rate,
+ u32 *byteclk_rate,
+ u32 *pclk_rate)
+{
+ struct mdss_panel_info *pinfo;
+ struct mipi_panel_info *mipi;
+ u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+ int lanes;
+
+ pinfo = &pdata->panel_info;
+ mipi = &pdata->panel_info.mipi;
+
+ hbp = pdata->panel_info.lcdc.h_back_porch;
+ hfp = pdata->panel_info.lcdc.h_front_porch;
+ vbp = pdata->panel_info.lcdc.v_back_porch;
+ vfp = pdata->panel_info.lcdc.v_front_porch;
+ hspw = pdata->panel_info.lcdc.h_pulse_width;
+ vspw = pdata->panel_info.lcdc.v_pulse_width;
+ width = pdata->panel_info.xres;
+ height = pdata->panel_info.yres;
+
+ lanes = 0;
+ if (mipi->data_lane0)
+ lanes++;
+ if (mipi->data_lane1)
+ lanes++;
+ if (mipi->data_lane2)
+ lanes++;
+ if (mipi->data_lane3)
+ lanes++;
+ if (lanes == 0)
+ return -EINVAL;
+
+ *bitclk_rate = (width + hbp + hfp + hspw) * (height + vbp + vfp + vspw);
+ *bitclk_rate *= mipi->frame_rate;
+ *bitclk_rate *= pdata->panel_info.bpp;
+ *bitclk_rate /= lanes;
+
+ *byteclk_rate = *bitclk_rate / 8;
+ *dsiclk_rate = *byteclk_rate * lanes;
+ *pclk_rate = *byteclk_rate * lanes * 8 / pdata->panel_info.bpp;
+
+ pr_debug("dsiclk_rate=%u, byteclk=%u, pck_=%u\n",
+ *dsiclk_rate, *byteclk_rate, *pclk_rate);
+ return 0;
+}
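+
+/*
+ * Worked example for msm_dsi_cal_clk_rate() (hypothetical 480x800
+ * panel, 60 fps, 24 bpp, 2 data lanes, h/v porches of 6/6 and pulse
+ * width 2):
+ *	htotal = 480 + 6 + 6 + 2 = 494, vtotal = 800 + 6 + 6 + 2 = 814
+ *	bitclk  = 494 * 814 * 60 * 24 / 2 = 289,523,520 Hz per lane
+ *	byteclk = bitclk / 8              =  36,190,440 Hz
+ *	dsiclk  = byteclk * 2             =  72,380,880 Hz
+ *	pclk    = byteclk * 2 * 8 / 24    =  24,126,960 Hz
+ * and pclk equals htotal * vtotal * frame_rate, as expected.
+ */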
+
+static int msm_dsi_on(struct mdss_panel_data *pdata)
+{
+ int ret = 0;
+ u32 clk_rate;
+ struct mdss_panel_info *pinfo;
+ struct mipi_panel_info *mipi;
+ u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+ u32 ystride, bpp, data;
+ u32 dummy_xres, dummy_yres;
+ u32 bitclk_rate = 0, byteclk_rate = 0, pclk_rate = 0, dsiclk_rate = 0;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ pr_debug("msm_dsi_on\n");
+
+ pinfo = &pdata->panel_info;
+
+ ret = msm_dsi_regulator_enable();
+ if (ret) {
+ pr_err("%s: DSI power on failed\n", __func__);
+ return ret;
+ }
+
+ msm_dsi_ahb_ctrl(1);
+ msm_dsi_phy_sw_reset(dsi_host_private->dsi_base);
+ msm_dsi_phy_init(dsi_host_private->dsi_base, pdata);
+
+ msm_dsi_cal_clk_rate(pdata, &bitclk_rate, &dsiclk_rate,
+ &byteclk_rate, &pclk_rate);
+ msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, dsiclk_rate,
+ byteclk_rate, pclk_rate);
+ msm_dsi_prepare_clocks();
+ msm_dsi_clk_enable();
+
+ clk_rate = pdata->panel_info.clk_rate;
+ clk_rate = min(clk_rate, pdata->panel_info.clk_max);
+
+ hbp = pdata->panel_info.lcdc.h_back_porch;
+ hfp = pdata->panel_info.lcdc.h_front_porch;
+ vbp = pdata->panel_info.lcdc.v_back_porch;
+ vfp = pdata->panel_info.lcdc.v_front_porch;
+ hspw = pdata->panel_info.lcdc.h_pulse_width;
+ vspw = pdata->panel_info.lcdc.v_pulse_width;
+ width = pdata->panel_info.xres;
+ height = pdata->panel_info.yres;
+
+ mipi = &pdata->panel_info.mipi;
+ if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+ dummy_xres = pdata->panel_info.lcdc.xres_pad;
+ dummy_yres = pdata->panel_info.lcdc.yres_pad;
+
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_ACTIVE_H,
+ ((hspw + hbp + width + dummy_xres) << 16 |
+ (hspw + hbp)));
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_ACTIVE_V,
+ ((vspw + vbp + height + dummy_yres) << 16 |
+ (vspw + vbp)));
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_TOTAL,
+ (vspw + vbp + height + dummy_yres +
+ vfp - 1) << 16 | (hspw + hbp +
+ width + dummy_xres + hfp - 1));
+
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_HSYNC, (hspw << 16));
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_VSYNC, 0);
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_VSYNC_VPOS,
+ (vspw << 16));
+
+ } else { /* command mode */
+ if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+ bpp = 3;
+ else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
+ bpp = 3;
+ else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+ bpp = 2;
+ else
+ bpp = 3; /* Default format set to RGB888 */
+
+ ystride = width * bpp + 1;
+
+ data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM0_CTRL,
+ data);
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM1_CTRL,
+ data);
+
+ data = height << 16 | width;
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM1_TOTAL,
+ data);
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM0_TOTAL,
+ data);
+ }
+
+ msm_dsi_sw_reset();
+ msm_dsi_host_init(mipi);
+
+ if (mipi->force_clk_lane_hs) {
+ u32 tmp;
+
+ tmp = MIPI_INP(ctrl_base + DSI_LANE_CTRL);
+ tmp |= (1<<28);
+ MIPI_OUTP(ctrl_base + DSI_LANE_CTRL, tmp);
+ wmb();
+ }
+
+ msm_dsi_op_mode_config(mipi->mode, pdata);
+
+ return ret;
+}
+
+static int msm_dsi_off(struct mdss_panel_data *pdata)
+{
+ int ret = 0;
+
+ pr_debug("msm_dsi_off\n");
+ msm_dsi_controller_cfg(0);
+ msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, 0, 0, 0);
+ msm_dsi_clk_disable();
+ msm_dsi_unprepare_clocks();
+
+ msm_dsi_ahb_ctrl(0);
+
+ ret = msm_dsi_regulator_disable();
+ if (ret) {
+ pr_err("%s: Panel power off failed\n", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int __devinit msm_dsi_probe(struct platform_device *pdev)
+{
+ struct dsi_interface intf;
+ int rc = 0;
+
+ pr_debug("%s\n", __func__);
+
+ rc = msm_dsi_init();
+ if (rc)
+ return rc;
+
+ if (pdev->dev.of_node) {
+ struct resource *mdss_dsi_mres;
+ pdev->id = 0;
+ mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mdss_dsi_mres) {
+ pr_err("%s:%d unable to get the MDSS reg resources",
+ __func__, __LINE__);
+ return -ENOMEM;
+ } else {
+ dsi_host_private->dsi_base = ioremap(
+ mdss_dsi_mres->start,
+ resource_size(mdss_dsi_mres));
+ if (!dsi_host_private->dsi_base) {
+ pr_err("%s:%d unable to remap dsi resources",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
+ mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mdss_dsi_mres || mdss_dsi_mres->start == 0) {
+ pr_err("%s:%d unable to get the MDSS irq resources",
+ __func__, __LINE__);
+ rc = -ENODEV;
+ goto dsi_probe_error;
+ } else {
+ rc = msm_dsi_irq_init(&pdev->dev, mdss_dsi_mres->start);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s: failed to init irq, rc=%d\n",
+ __func__, rc);
+ goto dsi_probe_error;
+ }
+ }
+
+ rc = msm_dsi_io_init(pdev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s: failed to init DSI IO, rc=%d\n",
+ __func__, rc);
+ goto dsi_probe_error;
+ }
+
+ rc = of_platform_populate(pdev->dev.of_node,
+ NULL, NULL, &pdev->dev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s: failed to add child nodes, rc=%d\n",
+ __func__, rc);
+ goto dsi_probe_error;
+ }
+
+ }
+
+ dsi_host_private->dis_dev = pdev->dev;
+ intf.on = msm_dsi_on;
+ intf.off = msm_dsi_off;
+ intf.op_mode_config = msm_dsi_op_mode_config;
+ intf.tx = msm_dsi_cmds_tx;
+ intf.rx = msm_dsi_cmds_rx;
+ intf.index = 0;
+ intf.private = NULL;
+ dsi_register_interface(&intf);
+ pr_debug("%s success\n", __func__);
+ return 0;
+dsi_probe_error:
+ if (dsi_host_private->dsi_base) {
+ iounmap(dsi_host_private->dsi_base);
+ dsi_host_private->dsi_base = NULL;
+ }
+ msm_dsi_io_deinit();
+ msm_dsi_deinit();
+ return rc;
+}
+
+static int __devexit msm_dsi_remove(struct platform_device *pdev)
+{
+ msm_dsi_disable_irq();
+ msm_dsi_io_deinit();
+ iounmap(dsi_host_private->dsi_base);
+ dsi_host_private->dsi_base = NULL;
+ msm_dsi_deinit();
+ return 0;
+}
+
+static const struct of_device_id msm_dsi_v2_dt_match[] = {
+ {.compatible = "qcom,msm-dsi-v2"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_dsi_v2_dt_match);
+
+static struct platform_driver msm_dsi_v2_driver = {
+ .probe = msm_dsi_probe,
+ .remove = __devexit_p(msm_dsi_remove),
+ .shutdown = NULL,
+ .driver = {
+ .name = "msm_dsi_v2",
+ .of_match_table = msm_dsi_v2_dt_match,
+ },
+};
+
+static int msm_dsi_v2_register_driver(void)
+{
+ return platform_driver_register(&msm_dsi_v2_driver);
+}
+
+static int __init msm_dsi_v2_driver_init(void)
+{
+ int ret;
+
+ ret = msm_dsi_v2_register_driver();
+ if (ret) {
+ pr_err("msm_dsi_v2_register_driver() failed!\n");
+ return ret;
+ }
+
+ return ret;
+}
+module_init(msm_dsi_v2_driver_init);
+
+static void __exit msm_dsi_v2_driver_cleanup(void)
+{
+ platform_driver_unregister(&msm_dsi_v2_driver);
+}
+module_exit(msm_dsi_v2_driver_cleanup);
diff --git a/drivers/video/msm/mdss/dsi_host_v2.h b/drivers/video/msm/mdss/dsi_host_v2.h
new file mode 100644
index 0000000..cec9774
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_host_v2.h
@@ -0,0 +1,169 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef DSI_HOST_V2_H
+#define DSI_HOST_V2_H
+
+#include <linux/bitops.h>
+
+#define DSI_INTR_ERROR_MASK BIT(25)
+#define DSI_INTR_ERROR BIT(24)
+#define DSI_INTR_VIDEO_DONE_MASK BIT(17)
+#define DSI_INTR_VIDEO_DONE BIT(16)
+#define DSI_INTR_CMD_MDP_DONE_MASK BIT(9)
+#define DSI_INTR_CMD_MDP_DONE BIT(8)
+#define DSI_INTR_CMD_DMA_DONE_MASK BIT(1)
+#define DSI_INTR_CMD_DMA_DONE BIT(0)
+
+#define DSI_CTRL 0x0000
+#define DSI_STATUS 0x0004
+#define DSI_FIFO_STATUS 0x0008
+#define DSI_VIDEO_MODE_CTRL 0x000C
+#define DSI_VIDEO_MODE_DATA_CTRL 0x001C
+#define DSI_VIDEO_MODE_ACTIVE_H 0x0020
+#define DSI_VIDEO_MODE_ACTIVE_V 0x0024
+#define DSI_VIDEO_MODE_TOTAL 0x0028
+#define DSI_VIDEO_MODE_HSYNC 0x002C
+#define DSI_VIDEO_MODE_VSYNC 0x0030
+#define DSI_VIDEO_MODE_VSYNC_VPOS 0x0034
+#define DSI_COMMAND_MODE_DMA_CTRL 0x0038
+#define DSI_COMMAND_MODE_MDP_CTRL 0x003C
+#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL 0x0040
+#define DSI_DMA_CMD_OFFSET 0x0044
+#define DSI_DMA_CMD_LENGTH 0x0048
+#define DSI_DMA_FIFO_CTRL 0x004C
+#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL 0x0054
+#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL 0x0058
+#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL 0x005C
+#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL 0x0060
+#define DSI_ACK_ERR_STATUS 0x0064
+#define DSI_RDBK_DATA0 0x0068
+#define DSI_RDBK_DATA1 0x006C
+#define DSI_RDBK_DATA2 0x0070
+#define DSI_RDBK_DATA3 0x0074
+#define DSI_RDBK_DATATYPE0 0x0078
+#define DSI_RDBK_DATATYPE1 0x007C
+#define DSI_TRIG_CTRL 0x0080
+#define DSI_EXT_MUX 0x0084
+#define DSI_EXT_TE_PULSE_DETECT_CTRL 0x0088
+#define DSI_CMD_MODE_DMA_SW_TRIGGER 0x008C
+#define DSI_CMD_MODE_MDP_SW_TRIGGER 0x0090
+#define DSI_CMD_MODE_BTA_SW_TRIGGER 0x0094
+#define DSI_RESET_SW_TRIGGER 0x0098
+#define DSI_LANE_CTRL 0x00A8
+#define DSI_LANE_SWAP_CTRL 0x00AC
+#define DSI_DLN0_PHY_ERR 0x00B0
+#define DSI_TIMEOUT_STATUS 0x00BC
+#define DSI_CLKOUT_TIMING_CTRL 0x00C0
+#define DSI_EOT_PACKET 0x00C4
+#define DSI_EOT_PACKET_CTRL 0x00C8
+#define DSI_ERR_INT_MASK0 0x0108
+#define DSI_INT_CTRL 0x010c
+#define DSI_SOFT_RESET 0x0114
+#define DSI_CLK_CTRL 0x0118
+#define DSI_CLK_STATUS 0x011C
+#define DSI_PHY_SW_RESET 0x0128
+#define DSI_COMMAND_MODE_MDP_IDLE_CTRL 0x0190
+#define DSI_VERSION 0x01F0
+
+#define DSI_DSIPHY_PLL_CTRL_0 0x0200
+#define DSI_DSIPHY_PLL_CTRL_1 0x0204
+#define DSI_DSIPHY_PLL_CTRL_2 0x0208
+#define DSI_DSIPHY_PLL_CTRL_3 0x020C
+#define DSI_DSIPHY_PLL_CTRL_4 0x0210
+#define DSI_DSIPHY_PLL_CTRL_5 0x0214
+#define DSI_DSIPHY_PLL_CTRL_6 0x0218
+#define DSI_DSIPHY_PLL_CTRL_7 0x021C
+#define DSI_DSIPHY_PLL_CTRL_8 0x0220
+#define DSI_DSIPHY_PLL_CTRL_9 0x0224
+#define DSI_DSIPHY_PLL_CTRL_10 0x0228
+#define DSI_DSIPHY_PLL_CTRL_11 0x022C
+#define DSI_DSIPHY_PLL_CTRL_12 0x0230
+#define DSI_DSIPHY_PLL_CTRL_13 0x0234
+#define DSI_DSIPHY_PLL_CTRL_14 0x0238
+#define DSI_DSIPHY_PLL_CTRL_15 0x023C
+#define DSI_DSIPHY_PLL_CTRL_16 0x0240
+#define DSI_DSIPHY_PLL_CTRL_17 0x0244
+#define DSI_DSIPHY_PLL_CTRL_18 0x0248
+#define DSI_DSIPHY_PLL_CTRL_19 0x024C
+#define DSI_DSIPHY_ANA_CTRL0 0x0260
+#define DSI_DSIPHY_ANA_CTRL1 0x0264
+#define DSI_DSIPHY_ANA_CTRL2 0x0268
+#define DSI_DSIPHY_ANA_CTRL3 0x026C
+#define DSI_DSIPHY_ANA_CTRL4 0x0270
+#define DSI_DSIPHY_ANA_CTRL5 0x0274
+#define DSI_DSIPHY_ANA_CTRL6 0x0278
+#define DSI_DSIPHY_ANA_CTRL7 0x027C
+#define DSI_DSIPHY_PLL_RDY 0x0280
+#define DSI_DSIPHY_PLL_ANA_STATUS0 0x0294
+#define DSI_DSIPHY_PLL_ANA_STATUS1 0x0298
+#define DSI_DSIPHY_PLL_ANA_STATUS2 0x029C
+#define DSI_DSIPHY_LN0_CFG0 0x0300
+#define DSI_DSIPHY_LN0_CFG1 0x0304
+#define DSI_DSIPHY_LN0_CFG2 0x0308
+#define DSI_DSIPHY_LN1_CFG0 0x0340
+#define DSI_DSIPHY_LN1_CFG1 0x0344
+#define DSI_DSIPHY_LN1_CFG2 0x0348
+#define DSI_DSIPHY_LN2_CFG0 0x0380
+#define DSI_DSIPHY_LN2_CFG1 0x0384
+#define DSI_DSIPHY_LN2_CFG2 0x0388
+#define DSI_DSIPHY_LN3_CFG0 0x03C0
+#define DSI_DSIPHY_LN3_CFG1 0x03C4
+#define DSI_DSIPHY_LN3_CFG2 0x03C8
+#define DSI_DSIPHY_LNCK_CFG0 0x0400
+#define DSI_DSIPHY_LNCK_CFG1 0x0404
+#define DSI_DSIPHY_LNCK_CFG2 0x0408
+#define DSI_DSIPHY_TIMING_CTRL_0 0x0440
+#define DSI_DSIPHY_TIMING_CTRL_1 0x0444
+#define DSI_DSIPHY_TIMING_CTRL_2 0x0448
+#define DSI_DSIPHY_TIMING_CTRL_3 0x044C
+#define DSI_DSIPHY_TIMING_CTRL_4 0x0450
+#define DSI_DSIPHY_TIMING_CTRL_5 0x0454
+#define DSI_DSIPHY_TIMING_CTRL_6 0x0458
+#define DSI_DSIPHY_TIMING_CTRL_7 0x045C
+#define DSI_DSIPHY_TIMING_CTRL_8 0x0460
+#define DSI_DSIPHY_TIMING_CTRL_9 0x0464
+#define DSI_DSIPHY_TIMING_CTRL_10 0x0468
+#define DSI_DSIPHY_TIMING_CTRL_11 0x046C
+#define DSI_DSIPHY_CTRL_0 0x0470
+#define DSI_DSIPHY_CTRL_1 0x0474
+#define DSI_DSIPHY_CTRL_2 0x0478
+#define DSI_DSIPHY_CTRL_3 0x047C
+#define DSI_DSIPHY_STRENGTH_CTRL_0 0x0480
+#define DSI_DSIPHY_STRENGTH_CTRL_1 0x0484
+#define DSI_DSIPHY_STRENGTH_CTRL_2 0x0488
+#define DSI_DSIPHY_LDO_CNTRL 0x04B0
+#define DSI_DSIPHY_REGULATOR_CTRL_0 0x0500
+#define DSI_DSIPHY_REGULATOR_CTRL_1 0x0504
+#define DSI_DSIPHY_REGULATOR_CTRL_2 0x0508
+#define DSI_DSIPHY_REGULATOR_CTRL_3 0x050C
+#define DSI_DSIPHY_REGULATOR_CTRL_4 0x0510
+#define DSI_DSIPHY_REGULATOR_TEST 0x0514
+#define DSI_DSIPHY_REGULATOR_CAL_PWR_CFG 0x0518
+#define DSI_DSIPHY_CAL_HW_TRIGGER 0x0528
+#define DSI_DSIPHY_CAL_SW_CFG0 0x052C
+#define DSI_DSIPHY_CAL_SW_CFG1 0x0530
+#define DSI_DSIPHY_CAL_SW_CFG2 0x0534
+#define DSI_DSIPHY_CAL_HW_CFG0 0x0538
+#define DSI_DSIPHY_CAL_HW_CFG1 0x053C
+#define DSI_DSIPHY_CAL_HW_CFG2 0x0540
+#define DSI_DSIPHY_CAL_HW_CFG3 0x0544
+#define DSI_DSIPHY_CAL_HW_CFG4 0x0548
+#define DSI_DSIPHY_REGULATOR_CAL_STATUS0 0x0550
+#define DSI_DSIPHY_BIST_CTRL0 0x048C
+#define DSI_DSIPHY_BIST_CTRL1 0x0490
+#define DSI_DSIPHY_BIST_CTRL2 0x0494
+#define DSI_DSIPHY_BIST_CTRL3 0x0498
+#define DSI_DSIPHY_BIST_CTRL4 0x049C
+#define DSI_DSIPHY_BIST_CTRL5 0x04A0
+
+#endif /* DSI_HOST_V2_H */
diff --git a/drivers/video/msm/mdss/dsi_io_v2.c b/drivers/video/msm/mdss/dsi_io_v2.c
new file mode 100644
index 0000000..93f2c76
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_io_v2.c
@@ -0,0 +1,449 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/clk.h>
+
+#include "dsi_v2.h"
+#include "dsi_io_v2.h"
+#include "dsi_host_v2.h"
+
+struct msm_dsi_io_private {
+ struct regulator *vdda_vreg;
+ struct clk *dsi_byte_clk;
+ struct clk *dsi_esc_clk;
+ struct clk *dsi_pixel_clk;
+ struct clk *dsi_ahb_clk;
+ struct clk *dsi_clk;
+ int msm_dsi_clk_on;
+ int msm_dsi_ahb_clk_on;
+};
+
+static struct msm_dsi_io_private *dsi_io_private;
+
+#define DSI_VDDA_VOLTAGE 1200000
+
+void msm_dsi_ahb_ctrl(int enable)
+{
+ if (enable) {
+ if (dsi_io_private->msm_dsi_ahb_clk_on) {
+ pr_debug("ahb clks already ON\n");
+ return;
+ }
+ clk_enable(dsi_io_private->dsi_ahb_clk);
+ dsi_io_private->msm_dsi_ahb_clk_on = 1;
+ } else {
+ if (dsi_io_private->msm_dsi_ahb_clk_on == 0) {
+ pr_debug("ahb clk already OFF\n");
+ return;
+ }
+ clk_disable(dsi_io_private->dsi_ahb_clk);
+ dsi_io_private->msm_dsi_ahb_clk_on = 0;
+ }
+}
+
+int msm_dsi_io_init(struct platform_device *dev)
+{
+ int rc;
+
+ if (!dsi_io_private) {
+ dsi_io_private = kzalloc(sizeof(struct msm_dsi_io_private),
+ GFP_KERNEL);
+ if (!dsi_io_private) {
+ pr_err("fail to alloc dsi io private data structure\n");
+ return -ENOMEM;
+ }
+ }
+
+ rc = msm_dsi_clk_init(dev);
+ if (rc) {
+ pr_err("fail to initialize DSI clock\n");
+ return rc;
+ }
+
+ rc = msm_dsi_regulator_init(dev);
+ if (rc) {
+ pr_err("fail to initialize DSI regulator\n");
+ return rc;
+ }
+ return 0;
+}
+
+void msm_dsi_io_deinit(void)
+{
+ if (dsi_io_private) {
+ msm_dsi_clk_deinit();
+ msm_dsi_regulator_deinit();
+ kfree(dsi_io_private);
+ dsi_io_private = NULL;
+ }
+}
+
+int msm_dsi_clk_init(struct platform_device *dev)
+{
+ int rc = 0;
+
+ dsi_io_private->dsi_clk = clk_get(&dev->dev, "dsi_clk");
+ if (IS_ERR(dsi_io_private->dsi_clk)) {
+ pr_err("can't find dsi core_clk\n");
+ rc = PTR_ERR(dsi_io_private->dsi_clk);
+ dsi_io_private->dsi_clk = NULL;
+ return rc;
+ }
+ dsi_io_private->dsi_byte_clk = clk_get(&dev->dev, "byte_clk");
+ if (IS_ERR(dsi_io_private->dsi_byte_clk)) {
+ pr_err("can't find dsi byte_clk\n");
+ rc = PTR_ERR(dsi_io_private->dsi_byte_clk);
+ dsi_io_private->dsi_byte_clk = NULL;
+ return rc;
+ }
+
+ dsi_io_private->dsi_esc_clk = clk_get(&dev->dev, "esc_clk");
+ if (IS_ERR(dsi_io_private->dsi_esc_clk)) {
+ pr_err("can't find dsi esc_clk\n");
+ rc = PTR_ERR(dsi_io_private->dsi_esc_clk);
+ dsi_io_private->dsi_esc_clk = NULL;
+ return rc;
+ }
+
+ dsi_io_private->dsi_pixel_clk = clk_get(&dev->dev, "pixel_clk");
+ if (IS_ERR(dsi_io_private->dsi_pixel_clk)) {
+ pr_err("can't find dsi pixel\n");
+ rc = PTR_ERR(dsi_io_private->dsi_pixel_clk);
+ dsi_io_private->dsi_pixel_clk = NULL;
+ return rc;
+ }
+
+ dsi_io_private->dsi_ahb_clk = clk_get(&dev->dev, "iface_clk");
+ if (IS_ERR(dsi_io_private->dsi_ahb_clk)) {
+ pr_err("can't find dsi iface_clk\n");
+ rc = PTR_ERR(dsi_io_private->dsi_ahb_clk);
+ dsi_io_private->dsi_ahb_clk = NULL;
+ return rc;
+ }
+ clk_prepare(dsi_io_private->dsi_ahb_clk);
+
+ return 0;
+}
+
+void msm_dsi_clk_deinit(void)
+{
+ if (dsi_io_private->dsi_clk) {
+ clk_put(dsi_io_private->dsi_clk);
+ dsi_io_private->dsi_clk = NULL;
+ }
+ if (dsi_io_private->dsi_byte_clk) {
+ clk_put(dsi_io_private->dsi_byte_clk);
+ dsi_io_private->dsi_byte_clk = NULL;
+ }
+ if (dsi_io_private->dsi_esc_clk) {
+ clk_put(dsi_io_private->dsi_esc_clk);
+ dsi_io_private->dsi_esc_clk = NULL;
+ }
+ if (dsi_io_private->dsi_pixel_clk) {
+ clk_put(dsi_io_private->dsi_pixel_clk);
+ dsi_io_private->dsi_pixel_clk = NULL;
+ }
+ if (dsi_io_private->dsi_ahb_clk) {
+ clk_unprepare(dsi_io_private->dsi_ahb_clk);
+ clk_put(dsi_io_private->dsi_ahb_clk);
+ dsi_io_private->dsi_ahb_clk = NULL;
+ }
+}
+
+int msm_dsi_prepare_clocks(void)
+{
+ clk_prepare(dsi_io_private->dsi_clk);
+ clk_prepare(dsi_io_private->dsi_byte_clk);
+ clk_prepare(dsi_io_private->dsi_esc_clk);
+ clk_prepare(dsi_io_private->dsi_pixel_clk);
+ return 0;
+}
+
+int msm_dsi_unprepare_clocks(void)
+{
+ clk_unprepare(dsi_io_private->dsi_clk);
+ clk_unprepare(dsi_io_private->dsi_esc_clk);
+ clk_unprepare(dsi_io_private->dsi_byte_clk);
+ clk_unprepare(dsi_io_private->dsi_pixel_clk);
+ return 0;
+}
+
+int msm_dsi_clk_set_rate(unsigned long esc_rate,
+ unsigned long dsi_rate,
+ unsigned long byte_rate,
+ unsigned long pixel_rate)
+{
+ int rc;
+ rc = clk_set_rate(dsi_io_private->dsi_clk, dsi_rate);
+ if (rc) {
+ pr_err("dsi_esc_clk - clk_set_rate failed =%d\n", rc);
+ return rc;
+ }
+
+ rc = clk_set_rate(dsi_io_private->dsi_esc_clk, esc_rate);
+ if (rc) {
+ pr_err("dsi_esc_clk - clk_set_rate failed =%d\n", rc);
+ return rc;
+ }
+
+ rc = clk_set_rate(dsi_io_private->dsi_byte_clk, byte_rate);
+ if (rc) {
+ pr_err("dsi_byte_clk - clk_set_rate faile = %dd\n", rc);
+ return rc;
+ }
+
+ rc = clk_set_rate(dsi_io_private->dsi_pixel_clk, pixel_rate);
+ if (rc) {
+ pr_err("dsi_pixel_clk - clk_set_rate failed = %d\n", rc);
+ return rc;
+ }
+ return 0;
+}
+
+int msm_dsi_clk_enable(void)
+{
+ if (dsi_io_private->msm_dsi_clk_on) {
+ pr_debug("dsi_clks on already\n");
+ return 0;
+ }
+
+ clk_enable(dsi_io_private->dsi_clk);
+ clk_enable(dsi_io_private->dsi_esc_clk);
+ clk_enable(dsi_io_private->dsi_byte_clk);
+ clk_enable(dsi_io_private->dsi_pixel_clk);
+
+ dsi_io_private->msm_dsi_clk_on = 1;
+ return 0;
+}
+
+int msm_dsi_clk_disable(void)
+{
+ if (dsi_io_private->msm_dsi_clk_on == 0) {
+ pr_debug("mdss_dsi_clks already OFF\n");
+ return 0;
+ }
+
+ clk_disable(dsi_io_private->dsi_clk);
+ clk_disable(dsi_io_private->dsi_byte_clk);
+ clk_disable(dsi_io_private->dsi_esc_clk);
+ clk_disable(dsi_io_private->dsi_pixel_clk);
+
+ dsi_io_private->msm_dsi_clk_on = 0;
+ return 0;
+}
+
+int msm_dsi_regulator_init(struct platform_device *dev)
+{
+ int ret = 0;
+
+ dsi_io_private->vdda_vreg = devm_regulator_get(&dev->dev, "vdda");
+ if (IS_ERR(dsi_io_private->vdda_vreg)) {
+ ret = PTR_ERR(dsi_io_private->vdda_vreg);
+ pr_err("could not get vdda 8110_l4, ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_set_voltage(dsi_io_private->vdda_vreg, DSI_VDDA_VOLTAGE,
+ DSI_VDDA_VOLTAGE);
+ if (ret)
+ pr_err("vdd_io_vreg->set_voltage failed, ret=%d\n", ret);
+
+ return ret;
+}
+
+void msm_dsi_regulator_deinit(void)
+{
+ if (!IS_ERR(dsi_io_private->vdda_vreg)) {
+ devm_regulator_put(dsi_io_private->vdda_vreg);
+ dsi_io_private->vdda_vreg = NULL;
+ }
+}
+
+int msm_dsi_regulator_enable(void)
+{
+ int ret;
+
+ ret = regulator_enable(dsi_io_private->vdda_vreg);
+ if (ret) {
+ pr_err("%s: Failed to enable regulator.\n", __func__);
+ return ret;
+ }
+ msleep(20); /*per DSI controller spec*/
+ return ret;
+}
+
+int msm_dsi_regulator_disable(void)
+{
+ int ret;
+
+ ret = regulator_disable(dsi_io_private->vdda_vreg);
+ if (ret) {
+ pr_err("%s: Failed to disable regulator.\n", __func__);
+ return ret;
+ }
+ wmb();
+ msleep(20); /*per DSI controller spec*/
+
+ return ret;
+}
+
+static void msm_dsi_phy_strength_init(unsigned char *ctrl_base,
+ struct mdss_dsi_phy_ctrl *pd)
+{
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_STRENGTH_CTRL_0, pd->strength[0]);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_STRENGTH_CTRL_2, pd->strength[1]);
+}
+
+static void msm_dsi_phy_ctrl_init(unsigned char *ctrl_base,
+ struct mdss_panel_data *pdata)
+{
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_0, 0x5f);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_3, 0x10);
+}
+
+static void msm_dsi_phy_regulator_init(unsigned char *ctrl_base,
+ struct mdss_dsi_phy_ctrl *pd)
+{
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_LDO_CNTRL, 0x04);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_0, pd->regulator[0]);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_1, pd->regulator[1]);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_2, pd->regulator[2]);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_3, pd->regulator[3]);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_4, pd->regulator[4]);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CAL_PWR_CFG,
+ pd->regulator[5]);
+}
+
+static int msm_dsi_phy_calibration(unsigned char *ctrl_base)
+{
+ int i = 0, term_cnt = 5000, ret = 0, cal_busy;
+
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_SW_CFG2, 0x0);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG1, 0x5a);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG3, 0x10);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG4, 0x01);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG0, 0x01);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_TRIGGER, 0x01);
+ usleep_range(5000, 5000); /*per DSI controller spec*/
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_TRIGGER, 0x00);
+
+ cal_busy = MIPI_INP(ctrl_base + DSI_DSIPHY_REGULATOR_CAL_STATUS0);
+ while (cal_busy & 0x10) {
+ i++;
+ if (i > term_cnt) {
+ ret = -EINVAL;
+ pr_err("msm_dsi_phy_calibration error\n");
+ break;
+ }
+ cal_busy = MIPI_INP(ctrl_base +
+ DSI_DSIPHY_REGULATOR_CAL_STATUS0);
+ }
+
+ return ret;
+}
+
+static void msm_dsi_phy_lane_init(unsigned char *ctrl_base,
+ struct mdss_dsi_phy_ctrl *pd)
+{
+ int ln, index;
+
+ /*CFG0, CFG1, CFG2, TEST_DATAPATH, TEST_STR0, TEST_STR1*/
+ for (ln = 0; ln < 5; ln++) {
+ unsigned char *off = ctrl_base + 0x0300 + (ln * 0x40);
+ index = ln * 6;
+ MIPI_OUTP(off, pd->laneCfg[index]);
+ MIPI_OUTP(off + 4, pd->laneCfg[index + 1]);
+ MIPI_OUTP(off + 8, pd->laneCfg[index + 2]);
+ MIPI_OUTP(off + 12, pd->laneCfg[index + 3]);
+ MIPI_OUTP(off + 20, pd->laneCfg[index + 4]);
+ MIPI_OUTP(off + 24, pd->laneCfg[index + 5]);
+ }
+ wmb();
+}
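+
+/*
+ * Register layout covered by the loop above (from the offsets used and
+ * the DSI_DSIPHY_LNn_CFGm and LNCK_CFGm definitions): each of the
+ * 5 lanes (DLN0..DLN3 plus the clock lane) owns a 0x40-aligned block
+ * starting at 0x0300 and consumes 6 consecutive pd->laneCfg entries:
+ *	laneCfg[ln * 6 + 0] -> LNx_CFG0       (block + 0x00)
+ *	laneCfg[ln * 6 + 1] -> LNx_CFG1       (block + 0x04)
+ *	laneCfg[ln * 6 + 2] -> LNx_CFG2       (block + 0x08)
+ *	laneCfg[ln * 6 + 3] -> TEST_DATAPATH  (block + 0x0C)
+ *	laneCfg[ln * 6 + 4] -> TEST_STR0      (block + 0x14)
+ *	laneCfg[ln * 6 + 5] -> TEST_STR1      (block + 0x18)
+ * which is why qcom,panel-phy-laneConfig must be 5 * 6 = 30 bytes.
+ */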
+
+static void msm_dsi_phy_timing_init(unsigned char *ctrl_base,
+ struct mdss_dsi_phy_ctrl *pd)
+{
+ int i, off = DSI_DSIPHY_TIMING_CTRL_0;
+ for (i = 0; i < 12; i++) {
+ MIPI_OUTP(ctrl_base + off, pd->timing[i]);
+ off += 4;
+ }
+ wmb();
+}
+
+static void msm_dsi_phy_bist_init(unsigned char *ctrl_base,
+ struct mdss_dsi_phy_ctrl *pd)
+{
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL4, pd->bistCtrl[4]);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL1, pd->bistCtrl[1]);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL0, pd->bistCtrl[0]);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL4, 0);
+ wmb();
+}
+
+int msm_dsi_phy_init(unsigned char *ctrl_base,
+ struct mdss_panel_data *pdata)
+{
+ struct mdss_dsi_phy_ctrl *pd;
+
+ pd = pdata->panel_info.mipi.dsi_phy_db;
+
+ msm_dsi_phy_strength_init(ctrl_base, pd);
+
+ msm_dsi_phy_ctrl_init(ctrl_base, pdata);
+
+ msm_dsi_phy_regulator_init(ctrl_base, pd);
+
+ msm_dsi_phy_calibration(ctrl_base);
+
+ msm_dsi_phy_lane_init(ctrl_base, pd);
+
+ msm_dsi_phy_timing_init(ctrl_base, pd);
+
+ msm_dsi_phy_bist_init(ctrl_base, pd);
+
+ return 0;
+}
+
+void msm_dsi_phy_sw_reset(unsigned char *ctrl_base)
+{
+ /* start phy sw reset */
+ MIPI_OUTP(ctrl_base + DSI_PHY_SW_RESET, 0x0001);
+ udelay(1000); /*per DSI controller spec*/
+ wmb();
+ /* end phy sw reset */
+ MIPI_OUTP(ctrl_base + DSI_PHY_SW_RESET, 0x0000);
+ udelay(100); /*per DSI controller spec*/
+ wmb();
+}
+
+void msm_dsi_phy_enable(unsigned char *ctrl_base, int on)
+{
+ if (on) {
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_PLL_CTRL_5, 0x050);
+ } else {
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_PLL_CTRL_5, 0x05f);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_0, 0x02);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_0, 0x00);
+ MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_1, 0x7f);
+ MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0);
+ }
+}
diff --git a/drivers/video/msm/mdss/dsi_io_v2.h b/drivers/video/msm/mdss/dsi_io_v2.h
new file mode 100644
index 0000000..285bf30
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_io_v2.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef DSI_IO_V2_H
+#define DSI_IO_V2_H
+
+#include "mdss_panel.h"
+
+void msm_dsi_ahb_ctrl(int enable);
+
+int msm_dsi_io_init(struct platform_device *dev);
+
+void msm_dsi_io_deinit(void);
+
+int msm_dsi_clk_init(struct platform_device *dev);
+
+void msm_dsi_clk_deinit(void);
+
+int msm_dsi_prepare_clocks(void);
+
+int msm_dsi_unprepare_clocks(void);
+
+int msm_dsi_clk_set_rate(unsigned long esc_rate,
+ unsigned long dsi_rate,
+ unsigned long byte_rate,
+ unsigned long pixel_rate);
+
+int msm_dsi_clk_enable(void);
+
+int msm_dsi_clk_disable(void);
+
+int msm_dsi_regulator_init(struct platform_device *dev);
+
+void msm_dsi_regulator_deinit(void);
+
+int msm_dsi_regulator_enable(void);
+
+int msm_dsi_regulator_disable(void);
+
+int msm_dsi_phy_init(unsigned char *ctrl_base,
+ struct mdss_panel_data *pdata);
+
+void msm_dsi_phy_sw_reset(unsigned char *ctrl_base);
+
+#endif /* DSI_IO_V2_H */
diff --git a/drivers/video/msm/mdss/dsi_panel_v2.c b/drivers/video/msm/mdss/dsi_panel_v2.c
new file mode 100644
index 0000000..e46ea3b
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_panel_v2.c
@@ -0,0 +1,834 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/qpnp/pin.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+
+#include "dsi_v2.h"
+
+#define DT_CMD_HDR 6
+
+struct dsi_panel_private {
+ struct dsi_buf dsi_panel_tx_buf;
+ struct dsi_buf dsi_panel_rx_buf;
+
+ int rst_gpio;
+ int disp_en_gpio;
+ int video_mode_gpio;
+ char bl_ctrl;
+
+ struct regulator *vddio_vreg;
+ struct regulator *vdda_vreg;
+
+ struct dsi_panel_cmds_list *on_cmds_list;
+ struct dsi_panel_cmds_list *off_cmds_list;
+ struct mdss_dsi_phy_ctrl phy_params;
+
+ char *on_cmds;
+ char *off_cmds;
+};
+
+static struct dsi_panel_private *panel_private;
+
+DEFINE_LED_TRIGGER(bl_led_trigger);
+
+int dsi_panel_init(void)
+{
+ int rc;
+
+ if (!panel_private) {
+ panel_private = kzalloc(sizeof(struct dsi_panel_private),
+ GFP_KERNEL);
+ if (!panel_private) {
+ pr_err("fail to alloc dsi panel private data\n");
+ return -ENOMEM;
+ }
+ }
+
+ rc = dsi_buf_alloc(&panel_private->dsi_panel_tx_buf,
+ ALIGN(DSI_BUF_SIZE,
+ SZ_4K));
+ if (rc)
+ return rc;
+
+ rc = dsi_buf_alloc(&panel_private->dsi_panel_rx_buf,
+ ALIGN(DSI_BUF_SIZE,
+ SZ_4K));
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+void dsi_panel_deinit(void)
+{
+ if (!panel_private)
+ return;
+
+ kfree(panel_private->dsi_panel_tx_buf.start);
+ kfree(panel_private->dsi_panel_rx_buf.start);
+
+ if (!IS_ERR(panel_private->vddio_vreg))
+ devm_regulator_put(panel_private->vddio_vreg);
+
+ if (!IS_ERR(panel_private->vdda_vreg))
+ devm_regulator_put(panel_private->vdda_vreg);
+
+ if (panel_private->on_cmds_list) {
+ kfree(panel_private->on_cmds_list->buf);
+ kfree(panel_private->on_cmds_list);
+ }
+ if (panel_private->off_cmds_list) {
+ kfree(panel_private->off_cmds_list->buf);
+ kfree(panel_private->off_cmds_list);
+ }
+
+ kfree(panel_private->on_cmds);
+ kfree(panel_private->off_cmds);
+ kfree(panel_private);
+ panel_private = NULL;
+}
+int dsi_panel_power(int enable)
+{
+ int ret;
+ if (enable) {
+ ret = regulator_enable(panel_private->vddio_vreg);
+ if (ret) {
+ pr_err("dsi_panel_power regulator enable vddio fail\n");
+ return ret;
+ }
+ ret = regulator_enable(panel_private->vdda_vreg);
+ if (ret) {
+ pr_err("dsi_panel_power regulator enable vdda fail\n");
+ return ret;
+ }
+ } else {
+ ret = regulator_disable(panel_private->vddio_vreg);
+ if (ret) {
+ pr_err("dsi_panel_power regulator disable vddio fail\n");
+ return ret;
+ }
+ ret = regulator_disable(panel_private->vdda_vreg);
+ if (ret) {
+ pr_err("dsi_panel_power regulator dsiable vdda fail\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+void dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
+{
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return;
+ }
+
+ if (!gpio_is_valid(panel_private->disp_en_gpio)) {
+ pr_debug("%s:%d, reset line not configured\n",
+ __func__, __LINE__);
+ }
+
+ if (!gpio_is_valid(panel_private->rst_gpio)) {
+ pr_debug("%s:%d, reset line not configured\n",
+ __func__, __LINE__);
+ return;
+ }
+
+ pr_debug("%s: enable = %d\n", __func__, enable);
+
+ if (enable) {
+ dsi_panel_power(1);
+ gpio_request(panel_private->rst_gpio, "panel_reset");
+ gpio_set_value(panel_private->rst_gpio, 1);
+ /*
+ * These delay values are currently based on experiments;
+ * they will need to be moved to the device tree later.
+ */
+ msleep(20);
+ gpio_set_value(panel_private->rst_gpio, 0);
+ udelay(200);
+ gpio_set_value(panel_private->rst_gpio, 1);
+ msleep(20);
+ if (gpio_is_valid(panel_private->disp_en_gpio)) {
+ gpio_request(panel_private->disp_en_gpio,
+ "panel_enable");
+ gpio_set_value(panel_private->disp_en_gpio, 1);
+ }
+ if (gpio_is_valid(panel_private->video_mode_gpio)) {
+ gpio_request(panel_private->video_mode_gpio,
+ "panel_video_mdoe");
+ if (pdata->panel_info.mipi.mode == DSI_VIDEO_MODE)
+ gpio_set_value(panel_private->video_mode_gpio,
+ 1);
+ else
+ gpio_set_value(panel_private->video_mode_gpio,
+ 0);
+ }
+ } else {
+ gpio_set_value(panel_private->rst_gpio, 0);
+ gpio_free(panel_private->rst_gpio);
+
+ if (gpio_is_valid(panel_private->disp_en_gpio)) {
+ gpio_set_value(panel_private->disp_en_gpio, 0);
+ gpio_free(panel_private->disp_en_gpio);
+ }
+
+ if (gpio_is_valid(panel_private->video_mode_gpio))
+ gpio_free(panel_private->video_mode_gpio);
+
+ dsi_panel_power(0);
+ }
+}
+
+static void dsi_panel_bl_ctrl(struct mdss_panel_data *pdata,
+ u32 bl_level)
+{
+ if (panel_private->bl_ctrl) {
+ switch (panel_private->bl_ctrl) {
+ case BL_WLED:
+ led_trigger_event(bl_led_trigger, bl_level);
+ break;
+
+ default:
+ pr_err("%s: Unknown bl_ctrl configuration\n",
+ __func__);
+ break;
+ }
+ } else {
+ pr_err("%s:%d, bl_ctrl not configured", __func__, __LINE__);
+ }
+}
+
+static int dsi_panel_on(struct mdss_panel_data *pdata)
+{
+ struct mipi_panel_info *mipi;
+
+ mipi = &pdata->panel_info.mipi;
+
+ pr_debug("%s:%d, debug info (mode) : %d\n", __func__, __LINE__,
+ mipi->mode);
+
+ if (mipi->mode == DSI_VIDEO_MODE) {
+ dsi_cmds_tx_v2(pdata, &panel_private->dsi_panel_tx_buf,
+ panel_private->on_cmds_list->buf,
+ panel_private->on_cmds_list->size);
+ } else {
+ pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dsi_panel_off(struct mdss_panel_data *pdata)
+{
+ struct mipi_panel_info *mipi;
+ mipi = &pdata->panel_info.mipi;
+
+ pr_debug("%s:%d, debug info\n", __func__, __LINE__);
+
+ if (mipi->mode == DSI_VIDEO_MODE) {
+ dsi_cmds_tx_v2(pdata, &panel_private->dsi_panel_tx_buf,
+ panel_private->off_cmds_list->buf,
+ panel_private->off_cmds_list->size);
+ } else {
+ pr_debug("%s:%d, CMD mode not supported", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dsi_panel_parse_gpio(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ panel_private->disp_en_gpio = of_get_named_gpio(np,
+ "qcom,enable-gpio", 0);
+ panel_private->rst_gpio = of_get_named_gpio(np, "qcom,rst-gpio", 0);
+ panel_private->video_mode_gpio = of_get_named_gpio(np,
+ "qcom,mode-selection-gpio", 0);
+ return 0;
+}
+
+static int dsi_panel_parse_regulator(struct platform_device *pdev)
+{
+ int ret;
+ panel_private->vddio_vreg = devm_regulator_get(&pdev->dev, "vddio");
+ if (IS_ERR(panel_private->vddio_vreg)) {
+ pr_err("%s: could not get vddio vreg, rc=%ld\n",
+ __func__, PTR_ERR(panel_private->vddio_vreg));
+ return PTR_ERR(panel_private->vddio_vreg);
+ }
+ ret = regulator_set_voltage(panel_private->vddio_vreg,
+ 1800000,
+ 1800000);
+ if (ret) {
+ pr_err("%s: set voltage failed on vddio_vreg, rc=%d\n",
+ __func__, ret);
+ return ret;
+ }
+ panel_private->vdda_vreg = devm_regulator_get(&pdev->dev, "vdda");
+ if (IS_ERR(panel_private->vdda_vreg)) {
+ pr_err("%s: could not get vdda_vreg , rc=%ld\n",
+ __func__, PTR_ERR(panel_private->vdda_vreg));
+ return PTR_ERR(panel_private->vdda_vreg);
+ }
+ ret = regulator_set_voltage(panel_private->vdda_vreg,
+ 2850000,
+ 2850000);
+ if (ret) {
+ pr_err("%s: set voltage failed on vdda_vreg, rc=%d\n",
+ __func__, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int dsi_panel_parse_timing(struct platform_device *pdev,
+ struct dsi_panel_common_pdata *panel_data)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 res[6], tmp;
+ int rc;
+
+ rc = of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2);
+ if (rc) {
+ pr_err("%s:%d, panel resolution not specified\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ panel_data->panel_info.xres = (!rc ? res[0] : 480);
+ panel_data->panel_info.yres = (!rc ? res[1] : 800);
+
+ rc = of_property_read_u32_array(np, "qcom,mdss-pan-active-res", res, 2);
+ if (rc == 0) {
+ panel_data->panel_info.lcdc.xres_pad =
+ panel_data->panel_info.xres - res[0];
+ panel_data->panel_info.lcdc.yres_pad =
+ panel_data->panel_info.yres - res[1];
+ }
+
+ rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp);
+ if (rc) {
+ pr_err("%s:%d, panel bpp not specified\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ panel_data->panel_info.bpp = (!rc ? tmp : 24);
+
+ rc = of_property_read_u32_array(np,
+ "qcom,mdss-pan-porch-values", res, 6);
+ if (rc) {
+ pr_err("%s:%d, panel porch not specified\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ panel_data->panel_info.lcdc.h_back_porch = (!rc ? res[0] : 6);
+ panel_data->panel_info.lcdc.h_pulse_width = (!rc ? res[1] : 2);
+ panel_data->panel_info.lcdc.h_front_porch = (!rc ? res[2] : 6);
+ panel_data->panel_info.lcdc.v_back_porch = (!rc ? res[3] : 6);
+ panel_data->panel_info.lcdc.v_pulse_width = (!rc ? res[4] : 2);
+ panel_data->panel_info.lcdc.v_front_porch = (!rc ? res[5] : 6);
+
+ return 0;
+}
+
+static int dsi_panel_parse_phy(struct platform_device *pdev,
+ struct dsi_panel_common_pdata *panel_data)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 res[6], tmp;
+ int i, len, rc;
+ const char *data;
+
+ rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mode", &tmp);
+ panel_data->panel_info.mipi.mode = (!rc ? tmp : DSI_VIDEO_MODE);
+
+ rc = of_property_read_u32(np,
+ "qcom,mdss-pan-dsi-h-pulse-mode", &tmp);
+ panel_data->panel_info.mipi.pulse_mode_hsa_he = (!rc ? tmp : false);
+
+ rc = of_property_read_u32_array(np,
+ "qcom,mdss-pan-dsi-h-power-stop", res, 3);
+ panel_data->panel_info.mipi.hbp_power_stop = (!rc ? res[0] : false);
+ panel_data->panel_info.mipi.hsa_power_stop = (!rc ? res[1] : false);
+ panel_data->panel_info.mipi.hfp_power_stop = (!rc ? res[2] : false);
+
+ rc = of_property_read_u32_array(np,
+ "qcom,mdss-pan-dsi-bllp-power-stop", res, 2);
+ panel_data->panel_info.mipi.bllp_power_stop =
+ (!rc ? res[0] : false);
+ panel_data->panel_info.mipi.eof_bllp_power_stop =
+ (!rc ? res[1] : false);
+
+ rc = of_property_read_u32(np,
+ "qcom,mdss-pan-dsi-traffic-mode", &tmp);
+ panel_data->panel_info.mipi.traffic_mode =
+ (!rc ? tmp : DSI_NON_BURST_SYNCH_PULSE);
+
+ rc = of_property_read_u32(np,
+ "qcom,mdss-pan-dsi-dst-format", &tmp);
+ panel_data->panel_info.mipi.dst_format =
+ (!rc ? tmp : DSI_VIDEO_DST_FORMAT_RGB888);
+
+ rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-vc", &tmp);
+ panel_data->panel_info.mipi.vc = (!rc ? tmp : 0);
+
+ rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-rgb-swap", &tmp);
+ panel_data->panel_info.mipi.rgb_swap = (!rc ? tmp : DSI_RGB_SWAP_RGB);
+
+ rc = of_property_read_u32_array(np,
+ "qcom,mdss-pan-dsi-data-lanes", res, 4);
+ panel_data->panel_info.mipi.data_lane0 = (!rc ? res[0] : true);
+ panel_data->panel_info.mipi.data_lane1 = (!rc ? res[1] : false);
+ panel_data->panel_info.mipi.data_lane2 = (!rc ? res[2] : false);
+ panel_data->panel_info.mipi.data_lane3 = (!rc ? res[3] : false);
+
+ rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dlane-swap", &tmp);
+ panel_data->panel_info.mipi.dlane_swap = (!rc ? tmp : 0);
+
+ rc = of_property_read_u32_array(np, "qcom,mdss-pan-dsi-t-clk", res, 2);
+ panel_data->panel_info.mipi.t_clk_pre = (!rc ? res[0] : 0x24);
+ panel_data->panel_info.mipi.t_clk_post = (!rc ? res[1] : 0x03);
+
+ rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-stream", &tmp);
+ panel_data->panel_info.mipi.stream = (!rc ? tmp : 0);
+
+ rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mdp-tr", &tmp);
+ panel_data->panel_info.mipi.mdp_trigger =
+ (!rc ? tmp : DSI_CMD_TRIGGER_SW);
+ if (panel_data->panel_info.mipi.mdp_trigger > 6) {
+ pr_err("%s:%d, Invalid mdp trigger. Forcing to sw trigger",
+ __func__, __LINE__);
+ panel_data->panel_info.mipi.mdp_trigger =
+ DSI_CMD_TRIGGER_SW;
+ }
+
+ rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dma-tr", &tmp);
+ panel_data->panel_info.mipi.dma_trigger =
+ (!rc ? tmp : DSI_CMD_TRIGGER_SW);
+ if (panel_data->panel_info.mipi.dma_trigger > 6) {
+ pr_err("%s:%d, Invalid dma trigger. Forcing to sw trigger",
+ __func__, __LINE__);
+ panel_data->panel_info.mipi.dma_trigger =
+ DSI_CMD_TRIGGER_SW;
+ }
+
+ rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-frame-rate", &tmp);
+ panel_data->panel_info.mipi.frame_rate = (!rc ? tmp : 60);
+
+ data = of_get_property(np, "qcom,panel-phy-regulatorSettings", &len);
+ if ((!data) || (len != 6)) {
+ pr_err("%s:%d, Unable to read Phy regulator settings",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ for (i = 0; i < len; i++)
+ panel_private->phy_params.regulator[i] = data[i];
+
+ data = of_get_property(np, "qcom,panel-phy-timingSettings", &len);
+ if ((!data) || (len != 12)) {
+ pr_err("%s:%d, Unable to read Phy timing settings",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ for (i = 0; i < len; i++)
+ panel_private->phy_params.timing[i] = data[i];
+
+ data = of_get_property(np, "qcom,panel-phy-strengthCtrl", &len);
+ if ((!data) || (len != 2)) {
+ pr_err("%s:%d, Unable to read Phy Strength ctrl settings",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ panel_private->phy_params.strength[0] = data[0];
+ panel_private->phy_params.strength[1] = data[1];
+
+ data = of_get_property(np, "qcom,panel-phy-bistCtrl", &len);
+ if ((!data) || (len != 6)) {
+ pr_err("%s:%d, Unable to read Phy Bist Ctrl settings",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ for (i = 0; i < len; i++)
+ panel_private->phy_params.bistCtrl[i] = data[i];
+
+ data = of_get_property(np, "qcom,panel-phy-laneConfig", &len);
+ if ((!data) || (len != 30)) {
+ pr_err("%s:%d, Unable to read Phy lane configure settings",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ for (i = 0; i < len; i++)
+ panel_private->phy_params.laneCfg[i] = data[i];
+
+ panel_data->panel_info.mipi.dsi_phy_db = &panel_private->phy_params;
+ return 0;
+}
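+
+/*
+ * Device tree property lengths expected by the checks above:
+ *	qcom,panel-phy-regulatorSettings : 6 bytes
+ *	qcom,panel-phy-timingSettings    : 12 bytes
+ *	qcom,panel-phy-strengthCtrl      : 2 bytes
+ *	qcom,panel-phy-bistCtrl          : 6 bytes
+ *	qcom,panel-phy-laneConfig        : 30 bytes (5 lanes x 6 entries)
+ */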
+
+static int dsi_panel_parse_init_cmds(struct platform_device *pdev,
+ struct dsi_panel_common_pdata *panel_data)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int i, len;
+ int cmd_plen, data_offset;
+ const char *data;
+ const char *on_cmds_state, *off_cmds_state;
+ int num_of_on_cmds = 0, num_of_off_cmds = 0;
+
+ data = of_get_property(np, "qcom,panel-on-cmds", &len);
+ if (!data) {
+ pr_err("%s:%d, Unable to read ON cmds", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ panel_private->on_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
+ if (!panel_private->on_cmds)
+ return -ENOMEM;
+
+ memcpy(panel_private->on_cmds, data, len);
+
+ data_offset = 0;
+ cmd_plen = 0;
+ while ((len - data_offset) >= DT_CMD_HDR) {
+ data_offset += (DT_CMD_HDR - 1);
+ cmd_plen = panel_private->on_cmds[data_offset++];
+ data_offset += cmd_plen;
+ num_of_on_cmds++;
+ }
+ if (!num_of_on_cmds) {
+ pr_err("%s:%d, No ON cmds specified", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ panel_private->on_cmds_list =
+ kzalloc(sizeof(struct dsi_panel_cmds_list), GFP_KERNEL);
+ if (!panel_private->on_cmds_list)
+ return -ENOMEM;
+
+ panel_private->on_cmds_list->buf =
+ kzalloc((num_of_on_cmds * sizeof(struct dsi_cmd_desc)),
+ GFP_KERNEL);
+ if (!panel_private->on_cmds_list->buf)
+ return -ENOMEM;
+
+ data_offset = 0;
+ for (i = 0; i < num_of_on_cmds; i++) {
+ panel_private->on_cmds_list->buf[i].dtype =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].last =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].vc =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].ack =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].wait =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].dlen =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].payload =
+ &panel_private->on_cmds[data_offset];
+ data_offset += (panel_private->on_cmds_list->buf[i].dlen);
+ }
+
+ if (data_offset != len) {
+ pr_err("%s:%d, Incorrect ON command entries",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ panel_private->on_cmds_list->size = num_of_on_cmds;
+
+ on_cmds_state = of_get_property(pdev->dev.of_node,
+ "qcom,on-cmds-dsi-state", NULL);
+ if (on_cmds_state && !strncmp(on_cmds_state, "DSI_LP_MODE", 11)) {
+ panel_private->on_cmds_list->ctrl_state = DSI_LP_MODE;
+ } else if (on_cmds_state && !strncmp(on_cmds_state, "DSI_HS_MODE", 11)) {
+ panel_private->on_cmds_list->ctrl_state = DSI_HS_MODE;
+ } else {
+ pr_debug("%s: ON cmds state not specified. Set Default\n",
+ __func__);
+ panel_private->on_cmds_list->ctrl_state = DSI_LP_MODE;
+ }
+
+ panel_data->dsi_panel_on_cmds = panel_private->on_cmds_list;
+
+ data = of_get_property(np, "qcom,panel-off-cmds", &len);
+ if (!data) {
+ pr_err("%s:%d, Unable to read OFF cmds", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ panel_private->off_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
+ if (!panel_private->off_cmds)
+ return -ENOMEM;
+
+ memcpy(panel_private->off_cmds, data, len);
+
+ data_offset = 0;
+ cmd_plen = 0;
+ while ((len - data_offset) >= DT_CMD_HDR) {
+ data_offset += (DT_CMD_HDR - 1);
+ cmd_plen = panel_private->off_cmds[data_offset++];
+ data_offset += cmd_plen;
+ num_of_off_cmds++;
+ }
+ if (!num_of_off_cmds) {
+ pr_err("%s:%d, No OFF cmds specified", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ panel_private->off_cmds_list =
+ kzalloc(sizeof(struct dsi_panel_cmds_list), GFP_KERNEL);
+ if (!panel_private->off_cmds_list)
+ return -ENOMEM;
+
+ panel_private->off_cmds_list->buf = kzalloc(num_of_off_cmds
+ * sizeof(struct dsi_cmd_desc),
+ GFP_KERNEL);
+ if (!panel_private->off_cmds_list->buf)
+ return -ENOMEM;
+
+ data_offset = 0;
+ for (i = 0; i < num_of_off_cmds; i++) {
+ panel_private->off_cmds_list->buf[i].dtype =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].last =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].vc =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].ack =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].wait =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].dlen =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].payload =
+ &panel_private->off_cmds[data_offset];
+ data_offset += (panel_private->off_cmds_list->buf[i].dlen);
+ }
+
+ if (data_offset != len) {
+ pr_err("%s:%d, Incorrect OFF command entries",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ panel_private->off_cmds_list->size = num_of_off_cmds;
+ off_cmds_state = of_get_property(pdev->dev.of_node,
+ "qcom,off-cmds-dsi-state", NULL);
+ if (off_cmds_state && !strncmp(off_cmds_state, "DSI_LP_MODE", 11)) {
+ panel_private->off_cmds_list->ctrl_state =
+ DSI_LP_MODE;
+ } else if (off_cmds_state && !strncmp(off_cmds_state, "DSI_HS_MODE", 11)) {
+ panel_private->off_cmds_list->ctrl_state = DSI_HS_MODE;
+ } else {
+ pr_debug("%s: ON cmds state not specified. Set Default\n",
+ __func__);
+ panel_private->off_cmds_list->ctrl_state = DSI_LP_MODE;
+ }
+
+ panel_data->dsi_panel_off_cmds = panel_private->off_cmds_list;
+
+ return 0;
+}
+
+static int dsi_panel_parse_backlight(struct platform_device *pdev,
+ struct dsi_panel_common_pdata *panel_data,
+ char *bl_ctrl)
+{
+ int rc;
+ u32 res[6];
+ static const char *bl_ctrl_type;
+
+ bl_ctrl_type = of_get_property(pdev->dev.of_node,
+ "qcom,mdss-pan-bl-ctrl", NULL);
+ if ((bl_ctrl_type) && (!strncmp(bl_ctrl_type, "bl_ctrl_wled", 12))) {
+ led_trigger_register_simple("bkl-trigger", &bl_led_trigger);
+ pr_debug("%s: SUCCESS-> WLED TRIGGER register\n", __func__);
+ *bl_ctrl = BL_WLED;
+ }
+
+ rc = of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,mdss-pan-bl-levels", res, 2);
+ panel_data->panel_info.bl_min = (!rc ? res[0] : 0);
+ panel_data->panel_info.bl_max = (!rc ? res[1] : 255);
+ return rc;
+}
+
+static int dsi_panel_parse_other(struct platform_device *pdev,
+ struct dsi_panel_common_pdata *panel_data)
+{
+ const char *pdest;
+ u32 tmp;
+ int rc;
+
+ pdest = of_get_property(pdev->dev.of_node,
+ "qcom,mdss-pan-dest", NULL);
+ if (strlen(pdest) != 9) {
+ pr_err("%s: Unknown pdest specified\n", __func__);
+ return -EINVAL;
+ }
+ if (!strncmp(pdest, "display_1", 9)) {
+ panel_data->panel_info.pdest = DISPLAY_1;
+ } else if (!strncmp(pdest, "display_2", 9)) {
+ panel_data->panel_info.pdest = DISPLAY_2;
+ } else {
+ pr_debug("%s: pdest not specified. Set Default\n",
+ __func__);
+ panel_data->panel_info.pdest = DISPLAY_1;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mdss-pan-underflow-clr", &tmp);
+ panel_data->panel_info.lcdc.underflow_clr = (!rc ? tmp : 0xff);
+
+ return rc;
+}
+
+static int dsi_panel_parse_dt(struct platform_device *pdev,
+ struct dsi_panel_common_pdata *panel_data,
+ char *bl_ctrl)
+{
+ int rc;
+
+ rc = dsi_panel_parse_gpio(pdev);
+ if (rc) {
+ pr_err("fail to parse panel GPIOs\n");
+ return rc;
+ }
+
+ rc = dsi_panel_parse_regulator(pdev);
+ if (rc) {
+ pr_err("fail to parse panel regulators\n");
+ return rc;
+ }
+
+ rc = dsi_panel_parse_timing(pdev, panel_data);
+ if (rc) {
+ pr_err("fail to parse panel timing\n");
+ return rc;
+ }
+
+ rc = dsi_panel_parse_phy(pdev, panel_data);
+ if (rc) {
+ pr_err("fail to parse DSI PHY settings\n");
+ return rc;
+ }
+
+ rc = dsi_panel_parse_backlight(pdev, panel_data, bl_ctrl);
+ if (rc) {
+ pr_err("fail to parse DSI backlight\n");
+ return rc;
+ }
+
+ rc = dsi_panel_parse_other(pdev, panel_data);
+ if (rc) {
+ pr_err("fail to parse DSI panel destination\n");
+ return rc;
+ }
+
+ rc = dsi_panel_parse_init_cmds(pdev, panel_data);
+ if (rc) {
+ pr_err("fail to parse DSI init commands\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int __devinit dsi_panel_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ static struct dsi_panel_common_pdata vendor_pdata;
+ static const char *panel_name;
+
+ pr_debug("%s:%d, debug info id=%d", __func__, __LINE__, pdev->id);
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ panel_name = of_get_property(pdev->dev.of_node, "label", NULL);
+ if (!panel_name)
+ pr_debug("%s:%d, panel name not specified\n",
+ __func__, __LINE__);
+ else
+ pr_debug("%s: Panel Name = %s\n", __func__, panel_name);
+
+ rc = dsi_panel_init();
+ if (rc) {
+ pr_err("dsi_panel_init failed %d\n", rc);
+ goto dsi_panel_probe_error;
+
+ }
+ rc = dsi_panel_parse_dt(pdev, &vendor_pdata, &panel_private->bl_ctrl);
+ if (rc) {
+ pr_err("dsi_panel_parse_dt failed %d\n", rc);
+ goto dsi_panel_probe_error;
+ }
+
+ vendor_pdata.on = dsi_panel_on;
+ vendor_pdata.off = dsi_panel_off;
+ vendor_pdata.reset = dsi_panel_reset;
+ vendor_pdata.bl_fnc = dsi_panel_bl_ctrl;
+
+ rc = dsi_panel_device_register_v2(pdev, &vendor_pdata,
+ panel_private->bl_ctrl);
+
+ if (rc) {
+ pr_err("dsi_panel_device_register_v2 failed %d\n", rc);
+ goto dsi_panel_probe_error;
+ }
+
+ return 0;
+dsi_panel_probe_error:
+ dsi_panel_deinit();
+ return rc;
+}
+
+static int __devexit dsi_panel_remove(struct platform_device *pdev)
+{
+ dsi_panel_deinit();
+ return 0;
+}
+
+
+static const struct of_device_id dsi_panel_match[] = {
+ {.compatible = "qcom,dsi-panel-v2"},
+ {}
+};
+
+static struct platform_driver this_driver = {
+ .probe = dsi_panel_probe,
+ .remove = __devexit_p(dsi_panel_remove),
+ .driver = {
+ .name = "dsi_v2_panel",
+ .of_match_table = dsi_panel_match,
+ },
+};
+
+static int __init dsi_panel_module_init(void)
+{
+ return platform_driver_register(&this_driver);
+}
+module_init(dsi_panel_module_init);
diff --git a/drivers/video/msm/mdss/dsi_v2.c b/drivers/video/msm/mdss/dsi_v2.c
new file mode 100644
index 0000000..5833796
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_v2.c
@@ -0,0 +1,798 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/of_device.h>
+
+#include "mdss_panel.h"
+#include "dsi_v2.h"
+
+static struct dsi_panel_common_pdata *panel_common_data;
+static struct dsi_interface dsi_intf;
+
+static int dsi_off(struct mdss_panel_data *pdata)
+{
+ int rc = 0;
+ if (!panel_common_data || !pdata)
+ return -ENODEV;
+
+ if (dsi_intf.op_mode_config)
+ dsi_intf.op_mode_config(DSI_CMD_MODE, pdata);
+
+ pr_debug("panel off commands\n");
+ if (panel_common_data->off)
+ panel_common_data->off(pdata);
+
+ pr_debug("turn off dsi controller\n");
+ if (dsi_intf.off)
+ rc = dsi_intf.off(pdata);
+
+ if (rc) {
+ pr_err("mdss_dsi_off DSI failed %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("turn off panel power\n");
+ if (panel_common_data->reset)
+ panel_common_data->reset(pdata, 0);
+
+ return rc;
+}
+
+static int dsi_on(struct mdss_panel_data *pdata)
+{
+ int rc = 0;
+
+ pr_debug("dsi_on\n");
+
+ if (!panel_common_data || !pdata)
+ return -ENODEV;
+
+
+ pr_debug("dsi_on DSI controller ont\n");
+ if (dsi_intf.on)
+ rc = dsi_intf.on(pdata);
+
+ if (rc) {
+ pr_err("mdss_dsi_on DSI failed %d\n", rc);
+ return rc;
+ }
+ pr_debug("dsi_on power on panel\n");
+ if (panel_common_data->reset)
+ panel_common_data->reset(pdata, 1);
+
+ pr_debug("dsi_on DSI panel ont\n");
+ if (panel_common_data->on)
+ rc = panel_common_data->on(pdata);
+
+ if (rc) {
+ pr_err("mdss_dsi_on panel failed %d\n", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static int dsi_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
+{
+ int rc = 0;
+
+ if (!pdata || !panel_common_data) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -ENODEV;
+ }
+
+ switch (event) {
+ case MDSS_EVENT_PANEL_ON:
+ rc = dsi_on(pdata);
+ break;
+ case MDSS_EVENT_PANEL_OFF:
+ rc = dsi_off(pdata);
+ break;
+ default:
+ pr_debug("%s: unhandled event=%d\n", __func__, event);
+ break;
+ }
+ return rc;
+}
+
+static struct platform_device *get_dsi_platform_device(
+ struct platform_device *dev)
+{
+ struct device_node *dsi_ctrl_np;
+ struct platform_device *ctrl_pdev;
+
+ dsi_ctrl_np = of_parse_phandle(dev->dev.of_node,
+ "qcom,dsi-ctrl-phandle", 0);
+
+ if (!dsi_ctrl_np)
+ return NULL;
+
+ ctrl_pdev = of_find_device_by_node(dsi_ctrl_np);
+ if (!ctrl_pdev)
+ return NULL;
+
+ return ctrl_pdev;
+}
+
+int dsi_panel_device_register_v2(struct platform_device *dev,
+ struct dsi_panel_common_pdata *panel_data,
+ char backlight_ctrl)
+{
+ struct mipi_panel_info *mipi;
+ struct platform_device *ctrl_pdev;
+ int rc;
+ u8 lanes = 0, bpp;
+ u32 h_period, v_period;
+ static struct mdss_panel_data dsi_panel_data;
+
+ h_period = ((panel_data->panel_info.lcdc.h_pulse_width)
+ + (panel_data->panel_info.lcdc.h_back_porch)
+ + (panel_data->panel_info.xres)
+ + (panel_data->panel_info.lcdc.h_front_porch));
+
+ v_period = ((panel_data->panel_info.lcdc.v_pulse_width)
+ + (panel_data->panel_info.lcdc.v_back_porch)
+ + (panel_data->panel_info.yres)
+ + (panel_data->panel_info.lcdc.v_front_porch));
+
+ mipi = &panel_data->panel_info.mipi;
+
+ panel_data->panel_info.type =
+ ((mipi->mode == DSI_VIDEO_MODE)
+ ? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
+
+ if (mipi->data_lane3)
+ lanes += 1;
+ if (mipi->data_lane2)
+ lanes += 1;
+ if (mipi->data_lane1)
+ lanes += 1;
+ if (mipi->data_lane0)
+ lanes += 1;
+
+
+ if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+ || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB888)
+ || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB666_LOOSE))
+ bpp = 3;
+ else if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+ || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB565))
+ bpp = 2;
+ else
+ bpp = 3; /* Default format set to RGB888 */
+
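+ /*
+ * Estimate the DSI bit clock when none is specified: total pixels
+ * per frame (including blanking) times the frame rate times bits
+ * per pixel, divided across the active data lanes.
+ */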
+ if (panel_data->panel_info.type == MIPI_VIDEO_PANEL &&
+ !panel_data->panel_info.clk_rate) {
+ h_period += panel_data->panel_info.lcdc.xres_pad;
+ v_period += panel_data->panel_info.lcdc.yres_pad;
+
+ if (lanes > 0) {
+ panel_data->panel_info.clk_rate =
+ ((h_period * v_period * (mipi->frame_rate) * bpp * 8)
+ / lanes);
+ } else {
+ pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+ panel_data->panel_info.clk_rate =
+ (h_period * v_period
+ * (mipi->frame_rate) * bpp * 8);
+ }
+ }
+
+ ctrl_pdev = get_dsi_platform_device(dev);
+ if (!ctrl_pdev)
+ return -EPROBE_DEFER;
+
+ dsi_panel_data.event_handler = dsi_event_handler;
+
+ dsi_panel_data.panel_info = panel_data->panel_info;
+
+ dsi_panel_data.set_backlight = panel_data->bl_fnc;
+ panel_common_data = panel_data;
+ /*
+ * register in mdp driver
+ */
+ rc = mdss_register_panel(ctrl_pdev, &dsi_panel_data);
+ if (rc) {
+ dev_err(&dev->dev, "unable to register MIPI DSI panel\n");
+ return rc;
+ }
+
+ pr_debug("%s: Panal data initialized\n", __func__);
+ return 0;
+}
+
+void dsi_register_interface(struct dsi_interface *intf)
+{
+ dsi_intf = *intf;
+}
+
+int dsi_cmds_tx_v2(struct mdss_panel_data *pdata,
+ struct dsi_buf *tp, struct dsi_cmd_desc *cmds,
+ int cnt)
+{
+ int rc = 0;
+
+ if (!dsi_intf.tx)
+ return -EINVAL;
+
+ rc = dsi_intf.tx(pdata, tp, cmds, cnt);
+ return rc;
+}
+
+int dsi_cmds_rx_v2(struct mdss_panel_data *pdata,
+ struct dsi_buf *tp, struct dsi_buf *rp,
+ struct dsi_cmd_desc *cmds, int rlen)
+{
+ int rc = 0;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!dsi_intf.rx)
+ return -EINVAL;
+
+ rc = dsi_intf.rx(pdata, tp, rp, cmds, rlen);
+ return rc;
+}
+
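+ /*
+ * dsi_buf helpers: reserve advances the data pointer to leave room,
+ * push rewinds it so a header written at dp->hdr ends up immediately
+ * before the payload already placed in the buffer.
+ */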
+static char *dsi_buf_reserve(struct dsi_buf *dp, int len)
+{
+ dp->data += len;
+ return dp->data;
+}
+
+
+static char *dsi_buf_push(struct dsi_buf *dp, int len)
+{
+ dp->data -= len;
+ dp->len += len;
+ return dp->data;
+}
+
+static char *dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen)
+{
+ dp->hdr = (u32 *)dp->data;
+ return dsi_buf_reserve(dp, hlen);
+}
+
+char *dsi_buf_init(struct dsi_buf *dp)
+{
+ int off;
+
+ dp->data = dp->start;
+ off = (int)dp->data;
+ /* 8 byte align */
+ off &= 0x07;
+ if (off)
+ off = 8 - off;
+ dp->data += off;
+ dp->len = 0;
+ return dp->data;
+}
+
+int dsi_buf_alloc(struct dsi_buf *dp, int size)
+{
+ dp->start = kmalloc(size, GFP_KERNEL);
+ if (dp->start == NULL) {
+ pr_err("%s:%u\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ dp->end = dp->start + size;
+ dp->size = size;
+
+ if ((int)dp->start & 0x07) {
+ pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ dp->data = dp->start;
+ dp->len = 0;
+ return 0;
+}
+
+/*
+ * mipi dsi generic long write
+ */
+static int dsi_generic_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ char *bp;
+ u32 *hp;
+ int i, len;
+
+ bp = dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+ /* fill up payload */
+ if (cm->payload) {
+ len = cm->dlen;
+ len += 3;
+ len &= ~0x03; /* multipled by 4 */
+ for (i = 0; i < cm->dlen; i++)
+ *bp++ = cm->payload[i];
+
+ /* append 0xff to the end */
+ for (; i < len; i++)
+ *bp++ = 0xff;
+
+ dp->len += len;
+ }
+
+ /* fill up header */
+ hp = dp->hdr;
+ *hp = 0;
+ *hp = DSI_HDR_WC(cm->dlen);
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_LONG_PKT;
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_LWRITE);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len;
+}
+
+/*
+ * mipi dsi generic short write with 0, 1 or 2 parameters
+ */
+static int dsi_generic_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+ int len;
+
+ if (cm->dlen && cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return 0;
+ }
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(cm->vc);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ len = (cm->dlen > 2) ? 2 : cm->dlen;
+
+ if (len == 1) {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE1);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(0);
+ } else if (len == 2) {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE2);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(cm->payload[1]);
+ } else {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE);
+ *hp |= DSI_HDR_DATA1(0);
+ *hp |= DSI_HDR_DATA2(0);
+ }
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len;
+}
+
+/*
+ * mipi dsi generic read with 0, 1 or 2 parameters
+ */
+static int dsi_generic_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+ int len;
+
+ if (cm->dlen && cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return 0;
+ }
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_BTA;
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ len = (cm->dlen > 2) ? 2 : cm->dlen;
+
+ if (len == 1) {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ1);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(0);
+ } else if (len == 2) {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ2);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(cm->payload[1]);
+ } else {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ);
+ *hp |= DSI_HDR_DATA1(0);
+ *hp |= DSI_HDR_DATA2(0);
+ }
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return dp->len;
+}
+
+/*
+ * mipi dsi dcs long write
+ */
+static int dsi_dcs_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ char *bp;
+ u32 *hp;
+ int i, len;
+
+ bp = dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+ /*
+ * fill up payload
+ * dcs command byte (first byte) followed by payload
+ */
+ if (cm->payload) {
+ len = cm->dlen;
+ len += 3;
+ len &= ~0x03; /* multipled by 4 */
+ for (i = 0; i < cm->dlen; i++)
+ *bp++ = cm->payload[i];
+
+ /* append 0xff to the end */
+ for (; i < len; i++)
+ *bp++ = 0xff;
+
+ dp->len += len;
+ }
+
+ /* fill up header */
+ hp = dp->hdr;
+ *hp = 0;
+ *hp = DSI_HDR_WC(cm->dlen);
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_LONG_PKT;
+ *hp |= DSI_HDR_DTYPE(DTYPE_DCS_LWRITE);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len;
+}
+
+/*
+ * mipi dsi dcs short write with 0 parameters
+ */
+static int dsi_dcs_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+ int len;
+
+ if (cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(cm->vc);
+ if (cm->ack)
+ *hp |= DSI_HDR_BTA;
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ len = (cm->dlen > 1) ? 1 : cm->dlen;
+
+ *hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */
+ *hp |= DSI_HDR_DATA2(0);
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return dp->len;
+}
+
+/*
+ * mipi dsi dcs short write with 1 parameter
+ */
+static int dsi_dcs_swrite1(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+
+ if (cm->dlen < 2 || cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(cm->vc);
+ if (cm->ack)
+ *hp |= DSI_HDR_BTA;
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ *hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE1);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */
+ *hp |= DSI_HDR_DATA2(cm->payload[1]); /* parameter */
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len;
+}
+
+/*
+ * mipi dsi dcs read with 0 parameters
+ */
+static int dsi_dcs_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+
+ if (cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_BTA;
+ *hp |= DSI_HDR_DTYPE(DTYPE_DCS_READ);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ *hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */
+ *hp |= DSI_HDR_DATA2(0);
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len; /* 4 bytes */
+}
+
+static int dsi_cm_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_CM_ON);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len; /* 4 bytes */
+}
+
+static int dsi_cm_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_CM_OFF);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len; /* 4 bytes */
+}
+
+static int dsi_peripheral_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_ON);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len; /* 4 bytes */
+}
+
+static int dsi_peripheral_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_OFF);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len; /* 4 bytes */
+}
+
+static int dsi_set_max_pktsize(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+
+ if (cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return 0;
+ }
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_MAX_PKTSIZE);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(cm->payload[1]);
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len; /* 4 bytes */
+}
+
+static int dsi_null_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp = DSI_HDR_WC(cm->dlen);
+ *hp |= DSI_HDR_LONG_PKT;
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_NULL_PKT);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len; /* 4 bytes */
+}
+
+static int dsi_blank_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ u32 *hp;
+
+ dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp = DSI_HDR_WC(cm->dlen);
+ *hp |= DSI_HDR_LONG_PKT;
+ *hp |= DSI_HDR_VC(cm->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_BLANK_PKT);
+ if (cm->last)
+ *hp |= DSI_HDR_LAST;
+
+ dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ return dp->len; /* 4 bytes */
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+int dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ int len = 0;
+
+ switch (cm->dtype) {
+ case DTYPE_GEN_WRITE:
+ case DTYPE_GEN_WRITE1:
+ case DTYPE_GEN_WRITE2:
+ len = dsi_generic_swrite(dp, cm);
+ break;
+ case DTYPE_GEN_LWRITE:
+ len = dsi_generic_lwrite(dp, cm);
+ break;
+ case DTYPE_GEN_READ:
+ case DTYPE_GEN_READ1:
+ case DTYPE_GEN_READ2:
+ len = dsi_generic_read(dp, cm);
+ break;
+ case DTYPE_DCS_LWRITE:
+ len = dsi_dcs_lwrite(dp, cm);
+ break;
+ case DTYPE_DCS_WRITE:
+ len = dsi_dcs_swrite(dp, cm);
+ break;
+ case DTYPE_DCS_WRITE1:
+ len = dsi_dcs_swrite1(dp, cm);
+ break;
+ case DTYPE_DCS_READ:
+ len = dsi_dcs_read(dp, cm);
+ break;
+ case DTYPE_MAX_PKTSIZE:
+ len = dsi_set_max_pktsize(dp, cm);
+ break;
+ case DTYPE_NULL_PKT:
+ len = dsi_null_pkt(dp, cm);
+ break;
+ case DTYPE_BLANK_PKT:
+ len = dsi_blank_pkt(dp, cm);
+ break;
+ case DTYPE_CM_ON:
+ len = dsi_cm_on(dp, cm);
+ break;
+ case DTYPE_CM_OFF:
+ len = dsi_cm_off(dp, cm);
+ break;
+ case DTYPE_PERIPHERAL_ON:
+ len = dsi_peripheral_on(dp, cm);
+ break;
+ case DTYPE_PERIPHERAL_OFF:
+ len = dsi_peripheral_off(dp, cm);
+ break;
+ default:
+ pr_debug("%s: dtype=%x NOT supported\n",
+ __func__, cm->dtype);
+ break;
+
+ }
+
+ return len;
+}
+
+/*
+ * dsi_short_read1_resp: 1 parameter
+ */
+int dsi_short_read1_resp(struct dsi_buf *rp)
+{
+ /* strip out dcs type */
+ rp->data++;
+ rp->len = 1;
+ return rp->len;
+}
+
+/*
+ * dsi_short_read2_resp: 2 parameters
+ */
+int dsi_short_read2_resp(struct dsi_buf *rp)
+{
+ /* strip out dcs type */
+ rp->data++;
+ rp->len = 2;
+ return rp->len;
+}
+
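+ /*
+ * long read response layout: byte 0 is the data type, bytes 1-2 carry
+ * the payload word count (LSB first), followed by the payload and a
+ * 2-byte checksum.
+ */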
+int dsi_long_read_resp(struct dsi_buf *rp)
+{
+ short len;
+
+ len = rp->data[2];
+ len <<= 8;
+ len |= rp->data[1];
+ /* strip out dcs header */
+ rp->data += 4;
+ rp->len -= 4;
+ /* strip out 2 bytes of checksum */
+ rp->len -= 2;
+ return len;
+}
diff --git a/drivers/video/msm/mdss/dsi_v2.h b/drivers/video/msm/mdss/dsi_v2.h
new file mode 100644
index 0000000..f68527c
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_v2.h
@@ -0,0 +1,237 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DSI_H
+#define MDSS_DSI_H
+
+#include <linux/list.h>
+#include <mach/scm-io.h>
+
+#include "mdss_panel.h"
+
+#define MIPI_OUTP(addr, data) writel_relaxed((data), (addr))
+#define MIPI_INP(addr) readl_relaxed(addr)
+
+#define MIPI_DSI_PRIM 1
+#define MIPI_DSI_SECD 2
+
+#define MIPI_DSI_PANEL_VGA 0
+#define MIPI_DSI_PANEL_WVGA 1
+#define MIPI_DSI_PANEL_WVGA_PT 2
+#define MIPI_DSI_PANEL_FWVGA_PT 3
+#define MIPI_DSI_PANEL_WSVGA_PT 4
+#define MIPI_DSI_PANEL_QHD_PT 5
+#define MIPI_DSI_PANEL_WXGA 6
+#define MIPI_DSI_PANEL_WUXGA 7
+#define MIPI_DSI_PANEL_720P_PT 8
+#define DSI_PANEL_MAX 8
+
+enum {
+ DSI_VIDEO_MODE,
+ DSI_CMD_MODE,
+};
+
+enum {
+ ST_DSI_CLK_OFF,
+ ST_DSI_SUSPEND,
+ ST_DSI_RESUME,
+ ST_DSI_PLAYING,
+ ST_DSI_NUM
+};
+
+enum {
+ EV_DSI_UPDATE,
+ EV_DSI_DONE,
+ EV_DSI_TOUT,
+ EV_DSI_NUM
+};
+
+enum {
+ LANDSCAPE = 1,
+ PORTRAIT = 2,
+};
+
+enum {
+ DSI_CMD_MODE_DMA,
+ DSI_CMD_MODE_MDP,
+};
+
+enum {
+ BL_PWM,
+ BL_WLED,
+ BL_DCS_CMD,
+ UNKNOWN_CTRL,
+};
+
+enum {
+ DSI_LP_MODE,
+ DSI_HS_MODE,
+};
+
+#define DSI_NON_BURST_SYNCH_PULSE 0
+#define DSI_NON_BURST_SYNCH_EVENT 1
+#define DSI_BURST_MODE 2
+
+#define DSI_RGB_SWAP_RGB 0
+#define DSI_RGB_SWAP_RBG 1
+#define DSI_RGB_SWAP_BGR 2
+#define DSI_RGB_SWAP_BRG 3
+#define DSI_RGB_SWAP_GRB 4
+#define DSI_RGB_SWAP_GBR 5
+
+#define DSI_VIDEO_DST_FORMAT_RGB565 0
+#define DSI_VIDEO_DST_FORMAT_RGB666 1
+#define DSI_VIDEO_DST_FORMAT_RGB666_LOOSE 2
+#define DSI_VIDEO_DST_FORMAT_RGB888 3
+
+#define DSI_CMD_DST_FORMAT_RGB111 0
+#define DSI_CMD_DST_FORMAT_RGB332 3
+#define DSI_CMD_DST_FORMAT_RGB444 4
+#define DSI_CMD_DST_FORMAT_RGB565 6
+#define DSI_CMD_DST_FORMAT_RGB666 7
+#define DSI_CMD_DST_FORMAT_RGB888 8
+
+#define DSI_CMD_TRIGGER_NONE 0x0 /* mdp trigger */
+#define DSI_CMD_TRIGGER_TE 0x02
+#define DSI_CMD_TRIGGER_SW 0x04
+#define DSI_CMD_TRIGGER_SW_SEOF 0x05 /* cmd dma only */
+#define DSI_CMD_TRIGGER_SW_TE 0x06
+
+#define DSI_HOST_HDR_SIZE 4
+#define DSI_HDR_LAST BIT(31)
+#define DSI_HDR_LONG_PKT BIT(30)
+#define DSI_HDR_BTA BIT(29)
+#define DSI_HDR_VC(vc) (((vc) & 0x03) << 22)
+#define DSI_HDR_DTYPE(dtype) (((dtype) & 0x03f) << 16)
+#define DSI_HDR_DATA2(data) (((data) & 0x0ff) << 8)
+#define DSI_HDR_DATA1(data) ((data) & 0x0ff)
+#define DSI_HDR_WC(wc) ((wc) & 0x0ffff)
+
+#define DSI_BUF_SIZE 1024
+#define DSI_MRPS 0x04 /* Maximum Return Packet Size */
+
+#define DSI_LEN 8 /* 4 x 4 - 6 - 2, bytes dcs header+crc-align */
+
+struct dsi_buf {
+ u32 *hdr; /* dsi host header */
+ char *start; /* buffer start addr */
+ char *end; /* buffer end addr */
+ int size; /* size of buffer */
+ char *data; /* buffer */
+ int len; /* data length */
+ dma_addr_t dmap; /* mapped dma addr */
+};
+
+/* dcs read/write */
+#define DTYPE_DCS_WRITE 0x05 /* short write, 0 parameter */
+#define DTYPE_DCS_WRITE1 0x15 /* short write, 1 parameter */
+#define DTYPE_DCS_READ 0x06 /* read */
+#define DTYPE_DCS_LWRITE 0x39 /* long write */
+
+/* generic read/write */
+#define DTYPE_GEN_WRITE 0x03 /* short write, 0 parameter */
+#define DTYPE_GEN_WRITE1 0x13 /* short write, 1 parameter */
+#define DTYPE_GEN_WRITE2 0x23 /* short write, 2 parameter */
+#define DTYPE_GEN_LWRITE 0x29 /* long write */
+#define DTYPE_GEN_READ 0x04 /* long read, 0 parameter */
+#define DTYPE_GEN_READ1 0x14 /* long read, 1 parameter */
+#define DTYPE_GEN_READ2 0x24 /* long read, 2 parameter */
+
+#define DTYPE_TEAR_ON 0x35 /* set tear on */
+#define DTYPE_MAX_PKTSIZE 0x37 /* set max packet size */
+#define DTYPE_NULL_PKT 0x09 /* null packet, no data */
+#define DTYPE_BLANK_PKT 0x19 /* blanking packet, no data */
+
+#define DTYPE_CM_ON 0x02 /* color mode off */
+#define DTYPE_CM_OFF 0x12 /* color mode on */
+#define DTYPE_PERIPHERAL_OFF 0x22
+#define DTYPE_PERIPHERAL_ON 0x32
+
+/*
+ * dcs response
+ */
+#define DTYPE_ACK_ERR_RESP 0x02
+#define DTYPE_EOT_RESP 0x08 /* end of tx */
+#define DTYPE_GEN_READ1_RESP 0x11 /* 1 parameter, short */
+#define DTYPE_GEN_READ2_RESP 0x12 /* 2 parameter, short */
+#define DTYPE_GEN_LREAD_RESP 0x1a
+#define DTYPE_DCS_LREAD_RESP 0x1c
+#define DTYPE_DCS_READ1_RESP 0x21 /* 1 parameter, short */
+#define DTYPE_DCS_READ2_RESP 0x22 /* 2 parameter, short */
+
+struct dsi_cmd_desc {
+ int dtype;
+ int last;
+ int vc;
+ int ack; /* ask ACK from peripheral */
+ int wait;
+ int dlen;
+ char *payload;
+};
+
+struct dsi_panel_cmds_list {
+ struct dsi_cmd_desc *buf;
+ char size;
+ char ctrl_state;
+};
+
+struct dsi_panel_common_pdata {
+ struct mdss_panel_info panel_info;
+ int (*on) (struct mdss_panel_data *pdata);
+ int (*off) (struct mdss_panel_data *pdata);
+ void (*reset)(struct mdss_panel_data *pdata, int enable);
+ void (*bl_fnc) (struct mdss_panel_data *pdata, u32 bl_level);
+ struct dsi_panel_cmds_list *dsi_panel_on_cmds;
+ struct dsi_panel_cmds_list *dsi_panel_off_cmds;
+};
+
+struct dsi_interface {
+ int (*on)(struct mdss_panel_data *pdata);
+ int (*off)(struct mdss_panel_data *pdata);
+ void (*op_mode_config)(int mode, struct mdss_panel_data *pdata);
+ int (*tx)(struct mdss_panel_data *pdata,
+ struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt);
+ int (*rx)(struct mdss_panel_data *pdata,
+ struct dsi_buf *tp, struct dsi_buf *rp,
+ struct dsi_cmd_desc *cmds, int len);
+ int index;
+ void *private;
+};
+
+int dsi_panel_device_register_v2(struct platform_device *pdev,
+ struct dsi_panel_common_pdata *panel_data,
+ char bl_ctrl);
+
+void dsi_register_interface(struct dsi_interface *intf);
+
+int dsi_cmds_rx_v2(struct mdss_panel_data *pdata,
+ struct dsi_buf *tp, struct dsi_buf *rp,
+ struct dsi_cmd_desc *cmds, int len);
+
+int dsi_cmds_tx_v2(struct mdss_panel_data *pdata,
+ struct dsi_buf *tp, struct dsi_cmd_desc *cmds,
+ int cnt);
+
+char *dsi_buf_init(struct dsi_buf *dp);
+
+int dsi_buf_alloc(struct dsi_buf *dp, int size);
+
+int dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
+
+int dsi_short_read1_resp(struct dsi_buf *rp);
+
+int dsi_short_read2_resp(struct dsi_buf *rp);
+
+int dsi_long_read_resp(struct dsi_buf *rp);
+
+#endif /* MDSS_DSI_H */
diff --git a/drivers/video/msm/mdss/mdp3.c b/drivers/video/msm/mdss/mdp3.c
new file mode 100644
index 0000000..52243eb
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3.c
@@ -0,0 +1,915 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/memory_alloc.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+
+#include <mach/board.h>
+#include <mach/clk.h>
+#include <mach/hardware.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/iommu.h>
+#include <mach/iommu_domains.h>
+#include <mach/msm_memtypes.h>
+
+#include "mdp3.h"
+#include "mdss_fb.h"
+#include "mdp3_hwio.h"
+#include "mdp3_ctrl.h"
+
+#define MDP_CORE_HW_VERSION 0x03040310
+struct mdp3_hw_resource *mdp3_res;
+
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_MDP_PORT0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+ MDP_BUS_VECTOR_ENTRY(0, 0),
+ MDP_BUS_VECTOR_ENTRY(SZ_128M, SZ_256M),
+ MDP_BUS_VECTOR_ENTRY(SZ_256M, SZ_512M),
+};
+
+static struct msm_bus_paths mdp_bus_usecases[ARRAY_SIZE(mdp_bus_vectors)];
+
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+ .usecase = mdp_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+ .name = "mdp3",
+};
+
+struct mdp3_iommu_domain_map mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_MAX] = {
+ [MDP3_IOMMU_DOMAIN] = {
+ .domain_type = MDP3_IOMMU_DOMAIN,
+ .client_name = "mdp_dma",
+ .partitions = {
+ {
+ .start = SZ_128K,
+ .size = SZ_1G - SZ_128K,
+ },
+ },
+ .npartitions = 1,
+ },
+};
+
+struct mdp3_iommu_ctx_map mdp3_iommu_contexts[MDP3_IOMMU_CTX_MAX] = {
+ [MDP3_IOMMU_CTX_PPP_0] = {
+ .ctx_type = MDP3_IOMMU_CTX_PPP_0,
+ .domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
+ .ctx_name = "mdpe_0",
+ .attached = 0,
+ },
+ [MDP3_IOMMU_CTX_PPP_1] = {
+ .ctx_type = MDP3_IOMMU_CTX_PPP_1,
+ .domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
+ .ctx_name = "mdpe_1",
+ .attached = 0,
+ },
+
+ [MDP3_IOMMU_CTX_DMA_0] = {
+ .ctx_type = MDP3_IOMMU_CTX_DMA_0,
+ .domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
+ .ctx_name = "mdps_0",
+ .attached = 0,
+ },
+
+ [MDP3_IOMMU_CTX_DMA_1] = {
+ .ctx_type = MDP3_IOMMU_CTX_DMA_1,
+ .domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
+ .ctx_name = "mdps_1",
+ .attached = 0,
+ },
+};
+
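+ /*
+ * Read and clear the MDP interrupt status, mask it with the currently
+ * enabled interrupts and invoke the registered callback for every bit
+ * that is set.
+ */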
+static irqreturn_t mdp3_irq_handler(int irq, void *ptr)
+{
+ int i = 0;
+ struct mdp3_hw_resource *mdata = (struct mdp3_hw_resource *)ptr;
+ u32 mdp_interrupt = MDP3_REG_READ(MDP3_REG_INTR_STATUS);
+
+ MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, mdp_interrupt);
+ pr_debug("mdp3_irq_handler irq=%d\n", mdp_interrupt);
+
+ spin_lock(&mdata->irq_lock);
+ mdp_interrupt &= mdata->irqMask;
+
+ while (mdp_interrupt && i < MDP3_MAX_INTR) {
+ if ((mdp_interrupt & 0x1) && mdata->callbacks[i].cb)
+ mdata->callbacks[i].cb(i, mdata->callbacks[i].data);
+ mdp_interrupt = mdp_interrupt >> 1;
+ i++;
+ }
+ spin_unlock(&mdata->irq_lock);
+
+ return IRQ_HANDLED;
+}
+
+void mdp3_irq_enable(int type)
+{
+ unsigned long flag;
+ int irqEnabled = 0;
+
+ pr_debug("mdp3_irq_enable type=%d\n", type);
+ spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+ if (mdp3_res->irqMask & BIT(type)) {
+ pr_debug("interrupt %d already enabled\n", type);
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+ return;
+ }
+ irqEnabled = mdp3_res->irqMask;
+ mdp3_res->irqMask |= BIT(type);
+ MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irqMask);
+ if (!irqEnabled)
+ enable_irq(mdp3_res->irq);
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+void mdp3_irq_disable(int type)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+ if (mdp3_res->irqMask & BIT(type)) {
+ mdp3_res->irqMask &= ~BIT(type);
+ MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irqMask);
+ if (!mdp3_res->irqMask)
+ disable_irq(mdp3_res->irq);
+ } else {
+ pr_debug("interrupt %d not enabled\n", type);
+ }
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+void mdp3_irq_disable_nosync(int type)
+{
+ if (mdp3_res->irqMask & BIT(type)) {
+ mdp3_res->irqMask &= ~BIT(type);
+ MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irqMask);
+ if (!mdp3_res->irqMask)
+ disable_irq_nosync(mdp3_res->irq);
+ } else {
+ pr_debug("interrupt %d not enabled\n", type);
+ }
+}
+
+int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb)
+{
+ unsigned long flag;
+
+ pr_debug("interrupt %d callback n", type);
+ spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+ if (cb)
+ mdp3_res->callbacks[type] = *cb;
+ else
+ mdp3_res->callbacks[type].cb = NULL;
+
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+ return 0;
+}
+
+static int mdp3_bus_scale_register(void)
+{
+ if (!mdp3_res->bus_handle) {
+ struct msm_bus_scale_pdata *bus_pdata = &mdp_bus_scale_table;
+ int i;
+
+ for (i = 0; i < bus_pdata->num_usecases; i++) {
+ mdp_bus_usecases[i].num_paths = 1;
+ mdp_bus_usecases[i].vectors = &mdp_bus_vectors[i];
+ }
+
+ mdp3_res->bus_handle = msm_bus_scale_register_client(bus_pdata);
+ if (!mdp3_res->bus_handle) {
+ pr_err("not able to get bus scale\n");
+ return -ENOMEM;
+ }
+ pr_debug("register bus_hdl=%x\n", mdp3_res->bus_handle);
+ }
+ return 0;
+}
+
+static void mdp3_bus_scale_unregister(void)
+{
+ pr_debug("unregister bus_handle=%x\n", mdp3_res->bus_handle);
+
+ if (mdp3_res->bus_handle)
+ msm_bus_scale_unregister_client(mdp3_res->bus_handle);
+}
+
+int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
+{
+ static int current_bus_idx;
+ int bus_idx;
+ int rc;
+
+ if (mdp3_res->bus_handle < 1) {
+ pr_err("invalid bus handle %d\n", mdp3_res->bus_handle);
+ return -EINVAL;
+ }
+
+ if ((ab_quota | ib_quota) == 0) {
+ bus_idx = 0;
+ } else {
+ int num_cases = mdp_bus_scale_table.num_usecases;
+ struct msm_bus_vectors *vect = NULL;
+
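+ /*
+ * Write the new vectors into a non-zero use-case slot other than
+ * the one currently selected, so the active vector is never
+ * modified in place.
+ */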
+ bus_idx = (current_bus_idx % (num_cases - 1)) + 1;
+
+ /* aligning to avoid performing updates for small changes */
+ ab_quota = ALIGN(ab_quota, SZ_64M);
+ ib_quota = ALIGN(ib_quota, SZ_64M);
+
+ vect = mdp_bus_scale_table.usecase[current_bus_idx].vectors;
+ if ((ab_quota == vect->ab) && (ib_quota == vect->ib)) {
+ pr_debug("skip bus scaling, no change in vectors\n");
+ return 0;
+ }
+
+ vect = mdp_bus_scale_table.usecase[bus_idx].vectors;
+ vect->ab = ab_quota;
+ vect->ib = ib_quota;
+
+ pr_debug("bus scale idx=%d ab=%llu ib=%llu\n", bus_idx,
+ vect->ab, vect->ib);
+ }
+ current_bus_idx = bus_idx;
+ rc = msm_bus_scale_client_update_request(mdp3_res->bus_handle, bus_idx);
+ return rc;
+}
+
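+ /*
+ * Reference-counted clock control: the clock is prepared/enabled when
+ * the first user turns it on and disabled/unprepared when the last
+ * user turns it off; a negative count indicates unbalanced calls.
+ */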
+static int mdp3_clk_update(u32 clk_idx, u32 enable)
+{
+ int ret = -EINVAL;
+ struct clk *clk;
+ int count = 0;
+
+ if (clk_idx >= MDP3_MAX_CLK || !mdp3_res->clocks[clk_idx])
+ return -ENODEV;
+
+ clk = mdp3_res->clocks[clk_idx];
+
+ if (enable)
+ mdp3_res->clock_ref_count[clk_idx]++;
+ else
+ mdp3_res->clock_ref_count[clk_idx]--;
+
+ count = mdp3_res->clock_ref_count[clk_idx];
+ if (count == 1) {
+ pr_debug("clk=%d en=%d\n", clk_idx, enable);
+ ret = clk_prepare_enable(clk);
+ } else if (count == 0) {
+ pr_debug("clk=%d disable\n", clk_idx);
+ clk_disable_unprepare(clk);
+ ret = 0;
+ } else if (count < 0) {
+ pr_err("clk=%d count=%d\n", clk_idx, count);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+
+
+int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate)
+{
+ int ret = 0;
+ unsigned long rounded_rate;
+ struct clk *clk = mdp3_res->clocks[clk_type];
+
+ if (clk) {
+ mutex_lock(&mdp3_res->res_mutex);
+ rounded_rate = clk_round_rate(clk, clk_rate);
+ if (IS_ERR_VALUE(rounded_rate)) {
+ pr_err("unable to round rate err=%ld\n", rounded_rate);
+ mutex_unlock(&mdp3_res->res_mutex);
+ return -EINVAL;
+ }
+ if (rounded_rate != clk_get_rate(clk)) {
+ ret = clk_set_rate(clk, rounded_rate);
+ if (ret)
+ pr_err("clk_set_rate failed ret=%d\n", ret);
+ else
+ pr_debug("mdp clk rate=%lu\n", rounded_rate);
+ }
+ mutex_unlock(&mdp3_res->res_mutex);
+ } else {
+ pr_err("mdp src clk not setup properly\n");
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+unsigned long mdp3_get_clk_rate(u32 clk_idx)
+{
+ unsigned long clk_rate = 0;
+ struct clk *clk;
+
+ if (clk_idx >= MDP3_MAX_CLK)
+ return -ENODEV;
+
+ clk = mdp3_res->clocks[clk_idx];
+
+ if (clk) {
+ mutex_lock(&mdp3_res->res_mutex);
+ clk_rate = clk_get_rate(clk);
+ mutex_unlock(&mdp3_res->res_mutex);
+ }
+ return clk_rate;
+}
+
+static int mdp3_clk_register(char *clk_name, int clk_idx)
+{
+ struct clk *tmp;
+
+ if (clk_idx >= MDP3_MAX_CLK) {
+ pr_err("invalid clk index %d\n", clk_idx);
+ return -EINVAL;
+ }
+
+ tmp = devm_clk_get(&mdp3_res->pdev->dev, clk_name);
+ if (IS_ERR(tmp)) {
+ pr_err("unable to get clk: %s\n", clk_name);
+ return PTR_ERR(tmp);
+ }
+
+ mdp3_res->clocks[clk_idx] = tmp;
+
+ return 0;
+}
+
+static int mdp3_clk_setup(void)
+{
+ int rc;
+
+ rc = mdp3_clk_register("iface_clk", MDP3_CLK_AHB);
+ if (rc)
+ return rc;
+
+ rc = mdp3_clk_register("core_clk", MDP3_CLK_CORE);
+ if (rc)
+ return rc;
+
+ rc = mdp3_clk_register("vsync_clk", MDP3_CLK_VSYNC);
+ if (rc)
+ return rc;
+
+ rc = mdp3_clk_register("lcdc_clk", MDP3_CLK_LCDC);
+ if (rc)
+ return rc;
+
+ rc = mdp3_clk_register("dsi_clk", MDP3_CLK_DSI);
+ if (rc)
+ return rc;
+ return rc;
+}
+
+static void mdp3_clk_remove(void)
+{
+ clk_put(mdp3_res->clocks[MDP3_CLK_AHB]);
+ clk_put(mdp3_res->clocks[MDP3_CLK_CORE]);
+ clk_put(mdp3_res->clocks[MDP3_CLK_VSYNC]);
+ clk_put(mdp3_res->clocks[MDP3_CLK_LCDC]);
+ clk_put(mdp3_res->clocks[MDP3_CLK_DSI]);
+}
+
+int mdp3_clk_enable(int enable)
+{
+ int rc;
+
+ pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));
+
+ mutex_lock(&mdp3_res->res_mutex);
+ rc = mdp3_clk_update(MDP3_CLK_AHB, enable);
+ rc |= mdp3_clk_update(MDP3_CLK_CORE, enable);
+ rc |= mdp3_clk_update(MDP3_CLK_VSYNC, enable);
+ rc |= mdp3_clk_update(MDP3_CLK_DSI, enable);
+ mutex_unlock(&mdp3_res->res_mutex);
+ return rc;
+}
+
+static int mdp3_irq_setup(void)
+{
+ int ret;
+
+ ret = devm_request_irq(&mdp3_res->pdev->dev,
+ mdp3_res->irq,
+ mdp3_irq_handler,
+ IRQF_DISABLED, "MDP", mdp3_res);
+ if (ret) {
+ pr_err("mdp request_irq() failed!\n");
+ return ret;
+ }
+ disable_irq(mdp3_res->irq);
+ return 0;
+}
+
+static int mdp3_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+{
+ pr_err("MDP IOMMU page fault: iova 0x%lx\n", iova);
+ return 0;
+}
+
+int mdp3_iommu_attach(int context)
+{
+ struct mdp3_iommu_ctx_map *context_map;
+ struct mdp3_iommu_domain_map *domain_map;
+
+ if (context >= MDP3_IOMMU_CTX_MAX)
+ return -EINVAL;
+
+ context_map = mdp3_res->iommu_contexts + context;
+ if (context_map->attached) {
+ pr_warn("mdp iommu already attached\n");
+ return 0;
+ }
+
+ domain_map = context_map->domain;
+
+ iommu_attach_device(domain_map->domain, context_map->ctx);
+
+ context_map->attached = true;
+ return 0;
+}
+
+int mdp3_iommu_dettach(int context)
+{
+ struct mdp3_iommu_ctx_map *context_map;
+ struct mdp3_iommu_domain_map *domain_map;
+
+ if (context >= MDP3_IOMMU_CTX_MAX)
+ return -EINVAL;
+
+ context_map = mdp3_res->iommu_contexts + context;
+ if (!context_map->attached) {
+ pr_warn("mdp iommu not attached\n");
+ return 0;
+ }
+
+ domain_map = context_map->domain;
+ iommu_detach_device(domain_map->domain, context_map->ctx);
+ context_map->attached = false;
+
+ return 0;
+}
+
+int mdp3_iommu_domain_init(void)
+{
+ struct msm_iova_layout layout;
+ int i;
+
+ if (mdp3_res->domains) {
+ pr_warn("iommu domain already initialized\n");
+ return 0;
+ }
+
+ for (i = 0; i < MDP3_IOMMU_DOMAIN_MAX; i++) {
+ int domain_idx;
+ layout.client_name = mdp3_iommu_domains[i].client_name;
+ layout.partitions = mdp3_iommu_domains[i].partitions;
+ layout.npartitions = mdp3_iommu_domains[i].npartitions;
+ layout.is_secure = false;
+
+ domain_idx = msm_register_domain(&layout);
+ if (IS_ERR_VALUE(domain_idx))
+ return -EINVAL;
+
+ mdp3_iommu_domains[i].domain_idx = domain_idx;
+ mdp3_iommu_domains[i].domain = msm_get_iommu_domain(domain_idx);
+ if (!mdp3_iommu_domains[i].domain) {
+ pr_err("unable to get iommu domain(%d)\n",
+ domain_idx);
+ return -EINVAL;
+ }
+ iommu_set_fault_handler(mdp3_iommu_domains[i].domain,
+ mdp3_iommu_fault_handler,
+ NULL);
+ }
+
+ mdp3_res->domains = mdp3_iommu_domains;
+
+ return 0;
+}
+
+int mdp3_iommu_context_init(void)
+{
+ int i;
+
+ if (mdp3_res->iommu_contexts) {
+ pr_warn("iommu context already initialized\n");
+ return 0;
+ }
+
+ for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++) {
+ mdp3_iommu_contexts[i].ctx =
+ msm_iommu_get_ctx(mdp3_iommu_contexts[i].ctx_name);
+
+ if (!mdp3_iommu_contexts[i].ctx) {
+ pr_warn("unable to get iommu ctx(%s)\n",
+ mdp3_iommu_contexts[i].ctx_name);
+ return -EINVAL;
+ }
+ }
+
+ mdp3_res->iommu_contexts = mdp3_iommu_contexts;
+
+ return 0;
+}
+
+int mdp3_iommu_init(void)
+{
+ int ret;
+
+ ret = mdp3_iommu_domain_init();
+ if (ret) {
+ pr_err("mdp3 iommu domain init fails\n");
+ return ret;
+ }
+
+ ret = mdp3_iommu_context_init();
+ if (ret) {
+ pr_err("mdp3 iommu context init fails\n");
+ return ret;
+ }
+ return ret;
+}
+
+static int mdp3_check_version(void)
+{
+ int rc;
+
+ rc = mdp3_clk_update(MDP3_CLK_AHB, 1);
+ rc |= mdp3_clk_update(MDP3_CLK_CORE, 1);
+ if (rc)
+ return rc;
+
+ mdp3_res->mdp_rev = MDP3_REG_READ(MDP3_REG_HW_VERSION);
+
+ rc = mdp3_clk_update(MDP3_CLK_AHB, 0);
+ rc |= mdp3_clk_update(MDP3_CLK_CORE, 0);
+ if (rc)
+ pr_err("fail to turn off the MDP3_CLK_AHB clk\n");
+
+ if (mdp3_res->mdp_rev != MDP_CORE_HW_VERSION) {
+ pr_err("mdp_hw_revision=%x mismatch\n", mdp3_res->mdp_rev);
+ rc = -ENODEV;
+ }
+ return rc;
+}
+
+static int mdp3_hw_init(void)
+{
+ int i;
+
+ for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) {
+ mdp3_res->dma[i].dma_sel = i;
+ mdp3_res->dma[i].capability = MDP3_DMA_CAP_ALL;
+ mdp3_res->dma[i].in_use = 0;
+ mdp3_res->dma[i].available = 1;
+ }
+ mdp3_res->dma[MDP3_DMA_S].capability = MDP3_DMA_CAP_DITHER;
+ mdp3_res->dma[MDP3_DMA_E].available = 0;
+
+ for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) {
+ mdp3_res->intf[i].cfg.type = i;
+ mdp3_res->intf[i].active = 0;
+ mdp3_res->intf[i].in_use = 0;
+ mdp3_res->intf[i].available = 1;
+ }
+ mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_AHB].available = 0;
+ mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_LCDC].available = 0;
+
+ return 0;
+}
+
+static int mdp3_res_init(void)
+{
+ int rc = 0;
+
+ rc = mdp3_irq_setup();
+ if (rc)
+ return rc;
+
+ rc = mdp3_clk_setup();
+ if (rc)
+ return rc;
+
+ mdp3_res->ion_client = msm_ion_client_create(-1, mdp3_res->pdev->name);
+ if (IS_ERR_OR_NULL(mdp3_res->ion_client)) {
+ pr_err("msm_ion_client_create() return error (%p)\n",
+ mdp3_res->ion_client);
+ mdp3_res->ion_client = NULL;
+ return -EINVAL;
+ }
+
+ rc = mdp3_iommu_init();
+ if (rc)
+ return rc;
+
+ rc = mdp3_iommu_attach(MDP3_IOMMU_CTX_DMA_0);
+ if (rc) {
+ pr_err("fail to attach DMA-P context 0\n");
+ return rc;
+ }
+ rc = mdp3_bus_scale_register();
+ if (rc) {
+ pr_err("unable to register bus scaling\n");
+ return rc;
+ }
+
+ rc = mdp3_hw_init();
+
+ return rc;
+}
+
+static int mdp3_parse_dt(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys");
+ if (!res) {
+ pr_err("unable to get MDP base address\n");
+ return -EINVAL;
+ }
+
+ mdp3_res->mdp_reg_size = resource_size(res);
+ mdp3_res->mdp_base = devm_ioremap(&pdev->dev, res->start,
+ mdp3_res->mdp_reg_size);
+ if (unlikely(!mdp3_res->mdp_base)) {
+ pr_err("unable to map MDP base\n");
+ return -ENOMEM;
+ }
+
+ pr_debug("MDP HW Base phy_Address=0x%x virt=0x%x\n",
+ (int) res->start,
+ (int) mdp3_res->mdp_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ pr_err("unable to get MDSS irq\n");
+ return -EINVAL;
+ }
+ mdp3_res->irq = res->start;
+
+ return 0;
+}
+
+static int mdp3_init(struct msm_fb_data_type *mfd)
+{
+ return mdp3_ctrl_init(mfd);
+}
+
+u32 mdp3_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+ /*
+ * The adreno GPU hardware requires that the pitch be aligned to
+ * 32 pixels for color buffers, so for the cases where the GPU
+ * is writing directly to fb0, the framebuffer pitch
+ * also needs to be 32 pixel aligned
+ */
+
+ if (fb_index == 0)
+ return ALIGN(xres, 32) * bpp;
+ else
+ return xres * bpp;
+}
+
+/*
+ * Physically contiguous memory should be allocated in mdss_fb, and the SMMU
+ * virtual address mapping can be done in the MDP hardware-specific code. The
+ * mapping should be reference counted; when nothing is currently mapped, the
+ * SMMU context can be detached, allowing power saving in the SMMU.
+ */
+static int mdp3_fbmem_alloc(struct msm_fb_data_type *mfd)
+{
+ int dom;
+ void *virt = NULL;
+ unsigned long phys = 0;
+ size_t size;
+ u32 yres = mfd->fbi->var.yres_virtual;
+
+ size = PAGE_ALIGN(mfd->fbi->fix.line_length * yres);
+
+ if (mfd->index == 0) {
+ virt = allocate_contiguous_memory(size, MEMTYPE_EBI1, SZ_1M, 0);
+ if (!virt) {
+ pr_err("unable to alloc fbmem size=%u\n", size);
+ return -ENOMEM;
+ }
+ phys = memory_pool_node_paddr(virt);
+ dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN)->domain_idx;
+ msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K, 0,
+ &mfd->iova);
+
+ pr_debug("allocating %u bytes at %p (%lx phys) for fb %d\n",
+ size, virt, phys, mfd->index);
+ } else {
+ size = 0;
+ }
+
+ mfd->fbi->screen_base = virt;
+ mfd->fbi->fix.smem_start = phys;
+ mfd->fbi->fix.smem_len = size;
+ return 0;
+}
+
+struct mdp3_dma *mdp3_get_dma_pipe(int capability)
+{
+ int i;
+
+ for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) {
+ if (!mdp3_res->dma[i].in_use && mdp3_res->dma[i].available &&
+ mdp3_res->dma[i].capability & capability) {
+ mdp3_res->dma[i].in_use = true;
+ return &mdp3_res->dma[i];
+ }
+ }
+ return NULL;
+}
+
+struct mdp3_intf *mdp3_get_display_intf(int type)
+{
+ int i;
+
+ for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) {
+ if (!mdp3_res->intf[i].in_use && mdp3_res->intf[i].available &&
+ mdp3_res->intf[i].cfg.type == type) {
+ mdp3_res->intf[i].in_use = true;
+ return &mdp3_res->intf[i];
+ }
+ }
+ return NULL;
+}
+
+static int mdp3_probe(struct platform_device *pdev)
+{
+ int rc;
+ static struct msm_mdp_interface mdp3_interface = {
+ .init_fnc = mdp3_init,
+ .fb_mem_alloc_fnc = mdp3_fbmem_alloc,
+ .fb_stride = mdp3_fb_stride,
+ };
+
+ if (!pdev->dev.of_node) {
+ pr_err("MDP driver only supports device tree probe\n");
+ return -ENOTSUPP;
+ }
+
+ if (mdp3_res) {
+ pr_err("MDP already initialized\n");
+ return -EINVAL;
+ }
+
+ mdp3_res = devm_kzalloc(&pdev->dev, sizeof(struct mdp3_hw_resource),
+ GFP_KERNEL);
+ if (mdp3_res == NULL)
+ return -ENOMEM;
+
+ pdev->id = 0;
+ mdp3_res->pdev = pdev;
+ mutex_init(&mdp3_res->res_mutex);
+ spin_lock_init(&mdp3_res->irq_lock);
+ platform_set_drvdata(pdev, mdp3_res);
+
+ rc = mdp3_parse_dt(pdev);
+ if (rc)
+ goto probe_done;
+
+ rc = mdp3_res_init();
+ if (rc) {
+ pr_err("unable to initialize mdp3 resources\n");
+ goto probe_done;
+ }
+
+ rc = mdp3_check_version();
+ if (rc) {
+ pr_err("mdp3 check version failed\n");
+ goto probe_done;
+ }
+
+ rc = mdss_fb_register_mdp_instance(&mdp3_interface);
+ if (rc)
+ pr_err("unable to register mdp instance\n");
+
+probe_done:
+ if (IS_ERR_VALUE(rc)) {
+ devm_kfree(&pdev->dev, mdp3_res);
+ mdp3_res = NULL;
+ }
+
+ return rc;
+}
+
+static int mdp3_suspend_sub(struct mdp3_hw_resource *mdata)
+{
+ return 0;
+}
+
+static int mdp3_resume_sub(struct mdp3_hw_resource *mdata)
+{
+ return 0;
+}
+
+static int mdp3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+
+ if (!mdata)
+ return -ENODEV;
+
+ pr_debug("display suspend\n");
+
+ return mdp3_suspend_sub(mdata);
+}
+
+static int mdp3_resume(struct platform_device *pdev)
+{
+ struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+
+ if (!mdata)
+ return -ENODEV;
+
+ pr_debug("display resume\n");
+
+ return mdp3_resume_sub(mdata);
+}
+
+static int mdp3_remove(struct platform_device *pdev)
+{
+ struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+
+ if (!mdata)
+ return -ENODEV;
+ pm_runtime_disable(&pdev->dev);
+ mdp3_bus_scale_unregister();
+ mdp3_clk_remove();
+ return 0;
+}
+
+static const struct of_device_id mdp3_dt_match[] = {
+ { .compatible = "qcom,mdss_mdp3",},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdp3_dt_match);
+EXPORT_COMPAT("qcom,mdss_mdp3");
+
+static struct platform_driver mdp3_driver = {
+ .probe = mdp3_probe,
+ .remove = mdp3_remove,
+ .suspend = mdp3_suspend,
+ .resume = mdp3_resume,
+ .shutdown = NULL,
+ .driver = {
+ .name = "mdp3",
+ .of_match_table = mdp3_dt_match,
+ },
+};
+
+static int __init mdp3_driver_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mdp3_driver);
+ if (ret) {
+ pr_err("register mdp3 driver failed!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+module_init(mdp3_driver_init);
diff --git a/drivers/video/msm/mdss/mdp3.h b/drivers/video/msm/mdss/mdp3.h
new file mode 100644
index 0000000..5774e5a
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDP3_H
+#define MDP3_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/earlysuspend.h>
+
+#include <mach/iommu_domains.h>
+
+#include "mdp3_dma.h"
+
+enum {
+ MDP3_CLK_AHB,
+ MDP3_CLK_CORE,
+ MDP3_CLK_VSYNC,
+ MDP3_CLK_LCDC,
+ MDP3_CLK_DSI,
+ MDP3_MAX_CLK
+};
+
+enum {
+ MDP3_IOMMU_DOMAIN,
+ MDP3_IOMMU_DOMAIN_MAX
+};
+
+enum {
+ MDP3_IOMMU_CTX_PPP_0,
+ MDP3_IOMMU_CTX_PPP_1,
+ MDP3_IOMMU_CTX_DMA_0,
+ MDP3_IOMMU_CTX_DMA_1,
+ MDP3_IOMMU_CTX_MAX
+};
+
+enum {
+ MDP3_BW_CLIENT_DMA_P,
+ MDP3_BW_CLIENT_DMA_S,
+ MDP3_BW_CLIENT_DMA_E,
+ MDP3_BW_CLIENT_PPP,
+};
+
+struct mdp3_iommu_domain_map {
+ u32 domain_type;
+ char *client_name;
+ struct msm_iova_partition partitions[1];
+ int npartitions;
+ int domain_idx;
+ struct iommu_domain *domain;
+};
+
+struct mdp3_iommu_ctx_map {
+ u32 ctx_type;
+ struct mdp3_iommu_domain_map *domain;
+ char *ctx_name;
+ struct device *ctx;
+ int attached;
+};
+
+#define MDP3_MAX_INTR 28
+
+struct mdp3_intr_cb {
+ void (*cb)(int type, void *);
+ void *data;
+};
+
+struct mdp3_hw_resource {
+ struct platform_device *pdev;
+ u32 mdp_rev;
+
+ struct mutex res_mutex;
+
+ struct clk *clocks[MDP3_MAX_CLK];
+ int clock_ref_count[MDP3_MAX_CLK];
+
+ char __iomem *mdp_base;
+ size_t mdp_reg_size;
+
+ u32 irq;
+ u32 bus_handle;
+
+ struct ion_client *ion_client;
+ struct mdp3_iommu_domain_map *domains;
+ struct mdp3_iommu_ctx_map *iommu_contexts;
+
+ struct mdp3_dma dma[MDP3_DMA_MAX];
+ struct mdp3_intf intf[MDP3_DMA_OUTPUT_SEL_MAX];
+
+ spinlock_t irq_lock;
+ u32 irqMask;
+ struct mdp3_intr_cb callbacks[MDP3_MAX_INTR];
+
+ struct early_suspend suspend_handler;
+};
+
+extern struct mdp3_hw_resource *mdp3_res;
+
+struct mdp3_dma *mdp3_get_dma_pipe(int capability);
+struct mdp3_intf *mdp3_get_display_intf(int type);
+void mdp3_irq_enable(int type);
+void mdp3_irq_disable(int type);
+void mdp3_irq_disable_nosync(int type);
+int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb);
+int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate);
+int mdp3_clk_enable(int enable);
+int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota);
+
+#define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr)
+#define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr)
+
+#endif /* MDP3_H */
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
new file mode 100644
index 0000000..929e5f8
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -0,0 +1,560 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "mdp3_ctrl.h"
+#include "mdp3.h"
+
+#define MDP_VSYNC_CLK_RATE 19200000
+#define VSYNC_PERIOD 16
+
+void vsync_notify_handler(void *arg)
+{
+ struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
+ complete(&session->vsync_comp);
+}
+
+static int mdp3_ctrl_vsync_enable(struct msm_fb_data_type *mfd, int enable)
+{
+ struct mdp3_session_data *mdp3_session;
+ struct mdp3_vsync_notification vsync_client;
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+ if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+ !mdp3_session->intf)
+ return -ENODEV;
+
+ vsync_client.handler = vsync_notify_handler;
+ vsync_client.arg = mdp3_session;
+
+ mutex_lock(&mdp3_session->lock);
+ if (!mdp3_session->status) {
+ pr_debug("fb%d is not on yet", mfd->index);
+ mutex_unlock(&mdp3_session->lock);
+ return -EINVAL;
+ }
+
+ mdp3_session->dma->vsync_enable(mdp3_session->dma, &vsync_client);
+ mutex_unlock(&mdp3_session->lock);
+ return 0;
+}
+
+static ssize_t mdp3_vsync_show_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+ struct mdp3_session_data *mdp3_session = NULL;
+ u64 vsync_ticks;
+ ktime_t vsync_time;
+ int rc;
+
+ if (!mfd || !mfd->mdp.private1)
+ return 0;
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+ rc = wait_for_completion_interruptible_timeout(
+ &mdp3_session->vsync_comp,
+ msecs_to_jiffies(VSYNC_PERIOD * 5));
+ if (rc <= 0) {
+ pr_warn("vsync wait on fb%d interrupted (%d)\n",
+ mfd->index, rc);
+ return -EBUSY;
+ }
+
+ vsync_time = mdp3_session->dma->get_vsync_time(mdp3_session->dma);
+ vsync_ticks = ktime_to_ns(vsync_time);
+
+ pr_debug("fb%d vsync=%llu", mfd->index, vsync_ticks);
+ rc = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_ticks);
+ return rc;
+}
+
+static DEVICE_ATTR(vsync_event, S_IRUGO, mdp3_vsync_show_event, NULL);
+
+static struct attribute *vsync_fs_attrs[] = {
+ &dev_attr_vsync_event.attr,
+ NULL,
+};
+
+static struct attribute_group vsync_fs_attr_group = {
+ .attrs = vsync_fs_attrs,
+};
+
+static int mdp3_ctrl_res_req_bus(struct msm_fb_data_type *mfd, int status)
+{
+ int rc = 0;
+ if (status) {
+ struct mdss_panel_info *panel_info = mfd->panel_info;
+ int ab = 0;
+ int ib = 0;
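+ /*
+ * average bandwidth (ab) assumes 4 bytes per pixel at the panel
+ * frame rate; instantaneous bandwidth (ib) adds 50% headroom.
+ */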
+ ab = panel_info->xres * panel_info->yres * 4;
+ ab *= panel_info->mipi.frame_rate;
+ ib = (ab * 3) / 2;
+ rc = mdp3_bus_scale_set_quota(MDP3_BW_CLIENT_DMA_P, ab, ib);
+ } else {
+ rc = mdp3_bus_scale_set_quota(MDP3_BW_CLIENT_DMA_P, 0, 0);
+ }
+ return rc;
+}
+
+static int mdp3_ctrl_res_req_clk(struct msm_fb_data_type *mfd, int status)
+{
+ int rc = 0;
+ if (status) {
+ struct mdss_panel_info *panel_info = mfd->panel_info;
+ unsigned long core_clk;
+ int vtotal;
+ vtotal = panel_info->lcdc.v_back_porch +
+ panel_info->lcdc.v_front_porch +
+ panel_info->lcdc.v_pulse_width +
+ panel_info->yres;
+ core_clk = panel_info->xres * panel_info->yres;
+ core_clk *= panel_info->mipi.frame_rate;
+ core_clk = (core_clk / panel_info->yres) * vtotal;
+ mdp3_clk_set_rate(MDP3_CLK_CORE, core_clk);
+ mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE);
+
+ rc = mdp3_clk_enable(true);
+ if (rc)
+ return rc;
+
+ } else {
+ rc = mdp3_clk_enable(false);
+ }
+ return rc;
+}
+
+static int mdp3_ctrl_get_intf_type(struct msm_fb_data_type *mfd)
+{
+ int type;
+ switch (mfd->panel.type) {
+ case MIPI_VIDEO_PANEL:
+ type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
+ break;
+ case MIPI_CMD_PANEL:
+ type = MDP3_DMA_OUTPUT_SEL_DSI_CMD;
+ break;
+ case LCDC_PANEL:
+ type = MDP3_DMA_OUTPUT_SEL_LCDC;
+ break;
+ default:
+ type = MDP3_DMA_OUTPUT_SEL_MAX;
+ }
+ return type;
+}
+
+static int mdp3_ctrl_get_source_format(struct msm_fb_data_type *mfd)
+{
+ int format;
+ switch (mfd->fb_imgType) {
+ case MDP_RGB_565:
+ format = MDP3_DMA_IBUF_FORMAT_RGB565;
+ break;
+ case MDP_RGB_888:
+ format = MDP3_DMA_IBUF_FORMAT_RGB888;
+ break;
+ case MDP_ARGB_8888:
+ case MDP_RGBA_8888:
+ format = MDP3_DMA_IBUF_FORMAT_XRGB8888;
+ break;
+ default:
+ format = MDP3_DMA_IBUF_FORMAT_UNDEFINED;
+ }
+ return format;
+}
+
+static int mdp3_ctrl_get_pack_pattern(struct msm_fb_data_type *mfd)
+{
+ int packPattern = MDP3_DMA_OUTPUT_PACK_PATTERN_RGB;
+ if (mfd->fb_imgType == MDP_RGBA_8888)
+ packPattern = MDP3_DMA_OUTPUT_PACK_PATTERN_BGR;
+ return packPattern;
+}
+
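+/*
+ * Build the interface timing from the panel porch settings. Vertical
+ * values are programmed in units of hsync periods, so vsync_period, the
+ * vsync pulse width and the display start/end lines are all scaled by
+ * hsync_period before being handed to the interface.
+ */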
+static int mdp3_ctrl_intf_init(struct msm_fb_data_type *mfd,
+ struct mdp3_intf *intf)
+{
+ int rc;
+ struct mdp3_intf_cfg cfg;
+ struct mdp3_video_intf_cfg *video = &cfg.video;
+ struct mdss_panel_info *p = mfd->panel_info;
+ int h_back_porch = p->lcdc.h_back_porch;
+ int h_front_porch = p->lcdc.h_front_porch;
+ int w = p->xres;
+ int v_back_porch = p->lcdc.v_back_porch;
+ int v_front_porch = p->lcdc.v_front_porch;
+ int h = p->yres;
+ int h_sync_skew = p->lcdc.hsync_skew;
+ int h_pulse_width = p->lcdc.h_pulse_width;
+ int v_pulse_width = p->lcdc.v_pulse_width;
+ int hsync_period = h_front_porch + h_back_porch + w + h_pulse_width;
+ int vsync_period = v_front_porch + v_back_porch + h + v_pulse_width;
+ vsync_period *= hsync_period;
+
+ cfg.type = mdp3_ctrl_get_intf_type(mfd);
+ if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+ cfg.type == MDP3_DMA_OUTPUT_SEL_LCDC) {
+ video->hsync_period = hsync_period;
+ video->hsync_pulse_width = h_pulse_width;
+ video->vsync_period = vsync_period;
+ video->vsync_pulse_width = v_pulse_width * hsync_period;
+ video->display_start_x = h_back_porch + h_pulse_width;
+ video->display_end_x = hsync_period - h_front_porch - 1;
+ video->display_start_y =
+ (v_back_porch + v_pulse_width) * hsync_period;
+ video->display_end_y =
+ vsync_period - v_front_porch * hsync_period - 1;
+ video->active_start_x = video->display_start_x;
+ video->active_end_x = video->display_end_x;
+ video->active_h_enable = true;
+ video->active_start_y = video->display_start_y;
+ video->active_end_y = video->display_end_y;
+ video->active_v_enable = true;
+ video->hsync_skew = h_sync_skew;
+ video->hsync_polarity = 1;
+ video->vsync_polarity = 1;
+ video->de_polarity = 1;
+ } else if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ cfg.dsi_cmd.primary_dsi_cmd_id = 0;
+ cfg.dsi_cmd.secondary_dsi_cmd_id = 1;
+ cfg.dsi_cmd.dsi_cmd_tg_intf_sel = 0;
+ } else
+ return -EINVAL;
+ rc = mdp3_intf_init(intf, &cfg);
+ return rc;
+}
+
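+/*
+ * Derive the DMA source configuration from the panel resolution and the
+ * framebuffer (format, stride, iova), and the output configuration from
+ * the interface type, pack pattern and 8 bits per color component.
+ */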
+static int mdp3_ctrl_dma_init(struct msm_fb_data_type *mfd,
+ struct mdp3_dma *dma)
+{
+ int rc;
+ struct mdss_panel_info *panel_info = mfd->panel_info;
+ struct fb_info *fbi = mfd->fbi;
+ struct fb_fix_screeninfo *fix;
+ struct fb_var_screeninfo *var;
+ struct mdp3_dma_output_config outputConfig;
+ struct mdp3_dma_source sourceConfig;
+
+ fix = &fbi->fix;
+ var = &fbi->var;
+
+ sourceConfig.format = mdp3_ctrl_get_source_format(mfd);
+ sourceConfig.width = panel_info->xres;
+ sourceConfig.height = panel_info->yres;
+ sourceConfig.x = 0;
+ sourceConfig.y = 0;
+ sourceConfig.stride = fix->line_length;
+ sourceConfig.buf = (void *)mfd->iova;
+
+ outputConfig.dither_en = 0;
+ outputConfig.out_sel = mdp3_ctrl_get_intf_type(mfd);
+ outputConfig.bit_mask_polarity = 0;
+ outputConfig.color_components_flip = 0;
+ outputConfig.pack_pattern = mdp3_ctrl_get_pack_pattern(mfd);
+ outputConfig.pack_align = MDP3_DMA_OUTPUT_PACK_ALIGN_LSB;
+ outputConfig.color_comp_out_bits = (MDP3_DMA_OUTPUT_COMP_BITS_8 << 4) |
+ (MDP3_DMA_OUTPUT_COMP_BITS_8 << 2)|
+ MDP3_DMA_OUTPUT_COMP_BITS_8;
+
+ rc = mdp3_dma_init(dma, &sourceConfig, &outputConfig);
+ return rc;
+}
+
+static int mdp3_ctrl_on(struct msm_fb_data_type *mfd)
+{
+ int rc = 0;
+ struct mdp3_session_data *mdp3_session;
+ struct mdss_panel_data *panel;
+
+ pr_debug("mdp3_ctrl_on\n");
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+ if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+ !mdp3_session->intf) {
+ pr_err("mdp3_ctrl_on no device");
+ return -ENODEV;
+ }
+ mutex_lock(&mdp3_session->lock);
+ if (mdp3_session->status) {
+ pr_debug("fb%d is on already", mfd->index);
+ goto on_error;
+ }
+
+ /* request bus bandwidth before DSI DMA traffic */
+ rc = mdp3_ctrl_res_req_bus(mfd, 1);
+ if (rc)
+ pr_err("fail to request bus resource\n");
+
+ panel = mdp3_session->panel;
+ rc = 0;
+ if (panel->event_handler)
+ rc = panel->event_handler(panel, MDSS_EVENT_PANEL_ON, NULL);
+ if (rc) {
+ pr_err("fail to turn on the panel\n");
+ goto on_error;
+ }
+ rc = mdp3_ctrl_res_req_clk(mfd, 1);
+ if (rc) {
+ pr_err("fail to request mdp clk resource\n");
+ goto on_error;
+ }
+
+ rc = mdp3_ctrl_dma_init(mfd, mdp3_session->dma);
+ if (rc) {
+ pr_err("dma init failed\n");
+ goto on_error;
+ }
+
+ rc = mdp3_ctrl_intf_init(mfd, mdp3_session->intf);
+ if (rc) {
+ pr_err("display interface init failed\n");
+ goto on_error;
+ }
+
+ rc = mdp3_session->dma->start(mdp3_session->dma, mdp3_session->intf);
+ if (rc) {
+ pr_err("fail to start the MDP display interface\n");
+ goto on_error;
+ }
+
+on_error:
+ if (!rc)
+ mdp3_session->status = 1;
+
+ mutex_unlock(&mdp3_session->lock);
+ return rc;
+}
+
+static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
+{
+ int rc = 0;
+ struct mdp3_session_data *mdp3_session;
+ struct mdss_panel_data *panel;
+
+ pr_debug("mdp3_ctrl_off\n");
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+ if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+ !mdp3_session->intf) {
+ pr_err("mdp3_ctrl_on no device");
+ return -ENODEV;
+ }
+
+ mutex_lock(&mdp3_session->lock);
+
+ if (!mdp3_session->status) {
+ pr_debug("fb%d is off already", mfd->index);
+ goto off_error;
+ }
+
+ pr_debug("mdp3_ctrl_off stop mdp3 dma engine\n");
+
+ rc = mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
+
+ if (rc)
+ pr_err("fail to stop the MDP3 dma\n");
+
+ pr_debug("mdp3_ctrl_off stop dsi panel and controller\n");
+ panel = mdp3_session->panel;
+ if (panel->event_handler)
+ rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
+ if (rc)
+ pr_err("fail to turn off the panel\n");
+
+ pr_debug("mdp3_ctrl_off release bus and clock\n");
+ rc = mdp3_ctrl_res_req_bus(mfd, 0);
+ if (rc)
+ pr_err("mdp bus resource release failed\n");
+ rc = mdp3_ctrl_res_req_clk(mfd, 0);
+ if (rc)
+ pr_err("mdp clock resource release failed\n");
+off_error:
+ mdp3_session->status = 0;
+
+ mutex_unlock(&mdp3_session->lock);
+ return 0;
+}
+
+static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd)
+{
+ struct fb_info *fbi;
+ struct mdp3_session_data *mdp3_session;
+ u32 offset;
+ int bpp;
+
+ pr_debug("mdp3_ctrl_pan_display\n");
+ if (!mfd || !mfd->mdp.private1)
+ return;
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+ if (!mdp3_session || !mdp3_session->dma)
+ return;
+
+ if (!mdp3_session->status) {
+ pr_err("mdp3_ctrl_pan_display, display off!\n");
+ return;
+ }
+
+ mutex_lock(&mdp3_session->lock);
+ fbi = mfd->fbi;
+
+ bpp = fbi->var.bits_per_pixel / 8;
+ offset = fbi->var.xoffset * bpp +
+ fbi->var.yoffset * fbi->fix.line_length;
+
+ if (offset > fbi->fix.smem_len) {
+ pr_err("invalid fb offset=%u total length=%u\n",
+ offset, fbi->fix.smem_len);
+ goto pan_error;
+ }
+
+ mdp3_session->dma->update(mdp3_session->dma,
+ (void *)mfd->iova + offset);
+pan_error:
+ mutex_unlock(&mdp3_session->lock);
+}
+
+static int mdp3_get_metadata(struct msm_fb_data_type *mfd,
+ struct msmfb_metadata *metadata)
+{
+ int ret = 0;
+ switch (metadata->op) {
+ case metadata_op_frame_rate:
+ metadata->data.panel_frame_rate =
+ mfd->panel_info->mipi.frame_rate;
+ break;
+ case metadata_op_get_caps:
+ metadata->data.caps.mdp_rev = 304;
+ break;
+ default:
+ pr_warn("Unsupported request to MDP META IOCTL.\n");
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd,
+ u32 cmd, void __user *argp)
+{
+ int rc = -EINVAL;
+ struct mdp3_session_data *mdp3_session;
+ struct msmfb_metadata metadata;
+ int val;
+
+ pr_debug("mdp3_ctrl_ioctl_handler\n");
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+ if (!mdp3_session)
+ return -ENODEV;
+
+ if (!mdp3_session->status) {
+ pr_err("mdp3_ctrl_ioctl_handler, display off!\n");
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case MSMFB_VSYNC_CTRL:
+ case MSMFB_OVERLAY_VSYNC_CTRL:
+ if (!copy_from_user(&val, argp, sizeof(val))) {
+ rc = mdp3_ctrl_vsync_enable(mfd, val);
+ if (!val)
+ init_completion(&mdp3_session->vsync_comp);
+ } else {
+ pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed\n");
+ rc = -EFAULT;
+ }
+ break;
+ case MSMFB_METADATA_GET:
+ if (copy_from_user(&metadata, argp, sizeof(metadata)))
+ return -EFAULT;
+ rc = mdp3_get_metadata(mfd, &metadata);
+ if (!rc && copy_to_user(argp, &metadata, sizeof(metadata)))
+ rc = -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
+{
+ struct device *dev = mfd->fbi->dev;
+ struct msm_mdp_interface *mdp3_interface = &mfd->mdp;
+ struct mdp3_session_data *mdp3_session = NULL;
+ u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
+ int rc;
+
+ pr_debug("mdp3_ctrl_init\n");
+ mdp3_interface->on_fnc = mdp3_ctrl_on;
+ mdp3_interface->off_fnc = mdp3_ctrl_off;
+ mdp3_interface->do_histogram = NULL;
+ mdp3_interface->cursor_update = NULL;
+ mdp3_interface->dma_fnc = mdp3_ctrl_pan_display;
+ mdp3_interface->ioctl_handler = mdp3_ctrl_ioctl_handler;
+ mdp3_interface->kickoff_fnc = NULL;
+
+ mdp3_session = kzalloc(sizeof(struct mdp3_session_data), GFP_KERNEL);
+ if (!mdp3_session) {
+ pr_err("fail to allocate mdp3 private data structure\n");
+ return -ENOMEM;
+ }
+ mutex_init(&mdp3_session->lock);
+ init_completion(&mdp3_session->vsync_comp);
+ mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL);
+ if (!mdp3_session->dma) {
+ rc = -ENODEV;
+ goto init_done;
+ }
+
+ intf_type = mdp3_ctrl_get_intf_type(mfd);
+ mdp3_session->intf = mdp3_get_display_intf(intf_type);
+ if (!mdp3_session->intf) {
+ rc = -ENODEV;
+ goto init_done;
+ }
+
+ mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev);
+ mdp3_session->status = 0;
+
+ mfd->mdp.private1 = mdp3_session;
+
+ rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
+ if (rc) {
+ pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
+ goto init_done;
+ }
+
+ kobject_uevent(&dev->kobj, KOBJ_ADD);
+ pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
+
+init_done:
+ if (IS_ERR_VALUE(rc)) {
+ mfd->mdp.private1 = NULL;
+ kfree(mdp3_session);
+ }
+
+ return rc;
+}
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.h b/drivers/video/msm/mdss/mdp3_ctrl.h
new file mode 100644
index 0000000..d42ece7
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3_ctrl.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_CTRL_H
+#define MDP3_CTRL_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+
+#include "mdp3_dma.h"
+#include "mdss_fb.h"
+#include "mdss_panel.h"
+
+struct mdp3_session_data {
+ struct mutex lock;
+ int status;
+ struct mdp3_dma *dma;
+ struct mdss_panel_data *panel;
+ struct mdp3_intf *intf;
+ struct msm_fb_data_type *mfd;
+ struct completion vsync_comp;
+};
+
+int mdp3_ctrl_init(struct msm_fb_data_type *mfd);
+
+#endif /* MDP3_CTRL_H */
diff --git a/drivers/video/msm/mdss/mdp3_dma.c b/drivers/video/msm/mdss/mdp3_dma.c
new file mode 100644
index 0000000..69e3d7e
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3_dma.c
@@ -0,0 +1,914 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+
+#include "mdp3.h"
+#include "mdp3_dma.h"
+#include "mdp3_hwio.h"
+
+#define DMA_STOP_POLL_SLEEP_US 1000
+#define DMA_STOP_POLL_TIMEOUT_US 16000
+
+static ktime_t mdp3_get_vsync_time(struct mdp3_dma *dma)
+{
+ unsigned long flag;
+ ktime_t time;
+
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ time = dma->vsync_time;
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+ return time;
+}
+
+static void mdp3_vsync_intr_handler(int type, void *arg)
+{
+ struct mdp3_dma *dma = (struct mdp3_dma *)arg;
+ struct mdp3_vsync_notification vsync_client;
+
+ pr_debug("mdp3_vsync_intr_handler\n");
+ spin_lock(&dma->dma_lock);
+ vsync_client = dma->vsync_client;
+ if (!vsync_client.handler)
+ dma->cb_type &= ~MDP3_DMA_CALLBACK_TYPE_VSYNC;
+ dma->vsync_time = ktime_get();
+ complete(&dma->vsync_comp);
+ if (vsync_client.handler)
+ vsync_client.handler(vsync_client.arg);
+ spin_unlock(&dma->dma_lock);
+
+ if (!vsync_client.handler)
+ mdp3_irq_disable_nosync(type);
+}
+
+static void mdp3_dma_done_intr_handler(int type, void *arg)
+{
+ struct mdp3_dma *dma = (struct mdp3_dma *)arg;
+
+ pr_debug("mdp3_dma_done_intr_handler\n");
+ spin_lock(&dma->dma_lock);
+ dma->busy = false;
+ dma->cb_type &= ~MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+ spin_unlock(&dma->dma_lock);
+ complete(&dma->dma_comp);
+ mdp3_irq_disable_nosync(type);
+}
+
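+/*
+ * Map a callback type to the MDP IRQ for the active output interface:
+ * LCDC and DSI video mode use the LCDC start-of-frame IRQ for vsync,
+ * while DSI command mode uses the primary sync line IRQ for vsync and
+ * the DMA_P/DMA_S done IRQ for frame completion.
+ */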
+void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type)
+{
+ int irq_bit;
+ unsigned long flag;
+
+ pr_debug("mdp3_dma_callback_enable type=%d\n", type);
+
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ if (dma->cb_type & type) {
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+ return;
+ } else {
+ dma->cb_type |= type;
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+ }
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+ dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+ if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
+ mdp3_irq_enable(MDP3_INTR_LCDC_START_OF_FRAME);
+ } else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) {
+ irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+ irq_bit += dma->dma_sel;
+ mdp3_irq_enable(irq_bit);
+ }
+
+ if (type & MDP3_DMA_CALLBACK_TYPE_DMA_DONE) {
+ irq_bit = MDP3_INTR_DMA_P_DONE;
+ if (dma->dma_sel == MDP3_DMA_S)
+ irq_bit = MDP3_INTR_DMA_S_DONE;
+ mdp3_irq_enable(irq_bit);
+ }
+ } else {
+ pr_err("mdp3_dma_callback_enable not supported interface\n");
+ }
+}
+
+void mdp3_dma_callback_disable(struct mdp3_dma *dma, int type)
+{
+ int irq_bit;
+ unsigned long flag;
+
+ pr_debug("mdp3_dma_callback_disable type=%d\n", type);
+
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ if ((dma->cb_type & type) == 0) {
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+ return;
+ } else {
+ dma->cb_type &= ~type;
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+ }
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+ dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+ if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
+ mdp3_irq_disable(MDP3_INTR_LCDC_START_OF_FRAME);
+ } else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) {
+ irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+ irq_bit += dma->dma_sel;
+ mdp3_irq_disable(irq_bit);
+ }
+
+ if (type & MDP3_DMA_CALLBACK_TYPE_DMA_DONE) {
+ irq_bit = MDP3_INTR_DMA_P_DONE;
+ if (dma->dma_sel == MDP3_DMA_S)
+ irq_bit = MDP3_INTR_DMA_S_DONE;
+ mdp3_irq_disable(irq_bit);
+ }
+ }
+}
+
+static int mdp3_dma_callback_setup(struct mdp3_dma *dma)
+{
+ int rc;
+ struct mdp3_intr_cb vsync_cb = {
+ .cb = mdp3_vsync_intr_handler,
+ .data = dma,
+ };
+
+ struct mdp3_intr_cb dma_cb = {
+ .cb = mdp3_dma_done_intr_handler,
+ .data = dma,
+ };
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+ dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC)
+ rc = mdp3_set_intr_callback(MDP3_INTR_LCDC_START_OF_FRAME,
+ &vsync_cb);
+ else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ int irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+ irq_bit += dma->dma_sel;
+ rc = mdp3_set_intr_callback(irq_bit, &vsync_cb);
+ irq_bit = MDP3_INTR_DMA_P_DONE;
+ if (dma->dma_sel == MDP3_DMA_S)
+ irq_bit = MDP3_INTR_DMA_S_DONE;
+ rc |= mdp3_set_intr_callback(irq_bit, &dma_cb);
+ } else {
+ pr_err("mdp3_dma_callback_setup not suppported interface\n");
+ rc = -ENODEV;
+ }
+ return rc;
+}
+
+static void mdp3_dma_vsync_enable(struct mdp3_dma *dma,
+ struct mdp3_vsync_notification *vsync_client)
+{
+ unsigned long flag;
+ int updated = 0;
+ int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+
+ pr_debug("mdp3_dma_vsync_enable\n");
+
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ if (vsync_client) {
+ if (dma->vsync_client.handler != vsync_client->handler) {
+ dma->vsync_client = *vsync_client;
+ updated = 1;
+ }
+ } else {
+ if (dma->vsync_client.handler) {
+ dma->vsync_client.handler = NULL;
+ dma->vsync_client.arg = NULL;
+ updated = 1;
+ }
+ }
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+ if (updated) {
+ if (vsync_client && vsync_client->handler)
+ mdp3_dma_callback_enable(dma, cb_type);
+ else
+ mdp3_dma_callback_disable(dma, cb_type);
+ }
+}
+
+static int mdp3_dmap_config(struct mdp3_dma *dma,
+ struct mdp3_dma_source *source_config,
+ struct mdp3_dma_output_config *output_config)
+{
+ u32 dma_p_cfg_reg, dma_p_size, dma_p_out_xy;
+
+ dma_p_cfg_reg = source_config->format << 25;
+ if (output_config->dither_en)
+ dma_p_cfg_reg |= BIT(24);
+ dma_p_cfg_reg |= output_config->out_sel << 19;
+ dma_p_cfg_reg |= output_config->bit_mask_polarity << 18;
+ dma_p_cfg_reg |= output_config->color_components_flip << 14;
+ dma_p_cfg_reg |= output_config->pack_pattern << 8;
+ dma_p_cfg_reg |= output_config->pack_align << 7;
+ dma_p_cfg_reg |= output_config->color_comp_out_bits;
+
+ dma_p_size = source_config->width | (source_config->height << 16);
+ dma_p_out_xy = source_config->x | (source_config->y << 16);
+
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CONFIG, dma_p_cfg_reg);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_SIZE, dma_p_size);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)source_config->buf);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_Y_STRIDE, source_config->stride);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_OUT_XY, dma_p_out_xy);
+
+ /*
+ * NOTE: MDP_DMA_P_FETCH_CFG: max_burst_size needs to be 4 rather than
+ * the default 16, as a workaround for an MDP hang issue
+ */
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_FETCH_CFG, 0x10);
+ MDP3_REG_WRITE(MDP3_REG_PRIMARY_RD_PTR_IRQ, 0x10);
+
+ dma->source_config = *source_config;
+ dma->output_config = *output_config;
+
+ mdp3_dma_callback_setup(dma);
+ return 0;
+}
+
+static int mdp3_dmas_config(struct mdp3_dma *dma,
+ struct mdp3_dma_source *source_config,
+ struct mdp3_dma_output_config *output_config)
+{
+ u32 dma_s_cfg_reg, dma_s_size, dma_s_out_xy;
+
+ dma_s_cfg_reg = source_config->format << 25;
+ if (output_config->dither_en)
+ dma_s_cfg_reg |= BIT(24);
+ dma_s_cfg_reg |= output_config->out_sel << 19;
+ dma_s_cfg_reg |= output_config->bit_mask_polarity << 18;
+ dma_s_cfg_reg |= output_config->color_components_flip << 14;
+ dma_s_cfg_reg |= output_config->pack_pattern << 8;
+ dma_s_cfg_reg |= output_config->pack_align << 7;
+ dma_s_cfg_reg |= output_config->color_comp_out_bits;
+
+ dma_s_size = source_config->width | (source_config->height << 16);
+ dma_s_out_xy = source_config->x | (source_config->y << 16);
+
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_CONFIG, dma_s_cfg_reg);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_SIZE, dma_s_size);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR, (u32)source_config->buf);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_Y_STRIDE, source_config->stride);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_OUT_XY, dma_s_out_xy);
+
+ MDP3_REG_WRITE(MDP3_REG_SECONDARY_RD_PTR_IRQ, 0x10);
+
+ dma->source_config = *source_config;
+ dma->output_config = *output_config;
+
+ mdp3_dma_callback_setup(dma);
+ return 0;
+}
+
+static int mdp3_dmap_cursor_config(struct mdp3_dma *dma,
+ struct mdp3_dma_cursor *cursor)
+{
+ u32 cursor_size, cursor_pos, blend_param, trans_mask;
+
+ cursor_size = cursor->width | (cursor->height << 16);
+ cursor_pos = cursor->x | (cursor->y << 16);
+ trans_mask = 0;
+ if (cursor->blend_config.mode == MDP3_DMA_CURSOR_BLEND_CONSTANT_ALPHA) {
+ blend_param = cursor->blend_config.constant_alpha << 24;
+ } else if (cursor->blend_config.mode ==
+ MDP3_DMA_CURSOR_BLEND_COLOR_KEYING) {
+ blend_param = cursor->blend_config.transparent_color;
+ trans_mask = cursor->blend_config.transparency_mask;
+ } else {
+ blend_param = 0;
+ }
+
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_FORMAT, cursor->format);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_SIZE, cursor_size);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BUF_ADDR, (u32)cursor->buf);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_POS, cursor_pos);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_CONFIG,
+ cursor->blend_config.mode);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_PARAM, blend_param);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_TRANS_MASK, trans_mask);
+ dma->cursor = *cursor;
+ return 0;
+}
+
+static int mdp3_dmap_ccs_config(struct mdp3_dma *dma,
+ struct mdp3_dma_color_correct_config *config,
+ struct mdp3_dma_ccs *ccs,
+ struct mdp3_dma_lut *lut)
+{
+ int i;
+ u32 addr, cc_config, color;
+
+ cc_config = config->lut_enable;
+ if (config->ccs_enable)
+ cc_config |= BIT(3);
+ cc_config |= config->lut_position << 4;
+ cc_config |= config->ccs_sel << 5;
+ cc_config |= config->pre_bias_sel << 6;
+ cc_config |= config->post_bias_sel << 7;
+ cc_config |= config->pre_limit_sel << 8;
+ cc_config |= config->post_limit_sel << 9;
+ cc_config |= config->lut_sel << 10;
+
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG, cc_config);
+
+ if (config->ccs_enable && ccs) {
+ if (ccs->mv1) {
+ addr = MDP3_REG_DMA_P_CSC_MV1;
+ for (i = 0; i < 9; i++) {
+ MDP3_REG_WRITE(addr, ccs->mv1[i]);
+ addr += 4;
+ }
+ }
+
+ if (ccs->mv2) {
+ addr = MDP3_REG_DMA_P_CSC_MV2;
+ for (i = 0; i < 9; i++) {
+ MDP3_REG_WRITE(addr, ccs->mv2[i]);
+ addr += 4;
+ }
+ }
+
+ if (ccs->pre_bv1) {
+ addr = MDP3_REG_DMA_P_CSC_PRE_BV1;
+ for (i = 0; i < 3; i++) {
+ MDP3_REG_WRITE(addr, ccs->pre_bv1[i]);
+ addr += 4;
+ }
+ }
+
+ if (ccs->pre_bv2) {
+ addr = MDP3_REG_DMA_P_CSC_PRE_BV2;
+ for (i = 0; i < 3; i++) {
+ MDP3_REG_WRITE(addr, ccs->pre_bv2[i]);
+ addr += 4;
+ }
+ }
+
+ if (ccs->post_bv1) {
+ addr = MDP3_REG_DMA_P_CSC_POST_BV1;
+ for (i = 0; i < 3; i++) {
+ MDP3_REG_WRITE(addr, ccs->post_bv1[i]);
+ addr += 4;
+ }
+ }
+
+ if (ccs->post_bv2) {
+ addr = MDP3_REG_DMA_P_CSC_POST_BV2;
+ for (i = 0; i < 3; i++) {
+ MDP3_REG_WRITE(addr, ccs->post_bv2[i]);
+ addr += 4;
+ }
+ }
+
+ if (ccs->pre_lv1) {
+ addr = MDP3_REG_DMA_P_CSC_PRE_LV1;
+ for (i = 0; i < 6; i++) {
+ MDP3_REG_WRITE(addr, ccs->pre_lv1[i]);
+ addr += 4;
+ }
+ }
+
+ if (ccs->pre_lv2) {
+ addr = MDP3_REG_DMA_P_CSC_PRE_LV2;
+ for (i = 0; i < 6; i++) {
+ MDP3_REG_WRITE(addr, ccs->pre_lv2[i]);
+ addr += 4;
+ }
+ }
+
+ if (ccs->post_lv1) {
+ addr = MDP3_REG_DMA_P_CSC_POST_LV1;
+ for (i = 0; i < 6; i++) {
+ MDP3_REG_WRITE(addr, ccs->post_lv1[i]);
+ addr += 4;
+ }
+ }
+
+ if (ccs->post_lv2) {
+ addr = MDP3_REG_DMA_P_CSC_POST_LV2;
+ for (i = 0; i < 6; i++) {
+ MDP3_REG_WRITE(addr, ccs->post_lv2[i]);
+ addr += 4;
+ }
+ }
+ }
+
+ if (config->lut_enable && lut) {
+ if (lut->color0_lut1 && lut->color1_lut1 && lut->color2_lut1) {
+ addr = MDP3_REG_DMA_P_CSC_LUT1;
+ for (i = 0; i < 256; i++) {
+ color = lut->color0_lut1[i];
+ color |= lut->color1_lut1[i] << 8;
+ color |= lut->color2_lut1[i] << 16;
+ MDP3_REG_WRITE(addr, color);
+ addr += 4;
+ }
+ }
+
+ if (lut->color0_lut2 && lut->color1_lut2 && lut->color2_lut2) {
+ addr = MDP3_REG_DMA_P_CSC_LUT2;
+ for (i = 0; i < 256; i++) {
+ color = lut->color0_lut2[i];
+ color |= lut->color1_lut2[i] << 8;
+ color |= lut->color2_lut2[i] << 16;
+ MDP3_REG_WRITE(addr, color);
+ addr += 4;
+ }
+ }
+ }
+
+ dma->ccs_config = *config;
+ return 0;
+}
+
+static int mdp3_dmap_histo_config(struct mdp3_dma *dma,
+ struct mdp3_dma_histogram_config *histo_config)
+{
+ u32 hist_bit_mask = 0, hist_control = 0;
+
+ if (histo_config->bit_mask_polarity)
+ hist_bit_mask = BIT(31);
+ hist_bit_mask |= histo_config->bit_mask;
+
+ if (histo_config->auto_clear_en)
+ hist_control = BIT(0);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_FRAME_CNT,
+ histo_config->frame_count);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_BIT_MASK, hist_bit_mask);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_CONTROL, hist_control);
+ return 0;
+}
+
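+/*
+ * Queue a new frame on DMA_P. In DSI command mode, wait for any DMA in
+ * flight to finish, program the new buffer address, kick off the DMA and
+ * then block until the next vsync. In video/LCDC mode only the buffer
+ * address is updated and the call waits for the next start of frame.
+ */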
+static int mdp3_dmap_update(struct mdp3_dma *dma, void *buf)
+{
+ int wait_for_dma_done = 0;
+ unsigned long flag;
+ int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+
+ pr_debug("mdp3_dmap_update\n");
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ if (dma->busy)
+ wait_for_dma_done = 1;
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+ if (wait_for_dma_done)
+ wait_for_completion_killable(&dma->dma_comp);
+ }
+
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)buf);
+ dma->source_config.buf = buf;
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_START, 1);
+ dma->busy = true;
+ }
+ wmb();
+ init_completion(&dma->vsync_comp);
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+ mdp3_dma_callback_enable(dma, cb_type);
+ pr_debug("mdp3_dmap_update wait for vsync_comp in\n");
+ wait_for_completion_killable(&dma->vsync_comp);
+ pr_debug("mdp3_dmap_update wait for vsync_comp out\n");
+ return 0;
+}
+
+static int mdp3_dmas_update(struct mdp3_dma *dma, void *buf)
+{
+ int wait_for_dma_done = 0;
+ unsigned long flag;
+ int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ if (dma->busy)
+ wait_for_dma_done = 1;
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+ if (wait_for_dma_done)
+ wait_for_completion_killable(&dma->dma_comp);
+ }
+
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR, (u32)buf);
+ dma->source_config.buf = buf;
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_START, 1);
+ dma->busy = true;
+ }
+ wmb();
+ init_completion(&dma->vsync_comp);
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+ mdp3_dma_callback_enable(dma, cb_type);
+ wait_for_completion_killable(&dma->vsync_comp);
+ return 0;
+}
+
+static int mdp3_dmap_cursor_update(struct mdp3_dma *dma, int x, int y)
+{
+ u32 cursor_pos;
+
+ cursor_pos = x | (y << 16);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_POS, cursor_pos);
+ dma->cursor.x = x;
+ dma->cursor.y = y;
+ return 0;
+}
+
+static int mdp3_dmap_histo_get(struct mdp3_dma *dma,
+ struct mdp3_dma_histogram_data *data)
+{
+ int i;
+ u32 addr, extra;
+
+ addr = MDP3_REG_DMA_P_HIST_R_DATA;
+ for (i = 0; i < 32; i++) {
+ data->r_data[i] = MDP3_REG_READ(addr);
+ addr += 4;
+ }
+
+ addr = MDP3_REG_DMA_P_HIST_G_DATA;
+ for (i = 0; i < 32; i++) {
+ data->g_data[i] = MDP3_REG_READ(addr);
+ addr += 4;
+ }
+
+ addr = MDP3_REG_DMA_P_HIST_B_DATA;
+ for (i = 0; i < 32; i++) {
+ data->b_data[i] = MDP3_REG_READ(addr);
+ addr += 4;
+ }
+
+ extra = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_0);
+ data->r_min_value = (extra & 0x1F0000) >> 16;
+ data->r_max_value = (extra & 0x1F000000) >> 24;
+ extra = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_1);
+ data->g_min_value = extra & 0x1F;
+ data->g_max_value = (extra & 0x1F00) >> 8;
+ data->b_min_value = (extra & 0x1F0000) >> 16;
+ data->b_max_value = (extra & 0x1F000000) >> 24;
+ return 0;
+}
+
+static int mdp3_dmap_histo_op(struct mdp3_dma *dma, u32 op)
+{
+ switch (op) {
+ case MDP3_DMA_HISTO_OP_START:
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
+ break;
+ case MDP3_DMA_HISTO_OP_STOP:
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_STOP_REQ, 1);
+ break;
+ case MDP3_DMA_HISTO_OP_CANCEL:
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_CANCEL_REQ, 1);
+ break;
+ case MDP3_DMA_HISTO_OP_RESET:
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_RESET_SEQ_START, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int mdp3_dmap_histo_intr_status(struct mdp3_dma *dma, int *status)
+{
+ *status = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_INTR_STATUS);
+ return 0;
+}
+
+static int mdp3_dmap_histo_intr_enable(struct mdp3_dma *dma, u32 mask)
+{
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, mask);
+ return 0;
+}
+
+static int mdp3_dmap_histo_intr_clear(struct mdp3_dma *dma, u32 mask)
+{
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_CLEAR, mask);
+ return 0;
+}
+
+static int mdp3_dma_start(struct mdp3_dma *dma, struct mdp3_intf *intf)
+{
+ unsigned long flag;
+ int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+ u32 dma_start_offset = MDP3_REG_DMA_P_START;
+
+ if (dma->dma_sel == MDP3_DMA_P)
+ dma_start_offset = MDP3_REG_DMA_P_START;
+ else if (dma->dma_sel == MDP3_DMA_S)
+ dma_start_offset = MDP3_REG_DMA_S_START;
+ else
+ return -EINVAL;
+
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+ MDP3_REG_WRITE(dma_start_offset, 1);
+ dma->busy = true;
+ }
+
+ intf->start(intf);
+ wmb();
+ init_completion(&dma->vsync_comp);
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+ mdp3_dma_callback_enable(dma, cb_type);
+ pr_debug("mdp3_dma_start wait for vsync_comp in\n");
+ wait_for_completion_killable(&dma->vsync_comp);
+ pr_debug("mdp3_dma_start wait for vsync_comp out\n");
+ return 0;
+}
+
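+/*
+ * Stop the output interface and poll MDP3_REG_DISPLAY_STATUS until the
+ * busy bit for the selected DMA engine clears, sleeping 1ms per poll
+ * with a 16ms timeout, before tearing down the vsync/DMA callbacks.
+ */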
+static int mdp3_dma_stop(struct mdp3_dma *dma, struct mdp3_intf *intf)
+{
+ int ret = 0;
+ u32 status, display_status_bit;
+
+ if (dma->dma_sel == MDP3_DMA_P)
+ display_status_bit = BIT(6);
+ else if (dma->dma_sel == MDP3_DMA_S)
+ display_status_bit = BIT(7);
+ else
+ return -EINVAL;
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+ display_status_bit |= BIT(11);
+
+ intf->stop(intf);
+ ret = readl_poll_timeout((mdp3_res->mdp_base + MDP3_REG_DISPLAY_STATUS),
+ status,
+ ((status & display_status_bit) == 0),
+ DMA_STOP_POLL_SLEEP_US,
+ DMA_STOP_POLL_TIMEOUT_US);
+
+ mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_VSYNC |
+ MDP3_DMA_CALLBACK_TYPE_DMA_DONE);
+
+ dma->busy = false;
+ return ret;
+}
+
+int mdp3_dma_init(struct mdp3_dma *dma,
+ struct mdp3_dma_source *source_config,
+ struct mdp3_dma_output_config *output_config)
+{
+ int ret = 0;
+
+ pr_debug("mdp3_dma_init\n");
+ switch (dma->dma_sel) {
+ case MDP3_DMA_P:
+ dma->busy = 0;
+
+ ret = mdp3_dmap_config(dma, source_config, output_config);
+ if (ret < 0)
+ return ret;
+
+ dma->config_cursor = mdp3_dmap_cursor_config;
+ dma->config_ccs = mdp3_dmap_ccs_config;
+ dma->config_histo = mdp3_dmap_histo_config;
+ dma->update = mdp3_dmap_update;
+ dma->update_cursor = mdp3_dmap_cursor_update;
+ dma->get_histo = mdp3_dmap_histo_get;
+ dma->histo_op = mdp3_dmap_histo_op;
+ dma->histo_intr_status = mdp3_dmap_histo_intr_status;
+ dma->histo_intr_enable = mdp3_dmap_histo_intr_enable;
+ dma->histo_intr_clear = mdp3_dmap_histo_intr_clear;
+ dma->vsync_enable = mdp3_dma_vsync_enable;
+ dma->get_vsync_time = mdp3_get_vsync_time;
+ dma->start = mdp3_dma_start;
+ dma->stop = mdp3_dma_stop;
+ break;
+ case MDP3_DMA_S:
+ dma->busy = 0;
+ ret = mdp3_dmas_config(dma, source_config, output_config);
+ if (ret < 0)
+ return ret;
+
+ dma->config_cursor = NULL;
+ dma->config_ccs = NULL;
+ dma->config_histo = NULL;
+ dma->update = mdp3_dmas_update;
+ dma->update_cursor = NULL;
+ dma->get_histo = NULL;
+ dma->histo_op = NULL;
+ dma->histo_intr_status = NULL;
+ dma->histo_intr_enable = NULL;
+ dma->histo_intr_clear = NULL;
+ dma->vsync_enable = mdp3_dma_vsync_enable;
+ dma->get_vsync_time = mdp3_get_vsync_time;
+ dma->start = mdp3_dma_start;
+ dma->stop = mdp3_dma_stop;
+ break;
+ case MDP3_DMA_E:
+ default:
+ ret = -ENODEV;
+ break;
+ }
+
+ spin_lock_init(&dma->dma_lock);
+ init_completion(&dma->vsync_comp);
+ init_completion(&dma->dma_comp);
+ dma->cb_type = 0;
+ dma->vsync_client.handler = NULL;
+ dma->vsync_client.arg = NULL;
+
+ memset(&dma->cursor, 0, sizeof(dma->cursor));
+ memset(&dma->ccs_config, 0, sizeof(dma->ccs_config));
+ memset(&dma->histogram_config, 0, sizeof(dma->histogram_config));
+
+ return ret;
+}
+
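+/*
+ * Program the LCDC timing generator registers (sync periods, display and
+ * active regions, hsync skew and polarity control) from the video
+ * interface configuration.
+ */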
+int lcdc_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+ u32 temp;
+ struct mdp3_video_intf_cfg *v = &cfg->video;
+ temp = v->hsync_pulse_width | (v->hsync_period << 16);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_HSYNC_CTL, temp);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_VSYNC_PERIOD, v->vsync_period);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_VSYNC_PULSE_WIDTH, v->vsync_pulse_width);
+ temp = v->display_start_x | (v->display_end_x << 16);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_HCTL, temp);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_V_START, v->display_start_y);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_V_END, v->display_end_y);
+ temp = v->active_start_x | (v->active_end_x << 16);
+ if (v->active_h_enable)
+ temp |= BIT(31);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_HCTL, temp);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_V_START, v->active_start_y);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_V_END, v->active_end_y);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_HSYNC_SKEW, v->hsync_skew);
+ temp = 0;
+ if (!v->hsync_polarity)
+ temp |= BIT(0);
+ if (!v->vsync_polarity)
+ temp |= BIT(1);
+ if (!v->de_polarity)
+ temp |= BIT(2);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_CTL_POLARITY, temp);
+
+ return 0;
+}
+
+int lcdc_start(struct mdp3_intf *intf)
+{
+ MDP3_REG_WRITE(MDP3_REG_LCDC_EN, BIT(0));
+ wmb();
+ intf->active = true;
+ return 0;
+}
+
+int lcdc_stop(struct mdp3_intf *intf)
+{
+ MDP3_REG_WRITE(MDP3_REG_LCDC_EN, 0);
+ wmb();
+ intf->active = false;
+ return 0;
+}
+
+int dsi_video_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+ u32 temp;
+ struct mdp3_video_intf_cfg *v = &cfg->video;
+
+ pr_debug("dsi_video_config\n");
+
+ temp = v->hsync_pulse_width | (v->hsync_period << 16);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_HSYNC_CTL, temp);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_VSYNC_PERIOD, v->vsync_period);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_VSYNC_PULSE_WIDTH,
+ v->vsync_pulse_width);
+ temp = v->display_start_x | (v->display_end_x << 16);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_HCTL, temp);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_V_START, v->display_start_y);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_V_END, v->display_end_y);
+ temp = v->active_start_x | (v->active_end_x << 16);
+ if (v->active_h_enable)
+ temp |= BIT(31);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_HCTL, temp);
+
+ temp = v->active_start_y;
+ if (v->active_v_enable)
+ temp |= BIT(31);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_V_START, temp);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_V_END, v->active_end_y);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_HSYNC_SKEW, v->hsync_skew);
+ temp = 0;
+ if (!v->hsync_polarity)
+ temp |= BIT(0);
+ if (!v->vsync_polarity)
+ temp |= BIT(1);
+ if (!v->de_polarity)
+ temp |= BIT(2);
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_CTL_POLARITY, temp);
+
+ return 0;
+}
+
+int dsi_video_start(struct mdp3_intf *intf)
+{
+ pr_debug("dsi_video_start\n");
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, BIT(0));
+ wmb();
+ intf->active = true;
+ return 0;
+}
+
+int dsi_video_stop(struct mdp3_intf *intf)
+{
+ pr_debug("dsi_video_stop\n");
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 0);
+ wmb();
+ intf->active = false;
+ return 0;
+}
+
+int dsi_cmd_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+ u32 id_map = 0;
+ u32 trigger_en = 0;
+
+ if (cfg->dsi_cmd.primary_dsi_cmd_id)
+ id_map |= BIT(0);
+ if (cfg->dsi_cmd.secondary_dsi_cmd_id)
+ id_map |= BIT(4);
+
+ if (cfg->dsi_cmd.dsi_cmd_tg_intf_sel)
+ trigger_en = BIT(4);
+
+ MDP3_REG_WRITE(MDP3_REG_DSI_CMD_MODE_ID_MAP, id_map);
+ MDP3_REG_WRITE(MDP3_REG_DSI_CMD_MODE_TRIGGER_EN, trigger_en);
+
+ return 0;
+}
+
+int dsi_cmd_start(struct mdp3_intf *intf)
+{
+ intf->active = true;
+ return 0;
+}
+
+int dsi_cmd_stop(struct mdp3_intf *intf)
+{
+ intf->active = false;
+ return 0;
+}
+
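+/*
+ * Bind the config/start/stop operations for the requested output type,
+ * apply the configuration and cache it in the interface structure.
+ */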
+int mdp3_intf_init(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+ int ret = 0;
+ switch (cfg->type) {
+ case MDP3_DMA_OUTPUT_SEL_LCDC:
+ intf->config = lcdc_config;
+ intf->start = lcdc_start;
+ intf->stop = lcdc_stop;
+ break;
+ case MDP3_DMA_OUTPUT_SEL_DSI_VIDEO:
+ intf->config = dsi_video_config;
+ intf->start = dsi_video_start;
+ intf->stop = dsi_video_stop;
+ break;
+ case MDP3_DMA_OUTPUT_SEL_DSI_CMD:
+ intf->config = dsi_cmd_config;
+ intf->start = dsi_cmd_start;
+ intf->stop = dsi_cmd_stop;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ intf->active = false;
+ if (intf->config)
+ ret = intf->config(intf, cfg);
+
+ if (ret) {
+ pr_err("MDP interface initialization failed\n");
+ return ret;
+ }
+
+ intf->cfg = *cfg;
+ return 0;
+}
diff --git a/drivers/video/msm/mdss/mdp3_dma.h b/drivers/video/msm/mdss/mdp3_dma.h
new file mode 100644
index 0000000..2fb8427
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3_dma.h
@@ -0,0 +1,336 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_DMA_H
+#define MDP3_DMA_H
+
+#include <linux/sched.h>
+
+enum {
+ MDP3_DMA_P,
+ MDP3_DMA_S,
+ MDP3_DMA_E,
+ MDP3_DMA_MAX
+};
+
+enum {
+ MDP3_DMA_CAP_CURSOR = 0x1,
+ MDP3_DMA_CAP_COLOR_CORRECTION = 0x2,
+ MDP3_DMA_CAP_HISTOGRAM = 0x4,
+ MDP3_DMA_CAP_GAMMA_CORRECTION = 0x8,
+ MDP3_DMA_CAP_DITHER = 0x10,
+ MDP3_DMA_CAP_ALL = 0x1F
+};
+
+enum {
+ MDP3_DMA_OUTPUT_SEL_AHB,
+ MDP3_DMA_OUTPUT_SEL_DSI_CMD,
+ MDP3_DMA_OUTPUT_SEL_LCDC,
+ MDP3_DMA_OUTPUT_SEL_DSI_VIDEO,
+ MDP3_DMA_OUTPUT_SEL_MAX
+};
+
+enum {
+ MDP3_DMA_IBUF_FORMAT_RGB888,
+ MDP3_DMA_IBUF_FORMAT_RGB565,
+ MDP3_DMA_IBUF_FORMAT_XRGB8888,
+ MDP3_DMA_IBUF_FORMAT_UNDEFINED
+};
+
+enum {
+ MDP3_DMA_OUTPUT_PACK_PATTERN_RGB = 0x21,
+ MDP3_DMA_OUTPUT_PACK_PATTERN_RBG = 0x24,
+ MDP3_DMA_OUTPUT_PACK_PATTERN_BGR = 0x12,
+ MDP3_DMA_OUTPUT_PACK_PATTERN_BRG = 0x18,
+ MDP3_DMA_OUTPUT_PACK_PATTERN_GBR = 0x06,
+ MDP3_DMA_OUTPUT_PACK_PATTERN_GRB = 0x09,
+};
+
+enum {
+ MDP3_DMA_OUTPUT_PACK_ALIGN_LSB,
+ MDP3_DMA_OUTPUT_PACK_ALIGN_MSB
+};
+
+enum {
+ MDP3_DMA_OUTPUT_COMP_BITS_4, /*4 bits per color component*/
+ MDP3_DMA_OUTPUT_COMP_BITS_5,
+ MDP3_DMA_OUTPUT_COMP_BITS_6,
+ MDP3_DMA_OUTPUT_COMP_BITS_8,
+};
+
+enum {
+ MDP3_DMA_CURSOR_FORMAT_ARGB888,
+};
+
+enum {
+ MDP3_DMA_COLOR_CORRECT_SET_1,
+ MDP3_DMA_COLOR_CORRECT_SET_2
+};
+
+enum {
+ MDP3_DMA_LUT_POSITION_PRE,
+ MDP3_DMA_LUT_POSITION_POST
+};
+
+enum {
+ MDP3_DMA_LUT_DISABLE = 0x0,
+ MDP3_DMA_LUT_ENABLE_C0 = 0x01,
+ MDP3_DMA_LUT_ENABLE_C1 = 0x02,
+ MDP3_DMA_LUT_ENABLE_C2 = 0x04,
+ MDP3_DMA_LUT_ENABLE_ALL = 0x07,
+};
+
+enum {
+ MDP3_DMA_HISTOGRAM_BIT_MASK_NONE = 0x0,
+ MDP3_DMA_HISTOGRAM_BIT_MASK_ONE_MSB = 0x1,
+ MDP3_DMA_HISTOGRAM_BIT_MASK_TWO_MSB = 0x2,
+ MDP3_DMA_HISTOGRAM_BIT_MASK_THREE_MSB = 0x3
+};
+
+enum {
+ MDP3_DMA_COLOR_FLIP_NONE,
+ MDP3_DMA_COLOR_FLIP_COMP1 = 0x1,
+ MDP3_DMA_COLOR_FLIP_COMP2 = 0x2,
+ MDP3_DMA_COLOR_FLIP_COMP3 = 0x4,
+};
+
+enum {
+ MDP3_DMA_CURSOR_BLEND_NONE = 0x0,
+ MDP3_DMA_CURSOR_BLEND_PER_PIXEL_ALPHA = 0x3,
+ MDP3_DMA_CURSOR_BLEND_CONSTANT_ALPHA = 0x5,
+ MDP3_DMA_CURSOR_BLEND_COLOR_KEYING = 0x9
+};
+
+enum {
+ MDP3_DMA_HISTO_OP_START,
+ MDP3_DMA_HISTO_OP_STOP,
+ MDP3_DMA_HISTO_OP_CANCEL,
+ MDP3_DMA_HISTO_OP_RESET
+};
+
+enum {
+ MDP3_DMA_CALLBACK_TYPE_VSYNC = 0x01,
+ MDP3_DMA_CALLBACK_TYPE_DMA_DONE = 0x02,
+};
+
+struct mdp3_dma_source {
+ u32 format;
+ int width;
+ int height;
+ int x;
+ int y;
+ void *buf;
+ int stride;
+};
+
+struct mdp3_dma_output_config {
+ int dither_en;
+ u32 out_sel;
+ u32 bit_mask_polarity;
+ u32 color_components_flip;
+ u32 pack_pattern;
+ u32 pack_align;
+ u32 color_comp_out_bits;
+};
+
+struct mdp3_dma_cursor_blend_config {
+ u32 mode;
+ u32 transparent_color; /*color keying*/
+ u32 transparency_mask;
+ u32 constant_alpha;
+};
+
+struct mdp3_dma_cursor {
+ int enable; /* enable cursor or not*/
+ u32 format;
+ int width;
+ int height;
+ int x;
+ int y;
+ void *buf;
+ struct mdp3_dma_cursor_blend_config blend_config;
+};
+
+struct mdp3_dma_ccs {
+ u32 *mv1; /*set1 matrix vector, 3x3 */
+ u32 *mv2;
+ u32 *pre_bv1; /*pre-bias vector for set1, 1x3*/
+ u32 *pre_bv2;
+ u32 *post_bv1; /*post-bias vector for set1, 1x3*/
+ u32 *post_bv2;
+ u32 *pre_lv1; /*pre-limit vector for set 1, 1x6*/
+ u32 *pre_lv2;
+ u32 *post_lv1;
+ u32 *post_lv2;
+};
+
+struct mdp3_dma_lut {
+ uint8_t *color0_lut1;
+ uint8_t *color1_lut1;
+ uint8_t *color2_lut1;
+ uint8_t *color0_lut2;
+ uint8_t *color1_lut2;
+ uint8_t *color2_lut2;
+};
+
+struct mdp3_dma_color_correct_config {
+ int ccs_enable;
+ int lut_enable;
+ u32 lut_sel;
+ u32 post_limit_sel;
+ u32 pre_limit_sel;
+ u32 post_bias_sel;
+ u32 pre_bias_sel;
+ u32 ccs_sel;
+ u32 lut_position;
+};
+
+struct mdp3_dma_histogram_config {
+ int frame_count;
+ u32 bit_mask_polarity;
+ u32 bit_mask;
+ int auto_clear_en;
+};
+
+struct mdp3_dma_histogram_data {
+ uint8_t r_max_value;
+ uint8_t r_min_value;
+ uint8_t b_max_value;
+ uint8_t b_min_value;
+ uint8_t g_max_value;
+ uint8_t g_min_value;
+ uint8_t r_data[32];
+ uint8_t g_data[32];
+ uint8_t b_data[32];
+};
+
+struct mdp3_vsync_notification {
+ void (*handler)(void *arg);
+ void *arg;
+};
+
+struct mdp3_intf;
+
+struct mdp3_dma {
+ u32 dma_sel;
+ u32 capability;
+ int in_use;
+ int available;
+ int busy;
+
+ spinlock_t dma_lock;
+ struct completion vsync_comp;
+ struct completion dma_comp;
+ ktime_t vsync_time;
+ struct mdp3_vsync_notification vsync_client;
+ u32 cb_type;
+
+ struct mdp3_dma_output_config output_config;
+ struct mdp3_dma_source source_config;
+
+ struct mdp3_dma_cursor cursor;
+ struct mdp3_dma_color_correct_config ccs_config;
+ struct mdp3_dma_histogram_config histogram_config;
+
+ int (*start)(struct mdp3_dma *dma, struct mdp3_intf *intf);
+
+ int (*stop)(struct mdp3_dma *dma, struct mdp3_intf *intf);
+
+ int (*config_cursor)(struct mdp3_dma *dma,
+ struct mdp3_dma_cursor *cursor);
+
+ int (*config_ccs)(struct mdp3_dma *dma,
+ struct mdp3_dma_color_correct_config *config,
+ struct mdp3_dma_ccs *ccs,
+ struct mdp3_dma_lut *lut);
+
+ int (*update)(struct mdp3_dma *dma, void *buf);
+
+ int (*update_cursor)(struct mdp3_dma *dma, int x, int y);
+
+ int (*get_histo)(struct mdp3_dma *dma,
+ struct mdp3_dma_histogram_data *data);
+
+ int (*config_histo)(struct mdp3_dma *dma,
+ struct mdp3_dma_histogram_config *histo_config);
+
+ int (*histo_op)(struct mdp3_dma *dma,
+ u32 op);
+
+ int (*histo_intr_status)(struct mdp3_dma *dma, int *status);
+
+ int (*histo_intr_enable)(struct mdp3_dma *dma, u32 mask);
+
+ int (*histo_intr_clear)(struct mdp3_dma *dma, u32 mask);
+
+ void (*vsync_enable)(struct mdp3_dma *dma,
+ struct mdp3_vsync_notification *vsync_client);
+
+ ktime_t (*get_vsync_time)(struct mdp3_dma *dma);
+
+};
+
+struct mdp3_video_intf_cfg {
+ int hsync_period;
+ int hsync_pulse_width;
+ int vsync_period;
+ int vsync_pulse_width;
+ int display_start_x;
+ int display_end_x;
+ int display_start_y;
+ int display_end_y;
+ int active_start_x;
+ int active_end_x;
+ int active_h_enable;
+ int active_start_y;
+ int active_end_y;
+ int active_v_enable;
+ int hsync_skew;
+ int hsync_polarity;
+ int vsync_polarity;
+ int de_polarity;
+};
+
+struct mdp3_dsi_cmd_intf_cfg {
+ int primary_dsi_cmd_id;
+ int secondary_dsi_cmd_id;
+ int dsi_cmd_tg_intf_sel;
+};
+
+struct mdp3_intf_cfg {
+ u32 type;
+ struct mdp3_video_intf_cfg video;
+ struct mdp3_dsi_cmd_intf_cfg dsi_cmd;
+};
+
+struct mdp3_intf {
+ struct mdp3_intf_cfg cfg;
+ int active;
+ int available;
+ int in_use;
+ int (*config)(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg);
+ int (*start)(struct mdp3_intf *intf);
+ int (*stop)(struct mdp3_intf *intf);
+};
+
+int mdp3_dma_init(struct mdp3_dma *dma,
+ struct mdp3_dma_source *source_config,
+ struct mdp3_dma_output_config *output_config);
+
+int mdp3_intf_init(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg);
+
+void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type);
+
+void mdp3_dma_callback_disable(struct mdp3_dma *dma, int type);
+
+#endif /* MDP3_DMA_H */
diff --git a/drivers/video/msm/mdss/mdp3_hwio.h b/drivers/video/msm/mdss/mdp3_hwio.h
new file mode 100644
index 0000000..2763f46
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3_hwio.h
@@ -0,0 +1,216 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_HWIO_H
+#define MDP3_HWIO_H
+
+#include <linux/bitops.h>
+
+/*synchronization*/
+#define MDP3_REG_SYNC_CONFIG_0 0x0300
+#define MDP3_REG_SYNC_CONFIG_1 0x0304
+#define MDP3_REG_SYNC_CONFIG_2 0x0308
+#define MDP3_REG_SYNC_STATUS_0 0x030c
+#define MDP3_REG_SYNC_STATUS_1 0x0310
+#define MDP3_REG_SYNC_STATUS_2 0x0314
+#define MDP3_REG_PRIMARY_VSYNC_OUT_CTRL 0x0318
+#define MDP3_REG_SECONDARY_VSYNC_OUT_CTRL 0x031c
+#define MDP3_REG_EXTERNAL_VSYNC_OUT_CTRL 0x0320
+#define MDP3_REG_VSYNC_SEL 0x0324
+
+/*interrupt*/
+#define MDP3_REG_INTR_ENABLE 0x0020
+#define MDP3_REG_INTR_STATUS 0x0024
+#define MDP3_REG_INTR_CLEAR 0x0028
+
+#define MDP3_REG_PRIMARY_RD_PTR_IRQ 0x021C
+#define MDP3_REG_SECONDARY_RD_PTR_IRQ 0x0220
+
+/*operation control*/
+#define MDP3_REG_DMA_P_START 0x0044
+#define MDP3_REG_DMA_S_START 0x0048
+#define MDP3_REG_DMA_E_START 0x004c
+
+#define MDP3_REG_DISPLAY_STATUS 0x0038
+
+#define MDP3_REG_HW_VERSION 0x0070
+#define MDP3_REG_SW_RESET 0x0074
+
+/*EBI*/
+#define MDP3_REG_EBI2_LCD0 0x003c
+#define MDP3_REG_EBI2_LCD0_YSTRIDE 0x0050
+
+/*DMA_P*/
+#define MDP3_REG_DMA_P_CONFIG 0x90000
+#define MDP3_REG_DMA_P_SIZE 0x90004
+#define MDP3_REG_DMA_P_IBUF_ADDR 0x90008
+#define MDP3_REG_DMA_P_IBUF_Y_STRIDE 0x9000C
+#define MDP3_REG_DMA_P_PROFILE_EN 0x90020
+#define MDP3_REG_DMA_P_OUT_XY 0x90010
+#define MDP3_REG_DMA_P_CURSOR_FORMAT 0x90040
+#define MDP3_REG_DMA_P_CURSOR_SIZE 0x90044
+#define MDP3_REG_DMA_P_CURSOR_BUF_ADDR 0x90048
+#define MDP3_REG_DMA_P_CURSOR_POS 0x9004c
+#define MDP3_REG_DMA_P_CURSOR_BLEND_CONFIG 0x90060
+#define MDP3_REG_DMA_P_CURSOR_BLEND_PARAM 0x90064
+#define MDP3_REG_DMA_P_CURSOR_BLEND_TRANS_MASK 0x90068
+#define MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG 0x90070
+#define MDP3_REG_DMA_P_CSC_BYPASS 0x93004
+#define MDP3_REG_DMA_P_CSC_MV1 0x93400
+#define MDP3_REG_DMA_P_CSC_MV2 0x93440
+#define MDP3_REG_DMA_P_CSC_PRE_BV1 0x93500
+#define MDP3_REG_DMA_P_CSC_PRE_BV2 0x93540
+#define MDP3_REG_DMA_P_CSC_POST_BV1 0x93580
+#define MDP3_REG_DMA_P_CSC_POST_BV2 0x935c0
+#define MDP3_REG_DMA_P_CSC_PRE_LV1 0x93600
+#define MDP3_REG_DMA_P_CSC_PRE_LV2 0x93640
+#define MDP3_REG_DMA_P_CSC_POST_LV1 0x93680
+#define MDP3_REG_DMA_P_CSC_POST_LV2 0x936c0
+#define MDP3_REG_DMA_P_CSC_LUT1 0x93800
+#define MDP3_REG_DMA_P_CSC_LUT2 0x93c00
+#define MDP3_REG_DMA_P_HIST_START 0x94000
+#define MDP3_REG_DMA_P_HIST_FRAME_CNT 0x94004
+#define MDP3_REG_DMA_P_HIST_BIT_MASK 0x94008
+#define MDP3_REG_DMA_P_HIST_RESET_SEQ_START 0x9400c
+#define MDP3_REG_DMA_P_HIST_CONTROL 0x94010
+#define MDP3_REG_DMA_P_HIST_INTR_STATUS 0x94014
+#define MDP3_REG_DMA_P_HIST_INTR_CLEAR 0x94018
+#define MDP3_REG_DMA_P_HIST_INTR_ENABLE 0x9401c
+#define MDP3_REG_DMA_P_HIST_STOP_REQ 0x94020
+#define MDP3_REG_DMA_P_HIST_CANCEL_REQ 0x94024
+#define MDP3_REG_DMA_P_HIST_EXTRA_INFO_0 0x94028
+#define MDP3_REG_DMA_P_HIST_EXTRA_INFO_1 0x9402c
+#define MDP3_REG_DMA_P_HIST_R_DATA 0x94100
+#define MDP3_REG_DMA_P_HIST_G_DATA 0x94200
+#define MDP3_REG_DMA_P_HIST_B_DATA 0x94300
+#define MDP3_REG_DMA_P_FETCH_CFG 0x90074
+#define MDP3_REG_DMA_P_DCVS_CTRL 0x90080
+#define MDP3_REG_DMA_P_DCVS_STATUS 0x90084
+
+/*DMA_S*/
+#define MDP3_REG_DMA_S_CONFIG 0x90000
+#define MDP3_REG_DMA_S_SIZE 0x90004
+#define MDP3_REG_DMA_S_IBUF_ADDR 0x90008
+#define MDP3_REG_DMA_S_IBUF_Y_STRIDE 0x9000C
+#define MDP3_REG_DMA_S_OUT_XY 0x90010
+
+/*interface*/
+#define MDP3_REG_LCDC_EN 0xE0000
+#define MDP3_REG_LCDC_HSYNC_CTL 0xE0004
+#define MDP3_REG_LCDC_VSYNC_PERIOD 0xE0008
+#define MDP3_REG_LCDC_VSYNC_PULSE_WIDTH 0xE000C
+#define MDP3_REG_LCDC_DISPLAY_HCTL 0xE0010
+#define MDP3_REG_LCDC_DISPLAY_V_START 0xE0014
+#define MDP3_REG_LCDC_DISPLAY_V_END 0xE0018
+#define MDP3_REG_LCDC_ACTIVE_HCTL 0xE001C
+#define MDP3_REG_LCDC_ACTIVE_V_START 0xE0020
+#define MDP3_REG_LCDC_ACTIVE_V_END 0xE0024
+#define MDP3_REG_LCDC_BORDER_COLOR 0xE0028
+#define MDP3_REG_LCDC_UNDERFLOW_CTL 0xE002C
+#define MDP3_REG_LCDC_HSYNC_SKEW 0xE0030
+#define MDP3_REG_LCDC_TEST_CTL 0xE0034
+#define MDP3_REG_LCDC_CTL_POLARITY 0xE0038
+#define MDP3_REG_LCDC_TEST_COL_VAR1 0xE003C
+#define MDP3_REG_LCDC_TEST_COL_VAR2 0xE0040
+#define MDP3_REG_LCDC_UFLOW_HIDING_CTL 0xE0044
+#define MDP3_REG_LCDC_LOST_PIXEL_CNT_VALUE 0xE0048
+
+#define MDP3_REG_DSI_VIDEO_EN 0xF0000
+#define MDP3_REG_DSI_VIDEO_HSYNC_CTL 0xF0004
+#define MDP3_REG_DSI_VIDEO_VSYNC_PERIOD 0xF0008
+#define MDP3_REG_DSI_VIDEO_VSYNC_PULSE_WIDTH 0xF000C
+#define MDP3_REG_DSI_VIDEO_DISPLAY_HCTL 0xF0010
+#define MDP3_REG_DSI_VIDEO_DISPLAY_V_START 0xF0014
+#define MDP3_REG_DSI_VIDEO_DISPLAY_V_END 0xF0018
+#define MDP3_REG_DSI_VIDEO_ACTIVE_HCTL 0xF001C
+#define MDP3_REG_DSI_VIDEO_ACTIVE_V_START 0xF0020
+#define MDP3_REG_DSI_VIDEO_ACTIVE_V_END 0xF0024
+#define MDP3_REG_DSI_VIDEO_BORDER_COLOR 0xF0028
+#define MDP3_REG_DSI_VIDEO_UNDERFLOW_CTL 0xF002C
+#define MDP3_REG_DSI_VIDEO_HSYNC_SKEW 0xF0030
+#define MDP3_REG_DSI_VIDEO_TEST_CTL 0xF0034
+#define MDP3_REG_DSI_VIDEO_CTL_POLARITY 0xF0038
+#define MDP3_REG_DSI_VIDEO_TEST_COL_VAR1 0xF003C
+#define MDP3_REG_DSI_VIDEO_TEST_COL_VAR2 0xF0040
+#define MDP3_REG_DSI_VIDEO_UFLOW_HIDING_CTL 0xF0044
+#define MDP3_REG_DSI_VIDEO_LOST_PIXEL_CNT_VALUE 0xF0048
+
+#define MDP3_REG_DSI_CMD_MODE_ID_MAP 0xF1000
+#define MDP3_REG_DSI_CMD_MODE_TRIGGER_EN 0xF1004
+
+/*interrupt mask*/
+
+#define MDP3_INTR_DP0_ROI_DONE_BIT BIT(0)
+#define MDP3_INTR_DP1_ROI_DONE_BIT BIT(1)
+#define MDP3_INTR_DMA_S_DONE_BIT BIT(2)
+#define MDP3_INTR_DMA_E_DONE_BIT BIT(3)
+#define MDP3_INTR_DP0_TERMINAL_FRAME_DONE_BIT BIT(4)
+#define MDP3_INTR_DP1_TERMINAL_FRAME_DONE_BIT BIT(5)
+#define MDP3_INTR_DMA_TV_DONE_BIT BIT(6)
+#define MDP3_INTR_TV_ENCODER_UNDER_RUN_BIT BIT(7)
+#define MDP3_INTR_SYNC_PRIMARY_LINE_BIT BIT(8)
+#define MDP3_INTR_SYNC_SECONDARY_LINE_BIT BIT(9)
+#define MDP3_INTR_SYNC_EXTERNAL_LINE_BIT BIT(10)
+#define MDP3_INTR_DP0_FETCH_DONE_BIT BIT(11)
+#define MDP3_INTR_DP1_FETCH_DONE_BIT BIT(12)
+#define MDP3_INTR_TV_OUT_FRAME_START_BIT BIT(13)
+#define MDP3_INTR_DMA_P_DONE_BIT BIT(14)
+#define MDP3_INTR_LCDC_START_OF_FRAME_BIT BIT(15)
+#define MDP3_INTR_LCDC_UNDERFLOW_BIT BIT(16)
+#define MDP3_INTR_DMA_P_LINE_BIT BIT(17)
+#define MDP3_INTR_DMA_S_LINE_BIT BIT(18)
+#define MDP3_INTR_DMA_E_LINE_BIT BIT(19)
+#define MDP3_INTR_DMA_P_HISTO_BIT BIT(20)
+#define MDP3_INTR_DTV_OUT_DONE_BIT BIT(21)
+#define MDP3_INTR_DTV_OUT_START_OF_FRAME_BIT BIT(22)
+#define MDP3_INTR_DTV_OUT_UNDERFLOW_BIT BIT(23)
+#define MDP3_INTR_DTV_OUT_LINE_BIT BIT(24)
+#define MDP3_INTR_DMA_P_AUTO_FREFRESH_START_BIT BIT(25)
+#define MDP3_INTR_DMA_S_AUTO_FREFRESH_START_BIT BIT(26)
+#define MDP3_INTR_QPIC_EOF_ENABLE_BIT BIT(27)
+
+enum {
+ MDP3_INTR_DP0_ROI_DONE,
+ MDP3_INTR_DP1_ROI_DONE,
+ MDP3_INTR_DMA_S_DONE,
+ MDP3_INTR_DMA_E_DONE,
+ MDP3_INTR_DP0_TERMINAL_FRAME_DONE,
+ MDP3_INTR_DP1_TERMINAL_FRAME_DONE,
+ MDP3_INTR_DMA_TV_DONE,
+ MDP3_INTR_TV_ENCODER_UNDER_RUN,
+ MDP3_INTR_SYNC_PRIMARY_LINE,
+ MDP3_INTR_SYNC_SECONDARY_LINE,
+ MDP3_INTR_SYNC_EXTERNAL_LINE,
+ MDP3_INTR_DP0_FETCH_DONE,
+ MDP3_INTR_DP1_FETCH_DONE,
+ MDP3_INTR_TV_OUT_FRAME_START,
+ MDP3_INTR_DMA_P_DONE,
+ MDP3_INTR_LCDC_START_OF_FRAME,
+ MDP3_INTR_LCDC_UNDERFLOW,
+ MDP3_INTR_DMA_P_LINE,
+ MDP3_INTR_DMA_S_LINE,
+ MDP3_INTR_DMA_E_LINE,
+ MDP3_INTR_DMA_P_HISTO,
+ MDP3_INTR_DTV_OUT_DONE,
+ MDP3_INTR_DTV_OUT_START_OF_FRAME,
+ MDP3_INTR_DTV_OUT_UNDERFLOW,
+ MDP3_INTR_DTV_OUT_LINE,
+ MDP3_INTR_DMA_P_AUTO_FREFRESH_START,
+ MDP3_INTR_DMA_S_AUTO_FREFRESH_START,
+ MDP3_INTR_QPIC_EOF_ENABLE,
+};
+
+#define MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT BIT(0)
+#define MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT BIT(1)
+
+#endif /* MDP3_HWIO_H */
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index c847ee6..5be0173 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -62,8 +62,6 @@
struct regulator *fs;
u32 max_mdp_clk_rate;
- struct workqueue_struct *clk_ctrl_wq;
- struct work_struct clk_ctrl_worker;
struct platform_device *pdev;
char __iomem *mdp_base;
size_t mdp_reg_size;
@@ -73,12 +71,13 @@
u32 irq_mask;
u32 irq_ena;
u32 irq_buzy;
+ u32 has_bwc;
+ u32 has_decimation;
u32 mdp_irq_mask;
u32 mdp_hist_irq_mask;
int suspend_fs_ena;
- atomic_t clk_ref;
u8 clk_ena;
u8 fs_ena;
u8 vsync_ena;
@@ -135,6 +134,7 @@
irqreturn_t (*irq_handler)(int irq, void *ptr);
};
+int mdss_register_irq(struct mdss_hw *hw);
void mdss_enable_irq(struct mdss_hw *hw);
void mdss_disable_irq(struct mdss_hw *hw);
void mdss_disable_irq_nosync(struct mdss_hw *hw);
diff --git a/drivers/video/msm/mdss/mdss_debug.c b/drivers/video/msm/mdss/mdss_debug.c
index 7dc4f49..0918db1 100644
--- a/drivers/video/msm/mdss/mdss_debug.c
+++ b/drivers/video/msm/mdss/mdss_debug.c
@@ -24,7 +24,7 @@
#include "mdss_mdp.h"
#include "mdss_debug.h"
-#define DEFAULT_BASE_REG_CNT 128
+#define DEFAULT_BASE_REG_CNT 0x100
#define GROUP_BYTES 4
#define ROW_BYTES 16
@@ -67,7 +67,8 @@
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct mdss_debug_base *dbg = file->private_data;
- u32 off, cnt;
+ u32 off = 0;
+ u32 cnt = DEFAULT_BASE_REG_CNT;
char buf[24];
if (!dbg)
@@ -81,14 +82,11 @@
buf[count] = 0; /* end of string */
- sscanf(buf, "%5x %d", &off, &cnt);
+ sscanf(buf, "%5x %x", &off, &cnt);
if (off > dbg->max_offset)
return -EINVAL;
- if (cnt <= 0)
- cnt = DEFAULT_BASE_REG_CNT;
-
if (cnt > (dbg->max_offset - off))
cnt = dbg->max_offset - off;
@@ -113,7 +111,7 @@
if (*ppos)
return 0; /* the end */
- len = snprintf(buf, sizeof(buf), "0x%08x %d\n", dbg->off, dbg->off);
+ len = snprintf(buf, sizeof(buf), "0x%08x %x\n", dbg->off, dbg->cnt);
if (len < 0)
return 0;
@@ -288,6 +286,96 @@
return -ENODEV;
}
+
+static int mdss_debug_stat_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int mdss_debug_stat_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int mdss_debug_stat_ctl_dump(struct mdss_mdp_ctl *ctl,
+ char *bp, int len)
+{
+ int tot = 0;
+
+ if (!ctl->ref_cnt)
+ return 0;
+
+ if (ctl->intf_num) {
+ tot = scnprintf(bp, len,
+ "intf%d: play: %08u \tvsync: %08u \tunderrun: %08u\n",
+ ctl->intf_num, ctl->play_cnt,
+ ctl->vsync_cnt, ctl->underrun_cnt);
+ } else {
+ tot = scnprintf(bp, len, "wb: \tmode=%x \tplay: %08u\n",
+ ctl->opmode, ctl->play_cnt);
+ }
+
+ return tot;
+}
+
+static ssize_t mdss_debug_stat_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ struct mdss_data_type *mdata = file->private_data;
+ struct mdss_mdp_pipe *pipe;
+ int i, len, tot;
+ char bp[512];
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = sizeof(bp);
+
+ tot = scnprintf(bp, len, "\nmdp:\n");
+
+ for (i = 0; i < mdata->nctl; i++)
+ tot += mdss_debug_stat_ctl_dump(mdata->ctl_off + i,
+ bp + tot, len - tot);
+ tot += scnprintf(bp + tot, len - tot, "\n");
+
+ for (i = 0; i < mdata->nvig_pipes; i++) {
+ pipe = mdata->vig_pipes + i;
+ tot += scnprintf(bp + tot, len - tot,
+ "VIG%d : %08u\t", i, pipe->play_cnt);
+ }
+ tot += scnprintf(bp + tot, len - tot, "\n");
+
+ for (i = 0; i < mdata->nrgb_pipes; i++) {
+ pipe = mdata->rgb_pipes + i;
+ tot += scnprintf(bp + tot, len - tot,
+ "RGB%d : %08u\t", i, pipe->play_cnt);
+ }
+ tot += scnprintf(bp + tot, len - tot, "\n");
+
+ for (i = 0; i < mdata->ndma_pipes; i++) {
+ pipe = mdata->dma_pipes + i;
+ tot += scnprintf(bp + tot, len - tot,
+ "DMA%d : %08u\t", i, pipe->play_cnt);
+ }
+ tot += scnprintf(bp + tot, len - tot, "\n");
+
+ if (copy_to_user(buff, bp, tot))
+ return -EFAULT;
+
+ *ppos += tot; /* increase offset */
+
+ return tot;
+}
+
+static const struct file_operations mdss_stat_fops = {
+ .open = mdss_debug_stat_open,
+ .release = mdss_debug_stat_release,
+ .read = mdss_debug_stat_read,
+};
+
static int mdss_debugfs_cleanup(struct mdss_debug_data *mdd)
{
struct mdss_debug_base *base, *tmp;
@@ -332,6 +420,7 @@
mdss_debugfs_cleanup(mdd);
return -ENODEV;
}
+ debugfs_create_file("stat", 0644, mdd->root, mdata, &mdss_stat_fops);
debugfs_create_u32("min_mdp_clk", 0644, mdd->root,
(u32 *)&mdata->min_mdp_clk);
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 8bf8c95..acac6b9 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -326,19 +326,6 @@
snprintf(mp->vreg_config[i].vreg_name,
ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st);
- /* vreg-type */
- rc = of_property_read_string_index(of_node, "qcom,supply-type",
- i, &st);
- if (rc) {
- pr_err("%s: error reading vreg type. rc=%d\n",
- __func__, rc);
- goto error;
- }
- if (!strncmp(st, "regulator", 9))
- mp->vreg_config[i].type = 0;
- else if (!strncmp(st, "switch", 6))
- mp->vreg_config[i].type = 1;
-
/* vreg-min-voltage */
memset(val_array, 0, sizeof(u32) * dt_vreg_total);
rc = of_property_read_u32_array(of_node,
@@ -373,14 +360,13 @@
__func__, rc);
goto error;
}
- mp->vreg_config[i].optimum_voltage = val_array[i];
+ mp->vreg_config[i].peak_current = val_array[i];
- pr_debug("%s: %s type=%d, min=%d, max=%d, op=%d\n",
- __func__, mp->vreg_config[i].vreg_name,
- mp->vreg_config[i].type,
+ pr_debug("%s: %s min=%d, max=%d, pc=%d\n", __func__,
+ mp->vreg_config[i].vreg_name,
mp->vreg_config[i].min_voltage,
mp->vreg_config[i].max_voltage,
- mp->vreg_config[i].optimum_voltage);
+ mp->vreg_config[i].peak_current);
}
devm_kfree(dev, val_array);
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index ccec0fc..1a64be4 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -59,6 +59,9 @@
mdss_dsi1_hw.ptr = (void *)(ctrl);
ctrl->mdss_hw = &mdss_dsi1_hw;
}
+
+ if (mdss_register_irq(ctrl->mdss_hw))
+ pr_err("%s: mdss_register_irq failed.\n", __func__);
}
void mdss_dsi_irq_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable, int isr)
@@ -810,7 +813,6 @@
dsi_ctrl |= BIT(0); /* enable dsi */
MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
- mdss_dsi_irq_ctrl(ctrl_pdata, 1, 0); /* enable dsi irq */
wmb();
}
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index ed730b3..a7ea948 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -1143,6 +1143,7 @@
struct mdp_display_commit disp_commit;
memset(&disp_commit, 0, sizeof(disp_commit));
disp_commit.wait_for_finish = true;
+ memcpy(&disp_commit.var, var, sizeof(struct fb_var_screeninfo));
return mdss_fb_pan_display_ex(info, &disp_commit);
}
@@ -1209,6 +1210,7 @@
mdss_fb_wait_for_fence(mfd);
if (mfd->mdp.kickoff_fnc)
mfd->mdp.kickoff_fnc(mfd);
+ mdss_fb_update_backlight(mfd);
mdss_fb_signal_timeline(mfd);
} else {
var = &fb_backup->disp_commit.var;
@@ -1431,6 +1433,7 @@
int i, fence_cnt = 0, ret = 0;
int acq_fen_fd[MDP_MAX_FENCE_FD];
struct sync_fence *fence;
+ u32 threshold;
if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
(mfd->timeline == NULL))
@@ -1464,8 +1467,13 @@
if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
mdss_fb_wait_for_fence(mfd);
+ if (mfd->panel.type == WRITEBACK_PANEL)
+ threshold = 1;
+ else
+ threshold = 2;
+
mfd->cur_rel_sync_pt = sw_sync_pt_create(mfd->timeline,
- mfd->timeline_value + 2);
+ mfd->timeline_value + threshold);
if (mfd->cur_rel_sync_pt == NULL) {
pr_err("%s: cannot create sync point", __func__);
ret = -ENOMEM;
@@ -1729,4 +1737,4 @@
return 0;
}
-device_initcall_sync(mdss_fb_init);
+module_init(mdss_fb_init);
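
The buf_sync change above creates the release sync point "threshold" steps ahead of the current timeline value instead of a fixed two, presumably because a writeback (WFD) frame can be released as soon as it has been written out, while a display frame is only safe to release once the following frame has replaced it on the panel. A worked sketch of that arithmetic, with the names taken from the hunk and the signalling model simplified:

/*
 * Simplified model (illustration only): each signalled commit advances the
 * sw_sync timeline by one, and the release fence fires once the timeline
 * reaches the point created here.
 */
static u32 release_point(u32 timeline_value, bool is_writeback)
{
        u32 threshold = is_writeback ? 1 : 2;   /* WRITEBACK_PANEL retires earlier */

        return timeline_value + threshold;
}
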
diff --git a/drivers/video/msm/mdss/mdss_hdmi_cec.c b/drivers/video/msm/mdss/mdss_hdmi_cec.c
new file mode 100644
index 0000000..2cf47fc
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_cec.c
@@ -0,0 +1,953 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <mach/board.h>
+
+#include "mdss_hdmi_cec.h"
+
+#define CEC_STATUS_WR_ERROR BIT(0)
+#define CEC_STATUS_WR_DONE BIT(1)
+
+/* Reference: HDMI 1.4a Specification section 7.1 */
+#define RETRANSMIT_MAX_NUM 5
+
+/*
+ * Ref. HDMI 1.4a: Supplement-1 CEC Section 6, 7
+ */
+struct hdmi_cec_msg {
+ u8 sender_id;
+ u8 recvr_id;
+ u8 opcode;
+ u8 operand[15];
+ u8 frame_size;
+ u8 retransmit;
+};
+
+struct hdmi_cec_msg_node {
+ struct hdmi_cec_msg msg;
+ struct list_head list;
+};
+
+struct hdmi_cec_ctrl {
+ bool cec_enabled;
+ bool compliance_response_enabled;
+ bool cec_engine_configed;
+
+ u8 cec_logical_addr;
+ u32 cec_msg_wr_status;
+
+ spinlock_t lock;
+ struct list_head msg_head;
+ struct work_struct cec_read_work;
+ struct completion cec_msg_wr_done;
+ struct hdmi_cec_init_data init_data;
+};
+
+static int hdmi_cec_msg_send(struct hdmi_cec_ctrl *cec_ctrl,
+ struct hdmi_cec_msg *msg);
+
+static void hdmi_cec_dump_msg(struct hdmi_cec_ctrl *cec_ctrl,
+ struct hdmi_cec_msg *msg)
+{
+ int i;
+ unsigned long flags;
+
+ if (!cec_ctrl || !msg) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ DEV_DBG("=================%pS dump start =====================\n",
+ __builtin_return_address(0));
+
+ DEV_DBG("sender_id : %d", msg->sender_id);
+ DEV_DBG("recvr_id : %d", msg->recvr_id);
+
+ if (msg->frame_size < 2) {
+ DEV_DBG("polling message");
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+ return;
+ }
+
+ DEV_DBG("opcode : %02x", msg->opcode);
+ for (i = 0; i < msg->frame_size - 2; i++)
+ DEV_DBG("operand(%2d) : %02x", i + 1, msg->operand[i]);
+
+ DEV_DBG("=================%pS dump end =====================\n",
+ __builtin_return_address(0));
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+} /* hdmi_cec_dump_msg */
+
+static inline void hdmi_cec_write_logical_addr(struct hdmi_cec_ctrl *cec_ctrl,
+ u8 addr)
+{
+ if (!cec_ctrl || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return;
+ }
+
+ DSS_REG_W(cec_ctrl->init_data.io, HDMI_CEC_ADDR, addr & 0xF);
+} /* hdmi_cec_write_logical_addr */
+
+static void hdmi_cec_disable(struct hdmi_cec_ctrl *cec_ctrl)
+{
+ u32 reg_val;
+ unsigned long flags;
+ struct dss_io_data *io = NULL;
+ struct hdmi_cec_msg_node *msg_node, *tmp;
+
+ if (!cec_ctrl || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return;
+ }
+
+ io = cec_ctrl->init_data.io;
+
+ /* Disable Engine */
+ DSS_REG_W(io, HDMI_CEC_CTRL, 0);
+
+ /* Disable CEC interrupts */
+ reg_val = DSS_REG_R(io, HDMI_CEC_INT);
+ DSS_REG_W(io, HDMI_CEC_INT, reg_val & ~BIT(1) & ~BIT(3) & ~BIT(7));
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ list_for_each_entry_safe(msg_node, tmp, &cec_ctrl->msg_head, list) {
+ list_del(&msg_node->list);
+ kfree(msg_node);
+ }
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+} /* hdmi_cec_disable */
+
+static void hdmi_cec_enable(struct hdmi_cec_ctrl *cec_ctrl)
+{
+ struct dss_io_data *io = NULL;
+
+ if (!cec_ctrl || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return;
+ }
+
+ io = cec_ctrl->init_data.io;
+
+ INIT_LIST_HEAD(&cec_ctrl->msg_head);
+
+ /* Enable CEC interrupts */
+ DSS_REG_W(io, HDMI_CEC_INT, BIT(1) | BIT(3) | BIT(7));
+
+ /* Enable Engine */
+ DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0));
+} /* hdmi_cec_enable */
+
+static int hdmi_cec_send_abort_opcode(struct hdmi_cec_ctrl *cec_ctrl,
+ struct hdmi_cec_msg *in_msg, u8 reason_operand)
+{
+ int i = 0;
+ struct hdmi_cec_msg out_msg;
+
+ if (!cec_ctrl || !in_msg) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ out_msg.sender_id = 0x4;
+ out_msg.recvr_id = in_msg->sender_id;
+ out_msg.opcode = 0x0; /* opcode for feature abort */
+ out_msg.operand[i++] = in_msg->opcode;
+ out_msg.operand[i++] = reason_operand;
+ out_msg.frame_size = i + 2;
+
+ return hdmi_cec_msg_send(cec_ctrl, &out_msg);
+} /* hdmi_cec_send_abort_opcode */
+
+static int hdmi_cec_msg_parser(struct hdmi_cec_ctrl *cec_ctrl,
+ struct hdmi_cec_msg *in_msg)
+{
+ int rc = 0, i = 0;
+ struct hdmi_cec_msg out_msg;
+
+ if (!cec_ctrl || !in_msg) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ DEV_DBG("%s: in_msg->opcode = 0x%x\n", __func__, in_msg->opcode);
+ switch (in_msg->opcode) {
+ case 0x64:
+ /* Set OSD String */
+ DEV_INFO("%s: Recvd OSD Str=[0x%x]\n", __func__,
+ in_msg->operand[3]);
+ break;
+ case 0x83:
+ /* Give Phy Addr */
+ DEV_INFO("%s: Recvd a Give Phy Addr cmd\n", __func__);
+
+ out_msg.sender_id = 0x4;
+ out_msg.recvr_id = 0xF; /* Broadcast */
+ out_msg.opcode = 0x84;
+ out_msg.operand[i++] = 0x10;
+ out_msg.operand[i++] = 0x0;
+ out_msg.operand[i++] = 0x04;
+ out_msg.frame_size = i + 2;
+
+ rc = hdmi_cec_msg_send(cec_ctrl, &out_msg);
+ break;
+ case 0xFF:
+ /* Abort */
+ DEV_INFO("%s: Recvd an abort cmd.\n", __func__);
+
+ /* reason = "Refused" */
+ rc = hdmi_cec_send_abort_opcode(cec_ctrl, in_msg, 0x04);
+ break;
+ case 0x46:
+ /* Give OSD name */
+ DEV_INFO("%s: Recvd 'Give OSD name' cmd.\n", __func__);
+
+ out_msg.sender_id = 0x4;
+ out_msg.recvr_id = in_msg->sender_id;
+ out_msg.opcode = 0x47; /* OSD Name */
+ /* Display control byte */
+ out_msg.operand[i++] = 0x0;
+ out_msg.operand[i++] = 'H';
+ out_msg.operand[i++] = 'e';
+ out_msg.operand[i++] = 'l';
+ out_msg.operand[i++] = 'l';
+ out_msg.operand[i++] = 'o';
+ out_msg.operand[i++] = ' ';
+ out_msg.operand[i++] = 'W';
+ out_msg.operand[i++] = 'o';
+ out_msg.operand[i++] = 'r';
+ out_msg.operand[i++] = 'l';
+ out_msg.operand[i++] = 'd';
+ out_msg.frame_size = i + 2;
+
+ rc = hdmi_cec_msg_send(cec_ctrl, &out_msg);
+ break;
+ case 0x8F:
+ /* Give Device Power status */
+ DEV_INFO("%s: Recvd a Power status message\n", __func__);
+
+ out_msg.sender_id = 0x4;
+ out_msg.recvr_id = in_msg->sender_id;
+ out_msg.opcode = 0x90; /* OSD String */
+ out_msg.operand[i++] = 'H';
+ out_msg.operand[i++] = 'e';
+ out_msg.operand[i++] = 'l';
+ out_msg.operand[i++] = 'l';
+ out_msg.operand[i++] = 'o';
+ out_msg.operand[i++] = ' ';
+ out_msg.operand[i++] = 'W';
+ out_msg.operand[i++] = 'o';
+ out_msg.operand[i++] = 'r';
+ out_msg.operand[i++] = 'l';
+ out_msg.operand[i++] = 'd';
+ out_msg.frame_size = i + 2;
+
+ rc = hdmi_cec_msg_send(cec_ctrl, &out_msg);
+ break;
+ case 0x80:
+ /* Routing Change cmd */
+ case 0x86:
+ /* Set Stream Path */
+ DEV_INFO("%s: Recvd Set Stream or Routing Change cmd\n",
+ __func__);
+
+ out_msg.sender_id = 0x4;
+ out_msg.recvr_id = 0xF; /* broadcast this message */
+ out_msg.opcode = 0x82; /* Active Source */
+ out_msg.operand[i++] = 0x10;
+ out_msg.operand[i++] = 0x0;
+ out_msg.frame_size = i + 2;
+
+ rc = hdmi_cec_msg_send(cec_ctrl, &out_msg);
+
+ /* todo: check if need to wait for msg response from sink */
+
+ /* sending <Image View On> message */
+ memset(&out_msg, 0x0, sizeof(struct hdmi_cec_msg));
+ i = 0;
+ out_msg.sender_id = 0x4;
+ out_msg.recvr_id = in_msg->sender_id;
+ out_msg.opcode = 0x04; /* opcode for Image View On */
+ out_msg.frame_size = i + 2;
+
+ rc = hdmi_cec_msg_send(cec_ctrl, &out_msg);
+ break;
+ case 0x44:
+ /* User Control Pressed */
+ DEV_INFO("%s: User Control Pressed\n", __func__);
+ break;
+ case 0x45:
+ /* User Control Released */
+ DEV_INFO("%s: User Control Released\n", __func__);
+ break;
+ default:
+ DEV_INFO("%s: Recvd an unknown cmd = [%u]\n", __func__,
+ in_msg->opcode);
+
+ /* reason = "Unrecognized opcode" */
+ rc = hdmi_cec_send_abort_opcode(cec_ctrl, in_msg, 0x0);
+ break;
+ }
+
+ return rc;
+} /* hdmi_cec_msg_parser */
+
+static int hdmi_cec_msg_send(struct hdmi_cec_ctrl *cec_ctrl,
+ struct hdmi_cec_msg *msg)
+{
+ int i, line_check_retry = 10;
+ u32 frame_retransmit = RETRANSMIT_MAX_NUM;
+ bool frame_type;
+ unsigned long flags;
+ struct dss_io_data *io = NULL;
+
+ if (!cec_ctrl || !cec_ctrl->init_data.io || !msg) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ io = cec_ctrl->init_data.io;
+
+ INIT_COMPLETION(cec_ctrl->cec_msg_wr_done);
+ cec_ctrl->cec_msg_wr_status = 0;
+ frame_type = (msg->recvr_id == 15 ? BIT(0) : 0);
+ if (msg->retransmit > 0 && msg->retransmit < RETRANSMIT_MAX_NUM)
+ frame_retransmit = msg->retransmit;
+
+ /* toggle cec in order to flush out bad hw state, if any */
+ DSS_REG_W(io, HDMI_CEC_CTRL, 0);
+ DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0));
+
+ frame_retransmit = (frame_retransmit & 0xF) << 4;
+ DSS_REG_W(io, HDMI_CEC_RETRANSMIT, BIT(0) | frame_retransmit);
+
+ /* header block */
+ DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+ (((msg->sender_id << 4) | msg->recvr_id) << 8) | frame_type);
+
+ /* data block 0 : opcode */
+ DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+ ((msg->frame_size < 2 ? 0 : msg->opcode) << 8) | frame_type);
+
+ /* data block 1-14 : operand 0-13 */
+ for (i = 0; i < msg->frame_size - 2; i++)
+ DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+ (msg->operand[i] << 8) | frame_type);
+
+ while ((DSS_REG_R(io, HDMI_CEC_STATUS) & BIT(0)) &&
+ line_check_retry--) {
+ DEV_DBG("%s: CEC line is busy(%d)\n", __func__,
+ line_check_retry);
+ schedule();
+ }
+
+ if (!line_check_retry && (DSS_REG_R(io, HDMI_CEC_STATUS) & BIT(0))) {
+ DEV_ERR("%s: CEC line is busy. Retry\n", __func__);
+ return -EAGAIN;
+ }
+
+ /* start transmission */
+ DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0) | BIT(1) |
+ ((msg->frame_size & 0x1F) << 4) | BIT(9));
+
+ if (!wait_for_completion_interruptible_timeout(
+ &cec_ctrl->cec_msg_wr_done, HZ)) {
+ DEV_ERR("%s: timedout", __func__);
+ hdmi_cec_dump_msg(cec_ctrl, msg);
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ if (cec_ctrl->cec_msg_wr_status == CEC_STATUS_WR_ERROR)
+ DEV_ERR("%s: msg write failed.\n", __func__);
+ else
+ DEV_DBG("%s: CEC write frame done (frame len=%d)", __func__,
+ msg->frame_size);
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+ hdmi_cec_dump_msg(cec_ctrl, msg);
+
+ return 0;
+} /* hdmi_cec_msg_send */
+
+static void hdmi_cec_msg_recv(struct work_struct *work)
+{
+ int i;
+ u32 data;
+ unsigned long flags;
+ struct hdmi_cec_ctrl *cec_ctrl = NULL;
+ struct dss_io_data *io = NULL;
+ struct hdmi_cec_msg_node *msg_node = NULL;
+
+ cec_ctrl = container_of(work, struct hdmi_cec_ctrl, cec_read_work);
+ if (!cec_ctrl || !cec_ctrl->cec_enabled || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ io = cec_ctrl->init_data.io;
+
+ msg_node = kzalloc(sizeof(*msg_node), GFP_KERNEL);
+ if (!msg_node) {
+ DEV_ERR("%s: FAILED: out of memory\n", __func__);
+ return;
+ }
+
+ data = DSS_REG_R(io, HDMI_CEC_RD_DATA);
+
+ msg_node->msg.recvr_id = (data & 0x000F);
+ msg_node->msg.sender_id = (data & 0x00F0) >> 4;
+ msg_node->msg.frame_size = (data & 0x1F00) >> 8;
+ DEV_DBG("%s: Recvd init=[%u] dest=[%u] size=[%u]\n", __func__,
+ msg_node->msg.sender_id, msg_node->msg.recvr_id,
+ msg_node->msg.frame_size);
+
+ if (msg_node->msg.frame_size < 1) {
+ DEV_ERR("%s: invalid message (frame length = %d)",
+ __func__, msg_node->msg.frame_size);
+ kfree(msg_node);
+ return;
+ } else if (msg_node->msg.frame_size == 1) {
+ DEV_DBG("%s: polling message (dest[%x] <- init[%x])", __func__,
+ msg_node->msg.recvr_id, msg_node->msg.sender_id);
+ kfree(msg_node);
+ return;
+ }
+
+ /* data block 0 : opcode */
+ data = DSS_REG_R_ND(io, HDMI_CEC_RD_DATA);
+ msg_node->msg.opcode = data & 0xFF;
+
+ /* data block 1-14 : operand 0-13 */
+ for (i = 0; i < msg_node->msg.frame_size - 2; i++) {
+ data = DSS_REG_R_ND(io, HDMI_CEC_RD_DATA);
+ msg_node->msg.operand[i] = data & 0xFF;
+ }
+
+ for (; i < 14; i++)
+ msg_node->msg.operand[i] = 0;
+
+ DEV_DBG("%s: CEC read frame done\n", __func__);
+ hdmi_cec_dump_msg(cec_ctrl, &msg_node->msg);
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ if (cec_ctrl->compliance_response_enabled) {
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ if (hdmi_cec_msg_parser(cec_ctrl, &msg_node->msg) != 0) {
+ DEV_ERR("%s: cec_msg_parser fail. Sending abort msg\n",
+ __func__);
+ /* reason = "Unrecognized opcode" */
+ hdmi_cec_send_abort_opcode(cec_ctrl,
+ &msg_node->msg, 0x0);
+ }
+ kfree(msg_node);
+ } else {
+ list_add_tail(&msg_node->list, &cec_ctrl->msg_head);
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ /* wake-up sysfs read_msg context */
+ sysfs_notify(cec_ctrl->init_data.sysfs_kobj, "cec", "rd_msg");
+ }
+} /* hdmi_cec_msg_recv*/
+
+static ssize_t hdmi_rda_cec_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ unsigned long flags;
+ struct hdmi_cec_ctrl *cec_ctrl =
+ hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+ if (!cec_ctrl || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return -EPERM;
+ }
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ if (cec_ctrl->cec_enabled && cec_ctrl->cec_engine_configed) {
+ DEV_DBG("%s: cec is enabled\n", __func__);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", 1);
+ } else if (cec_ctrl->cec_enabled && !cec_ctrl->cec_engine_configed) {
+ DEV_ERR("%s: CEC will be enabled when HDMI mirroring is on\n",
+ __func__);
+ ret = -EPERM;
+ } else {
+ DEV_DBG("%s: cec is disabled\n", __func__);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", 0);
+ }
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ return ret;
+} /* hdmi_rda_cec_enable */
+
+static ssize_t hdmi_wta_cec_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+ bool cec_en;
+ unsigned long flags;
+ ssize_t ret = strnlen(buf, PAGE_SIZE);
+ struct hdmi_cec_ctrl *cec_ctrl =
+ hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+ if (!cec_ctrl) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return -EPERM;
+ }
+
+ if (kstrtoint(buf, 10, &val)) {
+ DEV_ERR("%s: kstrtoint failed.\n", __func__);
+ return -EPERM;
+ }
+ cec_en = (val == 1) ? true : false;
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ if (cec_ctrl->cec_enabled == cec_en) {
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+ DEV_INFO("%s: cec is already %s\n", __func__,
+ cec_en ? "enabled" : "disabled");
+ return ret;
+ }
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ if (!cec_en) {
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ if (!cec_ctrl->cec_engine_configed) {
+ DEV_DBG("%s: hdmi is already off. disable cec\n",
+ __func__);
+ cec_ctrl->cec_enabled = false;
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+ return ret;
+ }
+ cec_ctrl->cec_enabled = false;
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ hdmi_cec_disable(cec_ctrl);
+ return ret;
+ } else {
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ if (!cec_ctrl->cec_engine_configed) {
+ DEV_DBG("%s: CEC will be enabled on mirroring\n",
+ __func__);
+ cec_ctrl->cec_enabled = true;
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+ return ret;
+ }
+ cec_ctrl->cec_enabled = true;
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ hdmi_cec_enable(cec_ctrl);
+
+ return ret;
+ }
+} /* hdmi_wta_cec_enable */
+
+static ssize_t hdmi_rda_cec_enable_compliance(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long flags;
+ ssize_t ret;
+ struct hdmi_cec_ctrl *cec_ctrl =
+ hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+ if (!cec_ctrl) {
+ DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+ return -EPERM;
+ }
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ cec_ctrl->compliance_response_enabled);
+
+ cec_ctrl->cec_logical_addr = 0x4;
+ hdmi_cec_write_logical_addr(cec_ctrl, cec_ctrl->cec_logical_addr);
+
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ return ret;
+} /* hdmi_rda_cec_enable_compliance */
+
+static ssize_t hdmi_wta_cec_enable_compliance(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+ unsigned long flags;
+ ssize_t ret = strnlen(buf, PAGE_SIZE);
+ struct hdmi_cec_ctrl *cec_ctrl =
+ hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+ if (!cec_ctrl) {
+ DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+ return -EPERM;
+ }
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ if (cec_ctrl->cec_enabled && cec_ctrl->cec_engine_configed) {
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+ DEV_ERR("%s: Cannot en/dis compliance when CEC session is on\n",
+ __func__);
+ return -EPERM;
+ } else {
+ if (kstrtoint(buf, 10, &val)) {
+ DEV_ERR("%s: kstrtoint failed.\n", __func__);
+ return -EPERM;
+ }
+ cec_ctrl->compliance_response_enabled =
+ (val == 1) ? true : false;
+ }
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ return ret;
+} /* hdmi_wta_cec_enable_compliance */
+
+static ssize_t hdmi_rda_cec_logical_addr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long flags;
+ ssize_t ret;
+ struct hdmi_cec_ctrl *cec_ctrl =
+ hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+ if (!cec_ctrl) {
+ DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+ return -EPERM;
+ }
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", cec_ctrl->cec_logical_addr);
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ return ret;
+} /* hdmi_rda_cec_logical_addr */
+
+static ssize_t hdmi_wta_cec_logical_addr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int logical_addr;
+ unsigned long flags;
+ ssize_t ret = strnlen(buf, PAGE_SIZE);
+ struct hdmi_cec_ctrl *cec_ctrl =
+ hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+ if (!cec_ctrl) {
+ DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+ return -EPERM;
+ }
+
+ if (kstrtoint(buf, 10, &logical_addr)) {
+ DEV_ERR("%s: kstrtoint failed\n", __func__);
+ return -EPERM;
+ }
+
+ if (logical_addr < 0 || logical_addr > 15) {
+ DEV_ERR("%s: Invalid logical address\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ cec_ctrl->cec_logical_addr = (u8)logical_addr;
+ if (cec_ctrl->cec_enabled && cec_ctrl->cec_engine_configed)
+ hdmi_cec_write_logical_addr(cec_ctrl,
+ cec_ctrl->cec_logical_addr);
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ return ret;
+} /* hdmi_wta_cec_logical_addr */
+
+static ssize_t hdmi_rda_cec_msg(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i = 0;
+ unsigned long flags;
+ struct hdmi_cec_msg_node *msg_node, *tmp;
+ struct hdmi_cec_ctrl *cec_ctrl =
+ hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+ if (!cec_ctrl) {
+ DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+ return -EPERM;
+ }
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+
+ if (cec_ctrl->compliance_response_enabled) {
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+ DEV_ERR("%s: Read is disabled coz compliance response is on\n",
+ __func__);
+ return -EPERM;
+ }
+
+ if (list_empty_careful(&cec_ctrl->msg_head)) {
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+ DEV_ERR("%s: CEC message queue is empty\n", __func__);
+ return -EPERM;
+ }
+
+ list_for_each_entry_safe(msg_node, tmp, &cec_ctrl->msg_head, list) {
+ if ((i+1) * sizeof(struct hdmi_cec_msg) > PAGE_SIZE) {
+ DEV_DBG("%s: Overflowing PAGE_SIZE.\n", __func__);
+ break;
+ }
+
+ memcpy(buf + (i * sizeof(struct hdmi_cec_msg)), &msg_node->msg,
+ sizeof(struct hdmi_cec_msg));
+ list_del(&msg_node->list);
+ kfree(msg_node);
+ i++;
+ }
+
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ return i * sizeof(struct hdmi_cec_msg);
+} /* hdmi_rda_cec_msg */
+
+static ssize_t hdmi_wta_cec_msg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc;
+ unsigned long flags;
+ struct hdmi_cec_msg *msg = (struct hdmi_cec_msg *)buf;
+ struct hdmi_cec_ctrl *cec_ctrl =
+ hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+ if (!cec_ctrl) {
+ DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+ return -EPERM;
+ }
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ if (cec_ctrl->compliance_response_enabled) {
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+ DEV_ERR("%s: Write disabled coz compliance response is on.\n",
+ __func__);
+ return -EPERM;
+ }
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ rc = hdmi_cec_msg_send(cec_ctrl, msg);
+ if (rc) {
+ DEV_ERR("%s: hdmi_cec_msg_send failed\n", __func__);
+ return rc;
+ } else {
+ return sizeof(struct hdmi_cec_msg);
+ }
+} /* hdmi_wta_cec_msg */
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, hdmi_rda_cec_enable,
+ hdmi_wta_cec_enable);
+static DEVICE_ATTR(enable_compliance, S_IRUGO | S_IWUSR,
+ hdmi_rda_cec_enable_compliance, hdmi_wta_cec_enable_compliance);
+static DEVICE_ATTR(logical_addr, S_IRUGO | S_IWUSR,
+ hdmi_rda_cec_logical_addr, hdmi_wta_cec_logical_addr);
+static DEVICE_ATTR(rd_msg, S_IRUGO, hdmi_rda_cec_msg, NULL);
+static DEVICE_ATTR(wr_msg, S_IWUSR, NULL, hdmi_wta_cec_msg);
+
+static struct attribute *hdmi_cec_fs_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_enable_compliance.attr,
+ &dev_attr_logical_addr.attr,
+ &dev_attr_rd_msg.attr,
+ &dev_attr_wr_msg.attr,
+ NULL,
+};
+
+static struct attribute_group hdmi_cec_fs_attr_group = {
+ .name = "cec",
+ .attrs = hdmi_cec_fs_attrs,
+};
+
+int hdmi_cec_isr(void *input)
+{
+ int rc = 0;
+ u32 cec_intr, cec_status;
+ unsigned long flags;
+ struct dss_io_data *io = NULL;
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+ if (!cec_ctrl || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return -EPERM;
+ }
+
+ io = cec_ctrl->init_data.io;
+
+ cec_intr = DSS_REG_R_ND(io, HDMI_CEC_INT);
+ DEV_DBG("%s: cec interrupt status is [0x%x]\n", __func__, cec_intr);
+
+ if (!cec_ctrl->cec_enabled) {
+ DEV_ERR("%s: cec is not enabled. Just clear int and return.\n",
+ __func__);
+ DSS_REG_W(io, HDMI_CEC_INT, cec_intr);
+ return 0;
+ }
+
+ cec_status = DSS_REG_R_ND(io, HDMI_CEC_STATUS);
+ DEV_DBG("%s: cec status is [0x%x]\n", __func__, cec_status);
+
+ if ((cec_intr & BIT(0)) && (cec_intr & BIT(1))) {
+ DEV_DBG("%s: CEC_IRQ_FRAME_WR_DONE\n", __func__);
+ DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(0));
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ cec_ctrl->cec_msg_wr_status |= CEC_STATUS_WR_DONE;
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ if (!completion_done(&cec_ctrl->cec_msg_wr_done))
+ complete_all(&cec_ctrl->cec_msg_wr_done);
+ }
+
+ if ((cec_intr & BIT(2)) && (cec_intr & BIT(3))) {
+ DEV_DBG("%s: CEC_IRQ_FRAME_ERROR\n", __func__);
+ DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(2));
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ cec_ctrl->cec_msg_wr_status |= CEC_STATUS_WR_ERROR;
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ if (!completion_done(&cec_ctrl->cec_msg_wr_done))
+ complete_all(&cec_ctrl->cec_msg_wr_done);
+ }
+
+ if ((cec_intr & BIT(6)) && (cec_intr & BIT(7))) {
+ DEV_DBG("%s: CEC_IRQ_FRAME_RD_DONE\n", __func__);
+
+ DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(6));
+ queue_work(cec_ctrl->init_data.workq, &cec_ctrl->cec_read_work);
+ }
+
+ return rc;
+} /* hdmi_cec_isr */
+
+int hdmi_cec_deconfig(void *input)
+{
+ unsigned long flags;
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+ if (!cec_ctrl) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return -EPERM;
+ }
+
+ hdmi_cec_disable(cec_ctrl);
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ cec_ctrl->cec_engine_configed = false;
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ return 0;
+} /* hdmi_cec_deconfig */
+
+int hdmi_cec_config(void *input)
+{
+ unsigned long flags;
+ u32 hdmi_hw_version;
+ struct dss_io_data *io = NULL;
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+ if (!cec_ctrl || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return -EPERM;
+ }
+
+ io = cec_ctrl->init_data.io;
+
+ /* 19.2Mhz * 0.00005 us = 950 = 0x3B6 */
+ DSS_REG_W(io, HDMI_CEC_REFTIMER, (0x3B6 & 0xFFF) | BIT(16));
+
+ hdmi_hw_version = DSS_REG_R(io, HDMI_VERSION);
+ if (hdmi_hw_version == 0x30000001) {
+ DSS_REG_W(io, HDMI_CEC_RD_RANGE, 0x30AB9888);
+ DSS_REG_W(io, HDMI_CEC_WR_RANGE, 0x888AA888);
+
+ DSS_REG_W(io, HDMI_CEC_RD_START_RANGE, 0x88888888);
+ DSS_REG_W(io, HDMI_CEC_RD_TOTAL_RANGE, 0x99);
+ DSS_REG_W(io, HDMI_CEC_COMPL_CTL, 0xF);
+ DSS_REG_W(io, HDMI_CEC_WR_CHECK_CONFIG, 0x4);
+ } else {
+ DEV_INFO("%s: CEC is not supported on %d HDMI HW version.\n",
+ __func__, hdmi_hw_version);
+ return -EPERM;
+ }
+
+ DSS_REG_W(io, HDMI_CEC_RD_FILTER, BIT(0) | (0x7FF << 4));
+ DSS_REG_W(io, HDMI_CEC_TIME, BIT(0) | ((7 * 0x30) << 7));
+
+ if (cec_ctrl->cec_enabled)
+ hdmi_cec_enable(cec_ctrl);
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ cec_ctrl->cec_engine_configed = true;
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ return 0;
+} /* hdmi_cec_config */
+
+void hdmi_cec_deinit(void *input)
+{
+ struct hdmi_cec_msg_node *msg_node, *tmp;
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+ if (cec_ctrl) {
+ list_for_each_entry_safe(msg_node, tmp, &cec_ctrl->msg_head,
+ list) {
+ list_del(&msg_node->list);
+ kfree(msg_node);
+ }
+
+ sysfs_remove_group(cec_ctrl->init_data.sysfs_kobj,
+ &hdmi_cec_fs_attr_group);
+
+ kfree(cec_ctrl);
+ }
+} /* hdmi_cec_deinit */
+
+void *hdmi_cec_init(struct hdmi_cec_init_data *init_data)
+{
+ struct hdmi_cec_ctrl *cec_ctrl = NULL;
+
+ if (!init_data) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ goto error;
+ }
+
+ cec_ctrl = kzalloc(sizeof(*cec_ctrl), GFP_KERNEL);
+ if (!cec_ctrl) {
+ DEV_ERR("%s: FAILED: out of memory\n", __func__);
+ goto error;
+ }
+
+ cec_ctrl->init_data = *init_data;
+
+ if (sysfs_create_group(init_data->sysfs_kobj,
+ &hdmi_cec_fs_attr_group)) {
+ DEV_ERR("%s: cec sysfs group creation failed\n", __func__);
+ goto error;
+ }
+
+ spin_lock_init(&cec_ctrl->lock);
+ INIT_LIST_HEAD(&cec_ctrl->msg_head);
+ INIT_WORK(&cec_ctrl->cec_read_work, hdmi_cec_msg_recv);
+ init_completion(&cec_ctrl->cec_msg_wr_done);
+
+ goto exit;
+
+error:
+ kfree(cec_ctrl);
+ cec_ctrl = NULL;
+exit:
+ return (void *)cec_ctrl;
+} /* hdmi_cec_init */
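
When compliance mode is off, the driver exposes raw frames through the "cec" sysfs group (rd_msg/wr_msg) using the binary struct hdmi_cec_msg layout defined above. Below is a user-space sketch that sends a <Give Device Power Status> query; the sysfs path is passed in by the caller, since the actual location depends on where the HDMI tx driver creates its kobject.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

struct hdmi_cec_msg {                   /* mirrors the kernel-side layout */
        uint8_t sender_id;
        uint8_t recvr_id;
        uint8_t opcode;
        uint8_t operand[15];
        uint8_t frame_size;
        uint8_t retransmit;
};

int cec_send_power_status_query(const char *wr_msg_path)
{
        struct hdmi_cec_msg msg;
        int fd = open(wr_msg_path, O_WRONLY);   /* e.g. <hdmi sysfs dir>/cec/wr_msg */

        if (fd < 0)
                return -1;

        memset(&msg, 0, sizeof(msg));
        msg.sender_id = 0x4;    /* playback device, as used by the driver */
        msg.recvr_id = 0x0;     /* TV */
        msg.opcode = 0x8F;      /* Give Device Power Status */
        msg.frame_size = 2;     /* header block + opcode, no operands */

        if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}
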
diff --git a/drivers/video/msm/mdss/mdss_hdmi_cec.h b/drivers/video/msm/mdss/mdss_hdmi_cec.h
new file mode 100644
index 0000000..a554507
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_cec.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_CEC_H__
+#define __MDSS_HDMI_CEC_H__
+
+#include "mdss_hdmi_util.h"
+
+struct hdmi_cec_init_data {
+ struct workqueue_struct *workq;
+ struct kobject *sysfs_kobj;
+ struct dss_io_data *io;
+};
+
+int hdmi_cec_deconfig(void *cec_ctrl);
+int hdmi_cec_config(void *cec_ctrl);
+int hdmi_cec_isr(void *cec_ctrl);
+void hdmi_cec_deinit(void *cec_ctrl);
+void *hdmi_cec_init(struct hdmi_cec_init_data *init_data);
+#endif /* __MDSS_HDMI_CEC_H__ */
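
The feature is handed around as an opaque pointer, so the intended call order is easy to lose track of. A minimal sketch of the lifecycle, assuming a hypothetical host driver; the real wiring is in the mdss_hdmi_tx.c hunks below.

/* Hypothetical host-side lifecycle for the CEC feature (illustration only). */
static void *cec_data;

static int host_setup(struct hdmi_cec_init_data *init_data)
{
        cec_data = hdmi_cec_init(init_data);    /* once, creates the sysfs group */
        return cec_data ? 0 : -ENODEV;
}

static void host_panel_power_on(void)
{
        hdmi_cec_config(cec_data);              /* program engine after core power-up */
}

static void host_shared_isr(void)
{
        hdmi_cec_isr(cec_data);                 /* dispatch CEC bits of the shared irq */
}

static void host_panel_power_off(void)
{
        hdmi_cec_deconfig(cec_data);            /* quiesce before cutting core power */
}

static void host_teardown(void)
{
        hdmi_cec_deinit(cec_data);              /* frees context and sysfs group */
        cec_data = NULL;
}
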
diff --git a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
index 2e20787..f726e79 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
@@ -618,7 +618,7 @@
/* Wait until READY bit is set in BCAPS */
timeout_count = 50;
- while (!(bcaps && BIT(5)) && timeout_count) {
+ while (!(bcaps & BIT(5)) && timeout_count) {
msleep(100);
timeout_count--;
/* Read BCAPS at offset 0x40 */
@@ -1057,11 +1057,16 @@
io = hdcp_ctrl->init_data.core_io;
- /* Ignore HDCP interrupts if HDCP is disabled */
- if (HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state)
- return 0;
-
hdcp_int_val = DSS_REG_R(io, HDMI_HDCP_INT_CTRL);
+
+ /* Ignore HDCP interrupts if HDCP is disabled */
+ if (HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) {
+ DEV_ERR("%s: HDCP inactive. Just clear int and return.\n",
+ __func__);
+ DSS_REG_W(io, HDMI_HDCP_INT_CTRL, hdcp_int_val);
+ return 0;
+ }
+
if (hdcp_int_val & BIT(0)) {
/* AUTH_SUCCESS_INT */
DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(1)));
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index 94c0da2..1ff8acf 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -26,9 +26,10 @@
#include "mdss_debug.h"
#include "mdss_fb.h"
-#include "mdss_hdmi_tx.h"
+#include "mdss_hdmi_cec.h"
#include "mdss_hdmi_edid.h"
#include "mdss_hdmi_hdcp.h"
+#include "mdss_hdmi_tx.h"
#include "mdss.h"
#include "mdss_panel.h"
#include "mdss_hdmi_mhl.h"
@@ -397,13 +398,133 @@
return ret;
} /* hdmi_tx_sysfs_wta_hpd */
+static ssize_t hdmi_tx_sysfs_wta_vendor_name(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ ssize_t ret;
+ u8 *s = (u8 *) buf;
+ u8 *d = NULL;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ d = hdmi_ctrl->spd_vendor_name;
+ ret = strnlen(buf, PAGE_SIZE);
+ ret = (ret > 8) ? 8 : ret;
+
+ memset(hdmi_ctrl->spd_vendor_name, 0, 8);
+ while (*s) {
+ if (*s & 0x60 && *s ^ 0x7f) {
+ *d = *s;
+ } else {
+ /* stop copying if control character found */
+ break;
+ }
+
+ if (++s > (u8 *) (buf + ret))
+ break;
+
+ d++;
+ }
+
+ DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_vendor_name);
+
+ return ret;
+} /* hdmi_tx_sysfs_wta_vendor_name */
+
+static ssize_t hdmi_tx_sysfs_rda_vendor_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", hdmi_ctrl->spd_vendor_name);
+ DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_vendor_name);
+
+ return ret;
+} /* hdmi_tx_sysfs_rda_vendor_name */
+
+static ssize_t hdmi_tx_sysfs_wta_product_description(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ ssize_t ret;
+ u8 *s = (u8 *) buf;
+ u8 *d = NULL;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ d = hdmi_ctrl->spd_product_description;
+ ret = strnlen(buf, PAGE_SIZE);
+ ret = (ret > 16) ? 16 : ret;
+
+ memset(hdmi_ctrl->spd_product_description, 0, 16);
+ while (*s) {
+ if (*s & 0x60 && *s ^ 0x7f) {
+ *d = *s;
+ } else {
+ /* stop copying if control character found */
+ break;
+ }
+
+ if (++s > (u8 *) (buf + ret))
+ break;
+
+ d++;
+ }
+
+ DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_product_description);
+
+ return ret;
+} /* hdmi_tx_sysfs_wta_product_description */
+
+static ssize_t hdmi_tx_sysfs_rda_product_description(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ hdmi_ctrl->spd_product_description);
+ DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_product_description);
+
+ return ret;
+} /* hdmi_tx_sysfs_rda_product_description */
+
static DEVICE_ATTR(connected, S_IRUGO, hdmi_tx_sysfs_rda_connected, NULL);
static DEVICE_ATTR(hpd, S_IRUGO | S_IWUSR, hdmi_tx_sysfs_rda_hpd,
hdmi_tx_sysfs_wta_hpd);
+static DEVICE_ATTR(vendor_name, S_IRUGO | S_IWUSR,
+ hdmi_tx_sysfs_rda_vendor_name, hdmi_tx_sysfs_wta_vendor_name);
+static DEVICE_ATTR(product_description, S_IRUGO | S_IWUSR,
+ hdmi_tx_sysfs_rda_product_description,
+ hdmi_tx_sysfs_wta_product_description);
static struct attribute *hdmi_tx_fs_attrs[] = {
&dev_attr_connected.attr,
&dev_attr_hpd.attr,
+ &dev_attr_vendor_name.attr,
+ &dev_attr_product_description.attr,
NULL,
};
static struct attribute_group hdmi_tx_fs_attrs_group = {
@@ -524,12 +645,14 @@
{
struct hdmi_edid_init_data edid_init_data;
struct hdmi_hdcp_init_data hdcp_init_data;
+ struct hdmi_cec_init_data cec_init_data;
if (!hdmi_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return -EINVAL;
}
+ /* Initialize EDID feature */
edid_init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
edid_init_data.mutex = &hdmi_ctrl->mutex;
edid_init_data.sysfs_kobj = hdmi_ctrl->kobj;
@@ -568,6 +691,17 @@
DEV_DBG("%s: HDCP feature initialized\n", __func__);
}
+
+ /* Initialize CEC feature */
+ cec_init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ cec_init_data.sysfs_kobj = hdmi_ctrl->kobj;
+ cec_init_data.workq = hdmi_ctrl->workq;
+
+ hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC] =
+ hdmi_cec_init(&cec_init_data);
+ if (!hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC])
+ DEV_WARN("%s: hdmi_cec_init failed\n", __func__);
+
return 0;
} /* hdmi_tx_init_features */
@@ -2049,6 +2183,8 @@
hdmi_ctrl->hpd_off_pending = false;
}
+ hdmi_cec_deconfig(hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC]);
+
mutex_lock(&hdmi_ctrl->mutex);
hdmi_ctrl->panel_power_on = false;
mutex_unlock(&hdmi_ctrl->mutex);
@@ -2133,10 +2269,12 @@
mutex_lock(&hdmi_ctrl->mutex);
hdmi_ctrl->panel_power_on = true;
+ mutex_unlock(&hdmi_ctrl->mutex);
+
+ hdmi_cec_config(hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC]);
if (hdmi_ctrl->hpd_state) {
DEV_DBG("%s: Turning HDMI on\n", __func__);
- mutex_unlock(&hdmi_ctrl->mutex);
rc = hdmi_tx_start(hdmi_ctrl);
if (rc) {
DEV_ERR("%s: hdmi_tx_start failed. rc=%d\n",
@@ -2144,8 +2282,6 @@
hdmi_tx_power_off(panel_data);
return rc;
}
- } else {
- mutex_unlock(&hdmi_ctrl->mutex);
}
dss_reg_dump(io->base, io->len, "HDMI-ON: ", REG_DUMP);
@@ -2162,6 +2298,7 @@
static void hdmi_tx_hpd_off(struct hdmi_tx_ctrl *hdmi_ctrl)
{
int rc = 0;
+ struct dss_io_data *io = NULL;
if (!hdmi_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
@@ -2173,6 +2310,15 @@
return;
}
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_ERR("%s: core io not inititalized\n", __func__);
+ return;
+ }
+
+ /* Turn off HPD interrupts */
+ DSS_REG_W(io, HDMI_HPD_INT_CTRL, 0);
+
mdss_disable_irq(&hdmi_tx_hw);
hdmi_tx_set_mode(hdmi_ctrl, false);
@@ -2225,6 +2371,10 @@
hdmi_ctrl->hpd_initialized = true;
+ DEV_INFO("%s: HDMI HW version = 0x%x\n", __func__,
+ DSS_REG_R_ND(&hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO],
+ HDMI_VERSION));
+
/* set timeout to 4.1ms (max) for hardware debounce */
reg_val = DSS_REG_R(io, HDMI_HPD_CTRL) | 0x1FFF;
@@ -2271,7 +2421,7 @@
struct dss_io_data *io = NULL;
struct hdmi_tx_ctrl *hdmi_ctrl = (struct hdmi_tx_ctrl *)data;
- if (!hdmi_ctrl || !hdmi_ctrl->hpd_initialized) {
+ if (!hdmi_ctrl) {
DEV_WARN("%s: invalid input data, ISR ignored\n", __func__);
return IRQ_HANDLED;
}
@@ -2295,12 +2445,15 @@
queue_work(hdmi_ctrl->workq, &hdmi_ctrl->hpd_int_work);
}
- if (hdmi_ddc_isr(&hdmi_ctrl->ddc_ctrl) < 0)
+ if (hdmi_ddc_isr(&hdmi_ctrl->ddc_ctrl))
DEV_ERR("%s: hdmi_ddc_isr failed\n", __func__);
+ if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC])
+ if (hdmi_cec_isr(hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC]))
+ DEV_ERR("%s: hdmi_cec_isr failed\n", __func__);
+
if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP])
- if (hdmi_hdcp_isr(
- hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]) < 0)
+ if (hdmi_hdcp_isr(hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]))
DEV_ERR("%s: hdmi_hdcp_isr failed\n", __func__);
return IRQ_HANDLED;
@@ -2313,13 +2466,20 @@
return;
}
+ if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC]) {
+ hdmi_cec_deinit(hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC]);
+ hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC] = NULL;
+ }
+
if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]) {
hdmi_hdcp_deinit(hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]);
hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP] = NULL;
}
- if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID])
+ if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID]) {
hdmi_edid_deinit(hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID]);
+ hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID] = NULL;
+ }
switch_dev_unregister(&hdmi_ctrl->audio_sdev);
switch_dev_unregister(&hdmi_ctrl->sdev);
@@ -2584,6 +2744,10 @@
return rc;
}
+ rc = mdss_register_irq(&hdmi_tx_hw);
+ if (rc)
+ DEV_ERR("%s: mdss_register_irq failed.\n", __func__);
+
return rc;
} /* hdmi_tx_register_panel */
@@ -2867,20 +3031,6 @@
}
snprintf(mp->vreg_config[j].vreg_name, 32, "%s", st);
- /* vreg-type */
- memset(prop_name, 0, sizeof(prop_name));
- snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
- "supply-type");
- memset(val_array, 0, sizeof(u32) * dt_vreg_total);
- rc = of_property_read_u32_array(of_node,
- prop_name, val_array, dt_vreg_total);
- if (rc) {
- DEV_ERR("%s: error read '%s' vreg type. rc=%d\n",
- __func__, hdmi_tx_pm_name(module_type), rc);
- goto error;
- }
- mp->vreg_config[j].type = val_array[i];
-
/* vreg-min-voltage */
memset(prop_name, 0, sizeof(prop_name));
snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
@@ -2914,24 +3064,23 @@
/* vreg-op-mode */
memset(prop_name, 0, sizeof(prop_name));
snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
- "op-mode");
+ "peak-current");
memset(val_array, 0, sizeof(u32) * dt_vreg_total);
rc = of_property_read_u32_array(of_node,
prop_name, val_array,
dt_vreg_total);
if (rc) {
- DEV_ERR("%s: error read '%s' min volt. rc=%d\n",
+ DEV_ERR("%s: error read '%s' peak current. rc=%d\n",
__func__, hdmi_tx_pm_name(module_type), rc);
goto error;
}
- mp->vreg_config[j].optimum_voltage = val_array[i];
+ mp->vreg_config[j].peak_current = val_array[i];
- DEV_DBG("%s: %s type=%d, min=%d, max=%d, op=%d\n",
- __func__, mp->vreg_config[j].vreg_name,
- mp->vreg_config[j].type,
+ DEV_DBG("%s: %s min=%d, max=%d, pc=%d\n", __func__,
+ mp->vreg_config[j].vreg_name,
mp->vreg_config[j].min_voltage,
mp->vreg_config[j].max_voltage,
- mp->vreg_config[j].optimum_voltage);
+ mp->vreg_config[j].peak_current);
ndx_mask >>= 1;
j++;
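
The two new nodes let user space program spd_vendor_name (up to 8 printable characters) and spd_product_description (up to 16), which presumably feed the SPD infoframe. A user-space sketch; the sysfs directory is passed in rather than assumed:

#include <stdio.h>

static int hdmi_set_spd(const char *sysfs_dir)
{
        char path[256];
        FILE *f;

        /* vendor name: the driver keeps at most 8 printable characters */
        snprintf(path, sizeof(path), "%s/vendor_name", sysfs_dir);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fputs("Example", f);
        fclose(f);

        /* product description: at most 16 printable characters */
        snprintf(path, sizeof(path), "%s/product_description", sysfs_dir);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fputs("Example Product", f);
        fclose(f);

        return 0;
}
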
diff --git a/drivers/video/msm/mdss/mdss_hdmi_util.h b/drivers/video/msm/mdss/mdss_hdmi_util.h
index cf42346..e99e549 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_util.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_util.h
@@ -201,6 +201,11 @@
#define HDMI_TPG_INITIAL_VALUE (0x00000354)
#define HDMI_TPG_BLK_WHT_PATTERN_FRAMES (0x00000358)
#define HDMI_TPG_RGB_MAPPING (0x0000035C)
+#define HDMI_CEC_COMPL_CTL (0x00000360)
+#define HDMI_CEC_RD_START_RANGE (0x00000364)
+#define HDMI_CEC_RD_TOTAL_RANGE (0x00000368)
+#define HDMI_CEC_RD_ERR_RESP_LO (0x0000036C)
+#define HDMI_CEC_WR_CHECK_CONFIG (0x00000370)
/* HDMI PHY Registers */
#define HDMI_PHY_ANA_CFG0 (0x00000000)
diff --git a/drivers/video/msm/mdss/mdss_io_util.c b/drivers/video/msm/mdss/mdss_io_util.c
index c38eaa4..ff52e4c 100644
--- a/drivers/video/msm/mdss/mdss_io_util.c
+++ b/drivers/video/msm/mdss/mdss_io_util.c
@@ -131,6 +131,7 @@
{
int i = 0, rc = 0;
struct dss_vreg *curr_vreg = NULL;
+ enum dss_vreg_type type;
if (config) {
for (i = 0; i < num_vreg; i++) {
@@ -145,7 +146,9 @@
curr_vreg->vreg = NULL;
goto vreg_get_fail;
}
- if (curr_vreg->type == DSS_REG_LDO) {
+ type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ if (type == DSS_REG_LDO) {
rc = regulator_set_voltage(
curr_vreg->vreg,
curr_vreg->min_voltage,
@@ -157,10 +160,10 @@
curr_vreg->vreg_name);
goto vreg_set_voltage_fail;
}
- if (curr_vreg->optimum_voltage >= 0) {
+ if (curr_vreg->peak_current >= 0) {
rc = regulator_set_optimum_mode(
curr_vreg->vreg,
- curr_vreg->optimum_voltage);
+ curr_vreg->peak_current);
if (rc < 0) {
DEV_ERR(
"%pS->%s: %s set opt m fail\n",
@@ -176,8 +179,11 @@
for (i = num_vreg-1; i >= 0; i--) {
curr_vreg = &in_vreg[i];
if (curr_vreg->vreg) {
- if (curr_vreg->type == DSS_REG_LDO) {
- if (curr_vreg->optimum_voltage >= 0) {
+ type = (regulator_count_voltages(
+ curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ if (type == DSS_REG_LDO) {
+ if (curr_vreg->peak_current >= 0) {
regulator_set_optimum_mode(
curr_vreg->vreg, 0);
}
@@ -192,11 +198,11 @@
return 0;
vreg_unconfig:
-if (curr_vreg->type == DSS_REG_LDO)
+if (type == DSS_REG_LDO)
regulator_set_optimum_mode(curr_vreg->vreg, 0);
vreg_set_opt_mode_fail:
-if (curr_vreg->type == DSS_REG_LDO)
+if (type == DSS_REG_LDO)
regulator_set_voltage(curr_vreg->vreg, 0, curr_vreg->max_voltage);
vreg_set_voltage_fail:
@@ -206,6 +212,8 @@
vreg_get_fail:
for (i--; i >= 0; i--) {
curr_vreg = &in_vreg[i];
+ type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
goto vreg_unconfig;
}
return rc;
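
With the explicit vreg type dropped from the config, whether a supply needs voltage and load programming is now inferred from the regulator itself. A short sketch of that inference; regulator_count_voltages() is the standard regulator-framework helper used in the hunk above.

#include <linux/regulator/consumer.h>

/* LDO-style supplies expose selectable voltages; switches (VS) do not. */
static bool dss_vreg_is_ldo(struct regulator *vreg)
{
        return regulator_count_voltages(vreg) > 0;
}
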
diff --git a/drivers/video/msm/mdss/mdss_io_util.h b/drivers/video/msm/mdss/mdss_io_util.h
index 0ae62a3..23341d6 100644
--- a/drivers/video/msm/mdss/mdss_io_util.h
+++ b/drivers/video/msm/mdss/mdss_io_util.h
@@ -50,10 +50,9 @@
struct dss_vreg {
struct regulator *vreg; /* vreg handle */
char vreg_name[32];
- enum dss_vreg_type type;
int min_voltage;
int max_voltage;
- int optimum_voltage;
+ int peak_current;
};
struct dss_gpio {
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 2745c96..2f09fee 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -184,14 +184,15 @@
static inline int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
{
struct mdss_hw *hw;
+ int rc = -ENODEV;
spin_lock(&mdss_lock);
hw = mdss_irq_handlers[hw_ndx];
- spin_unlock(&mdss_lock);
if (hw)
- return hw->irq_handler(irq, hw->ptr);
+ rc = hw->irq_handler(irq, hw->ptr);
+ spin_unlock(&mdss_lock);
- return -ENODEV;
+ return rc;
}
static irqreturn_t mdss_irq_handler(int irq, void *ptr)
@@ -204,8 +205,11 @@
mdata->irq_buzy = true;
- if (intr & MDSS_INTR_MDP)
+ if (intr & MDSS_INTR_MDP) {
+ spin_lock(&mdp_lock);
mdss_irq_dispatch(MDSS_HW_MDP, irq, ptr);
+ spin_unlock(&mdp_lock);
+ }
if (intr & MDSS_INTR_DSI0)
mdss_irq_dispatch(MDSS_HW_DSI0, irq, ptr);
@@ -224,6 +228,27 @@
return IRQ_HANDLED;
}
+int mdss_register_irq(struct mdss_hw *hw)
+{
+ unsigned long irq_flags;
+ u32 ndx_bit;
+
+ if (!hw || hw->hw_ndx >= MDSS_MAX_HW_BLK)
+ return -EINVAL;
+
+ ndx_bit = BIT(hw->hw_ndx);
+
+ spin_lock_irqsave(&mdss_lock, irq_flags);
+ if (!mdss_irq_handlers[hw->hw_ndx])
+ mdss_irq_handlers[hw->hw_ndx] = hw;
+ else
+ pr_err("panel %d's irq at %p is already registered\n",
+ hw->hw_ndx, hw->irq_handler);
+ spin_unlock_irqrestore(&mdss_lock, irq_flags);
+
+ return 0;
+} /* mdss_register_irq */
+EXPORT_SYMBOL(mdss_register_irq);
void mdss_enable_irq(struct mdss_hw *hw)
{
@@ -233,6 +258,11 @@
if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
return;
+ if (!mdss_irq_handlers[hw->hw_ndx]) {
+ pr_err("failed. First register the irq then enable it.\n");
+ return;
+ }
+
ndx_bit = BIT(hw->hw_ndx);
pr_debug("Enable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
@@ -243,7 +273,6 @@
pr_debug("MDSS HW ndx=%d is already set, mask=%x\n",
hw->hw_ndx, mdss_res->irq_mask);
} else {
- mdss_irq_handlers[hw->hw_ndx] = hw;
mdss_res->irq_mask |= ndx_bit;
if (!mdss_res->irq_ena) {
mdss_res->irq_ena = true;
@@ -273,7 +302,6 @@
hw->hw_ndx, mdss_res->mdp_irq_mask,
mdss_res->mdp_hist_irq_mask);
} else {
- mdss_irq_handlers[hw->hw_ndx] = NULL;
mdss_res->irq_mask &= ~ndx_bit;
if (mdss_res->irq_mask == 0) {
mdss_res->irq_ena = false;
@@ -284,6 +312,7 @@
}
EXPORT_SYMBOL(mdss_disable_irq);
+/* called from interrupt context */
void mdss_disable_irq_nosync(struct mdss_hw *hw)
{
u32 ndx_bit;
@@ -296,20 +325,17 @@
pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
mdss_res->irq_ena, mdss_res->irq_mask);
- spin_lock(&mdss_lock);
if (!(mdss_res->irq_mask & ndx_bit)) {
pr_warn("MDSS HW ndx=%d is NOT set, mask=%x, hist mask=%x\n",
hw->hw_ndx, mdss_res->mdp_irq_mask,
mdss_res->mdp_hist_irq_mask);
} else {
- mdss_irq_handlers[hw->hw_ndx] = NULL;
mdss_res->irq_mask &= ~ndx_bit;
if (mdss_res->irq_mask == 0) {
mdss_res->irq_ena = false;
disable_irq_nosync(mdss_res->irq);
}
}
- spin_unlock(&mdss_lock);
}
EXPORT_SYMBOL(mdss_disable_irq_nosync);
@@ -390,6 +416,21 @@
return 1 << (intr_type + intf_num);
}
+/* function assumes that mdp is clocked to access hw registers */
+void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
+ u32 intr_type, u32 intf_num)
+{
+ unsigned long irq_flags;
+ u32 irq;
+
+ irq = mdss_mdp_irq_mask(intr_type, intf_num);
+
+ pr_debug("clearing mdp irq mask=%x\n", irq);
+ spin_lock_irqsave(&mdp_lock, irq_flags);
+ writel_relaxed(irq, mdata->mdp_base + MDSS_MDP_REG_INTR_CLEAR);
+ spin_unlock_irqrestore(&mdp_lock, irq_flags);
+}
+
int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
{
u32 irq;
@@ -482,13 +523,13 @@
spin_unlock_irqrestore(&mdp_lock, irq_flags);
}
+/* called from interrupt context */
void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num)
{
u32 irq;
irq = mdss_mdp_irq_mask(intr_type, intf_num);
- spin_lock(&mdp_lock);
if (!(mdss_res->mdp_irq_mask & irq)) {
pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
irq, mdss_res->mdp_irq_mask);
@@ -500,7 +541,6 @@
(mdss_res->mdp_hist_irq_mask == 0))
mdss_disable_irq_nosync(&mdss_mdp_hw);
}
- spin_unlock(&mdp_lock);
}
static inline struct clk *mdss_mdp_get_clk(u32 clk_idx)
@@ -581,65 +621,43 @@
return clk_rate;
}
-static void mdss_mdp_clk_ctrl_update(struct mdss_data_type *mdata)
-{
- int enable;
-
- mutex_lock(&mdp_clk_lock);
- enable = atomic_read(&mdata->clk_ref) > 0;
- if (mdata->clk_ena == enable) {
- mutex_unlock(&mdp_clk_lock);
- return;
- }
- mdata->clk_ena = enable;
-
- pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));
- mb();
-
- mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
- mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
-
- mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
- mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
- if (mdata->vsync_ena)
- mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
-
- mutex_unlock(&mdp_clk_lock);
-}
-
-static void mdss_mdp_clk_ctrl_workqueue_handler(struct work_struct *work)
-{
- struct mdss_data_type *mdata;
-
- mdata = container_of(work, struct mdss_data_type, clk_ctrl_worker);
- mdss_mdp_clk_ctrl_update(mdata);
-}
-
void mdss_mdp_clk_ctrl(int enable, int isr)
{
struct mdss_data_type *mdata = mdss_res;
+ static int mdp_clk_cnt;
+ int changed = 0;
- pr_debug("clk enable=%d isr=%d ref= %d\n", enable, isr,
- atomic_read(&mdata->clk_ref));
-
- if (enable == MDP_BLOCK_POWER_ON) {
- BUG_ON(isr);
-
- if (atomic_inc_return(&mdata->clk_ref) == 1)
- mdss_mdp_clk_ctrl_update(mdata);
+ mutex_lock(&mdp_clk_lock);
+ if (enable) {
+ if (mdp_clk_cnt == 0)
+ changed++;
+ mdp_clk_cnt++;
} else {
- BUG_ON(atomic_read(&mdata->clk_ref) == 0);
-
- if (atomic_dec_and_test(&mdata->clk_ref)) {
- if (isr)
- queue_work(mdata->clk_ctrl_wq,
- &mdata->clk_ctrl_worker);
- else
- mdss_mdp_clk_ctrl_update(mdata);
- }
+ mdp_clk_cnt--;
+ if (mdp_clk_cnt == 0)
+ changed++;
}
+ pr_debug("%s: clk_cnt=%d changed=%d enable=%d\n",
+ __func__, mdp_clk_cnt, changed, enable);
+ if (changed) {
+ mdata->clk_ena = enable;
+ if (enable)
+ pm_runtime_get_sync(&mdata->pdev->dev);
+
+ mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
+ mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
+ if (mdata->vsync_ena)
+ mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
+
+ if (!enable)
+ pm_runtime_put(&mdata->pdev->dev);
+ }
+
+ mutex_unlock(&mdp_clk_lock);
}
static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
@@ -831,6 +849,7 @@
{
int i, j;
char *offset;
+ struct mdss_mdp_pipe *vig;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
mdata->mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
@@ -852,7 +871,16 @@
writel_relaxed(j, offset);
/* swap */
- writel_relaxed(i, offset + 4);
+ writel_relaxed(1, offset + 4);
+ }
+ vig = mdata->vig_pipes;
+ for (i = 0; i < mdata->nvig_pipes; i++) {
+ offset = vig[i].base +
+ MDSS_MDP_REG_VIG_HIST_LUT_BASE;
+ for (j = 0; j < ENHIST_LUT_ENTRIES; j++)
+ writel_relaxed(j, offset);
+ /* swap */
+ writel_relaxed(1, offset + 16);
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
pr_debug("MDP hw init done\n");
@@ -878,9 +906,6 @@
if (rc)
return rc;
- mdata->clk_ctrl_wq = create_singlethread_workqueue("mdp_clk_wq");
- INIT_WORK(&mdata->clk_ctrl_worker, mdss_mdp_clk_ctrl_workqueue_handler);
-
mdata->iclient = msm_ion_client_create(-1, mdata->pdev->name);
if (IS_ERR_OR_NULL(mdata->iclient)) {
pr_err("msm_ion_client_create() return error (%p)\n",
@@ -1019,6 +1044,10 @@
if (rc)
pr_err("unable to register mdp instance\n");
+ rc = mdss_register_irq(&mdss_mdp_hw);
+ if (rc)
+ pr_err("mdss_register_irq failed.\n");
+
probe_done:
if (IS_ERR_VALUE(rc)) {
mdss_res = NULL;
@@ -1447,6 +1476,10 @@
&data);
mdata->rot_block_size = (!rc ? data : 128);
+ mdata->has_bwc = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mdss-has-bwc");
+ mdata->has_decimation = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mdss-has-decimation");
return 0;
}
@@ -1508,8 +1541,6 @@
static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
{
- flush_workqueue(mdata->clk_ctrl_wq);
-
mdata->suspend_fs_ena = mdata->fs_ena;
mdss_mdp_footswitch_ctrl(mdata, false);
@@ -1607,8 +1638,6 @@
dev_dbg(dev, "pm_runtime: idling...\n");
- flush_workqueue(mdata->clk_ctrl_wq);
-
return 0;
}
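
The workqueue-based clock handling is replaced by a plain reference count held under mdp_clk_lock, with pm_runtime get/put tied to the first enable and last disable. Callers therefore only need to keep their on/off calls balanced; a typical (hypothetical) register-access sequence, using only calls that appear in this patch:

static u32 my_read_hw_version(void)
{
        u32 ver;

        mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);   /* clocks + runtime PM ref */
        ver = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
        mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);  /* drop the reference */

        return ver;
}
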
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index df5e5d3..6018e6f 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -38,6 +38,7 @@
#define MAX_PLANES 4
#define MAX_DOWNSCALE_RATIO 4
#define MAX_UPSCALE_RATIO 20
+#define MAX_DECIMATION 4
#define C3_ALPHA 3 /* alpha */
#define C2_R_Cr 2 /* R/Cr */
@@ -121,12 +122,15 @@
u32 opmode;
u32 flush_bits;
+ bool is_video_mode;
u32 play_cnt;
+ u32 vsync_cnt;
u32 underrun_cnt;
u16 width;
u16 height;
u32 dst_format;
+ bool is_secure;
u32 bus_ab_quota;
u32 bus_ib_quota;
@@ -147,6 +151,7 @@
int (*display_fnc) (struct mdss_mdp_ctl *ctl, void *arg);
int (*wait_fnc) (struct mdss_mdp_ctl *ctl, void *arg);
int (*set_vsync_handler) (struct mdss_mdp_ctl *, mdp_vsync_handler_t);
+ u32 (*read_line_cnt_fnc) (struct mdss_mdp_ctl *);
void *priv_data;
};
@@ -217,6 +222,21 @@
struct mdss_mdp_img_data p[MAX_PLANES];
};
+struct pp_hist_col_info {
+ u32 col_state;
+ u32 col_en;
+ u32 read_request;
+ u32 hist_cnt_read;
+ u32 hist_cnt_sent;
+ u32 hist_cnt_time;
+ u32 frame_cnt;
+ u32 is_kick_ready;
+ struct completion comp;
+ u32 data[HIST_V_SIZE];
+ struct mutex hist_mutex;
+ spinlock_t hist_lock;
+};
+
struct pp_sts_type {
u32 pa_sts;
u32 pcc_sts;
@@ -233,6 +253,8 @@
struct mdss_pipe_pp_res {
u32 igc_c0_c1[IGC_LUT_ENTRIES];
u32 igc_c2[IGC_LUT_ENTRIES];
+ u32 hist_lut[ENHIST_LUT_ENTRIES];
+ struct pp_hist_col_info hist;
struct pp_sts_type pp_sts;
};
@@ -250,6 +272,8 @@
u16 img_width;
u16 img_height;
+ u8 horz_deci;
+ u8 vert_deci;
struct mdss_mdp_img_rect src;
struct mdss_mdp_img_rect dst;
@@ -269,6 +293,7 @@
u32 params_changed;
unsigned long smp[MAX_PLANES];
+ unsigned long smp_reserved[MAX_PLANES];
struct mdss_mdp_data back_buf;
struct mdss_mdp_data front_buf;
@@ -294,6 +319,7 @@
int borderfill_enable;
int overlay_play_enable;
int hw_refresh;
+ void *cpu_pm_hdl;
struct mdss_data_type *mdata;
struct mutex ov_lock;
@@ -302,6 +328,7 @@
struct list_head overlay_list;
struct list_head pipes_used;
struct list_head pipes_cleanup;
+ bool mixer_swap;
};
#define is_vig_pipe(_pipe_id_) ((_pipe_id_) <= MDSS_MDP_SSPP_VIG2)
@@ -330,6 +357,8 @@
irqreturn_t mdss_mdp_isr(int irq, void *ptr);
int mdss_iommu_attach(struct mdss_data_type *mdata);
int mdss_mdp_copy_splash_screen(struct mdss_panel_data *pdata);
+void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
+ u32 intr_type, u32 intf_num);
int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num);
void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num);
int mdss_mdp_hist_irq_enable(u32 irq);
@@ -374,6 +403,8 @@
int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe);
int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg);
int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
+ ktime_t *wakeup_time);
int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 tbl_idx, u32 csc_type);
int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, u32 tbl_idx,
@@ -419,17 +450,21 @@
struct mdp_histogram_start_req *req);
int mdss_mdp_histogram_stop(struct mdss_mdp_ctl *ctl, u32 block);
int mdss_mdp_hist_collect(struct mdss_mdp_ctl *ctl,
- struct mdp_histogram_data *hist,
- u32 *hist_data_addr);
+ struct mdp_histogram_data *hist);
void mdss_mdp_hist_intr_done(u32 isr);
struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
u32 type);
struct mdss_mdp_pipe *mdss_mdp_pipe_get(struct mdss_data_type *mdata, u32 ndx);
+struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
+ u32 ndx);
int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe);
void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe);
struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_dma(struct mdss_mdp_mixer *mixer);
+int mdss_mdp_smp_reserve(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_smp_unreserve(struct mdss_mdp_pipe *pipe);
+
int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata, u32 *offsets,
u32 *ftch_y_id, u32 type, u32 num_base, u32 len);
int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata, u32 *mixer_offsets,
@@ -447,6 +482,8 @@
struct mdss_mdp_plane_sizes *ps, u32 bwc_mode);
int mdss_mdp_get_rau_strides(u32 w, u32 h, struct mdss_mdp_format_params *fmt,
struct mdss_mdp_plane_sizes *ps);
+void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
+ struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt);
struct mdss_mdp_format_params *mdss_mdp_get_format_params(u32 format);
int mdss_mdp_put_img(struct mdss_mdp_img_data *data);
int mdss_mdp_get_img(struct msmfb_data *img, struct mdss_mdp_img_data *data);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 1ced200..6c9cce2 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -47,6 +47,15 @@
writel_relaxed(val, mixer->base + reg);
}
+static inline u32 mdss_mdp_get_pclk_rate(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+
+ return (ctl->intf_type == MDSS_INTF_DSI) ?
+ pinfo->mipi.dsi_pclk_rate :
+ pinfo->clk_rate;
+}
+
static int mdss_mdp_ctl_perf_commit(struct mdss_data_type *mdata, u32 flags)
{
struct mdss_mdp_ctl *ctl;
@@ -202,13 +211,7 @@
max_clk_rate = clk_rate;
if (ctl->intf_type) {
- struct mdss_panel_info *pinfo;
-
- pinfo = &ctl->panel_data->panel_info;
- clk_rate = (ctl->intf_type == MDSS_INTF_DSI) ?
- pinfo->mipi.dsi_pclk_rate :
- pinfo->clk_rate;
-
+ clk_rate = mdss_mdp_get_pclk_rate(ctl);
/* minimum clock rate due to inefficiency in 3dmux */
clk_rate = mult_frac(clk_rate >> 1, 9, 8);
if (clk_rate > max_clk_rate)
@@ -279,8 +282,6 @@
return -EINVAL;
}
- mutex_lock(&mdss_mdp_ctl_lock);
- ctl->ref_cnt--;
if (ctl->mixer_left) {
mdss_mdp_mixer_free(ctl->mixer_left);
ctl->mixer_left = NULL;
@@ -289,11 +290,18 @@
mdss_mdp_mixer_free(ctl->mixer_right);
ctl->mixer_right = NULL;
}
+ mutex_lock(&mdss_mdp_ctl_lock);
+ ctl->ref_cnt--;
+ ctl->intf_num = MDSS_MDP_NO_INTF;
+ ctl->is_secure = false;
ctl->power_on = false;
ctl->start_fnc = NULL;
ctl->stop_fnc = NULL;
ctl->prepare_fnc = NULL;
ctl->display_fnc = NULL;
+ ctl->wait_fnc = NULL;
+ ctl->set_vsync_handler = NULL;
+ ctl->read_line_cnt_fnc = NULL;
mutex_unlock(&mdss_mdp_ctl_lock);
return 0;
@@ -435,7 +443,6 @@
if (ctl->stop_fnc)
ctl->stop_fnc(ctl);
- mdss_mdp_mixer_free(mixer);
mdss_mdp_ctl_free(ctl);
mdss_mdp_ctl_perf_commit(ctl->mdata, MDSS_MDP_PERF_UPDATE_ALL);
@@ -645,15 +652,18 @@
}
ctl->mfd = mfd;
ctl->panel_data = pdata;
+ ctl->is_video_mode = false;
switch (pdata->panel_info.type) {
case EDP_PANEL:
+ ctl->is_video_mode = true;
ctl->intf_num = MDSS_MDP_INTF0;
ctl->intf_type = MDSS_INTF_EDP;
ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
ctl->start_fnc = mdss_mdp_video_start;
break;
case MIPI_VIDEO_PANEL:
+ ctl->is_video_mode = true;
if (pdata->panel_info.pdest == DISPLAY_1)
ctl->intf_num = MDSS_MDP_INTF1;
else
@@ -672,6 +682,7 @@
ctl->start_fnc = mdss_mdp_cmd_start;
break;
case DTV_PANEL:
+ ctl->is_video_mode = true;
ctl->intf_num = MDSS_MDP_INTF3;
ctl->intf_type = MDSS_INTF_HDMI;
ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
@@ -986,6 +997,19 @@
if (ret) {
pr_warn("error powering off intf ctl=%d\n", ctl->num);
} else {
+ mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, 0);
+ if (sctl)
+ mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, 0);
+
+ if (ctl->mixer_left) {
+ mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_LAYER(
+ ctl->mixer_left->num), 0);
+ }
+ if (ctl->mixer_right) {
+ mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_LAYER(
+ ctl->mixer_right->num), 0);
+ }
+
ctl->power_on = false;
ctl->play_cnt = 0;
ctl->clk_rate = 0;
@@ -1070,14 +1094,6 @@
stage);
}
- if (mixercfg == MDSS_MDP_LM_BORDER_COLOR &&
- pipe->src_fmt->alpha_enable &&
- pipe->dst.w == mixer->width &&
- pipe->dst.h == mixer->height) {
- pr_debug("setting pipe=%d as BG_PIPE\n", pipe->num);
- bgalpha = 1;
- }
-
mixercfg |= stage << (3 * pipe->num);
mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
@@ -1182,16 +1198,19 @@
struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux)
{
struct mdss_mdp_mixer *mixer = NULL;
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(ctl->mfd);
if (!ctl)
return NULL;
switch (mux) {
case MDSS_MDP_MIXER_MUX_DEFAULT:
case MDSS_MDP_MIXER_MUX_LEFT:
- mixer = ctl->mixer_left;
+ mixer = mdp5_data->mixer_swap ?
+ ctl->mixer_right : ctl->mixer_left;
break;
case MDSS_MDP_MIXER_MUX_RIGHT:
- mixer = ctl->mixer_right;
+ mixer = mdp5_data->mixer_swap ?
+ ctl->mixer_left : ctl->mixer_right;
break;
}
@@ -1221,6 +1240,7 @@
{
struct mdss_mdp_ctl *ctl;
struct mdss_mdp_mixer *mixer;
+ int i;
if (!pipe)
return -EINVAL;
@@ -1244,7 +1264,12 @@
if (params_changed) {
mixer->params_changed++;
- mixer->stage_pipe[pipe->mixer_stage] = pipe;
+ for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
+ if (i == pipe->mixer_stage)
+ mixer->stage_pipe[i] = pipe;
+ else if (mixer->stage_pipe[i] == pipe)
+ mixer->stage_pipe[i] = NULL;
+ }
}
if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA)
@@ -1277,9 +1302,10 @@
if (mutex_lock_interruptible(&ctl->lock))
return -EINTR;
- mixer->params_changed++;
- mixer->stage_pipe[pipe->mixer_stage] = NULL;
-
+ if (pipe == mixer->stage_pipe[pipe->mixer_stage]) {
+ mixer->params_changed++;
+ mixer->stage_pipe[pipe->mixer_stage] = NULL;
+ }
mutex_unlock(&ctl->lock);
return 0;
@@ -1296,6 +1322,71 @@
return 0;
}
+int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
+ ktime_t *wakeup_time)
+{
+ struct mdss_panel_info *pinfo;
+ u32 clk_rate, clk_period;
+ u32 current_line, total_line;
+ u32 time_of_line, time_to_vsync;
+ ktime_t current_time = ktime_get();
+
+ if (!ctl->read_line_cnt_fnc)
+ return -ENOSYS;
+
+ pinfo = &ctl->panel_data->panel_info;
+ if (!pinfo)
+ return -ENODEV;
+
+ clk_rate = mdss_mdp_get_pclk_rate(ctl);
+
+ clk_rate /= 1000; /* in kHz */
+ if (!clk_rate)
+ return -EINVAL;
+
+ /*
+ * calculate clk_period in picoseconds to maintain good
+ * accuracy with high pclk rates; the resulting value fits
+ * in a 17-bit range.
+ */
+ clk_period = 1000000000 / clk_rate;
+ if (!clk_period)
+ return -EINVAL;
+
+ time_of_line = (pinfo->lcdc.h_back_porch +
+ pinfo->lcdc.h_front_porch +
+ pinfo->lcdc.h_pulse_width +
+ pinfo->xres) * clk_period;
+
+ time_of_line /= 1000; /* in nano second */
+ if (!time_of_line)
+ return -EINVAL;
+
+ current_line = ctl->read_line_cnt_fnc(ctl);
+
+ total_line = pinfo->lcdc.v_back_porch +
+ pinfo->lcdc.v_front_porch +
+ pinfo->lcdc.v_pulse_width +
+ pinfo->yres;
+
+ if (current_line > total_line)
+ return -EINVAL;
+
+ time_to_vsync = time_of_line * (total_line - current_line);
+ if (!time_to_vsync)
+ return -EINVAL;
+
+ *wakeup_time = ktime_add_ns(current_time, time_to_vsync);
+
+ pr_debug("clk_rate=%dkHz clk_period=%d cur_line=%d tot_line=%d\n",
+ clk_rate, clk_period, current_line, total_line);
+ pr_debug("time_to_vsync=%d current_time=%d wakeup_time=%d\n",
+ time_to_vsync, (int)ktime_to_ms(current_time),
+ (int)ktime_to_ms(*wakeup_time));
+
+ return 0;
+}
+
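
For reference, a standalone sketch of the arithmetic mdss_mdp_display_wakeup_time() performs, using made-up panel timings (the pclk, porch widths and sampled line count below are illustrative assumptions, not values from this patch):

    #include <stdio.h>

    int main(void)
    {
        /* assumed example: 74.25 MHz pclk, 1080x1920 panel, sampled at line 500 */
        unsigned int clk_rate = 74250000 / 1000;          /* pclk in kHz */
        unsigned int clk_period = 1000000000 / clk_rate;  /* pixel period in ps */
        unsigned int h_total = 32 + 100 + 16 + 1080;      /* hbp + hfp + hpw + xres */
        unsigned int v_total = 7 + 8 + 1 + 1920;          /* vbp + vfp + vpw + yres */
        unsigned int current_line = 500;                  /* from INTF_LINE_COUNT */

        unsigned int time_of_line = h_total * clk_period / 1000;  /* ns per line */
        unsigned long long time_to_vsync =
            (unsigned long long)time_of_line * (v_total - current_line); /* ns */

        printf("line time %u ns, wake up in %llu us\n",
               time_of_line, time_to_vsync / 1000);
        return 0;
    }

With these numbers the next vsync is roughly 24 ms away, which is the delta the new event-timer hook adds to the current time.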
int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl)
{
int ret;
diff --git a/drivers/video/msm/mdss/mdss_mdp_formats.h b/drivers/video/msm/mdss/mdss_mdp_formats.h
index c6d5fb9..acb8dc2 100644
--- a/drivers/video/msm/mdss/mdss_mdp_formats.h
+++ b/drivers/video/msm/mdss/mdss_mdp_formats.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -108,6 +108,8 @@
FMT_YUV_COMMON(fmt), \
.fetch_planes = MDSS_MDP_PLANE_PLANAR, \
.chroma_sample = samp, \
+ .bpp = 1, \
+ .unpack_count = 1, \
.element = { (e0), (e1) } \
}
@@ -134,9 +136,9 @@
FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V2_VENUS, MDSS_MDP_CHROMA_420,
C1_B_Cb, C2_R_Cr),
- FMT_YUV_PLANR(MDP_Y_CR_CB_H2V2, MDSS_MDP_CHROMA_420, C2_R_Cr, C1_B_Cb),
- FMT_YUV_PLANR(MDP_Y_CB_CR_H2V2, MDSS_MDP_CHROMA_420, C1_B_Cb, C2_R_Cr),
- FMT_YUV_PLANR(MDP_Y_CR_CB_GH2V2, MDSS_MDP_CHROMA_420, C2_R_Cr, C1_B_Cb),
+ FMT_YUV_PLANR(MDP_Y_CB_CR_H2V2, MDSS_MDP_CHROMA_420, C2_R_Cr, C1_B_Cb),
+ FMT_YUV_PLANR(MDP_Y_CR_CB_H2V2, MDSS_MDP_CHROMA_420, C1_B_Cb, C2_R_Cr),
+ FMT_YUV_PLANR(MDP_Y_CR_CB_GH2V2, MDSS_MDP_CHROMA_420, C1_B_Cb, C2_R_Cr),
{
FMT_YUV_COMMON(MDP_YCBCR_H1V1),
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index d50f47e..a59560e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -190,8 +190,7 @@
#define MDSS_MDP_REG_SSPP_CURRENT_SRC1_ADDR 0x0A8
#define MDSS_MDP_REG_SSPP_CURRENT_SRC2_ADDR 0x0AC
#define MDSS_MDP_REG_SSPP_CURRENT_SRC3_ADDR 0x0B0
-#define MDSS_MDP_REG_SSPP_LINE_SKIP_STEP_C03 0x0B4
-#define MDSS_MDP_REG_SSPP_LINE_SKIP_STEP_C12 0x0B8
+#define MDSS_MDP_REG_SSPP_DECIMATION_CONFIG 0x0B4
#define MDSS_MDP_REG_VIG_OP_MODE 0x200
#define MDSS_MDP_REG_VIG_QSEED2_CONFIG 0x204
@@ -349,6 +348,8 @@
#define MDSS_MDP_REG_WB_OUT_SIZE 0x074
#define MDSS_MDP_REG_WB_ALPHA_X_VALUE 0x078
#define MDSS_MDP_REG_WB_CSC_BASE 0x260
+#define MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS 0x2B0
+
enum mdss_mdp_dspp_index {
MDSS_MDP_DSPP0,
@@ -402,6 +403,9 @@
#define MDSS_MDP_REG_INTF_TEST_CTL 0x054
#define MDSS_MDP_REG_INTF_TP_COLOR0 0x058
#define MDSS_MDP_REG_INTF_TP_COLOR1 0x05C
+#define MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define MDSS_MDP_REG_INTF_FRAME_COUNT 0x0AC
+#define MDSS_MDP_REG_INTF_LINE_COUNT 0x0B0
#define MDSS_MDP_REG_INTF_DEFLICKER_CONFIG 0x0F0
#define MDSS_MDP_REG_INTF_DEFLICKER_STRNG_COEFF 0x0F4
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index d6b0fb2..e0be862 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -15,30 +15,50 @@
#include "mdss_panel.h"
#include "mdss_mdp.h"
+#define VSYNC_EXPIRE_TICK 4
+
#define START_THRESHOLD 4
#define CONTINUE_TRESHOLD 4
#define MAX_SESSIONS 2
+/* wait for at most 2 vsyncs at the lowest refresh rate (24 Hz): 2 * 1000/24 ms ~= 84 ms */
+#define KOFF_TIMEOUT msecs_to_jiffies(84)
+
struct mdss_mdp_cmd_ctx {
u32 pp_num;
u8 ref_cnt;
-
struct completion pp_comp;
+ struct completion stop_comp;
atomic_t vsync_ref;
- spinlock_t vsync_lock;
- mdp_vsync_handler_t vsync_handler;
+ mdp_vsync_handler_t send_vsync;
int panel_on;
+ int koff_cnt;
+ int clk_enabled;
+ int clk_control;
+ int vsync_enabled;
+ int expire;
+ struct mutex clk_mtx;
+ spinlock_t clk_lock;
+ struct work_struct clk_work;
/* te config */
u8 tear_check;
- u16 total_lcd_lines;
- u16 v_porch; /* vertical porches */
- u32 vsync_cnt;
+ u16 height; /* panel height */
+ u16 vporch; /* vertical porches */
+ u32 vclk_line; /* vsync clock per line */
};
struct mdss_mdp_cmd_ctx mdss_mdp_cmd_ctx_list[MAX_SESSIONS];
+/*
+ * TE configuration:
+ * dsi byte clock is calculated based on 70 fps
+ * around 14 ms to complete a kickoff cycle if TE is disabled
+ * vclk_line is based on 60 fps
+ * write is faster than read
+ * init == start == rdptr
+ */
static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_mixer *mixer,
struct mdss_mdp_cmd_ctx *ctx, int enable)
{
@@ -47,15 +67,21 @@
cfg = BIT(19); /* VSYNC_COUNTER_EN */
if (ctx->tear_check)
cfg |= BIT(20); /* VSYNC_IN_EN */
- cfg |= ctx->vsync_cnt;
+ cfg |= ctx->vclk_line;
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT,
0xfff0); /* set to verh height */
- mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_VSYNC_INIT_VAL, 0);
- mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_RD_PTR_IRQ, 0);
- mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_START_POS, ctx->v_porch);
+ mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_VSYNC_INIT_VAL,
+ ctx->height);
+
+ mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_RD_PTR_IRQ,
+ ctx->height + 1);
+
+ mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_START_POS,
+ ctx->height);
+
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_SYNC_THRESH,
(CONTINUE_TRESHOLD << 16) | (START_THRESHOLD));
@@ -73,7 +99,6 @@
if (pinfo->mipi.vsync_enable && enable) {
u32 mdp_vsync_clk_speed_hz, total_lines;
- u32 vsync_cnt_cfg_dem;
mdss_mdp_vsync_clk_enable(1);
@@ -88,21 +113,18 @@
}
ctx->tear_check = pinfo->mipi.hw_vsync_mode;
-
- total_lines = pinfo->lcdc.v_back_porch +
- pinfo->lcdc.v_front_porch +
- pinfo->lcdc.v_pulse_width + pinfo->yres;
-
- vsync_cnt_cfg_dem =
- mult_frac(pinfo->mipi.frame_rate * total_lines,
- 1, 100);
-
- ctx->vsync_cnt = mdp_vsync_clk_speed_hz / vsync_cnt_cfg_dem;
-
- ctx->v_porch = pinfo->lcdc.v_back_porch +
+ ctx->height = pinfo->yres;
+ ctx->vporch = pinfo->lcdc.v_back_porch +
pinfo->lcdc.v_front_porch +
pinfo->lcdc.v_pulse_width;
- ctx->total_lcd_lines = total_lines;
+
+ total_lines = ctx->height + ctx->vporch;
+ total_lines *= pinfo->mipi.frame_rate;
+ ctx->vclk_line = mdp_vsync_clk_speed_hz / total_lines;
+
+ pr_debug("%s: fr=%d tline=%d vcnt=%d vrate=%d\n",
+ __func__, pinfo->mipi.frame_rate, total_lines,
+ ctx->vclk_line, mdp_vsync_clk_speed_hz);
} else {
enable = 0;
}
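
A minimal sketch of the new vclk_line calculation above, assuming a 19.2 MHz MDP vsync clock and illustrative panel numbers (both values are assumptions for this example, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned int vsync_clk_hz = 19200000;   /* assumed MDP vsync clock */
        unsigned int yres = 1920, vporch = 16;  /* illustrative panel timing */
        unsigned int frame_rate = 60;

        unsigned int total_lines = (yres + vporch) * frame_rate;
        unsigned int vclk_line = vsync_clk_hz / total_lines;

        /* vclk_line is the vsync-counter tick count per display line that is
         * programmed into PP_SYNC_CONFIG_VSYNC together with VSYNC_COUNTER_EN.
         */
        printf("ticks per line = %u\n", vclk_line);
        return 0;
    }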
@@ -118,54 +140,6 @@
return 0;
}
-static inline void cmd_readptr_irq_enable(struct mdss_mdp_ctl *ctl)
-{
- struct mdss_mdp_cmd_ctx *ctx = ctl->priv_data;
-
- if (atomic_inc_return(&ctx->vsync_ref) == 1) {
- pr_debug("%s:\n", __func__);
- mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num);
- }
-}
-
-static inline void cmd_readptr_irq_disable(struct mdss_mdp_ctl *ctl)
-{
- struct mdss_mdp_cmd_ctx *ctx = ctl->priv_data;
-
- if (atomic_dec_return(&ctx->vsync_ref) == 0) {
- pr_debug("%s:\n", __func__);
- mdss_mdp_irq_disable(MDSS_MDP_IRQ_PING_PONG_RD_PTR,
- ctx->pp_num);
- }
-}
-
-int mdss_mdp_cmd_set_vsync_handler(struct mdss_mdp_ctl *ctl,
- mdp_vsync_handler_t vsync_handler)
-{
- struct mdss_mdp_cmd_ctx *ctx;
- unsigned long flags;
-
- ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
- if (!ctx) {
- pr_err("invalid ctx for ctl=%d\n", ctl->num);
- return -ENODEV;
- }
-
- spin_lock_irqsave(&ctx->vsync_lock, flags);
-
- if (!ctx->vsync_handler && vsync_handler) {
- ctx->vsync_handler = vsync_handler;
- cmd_readptr_irq_enable(ctl);
- } else if (ctx->vsync_handler && !vsync_handler) {
- cmd_readptr_irq_disable(ctl);
- ctx->vsync_handler = vsync_handler;
- }
-
- spin_unlock_irqrestore(&ctx->vsync_lock, flags);
-
- return 0;
-}
-
static void mdss_mdp_cmd_readptr_done(void *arg)
{
struct mdss_mdp_ctl *ctl = arg;
@@ -177,14 +151,29 @@
return;
}
- pr_debug("%s: ctl=%d intf_num=%d\n", __func__, ctl->num, ctl->intf_num);
+ pr_debug("%s: num=%d ctx=%d expire=%d koff=%d\n", __func__, ctl->num,
+ ctx->pp_num, ctx->expire, ctx->koff_cnt);
vsync_time = ktime_get();
+ ctl->vsync_cnt++;
- spin_lock(&ctx->vsync_lock);
- if (ctx->vsync_handler)
- ctx->vsync_handler(ctl, vsync_time);
- spin_unlock(&ctx->vsync_lock);
+ spin_lock(&ctx->clk_lock);
+ if (ctx->send_vsync)
+ ctx->send_vsync(ctl, vsync_time);
+
+ if (ctx->expire) {
+ ctx->expire--;
+ if (ctx->expire == 0) {
+ if (ctx->koff_cnt <= 0) {
+ ctx->clk_control = 1;
+ schedule_work(&ctx->clk_work);
+ } else {
+ /* defer clock gating by one more vsync */
+ ctx->expire += 1;
+ }
+ }
+ }
+ spin_unlock(&ctx->clk_lock);
}
static void mdss_mdp_cmd_pingpong_done(void *arg)
@@ -192,12 +181,161 @@
struct mdss_mdp_ctl *ctl = arg;
struct mdss_mdp_cmd_ctx *ctx = ctl->priv_data;
- pr_debug("%s: intf_num=%d ctx=%p\n", __func__, ctl->intf_num, ctx);
+ if (!ctx) {
+ pr_err("%s: invalid ctx\n", __func__);
+ return;
+ }
+ spin_lock(&ctx->clk_lock);
mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num);
- if (ctx)
- complete(&ctx->pp_comp);
+ complete_all(&ctx->pp_comp);
+
+ if (ctx->koff_cnt)
+ ctx->koff_cnt--;
+
+ pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d kcnt=%d\n", __func__,
+ ctl->num, ctl->intf_num, ctx->pp_num, ctx->koff_cnt);
+
+ spin_unlock(&ctx->clk_lock);
+}
+
+static void clk_ctrl_work(struct work_struct *work)
+{
+ unsigned long flags;
+ struct mdss_mdp_cmd_ctx *ctx =
+ container_of(work, typeof(*ctx), clk_work);
+
+ if (!ctx) {
+ pr_err("%s: invalid ctx\n", __func__);
+ return;
+ }
+
+ pr_debug("%s:ctx=%p num=%d\n", __func__, ctx, ctx->pp_num);
+
+ mutex_lock(&ctx->clk_mtx);
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ if (ctx->clk_control && ctx->clk_enabled) {
+ ctx->clk_enabled = 0;
+ ctx->clk_control = 0;
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+ /*
+ * make sure dsi link is idle here
+ */
+ ctx->vsync_enabled = 0;
+ mdss_mdp_irq_disable(MDSS_MDP_IRQ_PING_PONG_RD_PTR,
+ ctx->pp_num);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ complete(&ctx->stop_comp);
+ pr_debug("%s: SET_CLK_OFF, pid=%d\n", __func__, current->pid);
+ } else {
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+ }
+ mutex_unlock(&ctx->clk_mtx);
+}
+
+static int mdss_mdp_cmd_vsync_ctrl(struct mdss_mdp_ctl *ctl,
+ mdp_vsync_handler_t send_vsync)
+{
+ struct mdss_mdp_cmd_ctx *ctx;
+ unsigned long flags;
+ int enable;
+
+ ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
+ if (!ctx) {
+ pr_err("%s: invalid ctx\n", __func__);
+ return -ENODEV;
+ }
+
+ enable = (send_vsync != NULL);
+
+ pr_debug("%s: ctx=%p ctx=%d enabled=%d %d clk_enabled=%d clk_ctrl=%d\n",
+ __func__, ctx, ctx->pp_num, ctx->vsync_enabled, enable,
+ ctx->clk_enabled, ctx->clk_control);
+
+ mutex_lock(&ctx->clk_mtx);
+ if (ctx->vsync_enabled == enable) {
+ mutex_unlock(&ctx->clk_mtx);
+ return 0;
+ }
+
+ if (enable) {
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ ctx->clk_control = 0;
+ ctx->expire = 0;
+ ctx->send_vsync = send_vsync;
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+ if (ctx->clk_enabled == 0) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_RD_PTR,
+ ctx->pp_num);
+ ctx->vsync_enabled = 1;
+ ctx->clk_enabled = 1;
+ pr_debug("%s: SET_CLK_ON, pid=%d\n", __func__,
+ current->pid);
+ }
+ } else {
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ ctx->expire = VSYNC_EXPIRE_TICK;
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+ }
+ mutex_unlock(&ctx->clk_mtx);
+
+ return 0;
+}
+
+static void mdss_mdp_cmd_chk_clock(struct mdss_mdp_cmd_ctx *ctx)
+{
+ unsigned long flags;
+ int set_clk_on = 0;
+
+ if (!ctx) {
+ pr_err("invalid ctx\n");
+ return;
+ }
+
+ pr_debug("%s: ctx=%p num=%d clk_enabled=%d\n", __func__,
+ ctx, ctx->pp_num, ctx->clk_enabled);
+
+ mutex_lock(&ctx->clk_mtx);
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ ctx->koff_cnt++;
+ ctx->clk_control = 0;
+ ctx->expire = VSYNC_EXPIRE_TICK;
+ if (ctx->clk_enabled == 0) {
+ set_clk_on++;
+ ctx->clk_enabled = 1;
+ }
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+ if (set_clk_on) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ ctx->vsync_enabled = 1;
+ mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num);
+ pr_debug("%s: ctx=%p num=%d SET_CLK_ON\n", __func__,
+ ctx, ctx->pp_num);
+ }
+ mutex_unlock(&ctx->clk_mtx);
+}
+
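
The clock-gating flow added above (clk_ctrl_work, mdss_mdp_cmd_vsync_ctrl and mdss_mdp_cmd_chk_clock) is easier to see as a toy countdown; the host-side model below captures the intent under simplifying assumptions (single context, no locking, pingpong-done arriving on the second vsync) and is not driver code:

    #include <stdio.h>

    #define VSYNC_EXPIRE_TICK 4

    int main(void)
    {
        int expire = VSYNC_EXPIRE_TICK; /* armed at kickoff or when vsync is turned off */
        int koff_cnt = 1;               /* one kickoff still outstanding */
        int vsync;

        for (vsync = 1; expire; vsync++) {
            if (vsync == 2)
                koff_cnt--;             /* pingpong-done lands here */

            expire--;
            if (expire == 0 && koff_cnt > 0)
                expire++;               /* defer by one more vsync */

            printf("vsync %d: expire=%d koff=%d\n", vsync, expire, koff_cnt);
        }
        printf("clk_ctrl_work can now gate the MDP clocks\n");
        return 0;
    }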
+static int mdss_mdp_cmd_wait4comp(struct mdss_mdp_ctl *ctl, void *arg)
+{
+ struct mdss_mdp_cmd_ctx *ctx;
+ int rc;
+
+ ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
+ if (!ctx) {
+ pr_err("invalid ctx\n");
+ return -ENODEV;
+ }
+
+ pr_debug("%s: intf_num=%d ctx=%p\n", __func__, ctl->intf_num, ctx);
+
+ rc = wait_for_completion_interruptible_timeout(&ctx->pp_comp,
+ KOFF_TIMEOUT);
+ WARN(rc <= 0, "cmd kickoff timed out (%d) ctl=%d\n", rc, ctl->num);
+
+ return rc;
}
int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
@@ -206,14 +344,16 @@
int rc;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
- pr_debug("%s: kickoff intf_num=%d ctx=%p\n", __func__,
- ctl->intf_num, ctx);
-
if (!ctx) {
pr_err("invalid ctx\n");
return -ENODEV;
}
+ pr_debug("%s: kickoff intf_num=%d ctx=%p\n", __func__,
+ ctl->intf_num, ctx);
+
+ mdss_mdp_cmd_chk_clock(ctx);
+
if (ctx->panel_on == 0) {
rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL);
WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
@@ -229,27 +369,37 @@
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
- wait_for_completion_interruptible(&ctx->pp_comp);
-
return 0;
}
int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_cmd_ctx *ctx;
+ int need_wait = 0;
int ret;
- pr_debug("%s: +\n", __func__);
-
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
if (!ctx) {
pr_err("invalid ctx\n");
return -ENODEV;
}
+ pr_debug("%s:+ vaync_enable=%d expire=%d\n", __func__,
+ ctx->vsync_enabled, ctx->expire);
+
+ mutex_lock(&ctx->clk_mtx);
+ if (ctx->vsync_enabled) {
+ INIT_COMPLETION(ctx->stop_comp);
+ need_wait = 1;
+ }
+ mutex_unlock(&ctx->clk_mtx);
+
+ if (need_wait)
+ wait_for_completion_interruptible(&ctx->stop_comp);
+
ctx->panel_on = 0;
- mdss_mdp_cmd_set_vsync_handler(ctl, NULL);
+ mdss_mdp_cmd_vsync_ctrl(ctl, NULL);
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctl->intf_num,
NULL, NULL);
@@ -264,7 +414,6 @@
ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_OFF, NULL);
WARN(ret, "intf %d unblank error (%d)\n", ctl->intf_num, ret);
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
pr_debug("%s:-\n", __func__);
@@ -279,8 +428,6 @@
pr_debug("%s:+\n", __func__);
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
-
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
if (!mixer) {
pr_err("mixer not setup correctly\n");
@@ -307,8 +454,14 @@
ctx->pp_num = mixer->num;
init_completion(&ctx->pp_comp);
- spin_lock_init(&ctx->vsync_lock);
+ init_completion(&ctx->stop_comp);
atomic_set(&ctx->vsync_ref, 0);
+ spin_lock_init(&ctx->clk_lock);
+ mutex_init(&ctx->clk_mtx);
+ INIT_WORK(&ctx->clk_work, clk_ctrl_work);
+
+ pr_debug("%s: ctx=%p num=%d mixer=%d\n", __func__,
+ ctx, ctx->pp_num, mixer->num);
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num,
mdss_mdp_cmd_readptr_done, ctl);
@@ -324,7 +477,8 @@
ctl->stop_fnc = mdss_mdp_cmd_stop;
ctl->display_fnc = mdss_mdp_cmd_kickoff;
- ctl->set_vsync_handler = mdss_mdp_cmd_set_vsync_handler;
+ ctl->wait_fnc = mdss_mdp_cmd_wait4comp;
+ ctl->set_vsync_handler = mdss_mdp_cmd_vsync_ctrl;
pr_debug("%s:-\n", __func__);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 8c57c8c..94ae710 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -59,6 +59,22 @@
writel_relaxed(val, ctx->base + reg);
}
+static inline u32 mdp_video_read(struct mdss_mdp_video_ctx *ctx,
+ u32 reg)
+{
+ return readl_relaxed(ctx->base + reg);
+}
+
+static inline u32 mdss_mdp_video_line_count(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_mdp_video_ctx *ctx = ctl->priv_data;
+ u32 line_cnt = 0;
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ line_cnt = mdp_video_read(ctx, MDSS_MDP_REG_INTF_LINE_COUNT);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ return line_cnt;
+}
+
int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata,
u32 *offsets, u32 count)
{
@@ -171,6 +187,7 @@
p->underflow_clr);
mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_SKEW, p->hsync_skew);
mdp_video_write(ctx, MDSS_MDP_REG_INTF_POLARITY_CTL, polarity_ctl);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN, 0x3);
return 0;
}
@@ -182,6 +199,9 @@
if (atomic_inc_return(&ctx->vsync_ref) == 1)
mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num);
+ else
+ mdss_mdp_irq_clear(ctl->mdata, MDSS_MDP_IRQ_INTF_VSYNC,
+ ctl->intf_num);
}
static inline void video_vsync_irq_disable(struct mdss_mdp_ctl *ctl)
@@ -197,6 +217,7 @@
{
struct mdss_mdp_video_ctx *ctx;
unsigned long flags;
+ int need_update;
ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data;
if (!ctx) {
@@ -205,14 +226,18 @@
}
spin_lock_irqsave(&ctx->vsync_lock, flags);
- if (!ctx->vsync_handler && vsync_handler)
- video_vsync_irq_enable(ctl);
- else if (ctx->vsync_handler && !vsync_handler)
- video_vsync_irq_disable(ctl);
-
+ need_update = (!ctx->vsync_handler && vsync_handler) ||
+ (ctx->vsync_handler && !vsync_handler);
ctx->vsync_handler = vsync_handler;
spin_unlock_irqrestore(&ctx->vsync_lock, flags);
+ if (need_update) {
+ if (vsync_handler)
+ video_vsync_irq_enable(ctl);
+ else
+ video_vsync_irq_disable(ctl);
+ }
+
return 0;
}
@@ -274,8 +299,10 @@
}
vsync_time = ktime_get();
+ ctl->vsync_cnt++;
- pr_debug("intr ctl=%d\n", ctl->num);
+ pr_debug("intr ctl=%d vsync cnt=%u vsync_time=%d\n",
+ ctl->num, ctl->vsync_cnt, (int)ktime_to_ms(vsync_time));
complete_all(&ctx->vsync_comp);
spin_lock(&ctx->vsync_lock);
@@ -335,8 +362,8 @@
if (!ctx->wait_pending) {
ctx->wait_pending++;
- INIT_COMPLETION(ctx->vsync_comp);
video_vsync_irq_enable(ctl);
+ INIT_COMPLETION(ctx->vsync_comp);
} else {
WARN(1, "commit without wait! ctl=%d", ctl->num);
}
@@ -445,6 +472,7 @@
ctl->display_fnc = mdss_mdp_video_display;
ctl->wait_fnc = mdss_mdp_video_wait4comp;
ctl->set_vsync_handler = mdss_mdp_video_set_vsync_handler;
+ ctl->read_line_cnt_fnc = mdss_mdp_video_line_count;
return 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index 7fbb031..0c08eda 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -17,6 +17,9 @@
#include "mdss_mdp.h"
#include "mdss_mdp_rotator.h"
+/* wait for at most 2 vsyncs at the lowest refresh rate (24 Hz): 2 * 1000/24 ms ~= 84 ms */
+#define KOFF_TIMEOUT msecs_to_jiffies(84)
+
enum mdss_mdp_writeback_type {
MDSS_MDP_WRITEBACK_TYPE_ROTATOR,
MDSS_MDP_WRITEBACK_TYPE_LINE,
@@ -28,14 +31,18 @@
char __iomem *base;
u8 ref_cnt;
u8 type;
+ struct completion wb_comp;
+ int comp_cnt;
u32 intr_type;
u32 intf_num;
u32 opmode;
- u32 format;
+ struct mdss_mdp_format_params *dst_fmt;
u16 width;
u16 height;
+ struct mdss_mdp_img_rect dst_rect;
+
u8 rot90;
u32 bwc_mode;
int initialized;
@@ -81,48 +88,55 @@
}
static int mdss_mdp_writeback_addr_setup(struct mdss_mdp_writeback_ctx *ctx,
- struct mdss_mdp_data *data)
+ const struct mdss_mdp_data *in_data)
{
int ret;
+ struct mdss_mdp_data data;
- if (!data)
+ if (!in_data)
return -EINVAL;
+ data = *in_data;
- pr_debug("wb_num=%d addr=0x%x\n", ctx->wb_num, data->p[0].addr);
+ pr_debug("wb_num=%d addr=0x%x\n", ctx->wb_num, data.p[0].addr);
if (ctx->bwc_mode)
- data->bwc_enabled = 1;
+ data.bwc_enabled = 1;
- ret = mdss_mdp_data_check(data, &ctx->dst_planes);
+ ret = mdss_mdp_data_check(&data, &ctx->dst_planes);
if (ret)
return ret;
- mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST0_ADDR, data->p[0].addr);
- mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST1_ADDR, data->p[1].addr);
- mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST2_ADDR, data->p[2].addr);
- mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST3_ADDR, data->p[3].addr);
+ mdss_mdp_data_calc_offset(&data, ctx->dst_rect.x, ctx->dst_rect.y,
+ &ctx->dst_planes, ctx->dst_fmt);
+
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST0_ADDR, data.p[0].addr);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST1_ADDR, data.p[1].addr);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST2_ADDR, data.p[2].addr);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST3_ADDR, data.p[3].addr);
return 0;
}
-static int mdss_mdp_writeback_format_setup(struct mdss_mdp_writeback_ctx *ctx)
+static int mdss_mdp_writeback_format_setup(struct mdss_mdp_writeback_ctx *ctx,
+ u32 format)
{
struct mdss_mdp_format_params *fmt;
u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
u32 opmode = ctx->opmode;
struct mdss_data_type *mdata;
- pr_debug("wb_num=%d format=%d\n", ctx->wb_num, ctx->format);
+ pr_debug("wb_num=%d format=%d\n", ctx->wb_num, format);
- mdss_mdp_get_plane_sizes(ctx->format, ctx->width, ctx->height,
+ mdss_mdp_get_plane_sizes(format, ctx->width, ctx->height,
&ctx->dst_planes,
ctx->opmode & MDSS_MDP_OP_BWC_EN);
- fmt = mdss_mdp_get_format_params(ctx->format);
+ fmt = mdss_mdp_get_format_params(format);
if (!fmt) {
- pr_err("wb format=%d not supported\n", ctx->format);
+ pr_err("wb format=%d not supported\n", format);
return -EINVAL;
}
+ ctx->dst_fmt = fmt;
chroma_samp = fmt->chroma_sample;
@@ -164,33 +178,29 @@
dst_format |= BIT(14); /* DST_ALPHA_X */
}
- if (fmt->fetch_planes != MDSS_MDP_PLANE_PLANAR) {
- mdata = mdss_mdp_get_mdata();
- if (mdata && mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
- pattern = (fmt->element[3] << 24) |
- (fmt->element[2] << 16) |
- (fmt->element[1] << 8) |
- (fmt->element[0] << 0);
- } else {
- pattern = (fmt->element[3] << 24) |
- (fmt->element[2] << 15) |
- (fmt->element[1] << 8) |
- (fmt->element[0] << 0);
- }
-
- dst_format |= (fmt->unpack_align_msb << 18) |
- (fmt->unpack_tight << 17) |
- ((fmt->unpack_count - 1) << 12) |
- ((fmt->bpp - 1) << 9);
+ mdata = mdss_mdp_get_mdata();
+ if (mdata && mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
+ pattern = (fmt->element[3] << 24) |
+ (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) |
+ (fmt->element[0] << 0);
} else {
- pattern = 0;
+ pattern = (fmt->element[3] << 24) |
+ (fmt->element[2] << 15) |
+ (fmt->element[1] << 8) |
+ (fmt->element[0] << 0);
}
+ dst_format |= (fmt->unpack_align_msb << 18) |
+ (fmt->unpack_tight << 17) |
+ ((fmt->unpack_count - 1) << 12) |
+ ((fmt->bpp - 1) << 9);
+
ystride0 = (ctx->dst_planes.ystride[0]) |
(ctx->dst_planes.ystride[1] << 16);
ystride1 = (ctx->dst_planes.ystride[2]) |
(ctx->dst_planes.ystride[3] << 16);
- outsize = (ctx->height << 16) | ctx->width;
+ outsize = (ctx->dst_rect.h << 16) | ctx->dst_rect.w;
mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_FORMAT, dst_format);
mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_OP_MODE, opmode);
@@ -217,11 +227,14 @@
pr_debug("wfd setup ctl=%d\n", ctl->num);
ctx->opmode = 0;
- ctx->format = ctl->dst_format;
ctx->width = ctl->width;
ctx->height = ctl->height;
+ ctx->dst_rect.x = 0;
+ ctx->dst_rect.y = 0;
+ ctx->dst_rect.w = ctx->width;
+ ctx->dst_rect.h = ctx->height;
- ret = mdss_mdp_writeback_format_setup(ctx);
+ ret = mdss_mdp_writeback_format_setup(ctx, ctl->dst_format);
if (ret) {
pr_err("format setup failed\n");
return ret;
@@ -237,6 +250,7 @@
struct mdss_mdp_writeback_ctx *ctx;
struct mdss_mdp_writeback_arg *wb_args;
struct mdss_mdp_rotator_session *rot;
+ u32 format;
ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
if (!ctx)
@@ -259,24 +273,26 @@
ctx->bwc_mode = rot->bwc_mode;
ctx->opmode |= ctx->bwc_mode;
- ctx->width = rot->src_rect.w;
- ctx->height = rot->src_rect.h;
-
- ctx->format = rot->format;
+ ctx->width = rot->dst.w;
+ ctx->height = rot->dst.h;
+ ctx->dst_rect.x = rot->dst.x;
+ ctx->dst_rect.y = rot->dst.y;
+ ctx->dst_rect.w = rot->src_rect.w;
+ ctx->dst_rect.h = rot->src_rect.h;
ctx->rot90 = !!(rot->flags & MDP_ROT_90);
if (ctx->bwc_mode || ctx->rot90)
- ctx->format = mdss_mdp_get_rotator_dst_format(rot->format);
+ format = mdss_mdp_get_rotator_dst_format(rot->format);
else
- ctx->format = rot->format;
+ format = rot->format;
if (ctx->rot90) {
ctx->opmode |= BIT(5); /* ROT 90 */
- swap(ctx->width, ctx->height);
+ swap(ctx->dst_rect.w, ctx->dst_rect.h);
}
- return mdss_mdp_writeback_format_setup(ctx);
+ return mdss_mdp_writeback_format_setup(ctx, format);
}
static int mdss_mdp_writeback_stop(struct mdss_mdp_ctl *ctl)
@@ -310,10 +326,37 @@
pr_debug("intr wb_num=%d\n", ctx->wb_num);
mdss_mdp_irq_disable_nosync(ctx->intr_type, ctx->intf_num);
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, true);
if (ctx->callback_fnc)
ctx->callback_fnc(ctx->callback_arg);
+
+ complete_all(&ctx->wb_comp);
+}
+
+static int mdss_mdp_wb_wait4comp(struct mdss_mdp_ctl *ctl, void *arg)
+{
+ struct mdss_mdp_writeback_ctx *ctx;
+ int rc = 0;
+
+ ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+ if (!ctx) {
+ pr_err("invalid ctx\n");
+ return -ENODEV;
+ }
+
+ if (ctx->comp_cnt == 0)
+ return rc;
+
+ rc = wait_for_completion_interruptible_timeout(&ctx->wb_comp,
+ KOFF_TIMEOUT);
+ WARN(rc <= 0, "writeback kickoff timed out (%d) ctl=%d\n",
+ rc, ctl->num);
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); /* clock off */
+
+ ctx->comp_cnt--;
+
+ return rc;
}
static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
@@ -327,6 +370,12 @@
if (!ctx)
return -ENODEV;
+ if (ctx->comp_cnt) {
+ pr_err("previous kickoff not completed yet, ctl=%d\n",
+ ctl->num);
+ return -EPERM;
+ }
+
wb_args = (struct mdss_mdp_writeback_arg *) arg;
if (!wb_args)
return -ENOENT;
@@ -341,14 +390,18 @@
ctx->callback_arg = wb_args->priv_data;
flush_bits = BIT(16); /* WB */
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS, ctl->is_secure);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+ INIT_COMPLETION(ctx->wb_comp);
mdss_mdp_irq_enable(ctx->intr_type, ctx->intf_num);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
wmb();
+ ctx->comp_cnt++;
+
return 0;
}
@@ -376,6 +429,7 @@
ctx->wb_num = ctl->num; /* wb num should match ctl num */
ctx->base = ctl->wb_base;
ctx->initialized = false;
+ init_completion(&ctx->wb_comp);
mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
mdss_mdp_writeback_intr_done, ctx);
@@ -386,6 +440,7 @@
ctl->prepare_fnc = mdss_mdp_writeback_prepare_wfd;
ctl->stop_fnc = mdss_mdp_writeback_stop;
ctl->display_fnc = mdss_mdp_writeback_display;
+ ctl->wait_fnc = mdss_mdp_wb_wait4comp;
return ret;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index dae3e05..6c90794 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -24,6 +24,7 @@
#include <linux/msm_mdp.h>
#include <mach/iommu_domains.h>
+#include <mach/event_timer.h>
#include "mdss.h"
#include "mdss_fb.h"
@@ -37,6 +38,7 @@
static atomic_t ov_active_panels = ATOMIC_INIT(0);
static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
+static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
struct mdp_overlay *req)
@@ -92,15 +94,26 @@
return -EOVERFLOW;
}
- if (req->dst_rect.w < min_dst_size || req->dst_rect.h < min_dst_size ||
- req->dst_rect.w > MAX_DST_W || req->dst_rect.h > MAX_DST_H) {
+ if (req->dst_rect.w < min_dst_size || req->dst_rect.h < min_dst_size) {
pr_err("invalid destination resolution (%dx%d)",
req->dst_rect.w, req->dst_rect.h);
return -EOVERFLOW;
}
+ if (req->horz_deci || req->vert_deci) {
+ if (!mdata->has_decimation) {
+ pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
+ return -EINVAL;
+ } else if ((req->horz_deci > MAX_DECIMATION) ||
+ (req->vert_deci > MAX_DECIMATION)) {
+ pr_err("Invalid decimation factors horz=%d vert=%d\n",
+ req->horz_deci, req->vert_deci);
+ return -EINVAL;
+ }
+ }
+
if (!(req->flags & MDSS_MDP_ROT_ONLY)) {
- u32 dst_w, dst_h;
+ u32 src_w, src_h, dst_w, dst_h;
if ((CHECK_BOUNDS(req->dst_rect.x, req->dst_rect.w, xres) ||
CHECK_BOUNDS(req->dst_rect.y, req->dst_rect.h, yres))) {
@@ -118,27 +131,36 @@
dst_h = req->dst_rect.h;
}
- if ((req->src_rect.w * MAX_UPSCALE_RATIO) < dst_w) {
+ src_w = req->src_rect.w >> req->horz_deci;
+ src_h = req->src_rect.h >> req->vert_deci;
+
+ if (src_w > MAX_MIXER_WIDTH) {
+ pr_err("invalid source width=%d HDec=%d\n",
+ req->src_rect.w, req->horz_deci);
+ return -EINVAL;
+ }
+
+ if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
pr_err("too much upscaling Width %d->%d\n",
req->src_rect.w, req->dst_rect.w);
return -EINVAL;
}
- if ((req->src_rect.h * MAX_UPSCALE_RATIO) < dst_h) {
+ if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
pr_err("too much upscaling. Height %d->%d\n",
req->src_rect.h, req->dst_rect.h);
return -EINVAL;
}
- if (req->src_rect.w > (dst_w * MAX_DOWNSCALE_RATIO)) {
- pr_err("too much downscaling. Width %d->%d\n",
- req->src_rect.w, req->dst_rect.w);
+ if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
+ pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
+ src_w, req->dst_rect.w, req->horz_deci);
return -EINVAL;
}
- if (req->src_rect.h > (dst_h * MAX_DOWNSCALE_RATIO)) {
- pr_err("too much downscaling. Height %d->%d\n",
- req->src_rect.h, req->dst_rect.h);
+ if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
+ pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
+ src_h, req->dst_rect.h, req->vert_deci);
return -EINVAL;
}
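
To make the new decimation checks above concrete, a small host-side example; the 4096-pixel source and the MAX_MIXER_WIDTH value are assumptions for illustration, while MAX_DOWNSCALE_RATIO matches the definition in mdss_mdp.h:

    #include <stdio.h>

    #define MAX_DOWNSCALE_RATIO 4    /* from mdss_mdp.h */
    #define MAX_MIXER_WIDTH  2048    /* assumed value, for illustration only */

    int main(void)
    {
        unsigned int src_w = 4096, dst_w = 1080;   /* made-up request */
        unsigned int horz_deci = 1;                /* decimate width by 2 */

        unsigned int eff_w = src_w >> horz_deci;   /* width seen by the scaler */

        if (eff_w > MAX_MIXER_WIDTH)
            printf("rejected: %u still too wide after decimation\n", eff_w);
        else if (eff_w > dst_w * MAX_DOWNSCALE_RATIO)
            printf("rejected: more than %dx downscale\n", MAX_DOWNSCALE_RATIO);
        else
            printf("ok: scaler handles %u -> %u\n", eff_w, dst_w);
        return 0;
    }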
@@ -177,6 +199,7 @@
struct mdss_mdp_rotator_session *rot;
struct mdss_mdp_format_params *fmt;
int ret = 0;
+ u32 bwc_enabled;
pr_debug("rot ctl=%u req id=%x\n", mdp5_data->ctl->num, req->id);
@@ -213,7 +236,14 @@
rot->flags = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD |
MDP_SECURE_OVERLAY_SESSION);
- rot->bwc_mode = (req->flags & MDP_BWC_EN) ? 1 : 0;
+ bwc_enabled = req->flags & MDP_BWC_EN;
+ if (bwc_enabled && !mdp5_data->mdata->has_bwc) {
+ pr_err("BWC is not supported in MDP version %x\n",
+ mdp5_data->mdata->mdp_rev);
+ rot->bwc_mode = 0;
+ } else {
+ rot->bwc_mode = bwc_enabled ? 1 : 0;
+ }
rot->format = fmt->format;
rot->img_width = req->src.width;
rot->img_height = req->src.height;
@@ -227,9 +257,13 @@
rot->src_rect.h /= 2;
}
- rot->params_changed++;
-
- req->id = rot->session_id;
+ ret = mdss_mdp_rotator_setup(rot);
+ if (ret == 0) {
+ req->id = rot->session_id;
+ } else {
+ pr_err("Unable to setup rotator session\n");
+ mdss_mdp_rotator_release(rot->session_id);
+ }
return ret;
}
@@ -243,11 +277,24 @@
struct mdss_mdp_mixer *mixer = NULL;
u32 pipe_type, mixer_mux, len, src_format;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ struct mdp_histogram_start_req hist;
int ret;
+ u32 bwc_enabled;
if (mdp5_data->ctl == NULL)
return -ENODEV;
+ if (req->flags & MDP_ROT_90) {
+ pr_err("unsupported inline rotation\n");
+ return -ENOTSUPP;
+ }
+
+ if ((req->dst_rect.w > MAX_DST_W) || (req->dst_rect.h > MAX_DST_H)) {
+ pr_err("exceeded max mixer supported resolution %dx%d\n",
+ req->dst_rect.w, req->dst_rect.h);
+ return -EOVERFLOW;
+ }
+
if (req->flags & MDSS_MDP_RIGHT_MIXER)
mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
else
@@ -256,11 +303,6 @@
pr_debug("pipe ctl=%u req id=%x mux=%d\n", mdp5_data->ctl->num, req->id,
mixer_mux);
- if (req->flags & MDP_ROT_90) {
- pr_err("unsupported inline rotation\n");
- return -ENOTSUPP;
- }
-
src_format = req->src.format;
if (req->flags & (MDP_SOURCE_ROTATED_90 | MDP_BWC_EN))
src_format = mdss_mdp_get_rotator_dst_format(src_format);
@@ -334,8 +376,8 @@
pr_err("Can't switch mixer %d->%d pnum %d!\n",
pipe->mixer->num, mixer->num,
pipe->num);
- mdss_mdp_pipe_unmap(pipe);
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_fail;
}
pr_debug("switching pipe mixer %d->%d pnum %d\n",
pipe->mixer->num, mixer->num,
@@ -346,8 +388,15 @@
}
pipe->flags = req->flags;
- pipe->bwc_mode = pipe->mixer->rotator_mode ?
- 0 : (req->flags & MDP_BWC_EN ? 1 : 0) ;
+ bwc_enabled = req->flags & MDP_BWC_EN;
+ if (bwc_enabled && !mdp5_data->mdata->has_bwc) {
+ pr_err("BWC is not supported in MDP version %x\n",
+ mdp5_data->mdata->mdp_rev);
+ pipe->bwc_mode = 0;
+ } else {
+ pipe->bwc_mode = pipe->mixer->rotator_mode ?
+ 0 : (bwc_enabled ? 1 : 0) ;
+ }
pipe->img_width = req->src.width & 0x3fff;
pipe->img_height = req->src.height & 0x3fff;
pipe->src.x = req->src_rect.x;
@@ -358,14 +407,16 @@
pipe->dst.y = req->dst_rect.y;
pipe->dst.w = req->dst_rect.w;
pipe->dst.h = req->dst_rect.h;
-
+ pipe->horz_deci = req->horz_deci;
+ pipe->vert_deci = req->vert_deci;
pipe->src_fmt = fmt;
pipe->mixer_stage = req->z_order;
pipe->is_fg = req->is_fg;
pipe->alpha = req->alpha;
pipe->transp = req->transp_mask;
- pipe->overfetch_disable = fmt->is_yuv;
+ pipe->overfetch_disable = fmt->is_yuv &&
+ !(pipe->flags & MDP_SOURCE_ROTATED_90);
pipe->req_data = *req;
@@ -389,6 +440,31 @@
pipe->pp_res.igc_c0_c1;
pipe->pp_cfg.igc_cfg.c2_data = pipe->pp_res.igc_c2;
}
+ if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_CFG) {
+ if (pipe->pp_cfg.hist_cfg.ops & MDP_PP_OPS_ENABLE) {
+ hist.block = pipe->pp_cfg.hist_cfg.block;
+ hist.frame_cnt =
+ pipe->pp_cfg.hist_cfg.frame_cnt;
+ hist.bit_mask = pipe->pp_cfg.hist_cfg.bit_mask;
+ hist.num_bins = pipe->pp_cfg.hist_cfg.num_bins;
+ mdss_mdp_histogram_start(pipe->mixer->ctl,
+ &hist);
+ } else if (pipe->pp_cfg.hist_cfg.ops &
+ MDP_PP_OPS_DISABLE) {
+ mdss_mdp_histogram_stop(pipe->mixer->ctl,
+ pipe->pp_cfg.hist_cfg.block);
+ }
+ }
+ len = pipe->pp_cfg.hist_lut_cfg.len;
+ if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) &&
+ (len == ENHIST_LUT_ENTRIES)) {
+ ret = copy_from_user(pipe->pp_res.hist_lut,
+ pipe->pp_cfg.hist_lut_cfg.data,
+ sizeof(uint32_t) * len);
+ if (ret)
+ return -ENOMEM;
+ pipe->pp_cfg.hist_lut_cfg.data = pipe->pp_res.hist_lut;
+ }
}
if (pipe->flags & MDP_DEINTERLACE) {
@@ -400,6 +476,12 @@
}
}
+ ret = mdss_mdp_smp_reserve(pipe);
+ if (ret) {
+ pr_debug("mdss_mdp_smp_reserve failed. ret=%d\n", ret);
+ goto exit_fail;
+ }
+
pipe->params_changed++;
req->id = pipe->ndx;
@@ -409,6 +491,25 @@
mdss_mdp_pipe_unmap(pipe);
return ret;
+
+exit_fail:
+ mdss_mdp_pipe_unmap(pipe);
+
+ mutex_lock(&mfd->lock);
+ if (pipe->play_cnt == 0) {
+ pr_debug("failed for pipe %d\n", pipe->num);
+ list_del(&pipe->used_list);
+ mdss_mdp_pipe_destroy(pipe);
+ }
+
+ /* invalidate any overlays in this framebuffer after failure */
+ list_for_each_entry(pipe, &mdp5_data->pipes_used, used_list) {
+ pr_debug("freeing allocations for pipe %d\n", pipe->num);
+ mdss_mdp_smp_unreserve(pipe);
+ pipe->params_changed = 0;
+ }
+ mutex_unlock(&mfd->lock);
+ return ret;
}
static int mdss_mdp_overlay_set(struct msm_fb_data_type *mfd,
@@ -499,6 +600,7 @@
list_move(&pipe->cleanup_list, &destroy_pipes);
mdss_mdp_overlay_free_buf(&pipe->back_buf);
mdss_mdp_overlay_free_buf(&pipe->front_buf);
+ pipe->mfd = NULL;
}
list_for_each_entry(pipe, &mdp5_data->pipes_used, used_list) {
@@ -641,6 +743,19 @@
return rc;
}
+static void mdss_mdp_overlay_update_pm(struct mdss_overlay_private *mdp5_data)
+{
+ ktime_t wakeup_time;
+
+ if (!mdp5_data->cpu_pm_hdl)
+ return;
+
+ if (mdss_mdp_display_wakeup_time(mdp5_data->ctl, &wakeup_time))
+ return;
+
+ activate_event_timer(mdp5_data->cpu_pm_hdl, wakeup_time);
+}
+
int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
@@ -658,15 +773,17 @@
} else if (pipe->front_buf.num_planes) {
buf = &pipe->front_buf;
} else {
- pr_warn("pipe queue without buffer\n");
- buf = NULL;
+ pr_warn("pipe queue w/o buffer. unstaging layer\n");
+ pipe->params_changed = 0;
+ mdss_mdp_mixer_pipe_unstage(pipe);
+ continue;
}
ret = mdss_mdp_pipe_queue_data(pipe, buf);
if (IS_ERR_VALUE(ret)) {
pr_warn("Unable to queue data for pnum=%d\n",
pipe->num);
- mdss_mdp_overlay_free_buf(buf);
+ mdss_mdp_mixer_pipe_unstage(pipe);
}
}
@@ -680,6 +797,8 @@
if (IS_ERR_VALUE(ret))
goto commit_fail;
+ mdss_mdp_overlay_update_pm(mdp5_data);
+
ret = mdss_mdp_display_wait4comp(mdp5_data->ctl);
complete(&mfd->update.comp);
@@ -742,12 +861,13 @@
if (ndx == BORDERFILL_NDX) {
pr_debug("borderfill disable\n");
mdp5_data->borderfill_enable = false;
- return 0;
+ ret = 0;
+ goto done;
}
if (!mfd->panel_power_on) {
- mutex_unlock(&mdp5_data->ov_lock);
- return -EPERM;
+ ret = -EPERM;
+ goto done;
}
pr_debug("unset ndx=%x\n", ndx);
@@ -757,6 +877,7 @@
else
ret = mdss_mdp_overlay_release(mfd, ndx);
+done:
mutex_unlock(&mdp5_data->ov_lock);
return ret;
@@ -885,6 +1006,41 @@
return ret;
}
+static int mdss_mdp_overlay_force_cleanup(struct msm_fb_data_type *mfd)
+{
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+ int ret;
+
+ pr_debug("forcing cleanup to unset dma pipes on fb%d\n", mfd->index);
+
+ /*
+ * video mode panels require the layer to be unstaged and a vsync
+ * to pass before the buffer can be released.
+ */
+ if (ctl && ctl->is_video_mode) {
+ ret = mdss_mdp_display_commit(ctl, NULL);
+ if (!IS_ERR_VALUE(ret))
+ mdss_mdp_display_wait4comp(ctl);
+ }
+
+ ret = mdss_mdp_overlay_cleanup(mfd);
+
+ return ret;
+}
+
+static void mdss_mdp_overlay_force_dma_cleanup(struct mdss_data_type *mdata)
+{
+ struct mdss_mdp_pipe *pipe;
+ int i;
+
+ for (i = 0; i < mdata->ndma_pipes; i++) {
+ pipe = mdata->dma_pipes + i;
+ if (atomic_read(&pipe->ref_cnt) && pipe->mfd)
+ mdss_mdp_overlay_force_cleanup(pipe->mfd);
+ }
+}
+
static int mdss_mdp_overlay_play(struct msm_fb_data_type *mfd,
struct msmfb_overlay_data *req)
{
@@ -898,17 +1054,19 @@
return ret;
if (!mfd->panel_power_on) {
- mutex_unlock(&mdp5_data->ov_lock);
- return -EPERM;
+ ret = -EPERM;
+ goto done;
}
ret = mdss_mdp_overlay_start(mfd);
if (ret) {
pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
- return ret;
+ goto done;
}
if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
+ mdss_mdp_overlay_force_dma_cleanup(mfd_to_mdata(mfd));
+
ret = mdss_mdp_overlay_rotate(mfd, req);
} else if (req->id == BORDERFILL_NDX) {
pr_debug("borderfill enable\n");
@@ -918,6 +1076,7 @@
ret = mdss_mdp_overlay_queue(mfd, req);
}
+done:
mutex_unlock(&mdp5_data->ov_lock);
return ret;
@@ -1457,7 +1616,7 @@
int ret = -ENOSYS;
struct mdp_histogram_data hist;
struct mdp_histogram_start_req hist_req;
- u32 block, hist_data_addr = 0;
+ u32 block;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
switch (cmd) {
@@ -1488,15 +1647,9 @@
if (ret)
return ret;
- ret = mdss_mdp_hist_collect(mdp5_data->ctl, &hist,
- &hist_data_addr);
- if ((ret == 0) && hist_data_addr) {
- ret = copy_to_user(hist.c0, (u32 *)hist_data_addr,
- sizeof(u32) * hist.bin_cnt);
- if (ret == 0)
- ret = copy_to_user(argp, &hist,
- sizeof(hist));
- }
+ ret = mdss_mdp_hist_collect(mdp5_data->ctl, &hist);
+ if (!ret)
+ ret = copy_to_user(argp, &hist, sizeof(hist));
break;
default:
break;
@@ -1532,6 +1685,10 @@
caps->vig_pipes = mdata->nvig_pipes;
caps->rgb_pipes = mdata->nrgb_pipes;
caps->dma_pipes = mdata->ndma_pipes;
+ if (mdata->has_bwc)
+ caps->features |= MDP_BWC_EN;
+ if (mdata->has_decimation)
+ caps->features |= MDP_DECIMATION_EN;
return 0;
}
@@ -1583,10 +1740,8 @@
ret = copy_to_user(argp, &req, sizeof(req));
}
- if (ret) {
+ if (ret)
pr_debug("OVERLAY_GET failed (%d)\n", ret);
- ret = -EFAULT;
- }
break;
case MSMFB_OVERLAY_SET:
@@ -1597,10 +1752,8 @@
if (!IS_ERR_VALUE(ret))
ret = copy_to_user(argp, &req, sizeof(req));
}
- if (ret) {
+ if (ret)
pr_debug("OVERLAY_SET failed (%d)\n", ret);
- ret = -EFAULT;
- }
break;
@@ -1623,16 +1776,11 @@
struct msmfb_overlay_data data;
ret = copy_from_user(&data, argp, sizeof(data));
- if (!ret) {
+ if (!ret)
ret = mdss_mdp_overlay_play(mfd, &data);
- if (!IS_ERR_VALUE(ret))
- mdss_fb_update_backlight(mfd);
- }
- if (ret) {
+ if (ret)
pr_debug("OVERLAY_PLAY failed (%d)\n", ret);
- ret = -EFAULT;
- }
} else {
ret = 0;
}
@@ -1646,10 +1794,8 @@
if (!ret)
ret = mdss_mdp_overlay_play_wait(mfd, &data);
- if (ret) {
+ if (ret)
pr_err("OVERLAY_PLAY_WAIT failed (%d)\n", ret);
- ret = -EFAULT;
- }
} else {
ret = 0;
}
@@ -1842,6 +1988,10 @@
}
mfd->mdp.private1 = mdp5_data;
+ rc = mdss_mdp_overlay_fb_parse_dt(mfd);
+ if (rc)
+ return rc;
+
rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
if (rc) {
pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
@@ -1854,8 +2004,27 @@
kobject_uevent(&dev->kobj, KOBJ_ADD);
pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
+ mdp5_data->cpu_pm_hdl = add_event_timer(NULL, (void *)mdp5_data);
+ if (!mdp5_data->cpu_pm_hdl)
+ pr_warn("%s: unable to add event timer\n", __func__);
+
return rc;
init_fail:
kfree(mdp5_data);
return rc;
}
+
+static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd)
+{
+ struct platform_device *pdev = mfd->pdev;
+ struct mdss_overlay_private *mdp5_mdata = mfd_to_mdp5_data(mfd);
+
+ mdp5_mdata->mixer_swap = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mdss-mixer-swap");
+ if (mdp5_mdata->mixer_swap) {
+ pr_info("mixer swap is enabled for fb device=%s\n",
+ pdev->name);
+ }
+
+ return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index b169c43..0f65530 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -27,8 +27,6 @@
static DEFINE_MUTEX(mdss_mdp_smp_lock);
static DECLARE_BITMAP(mdss_mdp_smp_mmb_pool, MDSS_MDP_SMP_MMB_BLOCKS);
-static struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
- u32 ndx);
static int mdss_mdp_pipe_free(struct mdss_mdp_pipe *pipe);
static inline void mdss_mdp_pipe_write(struct mdss_mdp_pipe *pipe,
@@ -42,17 +40,18 @@
return readl_relaxed(pipe->base + reg);
}
-static u32 mdss_mdp_smp_mmb_reserve(unsigned long *smp, size_t n)
+static u32 mdss_mdp_smp_mmb_reserve(unsigned long *existing,
+ unsigned long *reserve, size_t n)
{
u32 i, mmb;
/* reserve more blocks if needed, but can't free mmb at this point */
- for (i = bitmap_weight(smp, SMP_MB_CNT); i < n; i++) {
+ for (i = bitmap_weight(existing, SMP_MB_CNT); i < n; i++) {
if (bitmap_full(mdss_mdp_smp_mmb_pool, SMP_MB_CNT))
break;
mmb = find_first_zero_bit(mdss_mdp_smp_mmb_pool, SMP_MB_CNT);
- set_bit(mmb, smp);
+ set_bit(mmb, reserve);
set_bit(mmb, mdss_mdp_smp_mmb_pool);
}
return i;
@@ -76,10 +75,17 @@
return cnt;
}
-static void mdss_mdp_smp_mmb_free(unsigned long *smp)
+static void mdss_mdp_smp_mmb_amend(unsigned long *smp, unsigned long *extra)
+{
+ bitmap_or(smp, smp, extra, SMP_MB_CNT);
+ bitmap_zero(extra, SMP_MB_CNT);
+}
+
+static void mdss_mdp_smp_mmb_free(unsigned long *smp, bool write)
{
if (!bitmap_empty(smp, SMP_MB_CNT)) {
- mdss_mdp_smp_mmb_set(MDSS_MDP_SMP_CLIENT_UNUSED, smp);
+ if (write)
+ mdss_mdp_smp_mmb_set(MDSS_MDP_SMP_CLIENT_UNUSED, smp);
bitmap_andnot(mdss_mdp_smp_mmb_pool, mdss_mdp_smp_mmb_pool,
smp, SMP_MB_CNT);
bitmap_zero(smp, SMP_MB_CNT);
@@ -106,19 +112,33 @@
static void mdss_mdp_smp_free(struct mdss_mdp_pipe *pipe)
{
+ int i;
+
mutex_lock(&mdss_mdp_smp_lock);
- mdss_mdp_smp_mmb_free(&pipe->smp[0]);
- mdss_mdp_smp_mmb_free(&pipe->smp[1]);
- mdss_mdp_smp_mmb_free(&pipe->smp[2]);
+ for (i = 0; i < MAX_PLANES; i++) {
+ mdss_mdp_smp_mmb_free(&pipe->smp_reserved[i], false);
+ mdss_mdp_smp_mmb_free(&pipe->smp[i], true);
+ }
mutex_unlock(&mdss_mdp_smp_lock);
}
-static int mdss_mdp_smp_reserve(struct mdss_mdp_pipe *pipe)
+void mdss_mdp_smp_unreserve(struct mdss_mdp_pipe *pipe)
+{
+ int i;
+
+ mutex_lock(&mdss_mdp_smp_lock);
+ for (i = 0; i < MAX_PLANES; i++)
+ mdss_mdp_smp_mmb_free(&pipe->smp_reserved[i], false);
+ mutex_unlock(&mdss_mdp_smp_lock);
+}
+
+int mdss_mdp_smp_reserve(struct mdss_mdp_pipe *pipe)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
u32 num_blks = 0, reserved = 0;
struct mdss_mdp_plane_sizes ps;
- int i, rc;
+ int i;
+ int rc = 0;
u32 nlines;
if (pipe->bwc_mode) {
@@ -128,11 +148,10 @@
return rc;
pr_debug("BWC SMP strides ystride0=%x ystride1=%x\n",
ps.ystride[0], ps.ystride[1]);
- } else if ((mdata->mdp_rev >= MDSS_MDP_HW_REV_102) &&
- pipe->src_fmt->is_yuv) {
+ } else if (mdata->has_decimation && pipe->src_fmt->is_yuv) {
ps.num_planes = 2;
- ps.ystride[0] = pipe->src.w;
- ps.ystride[1] = pipe->src.w;
+ ps.ystride[0] = pipe->src.w >> pipe->horz_deci;
+ ps.ystride[1] = pipe->src.h >> pipe->vert_deci;
} else {
rc = mdss_mdp_get_plane_sizes(pipe->src_fmt->format,
pipe->src.w, pipe->src.h, &ps, 0);
@@ -143,29 +162,29 @@
mutex_lock(&mdss_mdp_smp_lock);
for (i = 0; i < ps.num_planes; i++) {
nlines = pipe->bwc_mode ? ps.rau_h[i] : 2;
- num_blks = DIV_ROUND_UP(nlines * ps.ystride[i],
- mdss_res->smp_mb_size);
+ num_blks = DIV_ROUND_UP(nlines * ps.ystride[i], SMP_MB_SIZE);
if (mdata->mdp_rev == MDSS_MDP_HW_REV_100)
num_blks = roundup_pow_of_two(num_blks);
pr_debug("reserving %d mmb for pnum=%d plane=%d\n",
num_blks, pipe->num, i);
- reserved = mdss_mdp_smp_mmb_reserve(&pipe->smp[i], num_blks);
+ reserved = mdss_mdp_smp_mmb_reserve(&pipe->smp[i],
+ &pipe->smp_reserved[i], num_blks);
if (reserved < num_blks)
break;
}
if (reserved < num_blks) {
- pr_err("insufficient MMB blocks\n");
+ pr_debug("insufficient MMB blocks\n");
for (; i >= 0; i--)
- mdss_mdp_smp_mmb_free(&pipe->smp[i]);
- return -ENOMEM;
+ mdss_mdp_smp_mmb_free(&pipe->smp_reserved[i], false);
+ rc = -ENOMEM;
}
mutex_unlock(&mdss_mdp_smp_lock);
- return 0;
+ return rc;
}
static int mdss_mdp_smp_alloc(struct mdss_mdp_pipe *pipe)
@@ -174,8 +193,10 @@
int cnt = 0;
mutex_lock(&mdss_mdp_smp_lock);
- for (i = 0; i < MAX_PLANES; i++)
+ for (i = 0; i < MAX_PLANES; i++) {
+ mdss_mdp_smp_mmb_amend(&pipe->smp[i], &pipe->smp_reserved[i]);
cnt += mdss_mdp_smp_mmb_set(pipe->ftch_id + i, &pipe->smp[i]);
+ }
mdss_mdp_smp_set_wm_levels(pipe, cnt);
mutex_unlock(&mdss_mdp_smp_lock);
return 0;
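
A worked example of the new two-step SMP accounting, where blocks first land in pipe->smp_reserved[] and are only merged into pipe->smp[] by mdss_mdp_smp_mmb_amend() at allocation time; the line stride and SMP_MB_SIZE values below are assumptions for illustration:

    #include <stdio.h>

    #define SMP_MB_SIZE 4096    /* assumed block size, for illustration only */

    int main(void)
    {
        /* Step 1: size the reservation for one plane. nlines is 2 in the
         * non-BWC case, as in mdss_mdp_smp_reserve() above.
         */
        unsigned int ystride = 1080 * 4;    /* made-up RGBA line stride */
        unsigned int nlines = 2;
        unsigned int num_blks =
            (nlines * ystride + SMP_MB_SIZE - 1) / SMP_MB_SIZE;

        printf("reserve %u mmb into pipe->smp_reserved[]\n", num_blks);

        /* Step 2: mdss_mdp_smp_alloc() later merges smp_reserved into smp
         * (mdss_mdp_smp_mmb_amend) before programming the fetch client, so
         * a failed overlay validation can back out via smp_unreserve()
         * without disturbing blocks already in use by a running pipe.
         */
        return 0;
    }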
@@ -257,10 +278,13 @@
pipe = NULL;
}
- if (pipe)
+ if (pipe) {
pr_debug("type=%x pnum=%d\n", pipe->type, pipe->num);
- else
+ mutex_init(&pipe->pp_res.hist.hist_mutex);
+ spin_lock_init(&pipe->pp_res.hist.hist_lock);
+ } else {
pr_err("no %d type pipes available\n", type);
+ }
return pipe;
}
@@ -307,18 +331,20 @@
mutex_lock(&mdss_mdp_sspp_lock);
pipe = mdss_mdp_pipe_search(mdata, ndx);
- if (!pipe)
- return ERR_PTR(-EINVAL);
+ if (!pipe) {
+ pipe = ERR_PTR(-EINVAL);
+ goto error;
+ }
if (mdss_mdp_pipe_map(pipe))
- return ERR_PTR(-EACCES);
+ pipe = ERR_PTR(-EACCES);
+error:
mutex_unlock(&mdss_mdp_sspp_lock);
-
return pipe;
}
-static struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
+struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
u32 ndx)
{
u32 i;
@@ -376,6 +402,7 @@
{
u32 img_size, src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
u32 width, height;
+ u32 decimation;
pr_debug("pnum=%d wh=%dx%d src={%d,%d,%d,%d} dst={%d,%d,%d,%d}\n",
pipe->num, pipe->img_width, pipe->img_height,
@@ -396,6 +423,12 @@
height /= 2;
}
+ decimation = ((1 << pipe->horz_deci) - 1) << 8;
+ decimation |= ((1 << pipe->vert_deci) - 1);
+ if (decimation)
+ pr_debug("Image decimation h=%d v=%d\n",
+ pipe->horz_deci, pipe->vert_deci);
+
img_size = (height << 16) | width;
src_size = (pipe->src.h << 16) | pipe->src.w;
src_xy = (pipe->src.y << 16) | pipe->src.x;
@@ -418,6 +451,8 @@
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_XY, dst_xy);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_YSTRIDE0, ystride0);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_YSTRIDE1, ystride1);
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_DECIMATION_CONFIG,
+ decimation);
return 0;
}
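
The decimation register packing added above can be sanity-checked with a tiny host-side helper; the factors passed in are arbitrary, and the bit layout mirrors the code in this hunk:

    #include <stdio.h>

    /* Mirrors the value written to MDSS_MDP_REG_SSPP_DECIMATION_CONFIG above:
     * a decimation factor of 2^N is programmed as (2^N - 1), horizontal in
     * bits [15:8], vertical in bits [7:0].
     */
    static unsigned int pack_decimation(unsigned int horz_deci,
                                        unsigned int vert_deci)
    {
        return (((1u << horz_deci) - 1) << 8) | ((1u << vert_deci) - 1);
    }

    int main(void)
    {
        /* horz_deci=2, vert_deci=1 are arbitrary example factors */
        printf("DECIMATION_CONFIG = 0x%04x\n", pack_decimation(2, 1));
        return 0;
    }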
@@ -465,17 +500,12 @@
fmt->fetch_planes != MDSS_MDP_PLANE_INTERLEAVED)
src_format |= BIT(8); /* SRCC3_EN */
- if (fmt->fetch_planes != MDSS_MDP_PLANE_PLANAR) {
- unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
(fmt->element[1] << 8) | (fmt->element[0] << 0);
-
- src_format |= ((fmt->unpack_count - 1) << 12) |
- (fmt->unpack_tight << 17) |
- (fmt->unpack_align_msb << 18) |
- ((fmt->bpp - 1) << 9);
- } else {
- unpack = 0;
- }
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
mdss_mdp_pipe_sspp_setup(pipe, &opmode);
@@ -487,28 +517,6 @@
return 0;
}
-static void mdss_mdp_addr_add_offset(struct mdss_mdp_pipe *pipe,
- struct mdss_mdp_data *data)
-{
- data->p[0].addr += pipe->src.x +
- (pipe->src.y * pipe->src_planes.ystride[0]);
- if (data->num_planes > 1) {
- u8 hmap[] = { 1, 2, 1, 2 };
- u8 vmap[] = { 1, 1, 2, 2 };
- u16 xoff = pipe->src.x / hmap[pipe->src_fmt->chroma_sample];
- u16 yoff = pipe->src.y / vmap[pipe->src_fmt->chroma_sample];
-
- if (data->num_planes == 2) /* pseudo planar */
- xoff *= 2;
- data->p[1].addr += xoff + (yoff * pipe->src_planes.ystride[1]);
-
- if (data->num_planes > 2) { /* planar */
- data->p[2].addr += xoff +
- (yoff * pipe->src_planes.ystride[2]);
- }
- }
-}
-
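
The helper removed above is superseded by mdss_mdp_data_calc_offset(), which the patch also uses for the writeback dst_rect path; assuming it keeps the same math as the deleted code, the offsets for a pseudo-planar 4:2:0 buffer work out as in the sketch below (the buffer dimensions and crop origin are made up):

    #include <stdio.h>

    int main(void)
    {
        /* Made-up pseudo-planar 4:2:0 buffer: 1280x720, luma plane followed
         * by an interleaved CbCr plane, cropped at (64, 32).
         */
        unsigned int ystride0 = 1280, ystride1 = 1280;
        unsigned int x = 64, y = 32;

        unsigned int luma_off = x + y * ystride0;

        /* 4:2:0 chroma: x and y are halved by the subsampling maps, then
         * xoff is doubled again because Cb and Cr share the second plane.
         */
        unsigned int xoff = (x / 2) * 2;
        unsigned int yoff = y / 2;
        unsigned int chroma_off = xoff + yoff * ystride1;

        printf("plane0 +%u bytes, plane1 +%u bytes\n", luma_off, chroma_off);
        return 0;
    }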
int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata, u32 *offsets,
u32 *ftch_id, u32 type, u32 num_base, u32 len)
{
@@ -559,7 +567,7 @@
static int mdss_mdp_src_addr_setup(struct mdss_mdp_pipe *pipe,
struct mdss_mdp_data *data)
{
- int is_rot = pipe->mixer->rotator_mode;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int ret = 0;
pr_debug("pnum=%d\n", pipe->num);
@@ -571,11 +579,13 @@
return ret;
if (pipe->overfetch_disable)
- mdss_mdp_addr_add_offset(pipe, data);
+ mdss_mdp_data_calc_offset(data, pipe->src.x, pipe->src.y,
+ &pipe->src_planes, pipe->src_fmt);
/* planar format expects YCbCr, swap chroma planes if YCrCb */
- if (!is_rot && (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR) &&
- (pipe->src_fmt->element[0] == C2_R_Cr))
+ if (mdata->mdp_rev < MDSS_MDP_HW_REV_102 &&
+ (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR)
+ && (pipe->src_fmt->element[0] == C1_B_Cb))
swap(data->p[1].addr, data->p[2].addr);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC0_ADDR, data->p[0].addr);
@@ -661,13 +671,6 @@
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE,
opmode);
- ret = mdss_mdp_smp_reserve(pipe);
- if (ret) {
- pr_err("unable to reserve smp for pnum=%d\n",
- pipe->num);
- goto done;
- }
-
mdss_mdp_smp_alloc(pipe);
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
index 75fb7d6..8bd5674 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pp.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -78,7 +78,7 @@
#define MDSS_BLOCK_DISP_NUM (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0)
-#define HIST_WAIT_TIMEOUT(frame) ((60 * HZ * (frame)) / 1000)
+#define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000)
/* hist collect state */
enum {
HIST_UNKNOWN,
@@ -88,18 +88,6 @@
HIST_READY,
};
-struct pp_hist_col_info {
- u32 col_state;
- u32 col_en;
- u32 read_request;
- u32 hist_cnt_read;
- u32 hist_cnt_sent;
- u32 frame_cnt;
- u32 is_kick_ready;
- struct completion comp;
- u32 data[HIST_V_SIZE];
-};
-
static u32 dither_matrix[16] = {
15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
static u32 dither_depth_map[9] = {
@@ -166,11 +154,13 @@
};
static DEFINE_MUTEX(mdss_pp_mutex);
-static DEFINE_SPINLOCK(mdss_hist_lock);
-static DEFINE_MUTEX(mdss_mdp_hist_mutex);
static struct mdss_pp_res_type *mdss_pp_res;
-static void pp_hist_read(u32 v_base, struct pp_hist_col_info *hist_info);
+static void pp_hist_read(char __iomem *v_base,
+ struct pp_hist_col_info *hist_info);
+static int pp_histogram_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix);
+static int pp_histogram_disable(struct pp_hist_col_info *hist_info,
+ u32 done_bit, char __iomem *ctl_base);
static void pp_update_pcc_regs(u32 offset,
struct mdp_pcc_cfg_data *cfg_ptr);
static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
@@ -179,7 +169,8 @@
struct mdp_ar_gc_lut_data *lut_data);
static void pp_update_argc_lut(u32 offset,
struct mdp_pgc_lut_data *config);
-static void pp_update_hist_lut(u32 offset, struct mdp_hist_lut_data *cfg);
+static void pp_update_hist_lut(char __iomem *base,
+ struct mdp_hist_lut_data *cfg);
static void pp_pa_config(unsigned long flags, u32 base,
struct pp_sts_type *pp_sts,
struct mdp_pa_cfg *pa_config);
@@ -190,7 +181,7 @@
struct pp_sts_type *pp_sts,
struct mdp_igc_lut_data *igc_config,
u32 pipe_num);
-static void pp_enhist_config(unsigned long flags, u32 base,
+static void pp_enhist_config(unsigned long flags, char __iomem *base,
struct pp_sts_type *pp_sts,
struct mdp_hist_lut_data *enhist_cfg);
static void pp_sharp_config(char __iomem *offset,
@@ -391,7 +382,7 @@
}
}
-static void pp_enhist_config(unsigned long flags, u32 base,
+static void pp_enhist_config(unsigned long flags, char __iomem *base,
struct pp_sts_type *pp_sts,
struct mdp_hist_lut_data *enhist_cfg)
{
@@ -431,6 +422,7 @@
{
u32 opmode = 0, base = 0;
unsigned long flags = 0;
+ char __iomem *offset;
pr_debug("pnum=%x\n", pipe->num);
@@ -464,6 +456,8 @@
}
}
+ pp_histogram_setup(&opmode, MDSS_PP_SSPP_CFG | pipe->num, pipe->mixer);
+
if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_CFG) {
flags = PP_FLAGS_DIRTY_PA;
@@ -475,6 +469,26 @@
if (pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)
opmode |= (1 << 4); /* PA_EN */
}
+
+ if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) {
+ pp_enhist_config(PP_FLAGS_DIRTY_ENHIST,
+ pipe->base + MDSS_MDP_REG_VIG_HIST_LUT_BASE,
+ &pipe->pp_res.pp_sts,
+ &pipe->pp_cfg.hist_lut_cfg);
+ }
+ }
+
+ if (pipe->pp_res.pp_sts.enhist_sts & PP_STS_ENABLE) {
+ /* Enable HistLUT and PA */
+ opmode |= BIT(10) | BIT(4);
+ if (!(pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)) {
+ /* Program default value */
+ offset = pipe->base + MDSS_MDP_REG_VIG_PA_BASE;
+ writel_relaxed(0, offset);
+ writel_relaxed(0, offset + 4);
+ writel_relaxed(0, offset + 8);
+ writel_relaxed(0, offset + 12);
+ }
}
*op = opmode;
@@ -522,9 +536,10 @@
u32 chroma_sample;
u32 filter_mode;
struct mdss_data_type *mdata;
+ u32 src_w, src_h;
mdata = mdss_mdp_get_mdata();
- if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102)
+ if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102 && pipe->src_fmt->is_yuv)
filter_mode = MDSS_MDP_SCALE_FILTER_CA;
else
filter_mode = MDSS_MDP_SCALE_FILTER_BIL;
@@ -538,6 +553,9 @@
}
}
+ src_w = pipe->src.w >> pipe->horz_deci;
+ src_h = pipe->src.h >> pipe->vert_deci;
+
chroma_sample = pipe->src_fmt->chroma_sample;
if (pipe->flags & MDP_SOURCE_ROTATED_90) {
if (chroma_sample == MDSS_MDP_CHROMA_H1V2)
@@ -556,23 +574,22 @@
}
if ((pipe->src_fmt->is_yuv) &&
- !((pipe->dst.w < pipe->src.w) || (pipe->dst.h < pipe->src.h))) {
+ !((pipe->dst.w < src_w) || (pipe->dst.h < src_h))) {
pp_sharp_config(pipe->base +
MDSS_MDP_REG_VIG_QSEED2_SHARP,
&pipe->pp_res.pp_sts,
&pipe->pp_cfg.sharp_cfg);
}
- if ((pipe->src.h != pipe->dst.h) ||
+ if ((src_h != pipe->dst.h) ||
(pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE) ||
(chroma_sample == MDSS_MDP_CHROMA_420) ||
(chroma_sample == MDSS_MDP_CHROMA_H1V2)) {
- pr_debug("scale y - src_h=%d dst_h=%d\n",
- pipe->src.h, pipe->dst.h);
+ pr_debug("scale y - src_h=%d dst_h=%d\n", src_h, pipe->dst.h);
- if ((pipe->src.h / MAX_DOWNSCALE_RATIO) > pipe->dst.h) {
+ if ((src_h / MAX_DOWNSCALE_RATIO) > pipe->dst.h) {
pr_err("too much downscaling height=%d->%d",
- pipe->src.h, pipe->dst.h);
+ src_h, pipe->dst.h);
return -EINVAL;
}
@@ -580,11 +597,12 @@
if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
u32 chr_dst_h = pipe->dst.h;
- if ((chroma_sample == MDSS_MDP_CHROMA_420) ||
- (chroma_sample == MDSS_MDP_CHROMA_H1V2))
+ if (!pipe->vert_deci &&
+ ((chroma_sample == MDSS_MDP_CHROMA_420) ||
+ (chroma_sample == MDSS_MDP_CHROMA_H1V2)))
chr_dst_h *= 2; /* 2x upsample chroma */
- if (pipe->src.h <= pipe->dst.h) {
+ if (src_h <= pipe->dst.h) {
scale_config |= /* G/Y, A */
(filter_mode << 10) |
(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
@@ -593,7 +611,7 @@
(MDSS_MDP_SCALE_FILTER_PCMN << 10) |
(MDSS_MDP_SCALE_FILTER_PCMN << 18);
- if (pipe->src.h <= chr_dst_h)
+ if (src_h <= chr_dst_h)
scale_config |= /* CrCb */
(MDSS_MDP_SCALE_FILTER_BIL << 14);
else
@@ -601,12 +619,12 @@
(MDSS_MDP_SCALE_FILTER_PCMN << 14);
phasey_step = mdss_mdp_scale_phase_step(
- PHASE_STEP_SHIFT, pipe->src.h, chr_dst_h);
+ PHASE_STEP_SHIFT, src_h, chr_dst_h);
writel_relaxed(phasey_step, pipe->base +
MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
} else {
- if (pipe->src.h <= pipe->dst.h)
+ if (src_h <= pipe->dst.h)
scale_config |= /* RGB, A */
(MDSS_MDP_SCALE_FILTER_BIL << 10) |
(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
@@ -617,19 +635,18 @@
}
phasey_step = mdss_mdp_scale_phase_step(
- PHASE_STEP_SHIFT, pipe->src.h, pipe->dst.h);
+ PHASE_STEP_SHIFT, src_h, pipe->dst.h);
}
- if ((pipe->src.w != pipe->dst.w) ||
+ if ((src_w != pipe->dst.w) ||
(pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE) ||
(chroma_sample == MDSS_MDP_CHROMA_420) ||
(chroma_sample == MDSS_MDP_CHROMA_H2V1)) {
- pr_debug("scale x - src_w=%d dst_w=%d\n",
- pipe->src.w, pipe->dst.w);
+ pr_debug("scale x - src_w=%d dst_w=%d\n", src_w, pipe->dst.w);
- if ((pipe->src.w / MAX_DOWNSCALE_RATIO) > pipe->dst.w) {
+ if ((src_w / MAX_DOWNSCALE_RATIO) > pipe->dst.w) {
pr_err("too much downscaling width=%d->%d",
- pipe->src.w, pipe->dst.w);
+ src_w, pipe->dst.w);
return -EINVAL;
}
@@ -638,11 +655,12 @@
if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
u32 chr_dst_w = pipe->dst.w;
- if ((chroma_sample == MDSS_MDP_CHROMA_420) ||
- (chroma_sample == MDSS_MDP_CHROMA_H2V1))
+ if (!pipe->horz_deci &&
+ ((chroma_sample == MDSS_MDP_CHROMA_420) ||
+ (chroma_sample == MDSS_MDP_CHROMA_H2V1)))
chr_dst_w *= 2; /* 2x upsample chroma */
- if (pipe->src.w <= pipe->dst.w) {
+ if (src_w <= pipe->dst.w) {
scale_config |= /* G/Y, A */
(filter_mode << 8) |
(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
@@ -651,7 +669,7 @@
(MDSS_MDP_SCALE_FILTER_PCMN << 8) |
(MDSS_MDP_SCALE_FILTER_PCMN << 16);
- if (pipe->src.w <= chr_dst_w)
+ if (src_w <= chr_dst_w)
scale_config |= /* CrCb */
(MDSS_MDP_SCALE_FILTER_BIL << 12);
else
@@ -659,11 +677,11 @@
(MDSS_MDP_SCALE_FILTER_PCMN << 12);
phasex_step = mdss_mdp_scale_phase_step(
- PHASE_STEP_SHIFT, pipe->src.w, chr_dst_w);
+ PHASE_STEP_SHIFT, src_w, chr_dst_w);
writel_relaxed(phasex_step, pipe->base +
MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
} else {
- if (pipe->src.w <= pipe->dst.w)
+ if (src_w <= pipe->dst.w)
scale_config |= /* RGB, A */
(MDSS_MDP_SCALE_FILTER_BIL << 8) |
(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
@@ -674,7 +692,7 @@
}
phasex_step = mdss_mdp_scale_phase_step(
- PHASE_STEP_SHIFT, pipe->src.w, pipe->dst.w);
+ PHASE_STEP_SHIFT, src_w, pipe->dst.w);
}
writel_relaxed(scale_config, pipe->base +
@@ -704,6 +722,17 @@
void mdss_mdp_pipe_sspp_term(struct mdss_mdp_pipe *pipe)
{
+ u32 done_bit;
+ struct pp_hist_col_info *hist_info;
+ char __iomem *ctl_base;
+
+ if (pipe && pipe->pp_res.hist.col_en) {
+ done_bit = 3 << (pipe->num * 4);
+ hist_info = &pipe->pp_res.hist;
+ ctl_base = pipe->base +
+ MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ pp_histogram_disable(hist_info, done_bit, ctl_base);
+ }
memset(&pipe->pp_cfg, 0, sizeof(struct mdp_overlay_pp_params));
memset(&pipe->pp_res, 0, sizeof(struct mdss_pipe_pp_res));
}
@@ -796,16 +825,86 @@
return 0;
}
+static char __iomem *mdss_mdp_get_dspp_addr_off(u32 dspp_num)
+{
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_mixer *mixer;
+
+ mdata = mdss_mdp_get_mdata();
+ if (mdata->nmixers_intf <= dspp_num) {
+ pr_err("Invalid dspp_num=%d", dspp_num);
+ return ERR_PTR(-EINVAL);
+ }
+ mixer = mdata->mixer_intf + dspp_num;
+ return mixer->dspp_base;
+}
+
+/* Assumes that this function will be called with clocks enabled */
+static int pp_histogram_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix)
+{
+ int ret = -EINVAL;
+ char __iomem *base;
+ u32 op_flags, kick_base, col_state;
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_pipe *pipe;
+ struct pp_hist_col_info *hist_info;
+ unsigned long flag;
+
+ if (mix && (PP_LOCAT(block) == MDSS_PP_DSPP_CFG)) {
+ /* HIST_EN & AUTO_CLEAR */
+ op_flags = BIT(16) | BIT(17);
+ hist_info = &mdss_pp_res->dspp_hist[mix->num];
+ base = mdss_mdp_get_dspp_addr_off(PP_BLOCK(block));
+ kick_base = MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+ } else if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG) {
+ mdata = mdss_mdp_get_mdata();
+ pipe = mdss_mdp_pipe_get(mdata, BIT(PP_BLOCK(block)));
+ if (IS_ERR_OR_NULL(pipe)) {
+ pr_debug("pipe DNE (%d)", (u32) BIT(PP_BLOCK(block)));
+ ret = -ENODEV;
+ goto error;
+ }
+ /* HIST_EN & AUTO_CLEAR */
+ op_flags = BIT(8) | BIT(9);
+ hist_info = &pipe->pp_res.hist;
+ base = pipe->base;
+ kick_base = MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ mdss_mdp_pipe_unmap(pipe);
+ } else {
+ pr_warn("invalid histogram location (%d)", block);
+ goto error;
+ }
+
+ if (hist_info->col_en) {
+ *op |= op_flags;
+ mutex_lock(&hist_info->hist_mutex);
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ col_state = hist_info->col_state;
+ if (hist_info->is_kick_ready &&
+ ((col_state == HIST_IDLE) ||
+ ((false == hist_info->read_request) &&
+ col_state == HIST_READY))) {
+ /* Kick off collection */
+ writel_relaxed(1, base + kick_base);
+ hist_info->col_state = HIST_START;
+ }
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ mutex_unlock(&hist_info->hist_mutex);
+ }
+ ret = 0;
+error:
+ return ret;
+}
+
static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_ctl *ctl,
struct mdss_mdp_mixer *mixer)
{
u32 flags, base, offset, dspp_num, opmode = 0;
struct mdp_dither_cfg_data *dither_cfg;
- struct pp_hist_col_info *hist_info;
struct mdp_pgc_lut_data *pgc_config;
struct pp_sts_type *pp_sts;
- u32 data, col_state;
- unsigned long flag;
+ u32 data;
+ char __iomem *basel;
int i, ret = 0;
if (!mixer || !ctl)
@@ -817,28 +916,13 @@
(dspp_num >= MDSS_MDP_MAX_DSPP))
return -EINVAL;
base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num);
- hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+ basel = mdss_mdp_get_dspp_addr_off(dspp_num);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- if (hist_info->col_en) {
- /* HIST_EN & AUTO_CLEAR */
- opmode |= (1 << 16) | (1 << 17);
- mutex_lock(&mdss_mdp_hist_mutex);
- spin_lock_irqsave(&mdss_hist_lock, flag);
- col_state = hist_info->col_state;
- if (hist_info->is_kick_ready &&
- ((col_state == HIST_IDLE) ||
- ((false == hist_info->read_request) &&
- col_state == HIST_READY))) {
- /* Kick off collection */
- MDSS_MDP_REG_WRITE(base +
- MDSS_MDP_REG_DSPP_HIST_CTL_BASE, 1);
- hist_info->col_state = HIST_START;
- }
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- mutex_unlock(&mdss_mdp_hist_mutex);
- }
+ ret = pp_histogram_setup(&opmode, MDSS_PP_DSPP_CFG | dspp_num, mixer);
+ if (ret)
+ goto dspp_exit;
if (disp_num < MDSS_BLOCK_DISP_NUM)
flags = mdss_pp_res->pp_disp_flags[disp_num];
@@ -846,7 +930,7 @@
flags = 0;
/* nothing to update */
- if ((!flags) && (!(hist_info->col_en)))
+ if ((!flags) && (!(opmode)))
goto dspp_exit;
pp_sts = &mdss_pp_res->pp_dspp_sts[dspp_num];
@@ -860,7 +944,7 @@
pp_igc_config(flags, MDSS_MDP_REG_IGC_DSPP_BASE, pp_sts,
&mdss_pp_res->igc_disp_cfg[disp_num], dspp_num);
- pp_enhist_config(flags, base + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
+ pp_enhist_config(flags, basel + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
pp_sts, &mdss_pp_res->enhist_disp_cfg[disp_num]);
if (pp_sts->pa_sts & PP_STS_ENABLE)
@@ -934,7 +1018,7 @@
if (pp_sts->pgc_sts & PP_STS_ENABLE)
opmode |= (1 << 22);
- MDSS_MDP_REG_WRITE(base + MDSS_MDP_REG_DSPP_OP_MODE, opmode);
+ writel_relaxed(opmode, basel + MDSS_MDP_REG_DSPP_OP_MODE);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, BIT(13 + dspp_num));
wmb();
dspp_exit:
@@ -1068,7 +1152,9 @@
int mdss_mdp_pp_init(struct device *dev)
{
- int ret = 0;
+ int i, ret = 0;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_mdp_pipe *vig;
mutex_lock(&mdss_pp_mutex);
if (!mdss_pp_res) {
@@ -1078,6 +1164,18 @@
pr_err("%s mdss_pp_res allocation failed!", __func__);
ret = -ENOMEM;
}
+
+ for (i = 0; i < MDSS_MDP_MAX_DSPP; i++) {
+ mutex_init(&mdss_pp_res->dspp_hist[i].hist_mutex);
+ spin_lock_init(&mdss_pp_res->dspp_hist[i].hist_lock);
+ }
+ }
+ if (mdata) {
+ vig = mdata->vig_pipes;
+ for (i = 0; i < mdata->nvig_pipes; i++) {
+ mutex_init(&vig[i].pp_res.hist.hist_mutex);
+ spin_lock_init(&vig[i].pp_res.hist.hist_lock);
+ }
}
mutex_unlock(&mdss_pp_mutex);
return ret;
@@ -1425,11 +1523,13 @@
if (copy_to_user(config->c0_c1_data, local_cfg.c2_data,
config->len * sizeof(u32))) {
ret = -EFAULT;
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
goto igc_config_exit;
}
if (copy_to_user(config->c2_data, local_cfg.c0_c1_data,
config->len * sizeof(u32))) {
ret = -EFAULT;
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
goto igc_config_exit;
}
*copyback = 1;
@@ -1545,13 +1645,17 @@
}
/* Note: Assumes that its inputs have been checked by calling function */
-static void pp_update_hist_lut(u32 offset, struct mdp_hist_lut_data *cfg)
+static void pp_update_hist_lut(char __iomem *offset,
+ struct mdp_hist_lut_data *cfg)
{
int i;
for (i = 0; i < ENHIST_LUT_ENTRIES; i++)
- MDSS_MDP_REG_WRITE(offset, cfg->data[i]);
+ writel_relaxed(cfg->data[i], offset);
/* swap */
- MDSS_MDP_REG_WRITE(offset + 4, 1);
+ if (PP_LOCAT(cfg->block) == MDSS_PP_DSPP_CFG)
+ writel_relaxed(1, offset + 4);
+ else
+ writel_relaxed(1, offset + 16);
}
int mdss_mdp_argc_config(struct mdss_mdp_ctl *ctl,
@@ -1574,7 +1678,7 @@
mutex_lock(&mdss_pp_mutex);
disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
- switch (config->block & MDSS_PP_LOCATION_MASK) {
+ switch (PP_LOCAT(config->block)) {
case MDSS_PP_LM_CFG:
argc_offset = MDSS_MDP_REG_LM_OFFSET(dspp_num) +
MDSS_MDP_REG_LM_GC_LUT_BASE;
@@ -1614,16 +1718,19 @@
pp_read_argc_lut(&local_cfg, argc_offset);
if (copy_to_user(config->r_data,
&mdss_pp_res->gc_lut_r[disp_num][0], tbl_size)) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ret = -EFAULT;
goto argc_config_exit;
}
if (copy_to_user(config->g_data,
&mdss_pp_res->gc_lut_g[disp_num][0], tbl_size)) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ret = -EFAULT;
goto argc_config_exit;
}
if (copy_to_user(config->b_data,
&mdss_pp_res->gc_lut_b[disp_num][0], tbl_size)) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ret = -EFAULT;
goto argc_config_exit;
}
@@ -1670,12 +1777,12 @@
if (!ctl)
return -EINVAL;
- if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
- (config->block >= MDP_BLOCK_MAX))
+ if ((PP_BLOCK(config->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+ (PP_BLOCK(config->block) >= MDP_BLOCK_MAX))
return -EINVAL;
mutex_lock(&mdss_pp_mutex);
- disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+ disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
if (config->ops & MDP_PP_OPS_READ) {
ret = pp_get_dspp_num(disp_num, &dspp_num);
@@ -1694,6 +1801,7 @@
if (copy_to_user(config->data,
&mdss_pp_res->enhist_lut[disp_num][0],
ENHIST_LUT_ENTRIES * sizeof(u32))) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ret = -EFAULT;
goto enhist_config_exit;
}
@@ -1833,124 +1941,206 @@
mdss_mdp_pp_setup(ctl);
return ret;
}
-static void pp_hist_read(u32 v_base, struct pp_hist_col_info *hist_info)
+static void pp_hist_read(char __iomem *v_base,
+ struct pp_hist_col_info *hist_info)
{
int i, i_start;
u32 data;
- data = MDSS_MDP_REG_READ(v_base);
+ data = readl_relaxed(v_base);
i_start = data >> 24;
hist_info->data[i_start] = data & 0xFFFFFF;
for (i = i_start + 1; i < HIST_V_SIZE; i++)
- hist_info->data[i] = MDSS_MDP_REG_READ(v_base) & 0xFFFFFF;
+ hist_info->data[i] = readl_relaxed(v_base) & 0xFFFFFF;
for (i = 0; i < i_start - 1; i++)
- hist_info->data[i] = MDSS_MDP_REG_READ(v_base) & 0xFFFFFF;
+ hist_info->data[i] = readl_relaxed(v_base) & 0xFFFFFF;
hist_info->hist_cnt_read++;
}
-int mdss_mdp_histogram_start(struct mdss_mdp_ctl *ctl,
- struct mdp_histogram_start_req *req)
+/* Assumes that relevant clocks are enabled */
+static int pp_histogram_enable(struct pp_hist_col_info *hist_info,
+ struct mdp_histogram_start_req *req,
+ u32 shift_bit, char __iomem *ctl_base)
{
- u32 ctl_base, done_shift_bit;
+ unsigned long flag;
+ int ret = 0;
+ mutex_lock(&hist_info->hist_mutex);
+ /* check if it is idle */
+ if (hist_info->col_en) {
+ pr_info("%s Hist collection has already been enabled %d",
+ __func__, (u32) ctl_base);
+ ret = -EINVAL;
+ goto exit;
+ }
+ hist_info->frame_cnt = req->frame_cnt;
+ init_completion(&hist_info->comp);
+ hist_info->hist_cnt_read = 0;
+ hist_info->hist_cnt_sent = 0;
+ hist_info->hist_cnt_time = 0;
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ hist_info->read_request = false;
+ hist_info->col_state = HIST_RESET;
+ hist_info->col_en = true;
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ hist_info->is_kick_ready = false;
+ mdss_mdp_hist_irq_enable(3 << shift_bit);
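+ /* program the number of frames to accumulate per collection */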
+ writel_relaxed(req->frame_cnt, ctl_base + 8);
+ /* Kick out reset start */
+ writel_relaxed(1, ctl_base + 4);
+exit:
+ mutex_unlock(&hist_info->hist_mutex);
+ return ret;
+}
+
+int mdss_mdp_histogram_start(struct mdss_mdp_ctl *ctl,
+ struct mdp_histogram_start_req *req)
+{
+ u32 done_shift_bit;
+ char __iomem *ctl_base;
struct pp_hist_col_info *hist_info;
int i, ret = 0;
u32 disp_num, dspp_num = 0;
u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
- unsigned long flag;
-
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
if (!ctl)
return -EINVAL;
- if ((req->block < MDP_LOGICAL_BLOCK_DISP_0) ||
- (req->block >= MDP_BLOCK_MAX))
+ if ((PP_BLOCK(req->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+ (PP_BLOCK(req->block) >= MDP_BLOCK_MAX))
return -EINVAL;
- mutex_lock(&mdss_mdp_hist_mutex);
- disp_num = req->block - MDP_LOGICAL_BLOCK_DISP_0;
+ disp_num = PP_BLOCK(req->block) - MDP_LOGICAL_BLOCK_DISP_0;
mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
if (!mixer_cnt) {
pr_err("%s, no dspp connects to disp %d",
__func__, disp_num);
ret = -EPERM;
- goto hist_start_exit;
+ goto hist_exit;
}
if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
pr_err("%s, Too many dspp connects to disp %d",
__func__, mixer_cnt);
ret = -EPERM;
- goto hist_start_exit;
+ goto hist_exit;
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- for (i = 0; i < mixer_cnt; i++) {
- dspp_num = mixer_id[i];
- hist_info = &mdss_pp_res->dspp_hist[dspp_num];
- done_shift_bit = (dspp_num * 4) + 12;
- /* check if it is idle */
- if (hist_info->col_en) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
- pr_info("%s Hist collection has already been enabled %d",
- __func__, dspp_num);
- goto hist_start_exit;
+
+ if (PP_LOCAT(req->block) == MDSS_PP_SSPP_CFG) {
+ i = MDSS_PP_ARG_MASK & req->block;
+ if (!i) {
+ ret = -EINVAL;
+ pr_warn("Must pass pipe arguments, %d", i);
+ goto hist_exit;
}
- spin_lock_irqsave(&mdss_hist_lock, flag);
- hist_info->frame_cnt = req->frame_cnt;
- init_completion(&hist_info->comp);
- hist_info->hist_cnt_read = 0;
- hist_info->hist_cnt_sent = 0;
- hist_info->read_request = false;
- hist_info->col_state = HIST_RESET;
- hist_info->col_en = true;
- hist_info->is_kick_ready = false;
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- mdss_pp_res->hist_col[disp_num][i] =
- &mdss_pp_res->dspp_hist[dspp_num];
- mdss_mdp_hist_irq_enable(3 << done_shift_bit);
- ctl_base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
- MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
- MDSS_MDP_REG_WRITE(ctl_base + 8, req->frame_cnt);
- /* Kick out reset start */
- MDSS_MDP_REG_WRITE(ctl_base + 4, 1);
+
+ for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, req->block))
+ continue;
+ pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ if (IS_ERR_OR_NULL(pipe))
+ continue;
+ if (!pipe || pipe->num > MDSS_MDP_SSPP_VIG2) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ ret = -EINVAL;
+ pr_warn("Invalid Hist pipe (%d)", i);
+ goto hist_exit;
+ }
+ done_shift_bit = (pipe->num * 4);
+ hist_info = &pipe->pp_res.hist;
+ ctl_base = pipe->base +
+ MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ ret = pp_histogram_enable(hist_info, req,
+ done_shift_bit, ctl_base);
+ mdss_mdp_pipe_unmap(pipe);
+ }
+ } else if (PP_LOCAT(req->block) == MDSS_PP_DSPP_CFG) {
+ for (i = 0; i < mixer_cnt; i++) {
+ dspp_num = mixer_id[i];
+ done_shift_bit = (dspp_num * 4) + 12;
+ hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+ ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
+ MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+ ret = pp_histogram_enable(hist_info, req,
+ done_shift_bit, ctl_base);
+ mdss_pp_res->pp_disp_flags[disp_num] |=
+ PP_FLAGS_DIRTY_HIST_COL;
+ }
}
- for (i = mixer_cnt; i < MDSS_MDP_MAX_DSPP; i++)
- mdss_pp_res->hist_col[disp_num][i] = 0;
- mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_HIST_COL;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
-hist_start_exit:
- mutex_unlock(&mdss_mdp_hist_mutex);
- if (!ret) {
+
+hist_exit:
+ if (!ret && (PP_LOCAT(req->block) == MDSS_PP_DSPP_CFG)) {
mdss_mdp_pp_setup(ctl);
/* wait for a frame to let histrogram enable itself */
+ /* TODO add hysteresis value to be able to remove this sleep */
usleep(41666);
for (i = 0; i < mixer_cnt; i++) {
dspp_num = mixer_id[i];
hist_info = &mdss_pp_res->dspp_hist[dspp_num];
- mutex_lock(&mdss_mdp_hist_mutex);
- spin_lock_irqsave(&mdss_hist_lock, flag);
+ mutex_lock(&hist_info->hist_mutex);
hist_info->is_kick_ready = true;
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- mutex_unlock(&mdss_mdp_hist_mutex);
+ mutex_unlock(&hist_info->hist_mutex);
+ }
+ } else if (!ret) {
+ for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, req->block))
+ continue;
+ pr_info("PP_ARG(%d) = %d", i, PP_ARG(i, req->block));
+ pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ if (IS_ERR_OR_NULL(pipe))
+ continue;
+ hist_info = &pipe->pp_res.hist;
+ hist_info->is_kick_ready = true;
+ mdss_mdp_pipe_unmap(pipe);
}
}
return ret;
}
+static int pp_histogram_disable(struct pp_hist_col_info *hist_info,
+ u32 done_bit, char __iomem *ctl_base)
+{
+ int ret = 0;
+ unsigned long flag;
+ mutex_lock(&hist_info->hist_mutex);
+ if (hist_info->col_en == false) {
+ pr_debug("Histogram already disabled (%d)", (u32) ctl_base);
+ ret = -EINVAL;
+ goto exit;
+ }
+ complete_all(&hist_info->comp);
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ hist_info->col_en = false;
+ hist_info->col_state = HIST_UNKNOWN;
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ hist_info->is_kick_ready = false;
+ mdss_mdp_hist_irq_disable(done_bit);
+ writel_relaxed(BIT(1), ctl_base);/* cancel */
+ ret = 0;
+exit:
+ mutex_unlock(&hist_info->hist_mutex);
+ return ret;
+}
+
int mdss_mdp_histogram_stop(struct mdss_mdp_ctl *ctl, u32 block)
{
int i, ret = 0;
- u32 dspp_num, disp_num, ctl_base, done_bit;
+ char __iomem *ctl_base;
+ u32 dspp_num, disp_num, done_bit;
struct pp_hist_col_info *hist_info;
u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
- unsigned long flag;
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
if (!ctl)
return -EINVAL;
- if ((block < MDP_LOGICAL_BLOCK_DISP_0) ||
- (block >= MDP_BLOCK_MAX))
+ if ((PP_BLOCK(block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+ (PP_BLOCK(block) >= MDP_BLOCK_MAX))
return -EINVAL;
- mutex_lock(&mdss_mdp_hist_mutex);
- disp_num = block - MDP_LOGICAL_BLOCK_DISP_0;
+ disp_num = PP_BLOCK(block) - MDP_LOGICAL_BLOCK_DISP_0;
mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
if (!mixer_cnt) {
@@ -1966,162 +2156,313 @@
goto hist_stop_exit;
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- for (i = 0; i < mixer_cnt; i++) {
- dspp_num = mixer_id[i];
- hist_info = &mdss_pp_res->dspp_hist[dspp_num];
- done_bit = 3 << ((dspp_num * 4) + 12);
- ctl_base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
- MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
- if (hist_info->col_en == false) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
- goto hist_stop_exit;
+ if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG) {
+ i = MDSS_PP_ARG_MASK & block;
+ if (!i) {
+ pr_warn("Must pass pipe arguments, %d", i);
+ goto hist_stop_clk;
}
- complete_all(&hist_info->comp);
- spin_lock_irqsave(&mdss_hist_lock, flag);
- hist_info->col_en = false;
- hist_info->col_state = HIST_UNKNOWN;
- hist_info->is_kick_ready = false;
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- mdss_mdp_hist_irq_disable(done_bit);
- MDSS_MDP_REG_WRITE(ctl_base, (1 << 1));/* cancel */
+
+ for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, block))
+ continue;
+ pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ if (IS_ERR_OR_NULL(pipe) ||
+ pipe->num > MDSS_MDP_SSPP_VIG2) {
+ pr_warn("Invalid Hist pipe (%d)", i);
+ continue;
+ }
+ done_bit = 3 << (pipe->num * 4);
+ hist_info = &pipe->pp_res.hist;
+ ctl_base = pipe->base +
+ MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ ret = pp_histogram_disable(hist_info, done_bit,
+ ctl_base);
+ mdss_mdp_pipe_unmap(pipe);
+ if (ret)
+ goto hist_stop_clk;
+ }
+ } else if (PP_LOCAT(block) == MDSS_PP_DSPP_CFG) {
+ for (i = 0; i < mixer_cnt; i++) {
+ dspp_num = mixer_id[i];
+ done_bit = 3 << ((dspp_num * 4) + 12);
+ hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+ ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
+ MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+ ret = pp_histogram_disable(hist_info, done_bit,
+ ctl_base);
+ if (ret)
+ goto hist_stop_clk;
+ mdss_pp_res->pp_disp_flags[disp_num] |=
+ PP_FLAGS_DIRTY_HIST_COL;
+ }
}
- for (i = 0; i < MDSS_MDP_MAX_DSPP; i++)
- mdss_pp_res->hist_col[disp_num][i] = 0;
- mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_HIST_COL;
+hist_stop_clk:
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
hist_stop_exit:
- mutex_unlock(&mdss_mdp_hist_mutex);
- if (!ret)
+ if (!ret && (PP_LOCAT(block) == MDSS_PP_DSPP_CFG))
mdss_mdp_pp_setup(ctl);
return ret;
}
-int mdss_mdp_hist_collect(struct mdss_mdp_ctl *ctl,
- struct mdp_histogram_data *hist,
- u32 *hist_data_addr)
+static int pp_hist_collect(struct mdss_mdp_ctl *ctl,
+ struct mdp_histogram_data *hist,
+ struct pp_hist_col_info *hist_info,
+ char __iomem *ctl_base)
{
- int i, j, wait_ret, ret = 0;
- u32 timeout, v_base;
- struct pp_hist_col_info *hist_info;
- u32 dspp_num, disp_num, ctl_base;
- u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+ int wait_ret, ret = 0;
+ u32 timeout;
+ char __iomem *v_base;
unsigned long flag;
+ struct mdss_pipe_pp_res *res;
+ struct mdss_mdp_pipe *pipe;
+
+ mutex_lock(&hist_info->hist_mutex);
+ if ((hist_info->col_en == 0) ||
+ (hist_info->col_state == HIST_UNKNOWN)) {
+ ret = -EINVAL;
+ goto hist_collect_exit;
+ }
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ /* wait for hist done if cache has no data */
+ if (hist_info->col_state != HIST_READY) {
+ hist_info->read_request = true;
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ timeout = HIST_WAIT_TIMEOUT(hist_info->frame_cnt);
+ mutex_unlock(&hist_info->hist_mutex);
+ /* flush updates before wait*/
+ if (PP_LOCAT(hist->block) == MDSS_PP_DSPP_CFG)
+ mdss_mdp_pp_setup(ctl);
+ if (PP_LOCAT(hist->block) == MDSS_PP_SSPP_CFG) {
+ res = container_of(hist_info, struct mdss_pipe_pp_res,
+ hist);
+ pipe = container_of(res, struct mdss_mdp_pipe, pp_res);
+ pipe->params_changed++;
+ }
+ wait_ret = wait_for_completion_killable_timeout(
+ &(hist_info->comp), timeout);
+
+ mutex_lock(&hist_info->hist_mutex);
+ if (wait_ret == 0) {
+ ret = -ETIMEDOUT;
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ pr_debug("bin collection timedout, state %d",
+ hist_info->col_state);
+ /*
+ * When the histogram has timed out (usually
+ * underrun) change the SW state back to idle
+ * since histogram hardware will have done the
+ * same. Histogram data also needs to be
+ * cleared in this case, which is done by the
+ * histogram being read (triggered by READY
+ * state, which also moves the histogram SW back
+ * to IDLE).
+ */
+ hist_info->hist_cnt_time++;
+ hist_info->col_state = HIST_READY;
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ } else if (wait_ret < 0) {
+ ret = -EINTR;
+ pr_debug("%s: bin collection interrupted",
+ __func__);
+ goto hist_collect_exit;
+ }
+ if (hist_info->col_state != HIST_READY) {
+ ret = -ENODATA;
+ pr_debug("%s: state is not ready: %d",
+ __func__, hist_info->col_state);
+ goto hist_collect_exit;
+ }
+ } else {
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ }
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ if (hist_info->col_state == HIST_READY) {
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ v_base = ctl_base + 0x1C;
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ pp_hist_read(v_base, hist_info);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ hist_info->read_request = false;
+ hist_info->col_state = HIST_IDLE;
+ }
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+hist_collect_exit:
+ mutex_unlock(&hist_info->hist_mutex);
+ return ret;
+}
+
+int mdss_mdp_hist_collect(struct mdss_mdp_ctl *ctl,
+ struct mdp_histogram_data *hist)
+{
+ int i, j, off, ret = 0;
+ struct pp_hist_col_info *hist_info;
+ u32 dspp_num, disp_num;
+ char __iomem *ctl_base;
+ u32 hist_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+ u32 *hist_concat = NULL;
+ u32 *hist_data_addr;
+ u32 pipe_cnt = 0;
+ u32 pipe_num = MDSS_MDP_SSPP_VIG0;
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
if (!ctl)
return -EINVAL;
- if ((hist->block < MDP_LOGICAL_BLOCK_DISP_0) ||
- (hist->block >= MDP_BLOCK_MAX))
+ if ((PP_BLOCK(hist->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+ (PP_BLOCK(hist->block) >= MDP_BLOCK_MAX))
return -EINVAL;
- mutex_lock(&mdss_mdp_hist_mutex);
- disp_num = hist->block - MDP_LOGICAL_BLOCK_DISP_0;
- mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+ disp_num = PP_BLOCK(hist->block) - MDP_LOGICAL_BLOCK_DISP_0;
+ hist_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
- if (!mixer_cnt) {
+ if (!hist_cnt) {
pr_err("%s, no dspp connects to disp %d",
__func__, disp_num);
ret = -EPERM;
goto hist_collect_exit;
}
- if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
+ if (hist_cnt >= MDSS_MDP_MAX_DSPP) {
pr_err("%s, Too many dspp connects to disp %d",
- __func__, mixer_cnt);
+ __func__, hist_cnt);
ret = -EPERM;
goto hist_collect_exit;
}
- hist_info = &mdss_pp_res->dspp_hist[0];
- for (i = 0; i < mixer_cnt; i++) {
- dspp_num = mixer_id[i];
- hist_info = &mdss_pp_res->dspp_hist[dspp_num];
- ctl_base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
- MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
- if ((hist_info->col_en == 0) ||
- (hist_info->col_state == HIST_UNKNOWN)) {
- ret = -EINVAL;
- goto hist_collect_exit;
- }
- spin_lock_irqsave(&mdss_hist_lock, flag);
- /* wait for hist done if cache has no data */
- if (hist_info->col_state != HIST_READY) {
- hist_info->read_request = true;
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- timeout = HIST_WAIT_TIMEOUT(hist_info->frame_cnt);
- mutex_unlock(&mdss_mdp_hist_mutex);
- /* flush updates before wait*/
- mdss_mdp_pp_setup(ctl);
- wait_ret = wait_for_completion_killable_timeout(
- &(hist_info->comp), timeout);
-
- mutex_lock(&mdss_mdp_hist_mutex);
- if (wait_ret == 0) {
- ret = -ETIMEDOUT;
- spin_lock_irqsave(&mdss_hist_lock, flag);
- pr_debug("bin collection timedout, state %d",
- hist_info->col_state);
- /*
- * When the histogram has timed out (usually
- * underrun) change the SW state back to idle
- * since histogram hardware will have done the
- * same. Histogram data also needs to be
- * cleared in this case, which is done by the
- * histogram being read (triggered by READY
- * state, which also moves the histogram SW back
- * to IDLE).
- */
- hist_info->col_state = HIST_READY;
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- } else if (wait_ret < 0) {
- ret = -EINTR;
- pr_debug("%s: bin collection interrupted",
- __func__);
- goto hist_collect_exit;
- }
- if (hist_info->col_state != HIST_READY) {
- ret = -ENODATA;
- pr_debug("%s: state is not ready: %d",
- __func__, hist_info->col_state);
- goto hist_collect_exit;
- }
- } else {
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- }
- spin_lock_irqsave(&mdss_hist_lock, flag);
- if (hist_info->col_state == HIST_READY) {
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- v_base = ctl_base + 0x1C;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- pp_hist_read(v_base, hist_info);
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
- spin_lock_irqsave(&mdss_hist_lock, flag);
- hist_info->read_request = false;
- hist_info->col_state = HIST_IDLE;
- }
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- }
- if (mixer_cnt > 1) {
- memset(&mdss_pp_res->hist_data[disp_num][0],
- 0, HIST_V_SIZE * sizeof(u32));
- for (i = 0; i < mixer_cnt; i++) {
+ if (PP_LOCAT(hist->block) == MDSS_PP_DSPP_CFG) {
+ hist_info = &mdss_pp_res->dspp_hist[disp_num];
+ for (i = 0; i < hist_cnt; i++) {
dspp_num = mixer_id[i];
hist_info = &mdss_pp_res->dspp_hist[dspp_num];
- for (j = 0; j < HIST_V_SIZE; j++)
- mdss_pp_res->hist_data[disp_num][i] +=
- hist_info->data[i];
+ ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
+ MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+ ret = pp_hist_collect(ctl, hist, hist_info, ctl_base);
+ if (ret)
+ goto hist_collect_exit;
}
- *hist_data_addr = (u32)&mdss_pp_res->hist_data[disp_num][0];
+ if (hist_cnt > 1) {
+ if (hist->bin_cnt != HIST_V_SIZE) {
+ pr_err("User not expecting size %d output",
+ HIST_V_SIZE);
+ ret = -EINVAL;
+ goto hist_collect_exit;
+ }
+ hist_concat = kmalloc(HIST_V_SIZE * sizeof(u32),
+ GFP_KERNEL);
+ if (!hist_concat) {
+ ret = -ENOMEM;
+ goto hist_collect_exit;
+ }
+ memset(hist_concat, 0, HIST_V_SIZE * sizeof(u32));
+ for (i = 0; i < hist_cnt; i++) {
+ dspp_num = mixer_id[i];
+ hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+ mutex_lock(&hist_info->hist_mutex);
+ for (j = 0; j < HIST_V_SIZE; j++)
+ hist_concat[j] += hist_info->data[j];
+ mutex_unlock(&hist_info->hist_mutex);
+ }
+ hist_data_addr = hist_concat;
+ } else {
+ hist_data_addr = hist_info->data;
+ }
+ hist_info = &mdss_pp_res->dspp_hist[disp_num];
+ hist_info->hist_cnt_sent++;
+ } else if (PP_LOCAT(hist->block) == MDSS_PP_SSPP_CFG) {
+
+ hist_cnt = MDSS_PP_ARG_MASK & hist->block;
+ if (!hist_cnt) {
+ pr_warn("Must pass pipe arguments, %d", hist_cnt);
+ goto hist_collect_exit;
+ }
+
+ /* Find the first pipe requested */
+ for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
+ if (PP_ARG(i, hist_cnt)) {
+ pipe_num = i;
+ break;
+ }
+ }
+
+ pipe = mdss_mdp_pipe_get(mdata, BIT(pipe_num));
+ if (IS_ERR_OR_NULL(pipe)) {
+ pr_warn("Invalid starting hist pipe, %d", pipe_num);
+ ret = -ENODEV;
+ goto hist_collect_exit;
+ }
+ hist_info = &pipe->pp_res.hist;
+ mdss_mdp_pipe_unmap(pipe);
+ for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, hist->block))
+ continue;
+ pipe_cnt++;
+ pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ if (IS_ERR_OR_NULL(pipe) ||
+ pipe->num > MDSS_MDP_SSPP_VIG2) {
+ pr_warn("Invalid Hist pipe (%d)", i);
+ continue;
+ }
+ hist_info = &pipe->pp_res.hist;
+ ctl_base = pipe->base +
+ MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ ret = pp_hist_collect(ctl, hist, hist_info, ctl_base);
+ mdss_mdp_pipe_unmap(pipe);
+ if (ret)
+ goto hist_collect_exit;
+ }
+ if (pipe_cnt > 1) {
+ if (hist->bin_cnt != (HIST_V_SIZE * pipe_cnt)) {
+ pr_err("User not expecting size %d output",
+ pipe_cnt * HIST_V_SIZE);
+ ret = -EINVAL;
+ goto hist_collect_exit;
+ }
+ hist_concat = kmalloc(HIST_V_SIZE * pipe_cnt *
+ sizeof(u32), GFP_KERNEL);
+ if (!hist_concat) {
+ ret = -ENOMEM;
+ goto hist_collect_exit;
+ }
+
+ memset(hist_concat, 0, pipe_cnt * HIST_V_SIZE *
+ sizeof(u32));
+ for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, hist->block))
+ continue;
+ pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ hist_info = &pipe->pp_res.hist;
+ off = HIST_V_SIZE * i;
+ mutex_lock(&hist_info->hist_mutex);
+ for (j = off; j < off + HIST_V_SIZE; j++)
+ hist_concat[j] =
+ hist_info->data[j - off];
+ hist_info->hist_cnt_sent++;
+ mutex_unlock(&hist_info->hist_mutex);
+ mdss_mdp_pipe_unmap(pipe);
+ }
+
+ hist_data_addr = hist_concat;
+ } else {
+ hist_data_addr = hist_info->data;
+ }
} else {
- *hist_data_addr = (u32)hist_info->data;
+ pr_info("No Histogram at location %d", PP_LOCAT(hist->block));
+ goto hist_collect_exit;
}
- hist_info->hist_cnt_sent++;
+ ret = copy_to_user(hist->c0, hist_data_addr, sizeof(u32) *
+ hist->bin_cnt);
hist_collect_exit:
- mutex_unlock(&mdss_mdp_hist_mutex);
+ kfree(hist_concat);
+
return ret;
}
void mdss_mdp_hist_intr_done(u32 isr)
{
u32 isr_blk, blk_idx;
- struct pp_hist_col_info *hist_info;
+ struct pp_hist_col_info *hist_info = NULL;
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
isr &= 0x333333;
while (isr != 0) {
if (isr & 0xFFF000) {
@@ -2141,36 +2482,40 @@
hist_info = &mdss_pp_res->dspp_hist[blk_idx];
} else {
if (isr & 0x3) {
- blk_idx = 0;
+ blk_idx = MDSS_MDP_SSPP_VIG0;
isr_blk = isr & 0x3;
isr &= ~0x3;
} else if (isr & 0x30) {
- blk_idx = 1;
+ blk_idx = MDSS_MDP_SSPP_VIG1;
isr_blk = (isr >> 4) & 0x3;
isr &= ~0x30;
} else {
- blk_idx = 2;
+ blk_idx = MDSS_MDP_SSPP_VIG2;
isr_blk = (isr >> 8) & 0x3;
isr &= ~0x300;
}
- /* SSPP block, not support yet*/
- continue;
+ pipe = mdss_mdp_pipe_search(mdata, BIT(blk_idx));
+ if (IS_ERR_OR_NULL(pipe)) {
+ pr_debug("pipe DNE, %d", blk_idx);
+ continue;
+ }
+ hist_info = &pipe->pp_res.hist;
}
/* Histogram Done Interrupt */
- if ((isr_blk & 0x1) &&
+ if (hist_info && (isr_blk & 0x1) &&
(hist_info->col_en)) {
- spin_lock(&mdss_hist_lock);
+ spin_lock(&hist_info->hist_lock);
hist_info->col_state = HIST_READY;
- spin_unlock(&mdss_hist_lock);
+ spin_unlock(&hist_info->hist_lock);
if (hist_info->read_request)
complete(&hist_info->comp);
}
/* Histogram Reset Done Interrupt */
if ((isr_blk & 0x2) &&
(hist_info->col_en)) {
- spin_lock(&mdss_hist_lock);
+ spin_lock(&hist_info->hist_lock);
hist_info->col_state = HIST_IDLE;
- spin_unlock(&mdss_hist_lock);
+ spin_unlock(&hist_info->hist_lock);
}
};
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index 5711653..016c973 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -27,6 +27,8 @@
static struct mdss_mdp_rotator_session rotator_session[MAX_ROTATOR_SESSIONS];
static LIST_HEAD(rotator_queue);
+static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot);
+
struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_alloc(void)
{
struct mdss_mdp_rotator_session *rot;
@@ -39,7 +41,6 @@
rot->ref_cnt++;
rot->session_id = i | MDSS_MDP_ROT_SESSION_MASK;
mutex_init(&rot->lock);
- init_completion(&rot->comp);
break;
}
}
@@ -85,10 +86,18 @@
static int mdss_mdp_rotator_busy_wait(struct mdss_mdp_rotator_session *rot)
{
+ struct mdss_mdp_pipe *rot_pipe = NULL;
+ struct mdss_mdp_ctl *ctl = NULL;
+
+ rot_pipe = rot->pipe;
+ if (!rot_pipe)
+ return -ENODEV;
+
+ ctl = rot_pipe->mixer->ctl;
mutex_lock(&rot->lock);
if (rot->busy) {
pr_debug("waiting for rot=%d to complete\n", rot->pipe->num);
- wait_for_completion_interruptible(&rot->comp);
+ mdss_mdp_display_wait4comp(ctl);
rot->busy = false;
}
@@ -97,28 +106,18 @@
return 0;
}
-static void mdss_mdp_rotator_callback(void *arg)
-{
- struct mdss_mdp_rotator_session *rot;
-
- rot = (struct mdss_mdp_rotator_session *) arg;
- if (rot)
- complete(&rot->comp);
-}
-
static int mdss_mdp_rotator_kickoff(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_rotator_session *rot,
struct mdss_mdp_data *dst_data)
{
int ret;
struct mdss_mdp_writeback_arg wb_args = {
- .callback_fnc = mdss_mdp_rotator_callback,
+ .callback_fnc = NULL,
.data = dst_data,
.priv_data = rot,
};
mutex_lock(&rot->lock);
- INIT_COMPLETION(rot->comp);
rot->busy = true;
ret = mdss_mdp_display_commit(ctl, &wb_args);
if (ret) {
@@ -166,27 +165,21 @@
return 0;
}
-int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
+static int mdss_mdp_rotator_queue_sub(struct mdss_mdp_rotator_session *rot,
struct mdss_mdp_data *src_data,
struct mdss_mdp_data *dst_data)
{
struct mdss_mdp_pipe *rot_pipe = NULL;
struct mdss_mdp_ctl *ctl;
- int ret, need_wait = false;
+ int ret;
- ret = mutex_lock_interruptible(&rotator_lock);
- if (ret)
- return ret;
-
- if (!rot || !rot->ref_cnt) {
- mutex_unlock(&rotator_lock);
- return -ENODEV;
- }
+ if (!rot || !rot->ref_cnt)
+ return -ENOENT;
ret = mdss_mdp_rotator_pipe_dequeue(rot);
if (ret) {
pr_err("unable to acquire rotator\n");
- goto done;
+ return ret;
}
rot_pipe = rot->pipe;
@@ -203,31 +196,148 @@
rot_pipe->img_height = rot->img_height;
rot_pipe->src = rot->src_rect;
rot_pipe->dst = rot->src_rect;
+ rot_pipe->dst.x = 0;
+ rot_pipe->dst.y = 0;
rot_pipe->params_changed++;
}
+ ret = mdss_mdp_smp_reserve(rot->pipe);
+ if (ret) {
+ pr_err("unable to mdss_mdp_smp_reserve rot data\n");
+ return ret;
+ }
+
ret = mdss_mdp_pipe_queue_data(rot->pipe, src_data);
if (ret) {
pr_err("unable to queue rot data\n");
- goto done;
+ mdss_mdp_smp_unreserve(rot->pipe);
+ return ret;
}
ret = mdss_mdp_rotator_kickoff(ctl, rot, dst_data);
- if (ret == 0 && !rot->no_wait)
- need_wait = true;
-done:
+ return ret;
+}
+
+int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
+ struct mdss_mdp_data *src_data,
+ struct mdss_mdp_data *dst_data)
+{
+ int ret;
+ struct mdss_mdp_rotator_session *tmp = rot;
+
+ ret = mutex_lock_interruptible(&rotator_lock);
+ if (ret)
+ return ret;
+
+ pr_debug("rotator session=%x start\n", rot->session_id);
+
+ for (ret = 0, tmp = rot; ret == 0 && tmp; tmp = tmp->next)
+ ret = mdss_mdp_rotator_queue_sub(tmp, src_data, dst_data);
+
mutex_unlock(&rotator_lock);
- if (need_wait)
- mdss_mdp_rotator_busy_wait(rot);
+ if (ret) {
+ pr_err("rotation failed %d for rot=%d\n", ret, rot->session_id);
+ return ret;
+ }
- if (rot_pipe)
- pr_debug("end of rotator pnum=%d enqueue\n", rot_pipe->num);
+ for (tmp = rot; tmp; tmp = tmp->next)
+ mdss_mdp_rotator_busy_wait(tmp);
+
+ pr_debug("rotator session=%x queue done\n", rot->session_id);
return ret;
}
+int mdss_mdp_rotator_setup(struct mdss_mdp_rotator_session *rot)
+{
+
+ rot->dst = rot->src_rect;
+ /*
+ * by default, rotator output should be placed directly on
+ * output buffer address without any offset.
+ */
+ rot->dst.x = 0;
+ rot->dst.y = 0;
+
+ if (rot->flags & MDP_ROT_90)
+ swap(rot->dst.w, rot->dst.h);
+
+ if (rot->src_rect.w > MAX_MIXER_WIDTH) {
+ struct mdss_mdp_rotator_session *tmp;
+ u32 width;
+
+ if (rot->bwc_mode) {
+ pr_err("Unable to do split rotation with bwc set\n");
+ return -EINVAL;
+ }
+
+ width = rot->src_rect.w;
+
+ pr_debug("setting up split rotation src=%dx%d\n",
+ rot->src_rect.w, rot->src_rect.h);
+
+ if (width > (MAX_MIXER_WIDTH * 2)) {
+ pr_err("unsupported source width %d\n", width);
+ return -EOVERFLOW;
+ }
+
+ if (!rot->next) {
+ tmp = mdss_mdp_rotator_session_alloc();
+ if (!tmp) {
+ pr_err("unable to allocate rot dual session\n");
+ return -ENOMEM;
+ }
+ rot->next = tmp;
+ }
+ tmp = rot->next;
+
+ tmp->session_id = rot->session_id & ~MDSS_MDP_ROT_SESSION_MASK;
+ tmp->flags = rot->flags;
+ tmp->format = rot->format;
+ tmp->img_width = rot->img_width;
+ tmp->img_height = rot->img_height;
+ tmp->src_rect = rot->src_rect;
+
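+ /* second session covers the right half of the source; the first keeps the remainder */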
+ tmp->src_rect.w = width / 2;
+ width -= tmp->src_rect.w;
+ tmp->src_rect.x += width;
+
+ tmp->dst = rot->dst;
+ rot->src_rect.w = width;
+
+ if (rot->flags & MDP_ROT_90) {
+ /*
+ * If rotated by 90, the first half should be on top,
+ * but if horizontally flipped it should be on the bottom.
+ */
+ if (rot->flags & MDP_FLIP_LR)
+ rot->dst.y = tmp->src_rect.w;
+ else
+ tmp->dst.y = rot->src_rect.w;
+ } else {
+ /*
+ * If not rotated, first half should be the left part
+ * of the frame, unless horizontally flipped
+ */
+ if (rot->flags & MDP_FLIP_LR)
+ rot->dst.x = tmp->src_rect.w;
+ else
+ tmp->dst.x = rot->src_rect.w;
+ }
+
+ tmp->params_changed++;
+ } else if (rot->next) {
+ mdss_mdp_rotator_finish(rot->next);
+ rot->next = NULL;
+ }
+
+ rot->params_changed++;
+
+ return 0;
+}
+
static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
{
struct mdss_mdp_pipe *rot_pipe;
@@ -237,6 +347,9 @@
pr_debug("finish rot id=%x\n", rot->session_id);
+ if (rot->next)
+ mdss_mdp_rotator_finish(rot->next);
+
rot_pipe = rot->pipe;
if (rot_pipe) {
mdss_mdp_rotator_busy_wait(rot);
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
index 21ee9bb..3401fe8 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.h
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -30,16 +30,17 @@
u16 img_width;
u16 img_height;
struct mdss_mdp_img_rect src_rect;
+ struct mdss_mdp_img_rect dst;
u32 bwc_mode;
struct mdss_mdp_pipe *pipe;
struct mutex lock;
- struct completion comp;
u8 busy;
u8 no_wait;
struct list_head head;
+ struct mdss_mdp_rotator_session *next;
};
static inline u32 mdss_mdp_get_rotator_dst_format(u32 in_format)
@@ -61,6 +62,7 @@
struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_alloc(void);
struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_get(u32 session_id);
+int mdss_mdp_rotator_setup(struct mdss_mdp_rotator_session *rot);
int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
struct mdss_mdp_data *src_data,
struct mdss_mdp_data *dst_data);
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 5915f61..60f05ca 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -240,7 +240,7 @@
ps->ystride[1] = 32 * 2;
} else if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
ps->rau_cnt = DIV_ROUND_UP(w, 32);
- ps->ystride[0] = 32 * 4;
+ ps->ystride[0] = 32 * 4 * fmt->bpp;
ps->ystride[1] = 0;
ps->rau_h[0] = 4;
ps->rau_h[1] = 0;
@@ -250,8 +250,8 @@
}
stride_off = DIV_ROUND_UP(ps->rau_cnt, 8);
- ps->ystride[0] = ps->ystride[0] * ps->rau_cnt * fmt->bpp + stride_off;
- ps->ystride[1] = ps->ystride[1] * ps->rau_cnt * fmt->bpp + stride_off;
+ ps->ystride[0] = ps->ystride[0] * ps->rau_cnt + stride_off;
+ ps->ystride[1] = ps->ystride[1] * ps->rau_cnt + stride_off;
ps->num_planes = 2;
return 0;
@@ -262,8 +262,7 @@
{
struct mdss_mdp_format_params *fmt;
int i, rc;
- u32 bpp, stride_off;
-
+ u32 bpp, ystride0_off, ystride1_off;
if (ps == NULL)
return -EINVAL;
@@ -281,12 +280,14 @@
rc = mdss_mdp_get_rau_strides(w, h, fmt, ps);
if (rc)
return rc;
- stride_off = DIV_ROUND_UP(h, ps->rau_h[0]);
- ps->ystride[0] = ps->ystride[0] + ps->ystride[1];
- ps->plane_size[0] = ps->ystride[0] * stride_off;
+ ystride0_off = DIV_ROUND_UP(h, ps->rau_h[0]);
+ ystride1_off = DIV_ROUND_UP(h, ps->rau_h[1]);
+ ps->plane_size[0] = (ps->ystride[0] * ystride0_off) +
+ (ps->ystride[1] * ystride1_off);
+ ps->ystride[0] += ps->ystride[1];
ps->ystride[1] = 2;
- ps->plane_size[1] = ps->rau_cnt * ps->ystride[1] * stride_off;
-
+ ps->plane_size[1] = ps->rau_cnt * ps->ystride[1] *
+ (ystride0_off + ystride1_off);
} else {
if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
ps->num_planes = 1;
@@ -346,47 +347,67 @@
int mdss_mdp_data_check(struct mdss_mdp_data *data,
struct mdss_mdp_plane_sizes *ps)
{
+ struct mdss_mdp_img_data *prev, *curr;
+ int i;
+
if (!ps)
return 0;
if (!data || data->num_planes == 0)
return -ENOMEM;
- if (data->bwc_enabled) {
- data->num_planes = ps->num_planes;
- data->p[1].addr = data->p[0].addr + ps->plane_size[0];
- } else {
- struct mdss_mdp_img_data *prev, *curr;
- int i;
+ pr_debug("srcp0=%x len=%u frame_size=%u\n", data->p[0].addr,
+ data->p[0].len, ps->total_size);
- pr_debug("srcp0=%x len=%u frame_size=%u\n", data->p[0].addr,
- data->p[0].len, ps->total_size);
-
- for (i = 0; i < ps->num_planes; i++) {
- curr = &data->p[i];
- if (i >= data->num_planes) {
- u32 psize = ps->plane_size[i-1];
- prev = &data->p[i-1];
- if (prev->len > psize) {
- curr->len = prev->len - psize;
- prev->len = psize;
- }
- curr->addr = prev->addr + psize;
+ for (i = 0; i < ps->num_planes; i++) {
+ curr = &data->p[i];
+ if (i >= data->num_planes) {
+ u32 psize = ps->plane_size[i-1];
+ prev = &data->p[i-1];
+ if (prev->len > psize) {
+ curr->len = prev->len - psize;
+ prev->len = psize;
}
- if (curr->len < ps->plane_size[i]) {
- pr_err("insufficient mem=%u p=%d len=%u\n",
- curr->len, i, ps->plane_size[i]);
- return -ENOMEM;
- }
- pr_debug("plane[%d] addr=%x len=%u\n", i,
- curr->addr, curr->len);
+ curr->addr = prev->addr + psize;
}
- data->num_planes = ps->num_planes;
+ if (curr->len < ps->plane_size[i]) {
+ pr_err("insufficient mem=%u p=%d len=%u\n",
+ curr->len, i, ps->plane_size[i]);
+ return -ENOMEM;
+ }
+ pr_debug("plane[%d] addr=%x len=%u\n", i,
+ curr->addr, curr->len);
}
+ data->num_planes = ps->num_planes;
return 0;
}
+void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
+ struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt)
+{
+ if ((x == 0) && (y == 0))
+ return;
+
+ data->p[0].addr += y * ps->ystride[0];
+
+ if (data->num_planes == 1) {
+ data->p[0].addr += x * fmt->bpp;
+ } else {
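+ /* per-plane chroma subsampling divisors, indexed by the format's chroma_sample */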
+ u8 hmap[] = { 1, 2, 1, 2 };
+ u8 vmap[] = { 1, 1, 2, 2 };
+ u16 xoff = x / hmap[fmt->chroma_sample];
+ u16 yoff = y / vmap[fmt->chroma_sample];
+
+ data->p[0].addr += x;
+ data->p[1].addr += xoff + (yoff * ps->ystride[1]);
+ if (data->num_planes == 2) /* pseudo planar */
+ data->p[1].addr += xoff;
+ else /* planar */
+ data->p[2].addr += xoff + (yoff * ps->ystride[2]);
+ }
+}
+
int mdss_mdp_put_img(struct mdss_mdp_img_data *data)
{
struct ion_client *iclient = mdss_get_ionclient();
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index 88e7605..7ccf1b9 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -25,6 +25,7 @@
#include "mdss_mdp.h"
#include "mdss_fb.h"
+#include "mdss_wb.h"
enum mdss_mdp_wb_state {
@@ -131,7 +132,13 @@
pr_debug("setting secure=%d\n", enable);
+ ctl->is_secure = enable;
wb->is_secure = enable;
+
+ /* newer revisions don't require secure src pipe for secure session */
+ if (ctl->mdata->mdp_rev > MDSS_MDP_HW_REV_100)
+ return 0;
+
pipe = wb->secure_pipe;
if (!enable) {
@@ -242,6 +249,7 @@
mdss_mdp_pipe_destroy(wb->secure_pipe);
mutex_unlock(&wb->lock);
+ mdp5_data->ctl->is_secure = false;
mdp5_data->wb = NULL;
mutex_unlock(&mdss_mdp_wb_buf_lock);
@@ -535,11 +543,37 @@
return ret;
}
+int mdss_mdp_wb_set_mirr_hint(struct msm_fb_data_type *mfd, int hint)
+{
+ struct mdss_panel_data *pdata = NULL;
+ struct mdss_wb_ctrl *wb_ctrl = NULL;
+
+ if (!mfd) {
+ pr_err("No panel data!\n");
+ return -EINVAL;
+ }
+
+ pdata = mfd->pdev->dev.platform_data;
+ wb_ctrl = container_of(pdata, struct mdss_wb_ctrl, pdata);
+
+ switch (hint) {
+ case MDP_WRITEBACK_MIRROR_ON:
+ case MDP_WRITEBACK_MIRROR_PAUSE:
+ case MDP_WRITEBACK_MIRROR_RESUME:
+ case MDP_WRITEBACK_MIRROR_OFF:
+ pr_info("wfd state switched to %d\n", hint);
+ switch_set_state(&wb_ctrl->sdev, hint);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd,
void *arg)
{
struct msmfb_data data;
- int ret = -ENOSYS;
+ int ret = -ENOSYS, hint = 0;
switch (cmd) {
case MSMFB_WRITEBACK_INIT:
@@ -570,6 +604,14 @@
case MSMFB_WRITEBACK_TERMINATE:
ret = mdss_mdp_wb_terminate(mfd);
break;
+ case MSMFB_WRITEBACK_SET_MIRRORING_HINT:
+ if (!copy_from_user(&hint, arg, sizeof(hint))) {
+ ret = mdss_mdp_wb_set_mirr_hint(mfd, hint);
+ } else {
+ pr_err("set mirroring hint failed on copy_from_user\n");
+ ret = -EFAULT;
+ }
+ break;
}
return ret;
diff --git a/drivers/video/msm/mdss/mdss_qpic.c b/drivers/video/msm/mdss/mdss_qpic.c
index be02113..fa6bd3d 100644
--- a/drivers/video/msm/mdss/mdss_qpic.c
+++ b/drivers/video/msm/mdss/mdss_qpic.c
@@ -428,7 +428,7 @@
param[0]);
param++;
bytes_left -= 4;
- space++;
+ space--;
} else if (bytes_left == 2) {
QPIC_OUTPW(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
*(u16 *)param);
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index 1b398d3..a169302 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -24,6 +24,7 @@
#include <linux/version.h>
#include "mdss_panel.h"
+#include "mdss_wb.h"
/**
* mdss_wb_check_params - check new panel info params
@@ -87,22 +88,62 @@
return 0;
}
+static int mdss_wb_dev_init(struct mdss_wb_ctrl *wb_ctrl)
+{
+ int rc = 0;
+ if (!wb_ctrl) {
+ pr_err("%s: no driver data\n", __func__);
+ return -ENODEV;
+ }
+
+ wb_ctrl->sdev.name = "wfd";
+ rc = switch_dev_register(&wb_ctrl->sdev);
+ if (rc) {
+ pr_err("Failed to setup switch dev for writeback panel");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int mdss_wb_dev_uninit(struct mdss_wb_ctrl *wb_ctrl)
+{
+ if (!wb_ctrl) {
+ pr_err("%s: no driver data\n", __func__);
+ return -ENODEV;
+ }
+
+ switch_dev_unregister(&wb_ctrl->sdev);
+ return 0;
+}
+
static int mdss_wb_probe(struct platform_device *pdev)
{
struct mdss_panel_data *pdata = NULL;
+ struct mdss_wb_ctrl *wb_ctrl = NULL;
int rc = 0;
if (!pdev->dev.of_node)
return -ENODEV;
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
+ wb_ctrl = devm_kzalloc(&pdev->dev, sizeof(*wb_ctrl), GFP_KERNEL);
+ if (!wb_ctrl)
return -ENOMEM;
+ pdata = &wb_ctrl->pdata;
+ wb_ctrl->pdev = pdev;
+ platform_set_drvdata(pdev, wb_ctrl);
+
rc = !mdss_wb_parse_dt(pdev, pdata);
if (!rc)
return rc;
+ rc = mdss_wb_dev_init(wb_ctrl);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to set up device nodes for writeback panel\n");
+ return rc;
+ }
+
pdata->panel_info.type = WRITEBACK_PANEL;
pdata->panel_info.clk_rate = 74250000;
pdata->panel_info.pdest = DISPLAY_3;
@@ -120,6 +161,19 @@
return rc;
}
+static int mdss_wb_remove(struct platform_device *pdev)
+{
+ struct mdss_wb_ctrl *wb_ctrl = platform_get_drvdata(pdev);
+ if (!wb_ctrl) {
+ pr_err("%s: no driver data\n", __func__);
+ return -ENODEV;
+ }
+
+ mdss_wb_dev_uninit(wb_ctrl);
+ devm_kfree(&wb_ctrl->pdev->dev, wb_ctrl);
+ return 0;
+}
+
static const struct of_device_id mdss_wb_match[] = {
{ .compatible = "qcom,mdss_wb", },
{ { 0 } }
@@ -127,6 +181,7 @@
static struct platform_driver mdss_wb_driver = {
.probe = mdss_wb_probe,
+ .remove = mdss_wb_remove,
.driver = {
.name = "mdss_wb",
.of_match_table = mdss_wb_match,
diff --git a/arch/arm/boot/dts/msmzinc-ion.dtsi b/drivers/video/msm/mdss/mdss_wb.h
similarity index 70%
copy from arch/arm/boot/dts/msmzinc-ion.dtsi
copy to drivers/video/msm/mdss/mdss_wb.h
index 4bf078a..3b0c52a 100644
--- a/arch/arm/boot/dts/msmzinc-ion.dtsi
+++ b/drivers/video/msm/mdss/mdss_wb.h
@@ -8,20 +8,18 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
*/
-/ {
- qcom,ion {
- compatible = "qcom,msm-ion";
- #address-cells = <1>;
- #size-cells = <0>;
+#ifndef MDSS_WB_H
+#define MDSS_WB_H
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
- };
+#include <linux/switch.h>
- qcom,ion-heap@25 { /* IOMMU HEAP */
- reg = <25>;
- };
- };
+struct mdss_wb_ctrl {
+ struct platform_device *pdev;
+ struct mdss_panel_data pdata;
+ struct switch_dev sdev;
};
+
+#endif
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 797d4a3..18c63a0 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -3238,6 +3238,29 @@
return mdp4_writeback_terminate(info);
}
+static int msmfb_overlay_ioctl_writeback_set_mirr_hint(struct fb_info *
+ info, void *argp)
+{
+ int ret = 0, hint;
+
+ if (!info) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = copy_from_user(&hint, argp, sizeof(hint));
+ if (ret)
+ goto error;
+
+ ret = mdp4_writeback_set_mirroring_hint(info, hint);
+ if (ret)
+ goto error;
+error:
+ if (ret)
+ pr_err("%s: ioctl failed\n", __func__);
+ return ret;
+}
+
#else
static int msmfb_overlay_ioctl_writeback_init(struct fb_info *info)
{
@@ -3270,6 +3293,12 @@
{
return -ENOTSUPP;
}
+
+static int msmfb_overlay_ioctl_writeback_set_mirr_hint(struct fb_info *
+ info, void *argp)
+{
+ return -ENOTSUPP;
+}
#endif
static int msmfb_overlay_3d_sbys(struct fb_info *info, unsigned long *argp)
@@ -3745,6 +3774,10 @@
case MSMFB_WRITEBACK_TERMINATE:
ret = msmfb_overlay_ioctl_writeback_terminate(info);
break;
+ case MSMFB_WRITEBACK_SET_MIRRORING_HINT:
+ ret = msmfb_overlay_ioctl_writeback_set_mirr_hint(
+ info, argp);
+ break;
#endif
case MSMFB_VSYNC_CTRL:
case MSMFB_OVERLAY_VSYNC_CTRL:
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index 7519ac7..a02a108 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -37,7 +37,9 @@
#include <linux/fb.h>
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/switch.h>
#include <linux/msm_mdp.h>
+
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
@@ -180,6 +182,7 @@
struct list_head writeback_busy_queue;
struct list_head writeback_free_queue;
struct list_head writeback_register_queue;
+ struct switch_dev writeback_sdev;
wait_queue_head_t wait_q;
struct ion_client *iclient;
unsigned long display_iova;
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
index 5c15d9a..f5d7947 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -800,9 +800,9 @@
if (resource_context.vidc_platform_data->enable_ion) {
if (res_trk_check_for_sec_session()) {
if (resource_context.res_mem_type != DDL_FW_MEM)
- flags |= ION_SECURE;
+ flags |= ION_FLAG_SECURE;
else if (res_trk_is_cp_enabled())
- flags |= ION_SECURE;
+ flags |= ION_FLAG_SECURE;
}
}
return flags;
diff --git a/fs/file.c b/fs/file.c
index ba3f605..2f989c3 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -476,6 +476,7 @@
spin_unlock(&files->file_lock);
return error;
}
+EXPORT_SYMBOL(alloc_fd);
int get_unused_fd(void)
{
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index ddcd354..de8b4cb 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -209,7 +209,8 @@
if (!new_transaction)
goto alloc_transaction;
write_lock(&journal->j_state_lock);
- if (!journal->j_running_transaction) {
+ if (!journal->j_running_transaction &&
+ !journal->j_barrier_count) {
jbd2_get_transaction(journal, new_transaction);
new_transaction = NULL;
}
diff --git a/include/drm/kgsl_drm.h b/include/drm/kgsl_drm.h
index 2ad1ab2..1e65a5f 100644
--- a/include/drm/kgsl_drm.h
+++ b/include/drm/kgsl_drm.h
@@ -21,6 +21,10 @@
#define DRM_KGSL_GEM_CREATE_FD 0x0E
#define DRM_KGSL_GEM_GET_ION_FD 0x0F
#define DRM_KGSL_GEM_CREATE_FROM_ION 0x10
+#define DRM_KGSL_GEM_SET_GLOCK_HANDLES_INFO 0x11
+#define DRM_KGSL_GEM_GET_GLOCK_HANDLES_INFO 0x12
+#define DRM_KGSL_GEM_GET_BUFCOUNT 0x13
+
#define DRM_IOCTL_KGSL_GEM_CREATE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_CREATE, struct drm_kgsl_gem_create)
@@ -57,6 +61,10 @@
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SET_BUFCOUNT, \
struct drm_kgsl_gem_bufcount)
+#define DRM_IOCTL_KGSL_GEM_GET_BUFCOUNT \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_GET_BUFCOUNT, \
+ struct drm_kgsl_gem_bufcount)
+
#define DRM_IOCTL_KGSL_GEM_SET_ACTIVE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SET_ACTIVE, \
struct drm_kgsl_gem_active)
@@ -85,6 +93,16 @@
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_CREATE_FROM_ION, \
struct drm_kgsl_gem_create_from_ion)
+#define DRM_IOCTL_KGSL_GEM_SET_GLOCK_HANDLES_INFO \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SET_GLOCK_HANDLES_INFO, \
+struct drm_kgsl_gem_glockinfo)
+
+#define DRM_IOCTL_KGSL_GEM_GET_GLOCK_HANDLES_INFO \
+DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_GET_GLOCK_HANDLES_INFO, \
+struct drm_kgsl_gem_glockinfo)
+
+
+
/* Maximum number of sub buffers per GEM object */
#define DRM_KGSL_GEM_MAX_BUFFERS 3
@@ -167,6 +185,11 @@
uint32_t gpuaddr[DRM_KGSL_GEM_MAX_BUFFERS];
};
+struct drm_kgsl_gem_glockinfo {
+ uint32_t handle;
+ int glockhandle[DRM_KGSL_GEM_MAX_BUFFERS];
+};
+
struct drm_kgsl_gem_bufcount {
uint32_t handle;
uint32_t bufcount;
diff --git a/include/linux/bluetooth-power.h b/include/linux/bluetooth-power.h
new file mode 100644
index 0000000..ba53a40
--- /dev/null
+++ b/include/linux/bluetooth-power.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_BLUETOOTH_POWER_H
+#define __LINUX_BLUETOOTH_POWER_H
+
+/*
+ * voltage regulator information required for configuring the
+ * bluetooth chipset
+ */
+struct bt_power_vreg_data {
+ /* voltage regulator handle */
+ struct regulator *reg;
+ /* regulator name */
+ const char *name;
+ /* voltage levels to be set */
+ unsigned int low_vol_level;
+ unsigned int high_vol_level;
+ /*
+ * is set voltage supported for this regulator?
+ * false => set voltage is not supported
+ * true => set voltage is supported
+ *
+ * Some regulators (like gpio-regulators, LVS (low voltage switches)
+ * PMIC regulators) don't have the capability to call
+ * regulator_set_voltage or regulator_set_optimum_mode.
+ * Use this variable to indicate whether it is such a regulator.
+ */
+ bool set_voltage_sup;
+ /* is this regulator enabled? */
+ bool is_enabled;
+};
+
+/*
+ * Platform data for the bluetooth power driver.
+ */
+struct bluetooth_power_platform_data {
+ /* Bluetooth reset gpio */
+ int bt_gpio_sys_rst;
+ /* VDDIO voltage regulator */
+ struct bt_power_vreg_data *bt_vdd_io;
+ /* VDD_PA voltage regulator */
+ struct bt_power_vreg_data *bt_vdd_pa;
+ /* VDD_LDOIN voltage regulator */
+ struct bt_power_vreg_data *bt_vdd_ldo;
+ /* Optional: chip power down gpio-regulator
+ * Chip power down data is required when the bluetooth module
+ * and other modules like wifi co-exist in a single chip and
+ * share a common gpio to bring the chip out of reset.
+ */
+ struct bt_power_vreg_data *bt_chip_pwd;
+ /* Optional: Bluetooth power setup function */
+ int (*bt_power_setup) (int);
+};
+
+#endif /* __LINUX_BLUETOOTH_POWER_H */
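For illustration only (this sketch is not part of the patch): a board file might wire up the platform data above roughly as follows. The supply name, GPIO number and platform-device name are assumptions, not values taken from this change.

    /* Hypothetical board-file sketch -- names and numbers are examples only. */
    #include <linux/platform_device.h>
    #include <linux/bluetooth-power.h>

    static struct bt_power_vreg_data bt_vdd_io_vreg = {
        .name            = "bt-vdd-io",     /* assumed supply name */
        .low_vol_level   = 1800000,
        .high_vol_level  = 1800000,
        .set_voltage_sup = true,
    };

    static struct bluetooth_power_platform_data bt_power_pdata = {
        .bt_gpio_sys_rst = 42,              /* assumed reset GPIO */
        .bt_vdd_io       = &bt_vdd_io_vreg,
    };

    static struct platform_device bt_power_device = {
        .name = "bt_power",                 /* assumed device name */
        .id   = -1,
        .dev  = { .platform_data = &bt_power_pdata },
    };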
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 969b400..2f77d29 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -118,10 +118,10 @@
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
#define MSG_MASK_TBL_CNT 24
-#define EVENT_LAST_ID 0x09AB
+#define EVENT_LAST_ID 0x09B2
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 94
+#define MSG_SSID_0_LAST 97
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -287,6 +287,9 @@
MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
MSG_LVL_LOW
};
@@ -722,7 +725,7 @@
/* LOG CODES */
#define LOG_0 0x0
-#define LOG_1 0x1755
+#define LOG_1 0x17F4
#define LOG_2 0x0
#define LOG_3 0x0
#define LOG_4 0x4910
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index 6e50578..c219725 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -108,7 +108,6 @@
#define DMX_CHECK_CRC 0x01
#define DMX_ONESHOT 0x02
#define DMX_IMMEDIATE_START 0x04
-#define DMX_ENABLE_INDEXING 0x08
#define DMX_KERNEL_CLIENT 0x8000
struct dmx_sct_filter_params
@@ -120,25 +119,39 @@
};
-/* Indexing: supported video standards */
-enum dmx_indexing_video_standard {
- DMX_INDEXING_MPEG2,
- DMX_INDEXING_H264,
- DMX_INDEXING_VC1
+enum dmx_video_codec {
+ DMX_VIDEO_CODEC_MPEG2,
+ DMX_VIDEO_CODEC_H264,
+ DMX_VIDEO_CODEC_VC1
};
-/* Indexing: Supported video profiles */
-enum dmx_indexing_video_profile {
- DMX_INDEXING_MPEG2_ANY,
- DMX_INDEXING_H264_ANY,
- DMX_INDEXING_VC1_ANY
-};
-
-/* Indexing: video configuration parameters */
-struct dmx_indexing_video_params {
- enum dmx_indexing_video_standard standard;
- enum dmx_indexing_video_profile profile;
-};
+/* Index entries types */
+#define DMX_IDX_RAI 0x00000001
+#define DMX_IDX_PUSI 0x00000002
+#define DMX_IDX_MPEG_SEQ_HEADER 0x00000004
+#define DMX_IDX_MPEG_GOP 0x00000008
+#define DMX_IDX_MPEG_FIRST_SEQ_FRAME_START 0x00000010
+#define DMX_IDX_MPEG_FIRST_SEQ_FRAME_END 0x00000020
+#define DMX_IDX_MPEG_I_FRAME_START 0x00000040
+#define DMX_IDX_MPEG_I_FRAME_END 0x00000080
+#define DMX_IDX_MPEG_P_FRAME_START 0x00000100
+#define DMX_IDX_MPEG_P_FRAME_END 0x00000200
+#define DMX_IDX_MPEG_B_FRAME_START 0x00000400
+#define DMX_IDX_MPEG_B_FRAME_END 0x00000800
+#define DMX_IDX_H264_SPS 0x00001000
+#define DMX_IDX_H264_PPS 0x00002000
+#define DMX_IDX_H264_FIRST_SPS_FRAME_START 0x00004000
+#define DMX_IDX_H264_FIRST_SPS_FRAME_END 0x00008000
+#define DMX_IDX_H264_IDR_START 0x00010000
+#define DMX_IDX_H264_IDR_END 0x00020000
+#define DMX_IDX_H264_NON_IDR_START 0x00040000
+#define DMX_IDX_H264_NON_IDR_END 0x00080000
+#define DMX_IDX_VC1_SEQ_HEADER 0x00100000
+#define DMX_IDX_VC1_ENTRY_POINT 0x00200000
+#define DMX_IDX_VC1_FIRST_SEQ_FRAME_START 0x00400000
+#define DMX_IDX_VC1_FIRST_SEQ_FRAME_END 0x00800000
+#define DMX_IDX_VC1_FRAME_START 0x01000000
+#define DMX_IDX_VC1_FRAME_END 0x02000000
struct dmx_pes_filter_params
{
@@ -153,14 +166,14 @@
* DMX_EVENT_NEW_REC_CHUNK will be triggered.
* When new recorded data is received with size
* equal or larger than this value a new event
- * will be triggered. This is relevent when
+ * will be triggered. This is relevant when
* output is DMX_OUT_TS_TAP or DMX_OUT_TSDEMUX_TAP,
* size must be at least DMX_REC_BUFF_CHUNK_MIN_SIZE
* and smaller than buffer size.
*/
__u32 rec_chunk_size;
- struct dmx_indexing_video_params video_params;
+ enum dmx_video_codec video_codec;
};
struct dmx_buffer_status {
@@ -210,7 +223,21 @@
DMX_EVENT_EOS = 0x00000040,
/* New Elementary Stream data is ready */
- DMX_EVENT_NEW_ES_DATA = 0x00000080
+ DMX_EVENT_NEW_ES_DATA = 0x00000080,
+
+ /* Data markers */
+ DMX_EVENT_MARKER = 0x00000100,
+
+ /* New indexing entry is ready */
+ DMX_EVENT_NEW_INDEX_ENTRY = 0x00000200
+};
+
+enum dmx_oob_cmd {
+ /* End-of-stream, no more data from this filter */
+ DMX_OOB_CMD_EOS,
+
+ /* Data markers */
+ DMX_OOB_CMD_MARKER,
};
/* Flags passed in filter events */
@@ -341,6 +368,9 @@
/* DTS value associated with the buffer */
__u64 dts;
+ /* STC value associated with the buffer in 27MHz */
+ __u64 stc;
+
/*
* Number of TS packets with Transport Error Indicator (TEI) set
* in the TS packet header since last reported event
@@ -360,6 +390,40 @@
__u32 ts_dropped_bytes;
};
+/* Marker details associated with DMX_EVENT_MARKER event */
+struct dmx_marker_event_info {
+ /* Marker id */
+ __u64 id;
+};
+
+/* Indexing information associated with DMX_EVENT_NEW_INDEX_ENTRY event */
+struct dmx_index_event_info {
+ /* Index entry type, one of DMX_IDX_* */
+ __u64 type;
+
+ /*
+ * The PID the index entry belongs to.
+ * In case of recording filter, multiple PIDs may exist in the same
+ * filter through DMX_ADD_PID ioctl and each can be indexed separately.
+ */
+ __u16 pid;
+
+ /*
+ * The TS packet number in the recorded data at which
+ * the indexing event is found.
+ */
+ __u64 match_tsp_num;
+
+ /*
+ * The number of the TS packet in the recorded data that precedes
+ * match_tsp_num and has PUSI set.
+ */
+ __u64 last_pusi_tsp_num;
+
+ /* STC associated with match_tsp_num, in 27MHz */
+ __u64 stc;
+};
+
/*
* Filter's event returned through DMX_GET_EVENT.
* poll with POLLPRI would block until events are available.
@@ -373,6 +437,8 @@
struct dmx_rec_chunk_event_info recording_chunk;
struct dmx_pcr_event_info pcr;
struct dmx_es_data_event_info es_data;
+ struct dmx_marker_event_info marker;
+ struct dmx_index_event_info index;
} params;
};
@@ -406,6 +472,15 @@
#define DMX_BUFFER_LINEAR_GROUP_SUPPORT 0x10
};
+/* Out-of-band (OOB) command */
+struct dmx_oob_command {
+ enum dmx_oob_cmd type;
+
+ union {
+ struct dmx_marker_event_info marker;
+ } params;
+};
+
typedef struct dmx_caps {
__u32 caps;
@@ -615,6 +690,22 @@
__u32 wakeup_threshold;
};
+struct dmx_indexing_params {
+ /*
+ * PID to index. In case of recording filter, multiple PIDs
+ * may exist in the same filter through DMX_ADD_PID ioctl.
+ * It is assumed that the PID was already added using DMX_ADD_PID
+ * or an error will be reported.
+ */
+ __u16 pid;
+
+ /* enable or disable indexing, default is disabled */
+ int enable;
+
+ /* combination of DMX_IDX_* bits */
+ __u64 types;
+};
+
#define DMX_START _IO('o', 41)
#define DMX_STOP _IO('o', 42)
#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params)
@@ -641,6 +732,7 @@
#define DMX_SET_SECURE_MODE _IOW('o', 65, struct dmx_secure_mode)
#define DMX_SET_EVENTS_MASK _IOW('o', 66, struct dmx_events_mask)
#define DMX_GET_EVENTS_MASK _IOR('o', 67, struct dmx_events_mask)
-
+#define DMX_PUSH_OOB_COMMAND _IOW('o', 68, struct dmx_oob_command)
+#define DMX_SET_INDEXING_PARAMS _IOW('o', 69, struct dmx_indexing_params)
#endif /*_DVBDMX_H_*/
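As a rough userspace sketch (not part of the patch) of the new indexing interface: after a PID has been added to a recording filter with DMX_ADD_PID, indexing is switched on per PID with DMX_SET_INDEXING_PARAMS, and matches arrive as DMX_EVENT_NEW_INDEX_ENTRY events. The file descriptor, PID and error handling below are assumptions.

    /* Hypothetical usage -- demux fd and PID are illustrative. */
    #include <sys/ioctl.h>
    #include <linux/dvb/dmx.h>

    static int enable_idr_indexing(int demux_fd, __u16 video_pid)
    {
        struct dmx_indexing_params idx = {
            .pid    = video_pid,
            .enable = 1,
            .types  = DMX_IDX_H264_IDR_START | DMX_IDX_H264_IDR_END,
        };

        /* The PID must already have been added via DMX_ADD_PID. */
        return ioctl(demux_fd, DMX_SET_INDEXING_PARAMS, &idx);
    }

    /* A later DMX_GET_EVENT reporting DMX_EVENT_NEW_INDEX_ENTRY carries the
     * match in event.params.index (type, pid, match_tsp_num, stc). */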
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index a87246c..3bb38ad 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -45,14 +45,14 @@
struct list_head next_chunk; /* next chunk in pool */
atomic_t avail;
phys_addr_t phys_addr; /* physical starting address of memory chunk */
- unsigned long start_addr; /* starting address of memory chunk */
- unsigned long end_addr; /* ending address of memory chunk */
+ u64 start_addr; /* starting address of memory chunk */
+ u64 end_addr; /* ending address of memory chunk */
unsigned long bits[0]; /* bitmap for allocating memory chunk */
};
extern struct gen_pool *gen_pool_create(int, int);
-extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
-extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
+extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, u64);
+extern int gen_pool_add_virt(struct gen_pool *, u64, phys_addr_t,
size_t, int);
/**
* gen_pool_add - add a new chunk of special memory to the pool
@@ -66,19 +66,19 @@
*
* Returns 0 on success or a -ve errno on failure.
*/
-static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
+static inline int gen_pool_add(struct gen_pool *pool, u64 addr,
size_t size, int nid)
{
return gen_pool_add_virt(pool, addr, -1, size, nid);
}
extern void gen_pool_destroy(struct gen_pool *);
-extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+extern void gen_pool_free(struct gen_pool *, u64, size_t);
extern void gen_pool_for_each_chunk(struct gen_pool *,
void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
extern size_t gen_pool_avail(struct gen_pool *);
extern size_t gen_pool_size(struct gen_pool *);
-unsigned long __must_check
+u64 __must_check
gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
unsigned alignment_order);
@@ -90,7 +90,7 @@
* Allocate the requested number of bytes from the specified pool.
* Uses a first-fit algorithm.
*/
-static inline unsigned long __must_check
+static inline u64 __must_check
gen_pool_alloc(struct gen_pool *pool, size_t size)
{
return gen_pool_alloc_aligned(pool, size, 0);
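A minimal sketch (not part of the patch) of why the chunk addresses were widened to u64: a pool can now manage a region that sits above the 32-bit boundary even on a 32-bit kernel. The base address and sizes below are arbitrary.

    /* Hypothetical example -- the region and sizes are made up. */
    #include <linux/genalloc.h>
    #include <linux/sizes.h>

    static u64 carveout_alloc_64k(void)
    {
        struct gen_pool *pool;
        u64 addr;

        pool = gen_pool_create(12, -1);    /* 4 KB minimum order, any node */
        if (!pool)
            return 0;

        /* A chunk above 4 GB now fits in the u64 start/end fields. */
        if (gen_pool_add(pool, 0x100000000ULL, SZ_1M, -1)) {
            gen_pool_destroy(pool);
            return 0;
        }

        addr = gen_pool_alloc(pool, SZ_64K);
        return addr;    /* 0 on failure */
    }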
diff --git a/include/linux/genlock.h b/include/linux/genlock.h
index 587c49d..e233662 100644
--- a/include/linux/genlock.h
+++ b/include/linux/genlock.h
@@ -8,6 +8,7 @@
struct genlock_handle *genlock_get_handle(void);
struct genlock_handle *genlock_get_handle_fd(int fd);
+int genlock_get_fd_handle(struct genlock_handle *handle);
void genlock_put_handle(struct genlock_handle *handle);
struct genlock *genlock_create_lock(struct genlock_handle *);
struct genlock *genlock_attach_lock(struct genlock_handle *, int fd);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index c737eb7..2a144e6 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -39,6 +39,12 @@
void kmap_flush_unused(void);
+#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
+void kmap_atomic_flush_unused(void);
+#else
+static inline void kmap_atomic_flush_unused(void) { }
+#endif
+
#else /* CONFIG_HIGHMEM */
static inline unsigned int nr_free_highpages(void) { return 0; }
@@ -72,6 +78,7 @@
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#define kmap_flush_unused() do {} while(0)
+#define kmap_atomic_flush_unused() do {} while (0)
#endif
#endif /* CONFIG_HIGHMEM */
diff --git a/include/linux/input/synaptics_dsx.h b/include/linux/input/synaptics_dsx.h
index 56616d7..f90f59e 100644
--- a/include/linux/input/synaptics_dsx.h
+++ b/include/linux/input/synaptics_dsx.h
@@ -55,6 +55,7 @@
unsigned reset_gpio;
unsigned panel_x;
unsigned panel_y;
+ const char *fw_image_name;
int (*gpio_config)(unsigned gpio, bool configure);
struct synaptics_rmi4_capacitance_button_map *capacitance_button_map;
};
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 7c54004..4983316 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -29,18 +29,13 @@
* @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
* carveout heap, allocations are physically
* contiguous
- * @ION_HEAP_TYPE_IOMMU: IOMMU memory
- * @ION_HEAP_TYPE_CP: memory allocated from a prereserved
- * carveout heap, allocations are physically
- * contiguous. Used for content protection.
- * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
* @ION_HEAP_END: helper for iterating over heaps
*/
enum ion_heap_type {
ION_HEAP_TYPE_SYSTEM,
ION_HEAP_TYPE_SYSTEM_CONTIG,
ION_HEAP_TYPE_CARVEOUT,
- ION_HEAP_TYPE_DMA,
+ ION_HEAP_TYPE_CHUNK,
ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
are at the end of this enum */
ION_NUM_HEAPS,
@@ -49,10 +44,11 @@
#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
-#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8
/**
- * heap flags - the lower 16 bits are used by core ion, the upper 16
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
@@ -81,8 +77,9 @@
/**
* struct ion_platform_heap - defines a heap in the given platform
* @type: type of the heap from ion_heap_type enum
- * @id: unique identifier for heap. When allocating (lower numbers
- * will be allocated from first)
+ * @id: unique identifier for heap. When allocating, heaps with
+ * higher id numbers are tried first. At allocation these are passed
+ * as a bit mask and therefore cannot exceed ION_NUM_HEAP_IDS.
* @name: used for debug purposes
* @base: base address of heap in physical memory if applicable
* @size: size of the heap in bytes if applicable
@@ -90,6 +87,10 @@
* @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
* @extra_data: Extra data specific to each heap type
* @priv: heap private data
+ * @align: required alignment in physical memory if applicable
+ * @priv: private info passed from the board file
+ *
+ * Provided by the board file.
*/
struct ion_platform_heap {
enum ion_heap_type type;
@@ -100,6 +101,7 @@
enum ion_memory_types memory_type;
unsigned int has_outer_cache;
void *extra_data;
+ ion_phys_addr_t align;
void *priv;
};
@@ -107,11 +109,6 @@
* struct ion_platform_data - array of platform heaps passed from board file
* @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
* @nr: number of structures in the array
- * @request_region: function to be called when the number of allocations goes
- * from 0 -> 1
- * @release_region: function to be called when the number of allocations goes
- * from 1 -> 0
- * @setup_region: function to be called upon ion registration
* @heaps: array of platform_heap structions
*
* Provided by the board file in the form of platform data to a platform device.
@@ -119,9 +116,6 @@
struct ion_platform_data {
unsigned int has_outer_cache;
int nr;
- int (*request_region)(void *);
- int (*release_region)(void *);
- void *(*setup_region)(void);
struct ion_platform_heap *heaps;
};
@@ -140,23 +134,12 @@
/**
* ion_client_create() - allocate a client and returns it
- * @dev: the global ion device
- * @heap_mask: mask of heaps this client can allocate from
- * @name: used for debugging
+ * @dev: the global ion device
+ * @heap_type_mask: mask of heaps this client can allocate from
+ * @name: used for debugging
*/
struct ion_client *ion_client_create(struct ion_device *dev,
- unsigned int heap_mask, const char *name);
-
-/**
- * msm_ion_client_create - allocate a client using the ion_device specified in
- * drivers/gpu/ion/msm/msm_ion.c
- *
- * heap_mask and name are the same as ion_client_create, return values
- * are the same as ion_client_create.
- */
-
-struct ion_client *msm_ion_client_create(unsigned int heap_mask,
- const char *name);
+ const char *name);
/**
* ion_client_destroy() - free's a client and all it's handles
@@ -169,21 +152,22 @@
/**
* ion_alloc - allocate ion memory
- * @client: the client
- * @len: size of the allocation
- * @align: requested allocation alignment, lots of hardware blocks have
- * alignment requirements of some kind
- * @heap_mask: mask of heaps to allocate from, if multiple bits are set
- * heaps will be tried in order from lowest to highest order bit
- * @flags: heap flags, the low 16 bits are consumed by ion, the high 16
- * bits are passed on to the respective heap and can be heap
- * custom
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks
+ * have alignment requirements of some kind
+ * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from highest to lowest
+ * id
+ * @flags: heap flags, the low 16 bits are consumed by ion, the
+ * high 16 bits are passed on to the respective heap and
+ * can be heap custom
*
* Allocate memory in one of the heaps provided in heap mask and return
* an opaque handle to it.
*/
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_mask,
+ size_t align, unsigned int heap_id_mask,
unsigned int flags);
/**
@@ -243,11 +227,19 @@
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_share_dma_buf() - given an ion client, create a dma-buf fd
+ * ion_share_dma_buf() - share buffer as dma-buf
* @client: the client
* @handle: the handle
*/
-int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client: the client
+ * @handle: the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
/**
* ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle
@@ -260,124 +252,6 @@
*/
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
-/**
- * ion_handle_get_flags - get the flags for a given handle
- *
- * @client - client who allocated the handle
- * @handle - handle to get the flags
- * @flags - pointer to store the flags
- *
- * Gets the current flags for a handle. These flags indicate various options
- * of the buffer (caching, security, etc.)
- */
-int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
- unsigned long *flags);
-
-
-/**
- * ion_map_iommu - map the given handle into an iommu
- *
- * @client - client who allocated the handle
- * @handle - handle to map
- * @domain_num - domain number to map to
- * @partition_num - partition number to allocate iova from
- * @align - alignment for the iova
- * @iova_length - length of iova to map. If the iova length is
- * greater than the handle length, the remaining
- * address space will be mapped to a dummy buffer.
- * @iova - pointer to store the iova address
- * @buffer_size - pointer to store the size of the buffer
- * @flags - flags for options to map
- * @iommu_flags - flags specific to the iommu.
- *
- * Maps the handle into the iova space specified via domain number. Iova
- * will be allocated from the partition specified via partition_num.
- * Returns 0 on success, negative value on error.
- */
-int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
- int domain_num, int partition_num, unsigned long align,
- unsigned long iova_length, unsigned long *iova,
- unsigned long *buffer_size,
- unsigned long flags, unsigned long iommu_flags);
-
-
-/**
- * ion_handle_get_size - get the allocated size of a given handle
- *
- * @client - client who allocated the handle
- * @handle - handle to get the size
- * @size - pointer to store the size
- *
- * gives the allocated size of a handle. returns 0 on success, negative
- * value on error
- *
- * NOTE: This is intended to be used only to get a size to pass to map_iommu.
- * You should *NOT* rely on this for any other usage.
- */
-
-int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
- unsigned long *size);
-
-/**
- * ion_unmap_iommu - unmap the handle from an iommu
- *
- * @client - client who allocated the handle
- * @handle - handle to unmap
- * @domain_num - domain to unmap from
- * @partition_num - partition to unmap from
- *
- * Decrement the reference count on the iommu mapping. If the count is
- * 0, the mapping will be removed from the iommu.
- */
-void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
- int domain_num, int partition_num);
-
-
-/**
- * ion_secure_heap - secure a heap
- *
- * @client - a client that has allocated from the heap heap_id
- * @heap_id - heap id to secure.
- * @version - version of content protection
- * @data - extra data needed for protection
- *
- * Secure a heap
- * Returns 0 on success
- */
-int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
- void *data);
-
-/**
- * ion_unsecure_heap - un-secure a heap
- *
- * @client - a client that has allocated from the heap heap_id
- * @heap_id - heap id to un-secure.
- * @version - version of content protection
- * @data - extra data needed for protection
- *
- * Un-secure a heap
- * Returns 0 on success
- */
-int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
- void *data);
-
-/**
- * msm_ion_do_cache_op - do cache operations.
- *
- * @client - pointer to ION client.
- * @handle - pointer to buffer handle.
- * @vaddr - virtual address to operate on.
- * @len - Length of data to do cache operation on.
- * @cmd - Cache operation to perform:
- * ION_IOC_CLEAN_CACHES
- * ION_IOC_INV_CACHES
- * ION_IOC_CLEAN_INV_CACHES
- *
- * Returns 0 on success
- */
-int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
- void *vaddr, unsigned long len, unsigned int cmd);
-
#else
static inline void ion_reserve(struct ion_platform_data *data)
{
@@ -390,12 +264,6 @@
return ERR_PTR(-ENODEV);
}
-static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
- const char *name)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline void ion_client_destroy(struct ion_client *client) { }
static inline struct ion_handle *ion_alloc(struct ion_client *client,
@@ -447,54 +315,6 @@
return -ENODEV;
}
-static inline int ion_map_iommu(struct ion_client *client,
- struct ion_handle *handle, int domain_num,
- int partition_num, unsigned long align,
- unsigned long iova_length, unsigned long *iova,
- unsigned long *buffer_size,
- unsigned long flags,
- unsigned long iommu_flags)
-{
- return -ENODEV;
-}
-
-static inline int ion_handle_get_size(struct ion_client *client,
- struct ion_handle *handle, unsigned long *size)
-{
- return -ENODEV;
-}
-
-static inline void ion_unmap_iommu(struct ion_client *client,
- struct ion_handle *handle, int domain_num,
- int partition_num)
-{
- return;
-}
-
-static inline int ion_secure_heap(struct ion_device *dev, int heap_id,
- int version, void *data)
-{
- return -ENODEV;
-
-}
-
-static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id,
- int version, void *data)
-{
- return -ENODEV;
-}
-
-static inline void ion_mark_dangling_buffers_locked(struct ion_device *dev)
-{
-}
-
-static inline int msm_ion_do_cache_op(struct ion_client *client,
- struct ion_handle *handle, void *vaddr,
- unsigned long len, unsigned int cmd)
-{
- return -ENODEV;
-}
-
#endif /* CONFIG_ION */
#endif /* __KERNEL__ */
@@ -508,12 +328,12 @@
/**
* struct ion_allocation_data - metadata passed from userspace for allocations
- * @len: size of the allocation
- * @align: required alignment of the allocation
- * @heap_mask: mask of heaps to allocate from
- * @flags: flags passed to heap
- * @handle: pointer that will be populated with a cookie to use to refer
- * to this allocation
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @heap_id_mask: mask of heap ids to allocate from
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to
+ * refer to this allocation
*
* Provided by userspace as an argument to the ioctl
*/
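To make the renamed parameters concrete, a hedged kernel-side sketch (not part of the patch): clients no longer carry a heap mask, and ion_alloc() now takes a mask of heap ids rather than heap types. ION_HEAP() and ION_SYSTEM_HEAP_ID come from msm_ion.h; the client name and heap choice are illustrative assumptions.

    /* Illustrative only -- client name and heap id choice are assumptions. */
    #include <linux/err.h>
    #include <linux/sizes.h>
    #include <linux/ion.h>
    #include <linux/msm_ion.h>

    static int ion_alloc_example(struct ion_device *idev, size_t len)
    {
        struct ion_client *client;
        struct ion_handle *handle;
        int fd;

        client = ion_client_create(idev, "example-client");
        if (IS_ERR_OR_NULL(client))
            return -ENODEV;

        /* Bits in heap_id_mask are heap ids, tried from highest to lowest. */
        handle = ion_alloc(client, len, SZ_4K,
                           ION_HEAP(ION_SYSTEM_HEAP_ID), ION_FLAG_CACHED);
        if (IS_ERR_OR_NULL(handle)) {
            ion_client_destroy(client);
            return -ENOMEM;
        }

        /* Hand the buffer to userspace as a dma-buf fd; the fd keeps its own
         * reference, so the handle and client can be released afterwards. */
        fd = ion_share_dma_buf_fd(client, handle);

        ion_free(client, handle);
        ion_client_destroy(client);
        return fd;
    }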
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 8aa758d..b882fe2 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -40,8 +40,12 @@
might_sleep_if(timeout_us); \
for (;;) { \
(val) = readl(addr); \
- if ((cond) || (timeout_us && time_after(jiffies, timeout))) \
+ if (cond) \
break; \
+ if (timeout_us && time_after(jiffies, timeout)) { \
+ (val) = readl(addr); \
+ break; \
+ } \
if (sleep_us) \
usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
} \
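The hunk above re-reads the register once more when the timeout path is taken, so a caller that gets a timeout still sees a value sampled after the deadline rather than one read before a long sleep or preemption. A hedged caller sketch follows; the macro name readl_poll_timeout, the status bit and the timeouts are assumptions, since only part of the file is visible in this hunk.

    /* Illustrative caller -- register layout and timeouts are made up. */
    #include <linux/bitops.h>
    #include <linux/iopoll.h>

    #define HW_STATUS_READY    BIT(0)    /* assumed ready bit */

    static int wait_for_ready(void __iomem *status_reg)
    {
        u32 val;

        /* Poll every ~100 us, give up after 10 ms. On timeout, 'val' holds a
         * freshly re-read value, so the final check is not stale. */
        return readl_poll_timeout(status_reg, val,
                                  val & HW_STATUS_READY, 100, 10000);
    }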
diff --git a/include/linux/memory_alloc.h b/include/linux/memory_alloc.h
index b649451..8097949 100644
--- a/include/linux/memory_alloc.h
+++ b/include/linux/memory_alloc.h
@@ -20,7 +20,7 @@
struct mem_pool {
struct mutex pool_mutex;
struct gen_pool *gpool;
- unsigned long paddr;
+ phys_addr_t paddr;
unsigned long size;
unsigned long free;
unsigned int id;
@@ -28,29 +28,34 @@
struct alloc {
struct rb_node rb_node;
- void *vaddr;
- unsigned long paddr;
+ /*
+ * The physical address may be used for lookup in the tree so the
+ * 'virtual address' needs to be able to accommodate larger physical
+ * addresses.
+ */
+ phys_addr_t vaddr;
+ phys_addr_t paddr;
struct mem_pool *mpool;
unsigned long len;
void *caller;
};
-struct mem_pool *initialize_memory_pool(unsigned long start,
+struct mem_pool *initialize_memory_pool(phys_addr_t start,
unsigned long size, int mem_type);
void *allocate_contiguous_memory(unsigned long size,
int mem_type, unsigned long align, int cached);
-unsigned long _allocate_contiguous_memory_nomap(unsigned long size,
+phys_addr_t _allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align, void *caller);
-unsigned long allocate_contiguous_memory_nomap(unsigned long size,
+phys_addr_t allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align);
void free_contiguous_memory(void *addr);
-void free_contiguous_memory_by_paddr(unsigned long paddr);
+void free_contiguous_memory_by_paddr(phys_addr_t paddr);
-unsigned long memory_pool_node_paddr(void *vaddr);
+phys_addr_t memory_pool_node_paddr(void *vaddr);
unsigned long memory_pool_node_len(void *vaddr);
diff --git a/include/linux/mfd/pm8xxx/batterydata-lib.h b/include/linux/mfd/pm8xxx/batterydata-lib.h
index f27ceca..df9569b 100644
--- a/include/linux/mfd/pm8xxx/batterydata-lib.h
+++ b/include/linux/mfd/pm8xxx/batterydata-lib.h
@@ -91,6 +91,8 @@
* compensate for battery capacitance.
* @rbatt_capacitve_mohm: the resistance to be added to compensate for
* battery capacitance
+ * @flat_ocv_threshold_uv: the voltage where the battery's discharge curve
+ * starts flattening out.
*/
struct bms_battery_data {
@@ -103,6 +105,7 @@
int default_rbatt_mohm;
int delta_rbatt_mohm;
int rbatt_capacitive_mohm;
+ int flat_ocv_threshold_uv;
};
#if defined(CONFIG_PM8921_BMS) || \
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index 1c67b1e..5439fd1 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -165,6 +165,7 @@
unsigned int warm_bat_chg_current;
unsigned int cool_bat_voltage;
unsigned int warm_bat_voltage;
+ int hysteresis_temp;
unsigned int (*get_batt_capacity_percent) (void);
int64_t batt_id_min;
int64_t batt_id_max;
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index 37a12fb..0d1f49f 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -77,7 +77,8 @@
WCD9XXX_IRQ_HPH_L_PA_STARTUP,
WCD9XXX_IRQ_HPH_R_PA_STARTUP,
WCD9XXX_IRQ_EAR_PA_STARTUP,
- WCD9XXX_IRQ_RESERVED_0,
+ WCD9310_NUM_IRQS,
+ WCD9XXX_IRQ_RESERVED_0 = WCD9310_NUM_IRQS,
WCD9XXX_IRQ_RESERVED_1,
/* INTR_REG 3 */
WCD9XXX_IRQ_MAD_AUDIO,
@@ -85,12 +86,14 @@
WCD9XXX_IRQ_MAD_ULTRASOUND,
WCD9XXX_IRQ_SPEAKER_CLIPPING,
WCD9XXX_IRQ_MBHC_JACK_SWITCH,
+ WCD9XXX_IRQ_VBAT_MONITOR_ATTACK,
+ WCD9XXX_IRQ_VBAT_MONITOR_RELEASE,
WCD9XXX_NUM_IRQS,
};
enum {
- TABLA_NUM_IRQS = WCD9XXX_NUM_IRQS,
- SITAR_NUM_IRQS = WCD9XXX_NUM_IRQS,
+ TABLA_NUM_IRQS = WCD9310_NUM_IRQS,
+ SITAR_NUM_IRQS = WCD9310_NUM_IRQS,
TAIKO_NUM_IRQS = WCD9XXX_NUM_IRQS,
TAPAN_NUM_IRQS = WCD9XXX_NUM_IRQS,
};
@@ -150,6 +153,17 @@
#define WCD9XXX_CH(xport, xshift) \
{.port = xport, .shift = xshift}
+struct wcd9xxx_codec_type {
+ u16 id_major;
+ u16 id_minor;
+ struct mfd_cell *dev;
+ int size;
+ int num_irqs;
+ int version; /* -1 to retrieve version from chip version register */
+ enum wcd9xxx_slim_slave_addr_type slim_slave_type;
+ u16 i2c_chip_status;
+};
+
struct wcd9xxx {
struct device *dev;
struct slim_device *slim;
@@ -181,14 +195,14 @@
struct pm_qos_request pm_qos_req;
int wlock_holders;
- u8 idbyte[4];
+ u16 id_minor;
+ u16 id_major;
unsigned int irq_base;
unsigned int irq;
u8 irq_masks_cur[WCD9XXX_NUM_IRQ_REGS];
u8 irq_masks_cache[WCD9XXX_NUM_IRQ_REGS];
bool irq_level_high[WCD9XXX_MAX_NUM_IRQS];
- int num_irqs;
/* Slimbus or I2S port */
u32 num_rx_port;
u32 num_tx_port;
@@ -196,7 +210,7 @@
struct wcd9xxx_ch *tx_chs;
u32 mclk_rate;
- enum wcd9xxx_slim_slave_addr_type slim_slave_type;
+ const struct wcd9xxx_codec_type *codec_type;
};
int wcd9xxx_reg_read(struct wcd9xxx *wcd9xxx, unsigned short reg);
diff --git a/include/linux/mfd/wcd9xxx/pdata.h b/include/linux/mfd/wcd9xxx/pdata.h
index 813cac3..c6e4ab3 100644
--- a/include/linux/mfd/wcd9xxx/pdata.h
+++ b/include/linux/mfd/wcd9xxx/pdata.h
@@ -136,7 +136,7 @@
unsigned int hph_ocp_limit:3; /* Headphone OCP current limit */
};
-#define MAX_REGULATOR 7
+#define WCD9XXX_MAX_REGULATOR 8
/*
* format : TABLA_<POWER_SUPPLY_PIN_NAME>_CUR_MAX
*
@@ -151,11 +151,14 @@
#define WCD9XXX_VDDD_CDC_D_CUR_MAX 5000
#define WCD9XXX_VDDD_CDC_A_CUR_MAX 5000
+#define WCD9XXX_VDD_SPKDRV_NAME "cdc-vdd-spkdrv"
+
struct wcd9xxx_regulator {
const char *name;
int min_uV;
int max_uV;
int optimum_uA;
+ bool ondemand;
struct regulator *regulator;
};
@@ -168,7 +171,7 @@
struct slim_device slimbus_slave_device;
struct wcd9xxx_micbias_setting micbias;
struct wcd9xxx_ocp_setting ocp;
- struct wcd9xxx_regulator regulator[MAX_REGULATOR];
+ struct wcd9xxx_regulator regulator[WCD9XXX_MAX_REGULATOR];
u32 mclk_rate;
u32 dmic_sample_rate;
};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 05271ba..9eef3a0 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -21,6 +21,8 @@
struct mmc_ios {
unsigned int clock; /* clock rate */
+ unsigned int old_rate; /* saved clock rate */
+ unsigned long clk_ts; /* time stamp of last updated clock */
unsigned short vdd;
/* vdd stores the bit number of the selected voltage range from below. */
@@ -452,6 +454,12 @@
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
+ if (!host->sdio_irqs) {
+ pr_err("%s: SDIO interrupt recieved without function driver claiming an irq\n",
+ mmc_hostname(host));
+ return;
+ }
+
host->ops->enable_sdio_irq(host, 0);
host->sdio_irq_pending = true;
wake_up_process(host->sdio_irq_thread);
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 407a005..4e30082 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -122,6 +122,25 @@
* secure discard kind of operations to complete.
*/
#define SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE (1<<5)
+/*
+ * Ignore data timeout error for R1B commands as there will be no
+ * data associated and the busy timeout value for these commands
+ * could be larger than the maximum timeout value that the controller
+ * can handle.
+ */
+#define SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD (1<<6)
+/*
+ * The preset value registers are not properly initialized by
+ * some hardware and hence preset value must not be enabled for
+ * such controllers.
+ */
+#define SDHCI_QUIRK2_BROKEN_PRESET_VALUE (1<<7)
+/*
+ * Some controllers define the usage of 0xF in data timeout counter
+ * register (0x2E) which is actually a reserved bit as per
+ * specification.
+ */
+#define SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT (1<<8)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -208,6 +227,7 @@
struct pm_qos_request pm_qos_req_dma;
struct sdhci_next next_data;
+ ktime_t data_start_time;
unsigned long private[0] ____cacheline_aligned;
};
diff --git a/include/linux/msm_audio_ion.h b/include/linux/msm_audio_ion.h
new file mode 100644
index 0000000..83e5dff
--- /dev/null
+++ b/include/linux/msm_audio_ion.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_AUDIO_ION_H
+#define _LINUX_MSM_AUDIO_ION_H
+
+#include <linux/msm_ion.h>
+
+
+int msm_audio_ion_alloc(const char *name, struct ion_client **client,
+ struct ion_handle **handle, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr);
+
+int msm_audio_ion_import(const char *name, struct ion_client **client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr);
+int msm_audio_ion_free(struct ion_client *client, struct ion_handle *handle);
+
+
+bool msm_audio_ion_is_smmu_available(void);
+
+#ifdef CONFIG_SND_SOC_QDSP6V2
+struct ion_client *msm_audio_ion_client_create(unsigned int heap_mask,
+ const char *name);
+void msm_audio_ion_client_destroy(struct ion_client *client);
+int msm_audio_ion_import_legacy(const char *name, struct ion_client *client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr);
+int msm_audio_ion_free_legacy(struct ion_client *client,
+ struct ion_handle *handle);
+#else
+static inline struct ion_client *msm_audio_ion_client_create(unsigned int heap_mask,
+ const char *name)
+{ return NULL; }
+static inline void msm_audio_ion_client_destroy(struct ion_client *client)
+{}
+static inline int msm_audio_ion_import_legacy(const char *name,
+ struct ion_client *client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{ return 0; }
+static inline int msm_audio_ion_free_legacy(struct ion_client *client,
+ struct ion_handle *handle)
+{ return 0; }
+
+#endif /* CONFIG_SND_SOC_QDSP6V2 */
+#endif /* _LINUX_MSM_AUDIO_ION_H */
+
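A rough sketch (not part of the patch) of how an audio driver might use the new helper to obtain a DMA-able buffer; the buffer name and size are illustrative, and ownership of the client/handle pair follows the prototypes above.

    /* Illustrative only -- name and size are examples. */
    #include <linux/msm_audio_ion.h>

    static int audio_buf_example(void)
    {
        struct ion_client *client;
        struct ion_handle *handle;
        ion_phys_addr_t paddr;
        size_t pa_len;
        void *vaddr;
        int rc;

        rc = msm_audio_ion_alloc("example_audio_buf", &client, &handle,
                                 4096, &paddr, &pa_len, &vaddr);
        if (rc)
            return rc;

        /* Program paddr into the DSP/DMA engine; touch the buffer via vaddr. */

        return msm_audio_ion_free(client, handle);
    }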
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
index 4fd77d2..20b7317 100644
--- a/include/linux/msm_ion.h
+++ b/include/linux/msm_ion.h
@@ -6,8 +6,10 @@
enum msm_ion_heap_types {
ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
ION_HEAP_TYPE_IOMMU = ION_HEAP_TYPE_MSM_START,
+ ION_HEAP_TYPE_DMA,
ION_HEAP_TYPE_CP,
ION_HEAP_TYPE_SECURE_DMA,
+ ION_HEAP_TYPE_REMOVED,
};
/**
@@ -37,7 +39,7 @@
ION_MM_FIRMWARE_HEAP_ID = 29,
ION_SYSTEM_HEAP_ID = 30,
- ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_SECURE flag */
+ ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_FLAG_SECURE flag */
};
enum ion_fixed_position {
@@ -56,18 +58,25 @@
};
#define ION_HEAP_CP_MASK (1 << ION_HEAP_TYPE_CP)
+#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
/**
* Flag to use when allocating to indicate that a heap is secure.
*/
-#define ION_SECURE (1 << ION_HEAP_ID_RESERVED)
+#define ION_FLAG_SECURE (1 << ION_HEAP_ID_RESERVED)
/**
* Flag for clients to force contiguous memort allocation
*
* Use of this flag is carefully monitored!
*/
-#define ION_FORCE_CONTIGUOUS (1 << 30)
+#define ION_FLAG_FORCE_CONTIGUOUS (1 << 30)
+
+/**
+ * Deprecated! Please use the corresponding ION_FLAG_*
+ */
+#define ION_SECURE ION_FLAG_SECURE
+#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS
/**
* Macro should be used with ion_heap_ids defined above.
@@ -186,9 +195,138 @@
#ifdef CONFIG_ION
/**
+ * msm_ion_client_create - allocate a client using the ion_device specified in
+ * drivers/gpu/ion/msm/msm_ion.c
+ *
+ * heap_mask and name are the same as ion_client_create, return values
+ * are the same as ion_client_create.
+ */
+
+struct ion_client *msm_ion_client_create(unsigned int heap_mask,
+ const char *name);
+
+/**
+ * ion_handle_get_flags - get the flags for a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the flags
+ * @flags - pointer to store the flags
+ *
+ * Gets the current flags for a handle. These flags indicate various options
+ * of the buffer (caching, security, etc.)
+ */
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+ unsigned long *flags);
+
+
+/**
+ * ion_map_iommu - map the given handle into an iommu
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to map
+ * @domain_num - domain number to map to
+ * @partition_num - partition number to allocate iova from
+ * @align - alignment for the iova
+ * @iova_length - length of iova to map. If the iova length is
+ * greater than the handle length, the remaining
+ * address space will be mapped to a dummy buffer.
+ * @iova - pointer to store the iova address
+ * @buffer_size - pointer to store the size of the buffer
+ * @flags - flags for options to map
+ * @iommu_flags - flags specific to the iommu.
+ *
+ * Maps the handle into the iova space specified via domain number. Iova
+ * will be allocated from the partition specified via partition_num.
+ * Returns 0 on success, negative value on error.
+ */
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags, unsigned long iommu_flags);
+
+
+/**
+ * ion_handle_get_size - get the allocated size of a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the size
+ * @size - pointer to store the size
+ *
+ * gives the allocated size of a handle. returns 0 on success, negative
+ * value on error
+ *
+ * NOTE: This is intended to be used only to get a size to pass to map_iommu.
+ * You should *NOT* rely on this for any other usage.
+ */
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+ unsigned long *size);
+
+/**
+ * ion_unmap_iommu - unmap the handle from an iommu
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to unmap
+ * @domain_num - domain to unmap from
+ * @partition_num - partition to unmap from
+ *
+ * Decrement the reference count on the iommu mapping. If the count is
+ * 0, the mapping will be removed from the iommu.
+ */
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num);
+
+
+/**
+ * ion_secure_heap - secure a heap
+ *
+ * @client - a client that has allocated from the heap heap_id
+ * @heap_id - heap id to secure.
+ * @version - version of content protection
+ * @data - extra data needed for protection
+ *
+ * Secure a heap
+ * Returns 0 on success
+ */
+int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
+ void *data);
+
+/**
+ * ion_unsecure_heap - un-secure a heap
+ *
+ * @client - a client that has allocated from the heap heap_id
+ * @heap_id - heap id to un-secure.
+ * @version - version of content protection
+ * @data - extra data needed for protection
+ *
+ * Un-secure a heap
+ * Returns 0 on success
+ */
+int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
+ void *data);
+
+/**
+ * msm_ion_do_cache_op - do cache operations.
+ *
+ * @client - pointer to ION client.
+ * @handle - pointer to buffer handle.
+ * @vaddr - virtual address to operate on.
+ * @len - Length of data to do cache operation on.
+ * @cmd - Cache operation to perform:
+ * ION_IOC_CLEAN_CACHES
+ * ION_IOC_INV_CACHES
+ * ION_IOC_CLEAN_INV_CACHES
+ *
+ * Returns 0 on success
+ */
+int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+ void *vaddr, unsigned long len, unsigned int cmd);
+
+/**
* msm_ion_secure_heap - secure a heap. Wrapper around ion_secure_heap.
*
- * @heap_id - heap id to secure.
+ * @heap_id - heap id to secure.
*
* Secure a heap
* Returns 0 on success
@@ -249,6 +387,60 @@
int msm_ion_unsecure_buffer(struct ion_client *client,
struct ion_handle *handle);
#else
+static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
+ const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int ion_map_iommu(struct ion_client *client,
+ struct ion_handle *handle, int domain_num,
+ int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags,
+ unsigned long iommu_flags)
+{
+ return -ENODEV;
+}
+
+static inline int ion_handle_get_size(struct ion_client *client,
+ struct ion_handle *handle, unsigned long *size)
+{
+ return -ENODEV;
+}
+
+static inline void ion_unmap_iommu(struct ion_client *client,
+ struct ion_handle *handle, int domain_num,
+ int partition_num)
+{
+ return;
+}
+
+static inline int ion_secure_heap(struct ion_device *dev, int heap_id,
+ int version, void *data)
+{
+ return -ENODEV;
+
+}
+
+static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id,
+ int version, void *data)
+{
+ return -ENODEV;
+}
+
+static inline void ion_mark_dangling_buffers_locked(struct ion_device *dev)
+{
+}
+
+static inline int msm_ion_do_cache_op(struct ion_client *client,
+ struct ion_handle *handle, void *vaddr,
+ unsigned long len, unsigned int cmd)
+{
+ return -ENODEV;
+}
+
static inline int msm_ion_secure_heap(int heap_id)
{
return -ENODEV;
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 7e1a709..f9e483c 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -76,6 +76,8 @@
struct mdp_display_commit)
#define MSMFB_METADATA_SET _IOW(MSMFB_IOCTL_MAGIC, 165, struct msmfb_metadata)
#define MSMFB_METADATA_GET _IOW(MSMFB_IOCTL_MAGIC, 166, struct msmfb_metadata)
+#define MSMFB_WRITEBACK_SET_MIRRORING_HINT _IOW(MSMFB_IOCTL_MAGIC, 167, \
+ unsigned int)
#define FB_TYPE_3D_PANEL 0x10101010
#define MDP_IMGTYPE2_START 0x10000
@@ -171,6 +173,7 @@
#define MDP_OV_PIPE_FORCE_DMA 0x00004000
#define MDP_MEMORY_ID_TYPE_FB 0x00001000
#define MDP_BWC_EN 0x00000400
+#define MDP_DECIMATION_EN 0x00000800
#define MDP_TRANSP_NOP 0xffffffff
#define MDP_ALPHA_NOP 0xff
@@ -289,14 +292,19 @@
#define MDP_PP_IGC_FLAG_ROM0 0x10
#define MDP_PP_IGC_FLAG_ROM1 0x20
-#define MDSS_PP_DSPP_CFG 0x0000
-#define MDSS_PP_SSPP_CFG 0x4000
-#define MDSS_PP_LM_CFG 0x8000
-#define MDSS_PP_WB_CFG 0xC000
+#define MDSS_PP_DSPP_CFG 0x000
+#define MDSS_PP_SSPP_CFG 0x100
+#define MDSS_PP_LM_CFG 0x200
+#define MDSS_PP_WB_CFG 0x300
-#define MDSS_PP_LOCATION_MASK 0xC000
-#define MDSS_PP_LOGICAL_MASK 0x3FFF
+#define MDSS_PP_ARG_MASK 0x3C00
+#define MDSS_PP_ARG_NUM 4
+#define MDSS_PP_ARG_SHIFT 10
+#define MDSS_PP_LOCATION_MASK 0x0300
+#define MDSS_PP_LOGICAL_MASK 0x00FF
+#define MDSS_PP_ADD_ARG(var, arg) ((var) | (0x1 << (MDSS_PP_ARG_SHIFT + (arg))))
+#define PP_ARG(x, var) ((var) & (0x1 << (MDSS_PP_ARG_SHIFT + (x))))
#define PP_LOCAT(var) ((var) & MDSS_PP_LOCATION_MASK)
#define PP_BLOCK(var) ((var) & MDSS_PP_LOGICAL_MASK)
@@ -326,6 +334,8 @@
#define MDP_OVERLAY_PP_PA_CFG 0x4
#define MDP_OVERLAY_PP_IGC_CFG 0x8
#define MDP_OVERLAY_PP_SHARP_CFG 0x10
+#define MDP_OVERLAY_PP_HIST_CFG 0x20
+#define MDP_OVERLAY_PP_HIST_LUT_CFG 0x40
#define MDP_CSC_FLAG_ENABLE 0x1
#define MDP_CSC_FLAG_YUV_IN 0x2
@@ -361,6 +371,21 @@
uint32_t *c2_data;
};
+struct mdp_histogram_cfg {
+ uint32_t ops;
+ uint32_t block;
+ uint8_t frame_cnt;
+ uint8_t bit_mask;
+ uint16_t num_bins;
+};
+
+struct mdp_hist_lut_data {
+ uint32_t block;
+ uint32_t ops;
+ uint32_t len;
+ uint32_t *data;
+};
+
struct mdp_overlay_pp_params {
uint32_t config_ops;
struct mdp_csc_cfg csc_cfg;
@@ -368,6 +393,8 @@
struct mdp_pa_cfg pa_cfg;
struct mdp_igc_lut_data igc_cfg;
struct mdp_sharp_cfg sharp_cfg;
+ struct mdp_histogram_cfg hist_cfg;
+ struct mdp_hist_lut_data hist_lut_cfg;
};
struct mdp_overlay {
@@ -380,7 +407,9 @@
uint32_t transp_mask;
uint32_t flags;
uint32_t id;
- uint32_t user_data[8];
+ uint32_t user_data[7];
+ uint8_t horz_deci;
+ uint8_t vert_deci;
struct mdp_overlay_pp_params overlay_pp_cfg;
};
@@ -433,7 +462,7 @@
MDP_BLOCK_DMA_S,
MDP_BLOCK_DMA_E,
MDP_BLOCK_OVERLAY_2,
- MDP_LOGICAL_BLOCK_DISP_0 = 0x1000,
+ MDP_LOGICAL_BLOCK_DISP_0 = 0x10,
MDP_LOGICAL_BLOCK_DISP_1,
MDP_LOGICAL_BLOCK_DISP_2,
MDP_BLOCK_MAX,
@@ -502,13 +531,6 @@
};
-struct mdp_hist_lut_data {
- uint32_t block;
- uint32_t ops;
- uint32_t len;
- uint32_t *data;
-};
-
struct mdp_lut_cfg_data {
uint32_t lut_type;
union {
@@ -552,6 +574,65 @@
uint32_t data;
};
+#define MDSS_AD_MODE_AUTO_BL 0x0
+#define MDSS_AD_MODE_AUTO_STR 0x1
+#define MDSS_AD_MODE_TARG_STR 0x3
+#define MDSS_AD_MODE_MAN_STR 0x7
+
+#define MDP_PP_AD_INIT 0x10
+#define MDP_PP_AD_CFG 0x20
+
+struct mdss_ad_init {
+ uint32_t asym_lut[33];
+ uint32_t color_corr_lut[33];
+ uint8_t i_control[2];
+ uint16_t black_lvl;
+ uint16_t white_lvl;
+ uint8_t var;
+ uint8_t limit_ampl;
+ uint8_t i_dither;
+ uint8_t slope_max;
+ uint8_t slope_min;
+ uint8_t dither_ctl;
+ uint8_t format;
+ uint8_t auto_size;
+ uint16_t frame_w;
+ uint16_t frame_h;
+ uint8_t logo_v;
+ uint8_t logo_h;
+};
+
+struct mdss_ad_cfg {
+ uint32_t mode;
+ uint32_t al_calib_lut[33];
+ uint16_t backlight_min;
+ uint16_t backlight_max;
+ uint16_t backlight_scale;
+ uint16_t amb_light_min;
+ uint16_t filter[2];
+ uint16_t calib[4];
+ uint8_t strength_limit;
+ uint8_t t_filter_recursion;
+};
+
+/* ops uses standard MDP_PP_* flags */
+struct mdss_ad_init_cfg {
+ uint32_t ops;
+ union {
+ struct mdss_ad_init init;
+ struct mdss_ad_cfg cfg;
+ } params;
+};
+
+/* mode uses MDSS_AD_MODE_* flags */
+struct mdss_ad_input {
+ uint32_t mode;
+ union {
+ uint32_t amb_light;
+ uint32_t strength;
+ } in;
+};
+
enum {
mdp_op_pcc_cfg,
mdp_op_csc_cfg,
@@ -562,6 +643,8 @@
mdp_op_dither_cfg,
mdp_op_gamut_cfg,
mdp_op_calib_cfg,
+ mdp_op_ad_cfg,
+ mdp_op_ad_input,
mdp_op_max,
};
@@ -586,6 +669,8 @@
struct mdp_dither_cfg_data dither_cfg_data;
struct mdp_gamut_cfg_data gamut_cfg_data;
struct mdp_calib_config_data calib_cfg;
+ struct mdss_ad_init_cfg ad_init_cfg;
+ struct mdss_ad_input ad_input;
} data;
};
@@ -614,6 +699,7 @@
uint8_t rgb_pipes;
uint8_t vig_pipes;
uint8_t dma_pipes;
+ uint32_t features;
};
struct msmfb_metadata {
@@ -685,6 +771,13 @@
MDP_IOMMU_DOMAIN_NS,
};
+enum {
+ MDP_WRITEBACK_MIRROR_OFF,
+ MDP_WRITEBACK_MIRROR_ON,
+ MDP_WRITEBACK_MIRROR_PAUSE,
+ MDP_WRITEBACK_MIRROR_RESUME,
+};
+
#ifdef __KERNEL__
int msm_fb_get_iommu_domain(struct fb_info *info, int domain);
/* get the framebuffer physical address information */
diff --git a/include/linux/msm_thermal.h b/include/linux/msm_thermal.h
index 2c9a613..f14cc52 100644
--- a/include/linux/msm_thermal.h
+++ b/include/linux/msm_thermal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,9 +17,16 @@
struct msm_thermal_data {
uint32_t sensor_id;
uint32_t poll_ms;
- uint32_t limit_temp_degC;
- uint32_t temp_hysteresis_degC;
+ int32_t limit_temp_degC;
+ int32_t temp_hysteresis_degC;
uint32_t freq_step;
+ int32_t core_limit_temp_degC;
+ int32_t core_temp_hysteresis_degC;
+ uint32_t core_control_mask;
+ int32_t vdd_rstr_temp_degC;
+ int32_t vdd_rstr_temp_hyst_degC;
+ int32_t psm_temp_degC;
+ int32_t psm_temp_hyst_degC;
};
#ifdef CONFIG_THERMAL_MONITOR
diff --git a/include/linux/msm_tsens.h b/include/linux/msm_tsens.h
index 5837094..35eacf1 100644
--- a/include/linux/msm_tsens.h
+++ b/include/linux/msm_tsens.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,4 +42,25 @@
int32_t tsens_get_temp(struct tsens_device *dev, unsigned long *temp);
int msm_tsens_early_init(struct tsens_platform_data *pdata);
+#if defined(CONFIG_THERMAL_TSENS8974)
+int __init tsens_tm_init_driver(void);
+int tsens_get_sw_id_mapping(int sensor_num, int *sensor_sw_idx);
+int tsens_get_hw_id_mapping(int sensor_sw_id, int *sensor_hw_num);
+#else
+static inline int __init tsens_tm_init_driver(void)
+{ return -ENXIO; }
+static inline int tsens_get_sw_id_mapping(
+ int sensor_num, int *sensor_sw_idx)
+{ return -ENXIO; }
+static inline int tsens_get_hw_id_mapping(
+ int sensor_sw_id, int *sensor_hw_num)
+{ return -ENXIO; }
+#endif
+
+#if defined(CONFIG_THERMAL_TSENS8974) || defined(CONFIG_THERMAL_TSENS8960)
+int tsens_get_max_sensor_num(uint32_t *tsens_num_sensors);
+#else
+static inline int tsens_get_max_sensor_num(uint32_t *tsens_num_sensors)
+{ return -ENXIO; }
+#endif
#endif /*MSM_TSENS_H */
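A small hedged example of the new TSENS helpers (not part of the patch): translate a software sensor id to the hardware sensor number before reading the temperature. It assumes struct tsens_device exposes the hardware sensor number as sensor_num; the sensor id passed in is arbitrary.

    /* Illustrative only -- assumes tsens_device has a 'sensor_num' field. */
    #include <linux/msm_tsens.h>

    static int read_temp_by_sw_id(int sensor_sw_id, unsigned long *temp)
    {
        struct tsens_device dev;
        uint32_t num_sensors;
        int hw_num, rc;

        rc = tsens_get_max_sensor_num(&num_sensors);
        if (rc)
            return rc;

        rc = tsens_get_hw_id_mapping(sensor_sw_id, &hw_num);
        if (rc)
            return rc;

        dev.sensor_num = hw_num;
        return tsens_get_temp(&dev, temp);
    }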
diff --git a/include/linux/qpnp-misc.h b/include/linux/qpnp-misc.h
index b241e5d..ee614a4 100644
--- a/include/linux/qpnp-misc.h
+++ b/include/linux/qpnp-misc.h
@@ -30,7 +30,7 @@
int qpnp_misc_irqs_available(struct device *consumer_dev);
#else
-static int qpnp_misc_irq_available(struct device *consumer_dev)
+static int qpnp_misc_irqs_available(struct device *consumer_dev)
{
return 0;
}
diff --git a/include/linux/qseecom.h b/include/linux/qseecom.h
index b0f089b..294c881 100644
--- a/include/linux/qseecom.h
+++ b/include/linux/qseecom.h
@@ -6,7 +6,7 @@
#define MAX_ION_FD 4
#define MAX_APP_NAME_SIZE 32
-
+#define QSEECOM_HASH_SIZE 32
/*
* struct qseecom_register_listener_req -
* for register listener ioctl request
@@ -117,6 +117,46 @@
int app_id; /* out */
};
+struct qseecom_send_svc_cmd_req {
+ uint32_t cmd_id;
+ void *cmd_req_buf; /* in */
+ unsigned int cmd_req_len; /* in */
+ void *resp_buf; /* in/out */
+ unsigned int resp_len; /* in/out */
+};
+
+enum qseecom_key_management_usage_type {
+ QSEOS_KM_USAGE_DISK_ENCRYPTION = 0x01,
+};
+
+struct qseecom_create_key_req {
+ unsigned char hash32[QSEECOM_HASH_SIZE];
+ enum qseecom_key_management_usage_type usage;
+};
+
+struct qseecom_wipe_key_req {
+ enum qseecom_key_management_usage_type usage;
+};
+
+#define SHA256_DIGEST_LENGTH (256/8)
+/*
+ * struct qseecom_save_partition_hash_req
+ * @partition_id - partition id.
+ * @hash[SHA256_DIGEST_LENGTH] - sha256 digest.
+ */
+struct qseecom_save_partition_hash_req {
+ int partition_id; /* in */
+ char digest[SHA256_DIGEST_LENGTH]; /* in */
+};
+
+/*
+ * struct qseecom_is_es_activated_req
+ * @is_activated - 1=true , 0=false
+ */
+struct qseecom_is_es_activated_req {
+ int is_activated; /* out */
+};
+
#define QSEECOM_IOC_MAGIC 0x97
@@ -165,5 +205,19 @@
#define QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 15, struct qseecom_qseos_app_load_query)
+#define QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 16, struct qseecom_send_svc_cmd_req)
+
+#define QSEECOM_IOCTL_CREATE_KEY_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 17, struct qseecom_create_key_req)
+
+#define QSEECOM_IOCTL_WIPE_KEY_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 18, struct qseecom_wipe_key_req)
+
+#define QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 19, struct qseecom_save_partition_hash_req)
+
+#define QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 20, struct qseecom_is_es_activated_req)
#endif /* __QSEECOM_H_ */
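
From user space the new key-management ioctls are plain ioctl() calls on the qseecom character device; a sketch (the /dev/qseecom node name and the zeroed hash32 are assumptions for illustration):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/qseecom.h>

int qseecom_create_disk_encryption_key(void)
{
	struct qseecom_create_key_req req;
	int fd, rc;

	fd = open("/dev/qseecom", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.usage = QSEOS_KM_USAGE_DISK_ENCRYPTION;
	/* hash32 would normally carry a password-derived digest */

	rc = ioctl(fd, QSEECOM_IOCTL_CREATE_KEY_REQ, &req);
	close(fd);
	return rc;
}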
diff --git a/include/linux/regulator/cpr-regulator.h b/include/linux/regulator/cpr-regulator.h
index 538ad15..b6fc091 100644
--- a/include/linux/regulator/cpr-regulator.h
+++ b/include/linux/regulator/cpr-regulator.h
@@ -54,6 +54,21 @@
NUM_APC_PVS,
};
+/**
+ * enum vdd_mx_vmin_method - Method to determine vmin for vdd-mx
+ * %VDD_MX_VMIN_APC: Equal to APC voltage
+ * %VDD_MX_VMIN_APC_CORNER_CEILING: Equal to PVS corner ceiling voltage
+ * %VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ * Equal to slow speed corner ceiling
+ * %VDD_MX_VMIN_MX_VMAX: Equal to specified vdd-mx-vmax voltage
+ */
+enum vdd_mx_vmin_method {
+ VDD_MX_VMIN_APC,
+ VDD_MX_VMIN_APC_CORNER_CEILING,
+ VDD_MX_VMIN_APC_SLOW_CORNER_CEILING,
+ VDD_MX_VMIN_MX_VMAX,
+};
+
#ifdef CONFIG_MSM_CPR_REGULATOR
int __init cpr_regulator_init(void);
diff --git a/include/linux/regulator/qpnp-regulator.h b/include/linux/regulator/qpnp-regulator.h
index ec580ab..c7afeb5 100644
--- a/include/linux/regulator/qpnp-regulator.h
+++ b/include/linux/regulator/qpnp-regulator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -93,12 +93,24 @@
* @system_load: Load in uA present on regulator that is not captured
* by any consumer request
* @enable_time: Time in us to delay after enabling the regulator
- * @ocp_enable: 1 = Enable over current protection (OCP) for voltage
- * switch type regulators so that they latch off
- * automatically when over current is detected
+ * @ocp_enable: 1 = Allow over current protection (OCP) to be
+ * enabled for voltage switch type regulators so
+ * that they latch off automatically when over
+ * current is detected. OCP is enabled when in HPM
+ * or auto mode.
* 0 = Disable OCP
* QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
* OCP state
+ * @ocp_irq: IRQ number of the voltage switch OCP IRQ. If
+ * specified the voltage switch will be toggled off
+ * and back on when OCP triggers in order to handle
+ * high in-rush current.
+ * @ocp_max_retries: Maximum number of times to try toggling a voltage
+ * switch off and back on as a result of
+ * consecutive over current events.
+ * @ocp_retry_delay_ms: Time to delay in milliseconds between each
+ * voltage switch toggle after an over current
+ * event takes place.
* @boost_current_limit: This parameter sets the current limit of boost type
* regulators. Its value should be one of
* QPNP_BOOST_CURRENT_LIMIT_*. If its value is
@@ -117,9 +129,6 @@
* its value is QPNP_VS_SOFT_START_STR_HW_DEFAULT,
* then the soft start strength will be left at its
* default hardware value.
- * @ocp_enable_time: Time to delay in us between enabling a switch and
- * subsequently enabling over current protection
- * (OCP) for the switch
* @auto_mode_enable: 1 = Enable automatic hardware selection of regulator
* mode (HPM vs LPM). Auto mode is not available
* on boost type regulators
@@ -132,6 +141,18 @@
* 0 = Do not enable bypass mode
* QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
* bypass mode state
+ * @hpm_enable: 1 = Enable high power mode (HPM), also referred to
+ * as NPM. HPM consumes more ground current than
+ * LPM, but it can source significantly higher load
+ * current. HPM is not available on boost type
+ * regulators. For voltage switch type regulators,
+ * HPM implies that over current protection and
+ * soft start are active all the time. This
+ * configuration can be overwritten by changing the
+ * regulator's mode dynamically.
+ * 0 = Do not enable HPM
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * HPM state
* @base_addr: SMPI base address for the regulator peripheral
*/
struct qpnp_regulator_platform_data {
@@ -142,12 +163,15 @@
int system_load;
int enable_time;
int ocp_enable;
+ int ocp_irq;
+ int ocp_max_retries;
+ int ocp_retry_delay_ms;
enum qpnp_boost_current_limit boost_current_limit;
int soft_start_enable;
enum qpnp_vs_soft_start_str vs_soft_start_strength;
- int ocp_enable_time;
int auto_mode_enable;
int bypass_mode_enable;
+ int hpm_enable;
u16 base_addr;
};
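
A board file wiring up the new OCP retry behaviour for a voltage switch might look roughly like this (all values are illustrative; the IRQ would really come from the SPMI resources):

#include <linux/regulator/qpnp-regulator.h>

static struct qpnp_regulator_platform_data vs_ocp_pdata = {
	.ocp_enable		= 1,	/* allow OCP on this voltage switch */
	.ocp_irq		= 0,	/* filled in from the SPMI OCP interrupt */
	.ocp_max_retries	= 4,	/* re-enable the switch at most 4 times */
	.ocp_retry_delay_ms	= 500,	/* wait 500 ms between toggles */
	.hpm_enable		= QPNP_REGULATOR_USE_HW_DEFAULT,
	.base_addr		= 0x8000, /* illustrative SMPI base */
};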
diff --git a/include/linux/smsc3503.h b/include/linux/smsc3503.h
index d5df871..1e28a58 100644
--- a/include/linux/smsc3503.h
+++ b/include/linux/smsc3503.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -42,9 +42,9 @@
#define OCSPINSEL (1<<5)
struct smsc_hub_platform_data {
- unsigned hub_reset;
- unsigned refclk_gpio;
- unsigned int_gpio;
+ int hub_reset;
+ int refclk_gpio;
+ int int_gpio;
};
#endif
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index bf6847a..3622616 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -201,6 +201,7 @@
* USB enters LPM.
* @delay_lpm_on_disconnect: Use a delay before entering LPM
* upon USB cable disconnection.
+ * @enable_sec_phy: Use second HSPHY with USB2 core
* @bus_scale_table: parameters for bus bandwidth requirements
* @mhl_dev_name: MHL device name used to register with MHL driver.
*/
@@ -222,12 +223,14 @@
bool core_clk_always_on_workaround;
bool delay_lpm_on_disconnect;
bool dp_manual_pullup;
+ bool enable_sec_phy;
struct msm_bus_scale_pdata *bus_scale_table;
const char *mhl_dev_name;
};
/* phy related flags */
#define ENABLE_DP_MANUAL_PULLUP BIT(0)
+#define ENABLE_SECONDARY_PHY BIT(1)
/* Timeout (in msec) values (min - max) associated with OTG timers */
@@ -402,6 +405,7 @@
unsigned strobe;
unsigned data;
bool ignore_cal_pad_config;
+ bool phy_sof_workaround;
int strobe_pad_offset;
int data_pad_offset;
@@ -418,6 +422,7 @@
u32 standalone_latency;
bool pool_64_bit_align;
bool enable_hbm;
+ bool disable_park_mode;
};
struct msm_usb_host_platform_data {
diff --git a/include/media/Kbuild b/include/media/Kbuild
index 16786a9..3992250 100644
--- a/include/media/Kbuild
+++ b/include/media/Kbuild
@@ -1,4 +1,5 @@
header-y += tavarua.h
+header-y += radio-iris-commands.h
header-y += msm_camera.h
header-y += vcap_fmt.h
diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
index bce6af3..992649f 100644
--- a/include/media/msm_cam_sensor.h
+++ b/include/media/msm_cam_sensor.h
@@ -108,10 +108,10 @@
SUB_MODULE_EEPROM,
SUB_MODULE_LED_FLASH,
SUB_MODULE_STROBE_FLASH,
- SUB_MODULE_CSIPHY,
- SUB_MODULE_CSIPHY_3D,
SUB_MODULE_CSID,
SUB_MODULE_CSID_3D,
+ SUB_MODULE_CSIPHY,
+ SUB_MODULE_CSIPHY_3D,
SUB_MODULE_MAX,
};
@@ -207,6 +207,7 @@
uint8_t settle_cnt;
uint16_t lane_mask;
uint8_t combo_mode;
+ uint8_t csid_core;
};
struct msm_camera_csi2_params {
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index afd5a42..b4b3bfc 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -1396,6 +1396,7 @@
uint8_t settle_cnt;
uint16_t lane_mask;
uint8_t combo_mode;
+ uint8_t csid_core;
};
struct msm_camera_csi2_params {
diff --git a/include/media/msm_gemini.h b/include/media/msm_gemini.h
index 0167335..2209758 100644
--- a/include/media/msm_gemini.h
+++ b/include/media/msm_gemini.h
@@ -51,10 +51,19 @@
#define MSM_GMN_IOCTL_TEST_DUMP_REGION \
_IOW(MSM_GMN_IOCTL_MAGIC, 15, unsigned long)
+#define MSM_GMN_IOCTL_SET_MODE \
+ _IOW(MSM_GMN_IOCTL_MAGIC, 16, enum msm_gmn_out_mode)
+
#define MSM_GEMINI_MODE_REALTIME_ENCODE 0
#define MSM_GEMINI_MODE_OFFLINE_ENCODE 1
#define MSM_GEMINI_MODE_REALTIME_ROTATION 2
#define MSM_GEMINI_MODE_OFFLINE_ROTATION 3
+
+enum msm_gmn_out_mode {
+ MSM_GMN_OUTMODE_FRAGMENTED,
+ MSM_GMN_OUTMODE_SINGLE
+};
+
struct msm_gemini_ctrl_cmd {
uint32_t type;
uint32_t len;
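
The new output-mode ioctl takes the enum by pointer; a user-space caller that already holds the gemini device fd would use it roughly as follows (the helper name is made up):

#include <sys/ioctl.h>
#include <media/msm_gemini.h>

/* switch the JPEG engine from fragmented to single-buffer output */
static int gemini_use_single_output(int fd)
{
	enum msm_gmn_out_mode mode = MSM_GMN_OUTMODE_SINGLE;

	return ioctl(fd, MSM_GMN_IOCTL_SET_MODE, &mode);
}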
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 7f70a01..bf6b23b 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -109,6 +109,7 @@
struct msm_vfe_rdi_cfg rdi_cfg;
} d;
enum msm_vfe_input_src input_src;
+ uint32_t input_pix_clk;
};
struct msm_vfe_axi_plane_cfg {
@@ -167,12 +168,6 @@
enum msm_vfe_frame_skip_pattern skip_pattern;
};
-enum msm_vfe_stats_pipeline_policy {
- STATS_COMP_ALL,
- STATS_COMP_NONE,
- MAX_STATS_POLICY,
-};
-
enum msm_isp_stats_type {
MSM_ISP_STATS_AEC, /* legacy based AEC */
MSM_ISP_STATS_AF, /* legacy based AF */
@@ -192,11 +187,11 @@
uint32_t session_id;
uint32_t stream_id;
enum msm_isp_stats_type stats_type;
+ uint32_t composite_flag;
uint32_t framedrop_pattern;
uint32_t irq_subsample_pattern;
uint32_t buffer_offset;
uint32_t stream_handle;
- uint8_t comp_flag;
};
struct msm_vfe_stats_stream_release_cmd {
@@ -208,12 +203,6 @@
uint8_t enable;
};
-struct msm_vfe_stats_comp_policy_cfg {
- enum msm_vfe_stats_pipeline_policy stats_pipeline_policy;
- uint32_t comp_framedrop_pattern;
- uint32_t comp_irq_subsample_pattern;
-};
-
enum msm_vfe_reg_cfg_type {
VFE_WRITE,
VFE_WRITE_MB,
@@ -318,7 +307,7 @@
#define ISP_EVENT_EOF (ISP_EVENT_BASE + ISP_EOF)
#define ISP_EVENT_BUF_DIVERT (ISP_BUF_EVENT_BASE)
#define ISP_EVENT_STATS_NOTIFY (ISP_STATS_EVENT_BASE)
-
+#define ISP_EVENT_COMP_STATS_NOTIFY (ISP_EVENT_STATS_NOTIFY + MSM_ISP_STATS_MAX)
/* The msm_v4l2_event_data structure should match the
* v4l2_event.u.data field.
* should not exceed 64 bytes */
@@ -411,10 +400,6 @@
_IOWR('V', BASE_VIDIOC_PRIVATE+11, \
struct msm_vfe_stats_stream_release_cmd)
-#define VIDIOC_MSM_ISP_CFG_STATS_COMP_POLICY \
- _IOWR('V', BASE_VIDIOC_PRIVATE+12, \
- struct msm_vfe_stats_comp_policy_cfg)
-
#define VIDIOC_MSM_ISP_UPDATE_STREAM \
_IOWR('V', BASE_VIDIOC_PRIVATE+13, struct msm_vfe_axi_stream_update_cmd)
diff --git a/include/media/msmb_ispif.h b/include/media/msmb_ispif.h
index f0f015e..c9eb12a 100644
--- a/include/media/msmb_ispif.h
+++ b/include/media/msmb_ispif.h
@@ -20,6 +20,8 @@
RDI2,
INTF_MAX
};
+#define MAX_PARAM_ENTRIES (INTF_MAX * 2)
+
#define PIX0_MASK (1 << PIX0)
#define PIX1_MASK (1 << PIX1)
#define RDI0_MASK (1 << RDI0)
@@ -76,7 +78,7 @@
struct msm_ispif_param_data {
uint32_t num;
- struct msm_ispif_params_entry entries[INTF_MAX];
+ struct msm_ispif_params_entry entries[MAX_PARAM_ENTRIES];
};
struct msm_isp_info {
diff --git a/include/media/radio-iris-commands.h b/include/media/radio-iris-commands.h
new file mode 100644
index 0000000..d41baa9
--- /dev/null
+++ b/include/media/radio-iris-commands.h
@@ -0,0 +1,105 @@
+#ifndef __RADIO_IRIS_COMMANDS_H
+#define __RADIO_IRIS_COMMANDS_H
+
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+
+enum v4l2_cid_private_iris_t {
+ V4L2_CID_PRIVATE_IRIS_SRCHMODE = (0x08000000 + 1),
+ V4L2_CID_PRIVATE_IRIS_SCANDWELL,
+ V4L2_CID_PRIVATE_IRIS_SRCHON,
+ V4L2_CID_PRIVATE_IRIS_STATE,
+ V4L2_CID_PRIVATE_IRIS_TRANSMIT_MODE,
+ V4L2_CID_PRIVATE_IRIS_RDSGROUP_MASK,
+ V4L2_CID_PRIVATE_IRIS_REGION,
+ V4L2_CID_PRIVATE_IRIS_SIGNAL_TH,
+ V4L2_CID_PRIVATE_IRIS_SRCH_PTY,
+ V4L2_CID_PRIVATE_IRIS_SRCH_PI,
+ V4L2_CID_PRIVATE_IRIS_SRCH_CNT,
+ V4L2_CID_PRIVATE_IRIS_EMPHASIS,
+ V4L2_CID_PRIVATE_IRIS_RDS_STD,
+ V4L2_CID_PRIVATE_IRIS_SPACING,
+ V4L2_CID_PRIVATE_IRIS_RDSON,
+ V4L2_CID_PRIVATE_IRIS_RDSGROUP_PROC,
+ V4L2_CID_PRIVATE_IRIS_LP_MODE,
+ V4L2_CID_PRIVATE_IRIS_ANTENNA,
+ V4L2_CID_PRIVATE_IRIS_RDSD_BUF,
+ V4L2_CID_PRIVATE_IRIS_PSALL, /*0x8000014*/
+
+ /*v4l2 Tx controls*/
+ V4L2_CID_PRIVATE_IRIS_TX_SETPSREPEATCOUNT,
+ V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_PS_NAME,
+ V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_RT,
+ V4L2_CID_PRIVATE_IRIS_IOVERC,
+ V4L2_CID_PRIVATE_IRIS_INTDET,
+ V4L2_CID_PRIVATE_IRIS_MPX_DCC,
+ V4L2_CID_PRIVATE_IRIS_AF_JUMP,
+ V4L2_CID_PRIVATE_IRIS_RSSI_DELTA,
+ V4L2_CID_PRIVATE_IRIS_HLSI, /*0x800001d*/
+
+ /*Diagnostic commands*/
+ V4L2_CID_PRIVATE_IRIS_SOFT_MUTE,
+ V4L2_CID_PRIVATE_IRIS_RIVA_ACCS_ADDR,
+ V4L2_CID_PRIVATE_IRIS_RIVA_ACCS_LEN,
+ V4L2_CID_PRIVATE_IRIS_RIVA_PEEK,
+ V4L2_CID_PRIVATE_IRIS_RIVA_POKE,
+ V4L2_CID_PRIVATE_IRIS_SSBI_ACCS_ADDR,
+ V4L2_CID_PRIVATE_IRIS_SSBI_PEEK,
+ V4L2_CID_PRIVATE_IRIS_SSBI_POKE,
+ V4L2_CID_PRIVATE_IRIS_TX_TONE,
+ V4L2_CID_PRIVATE_IRIS_RDS_GRP_COUNTERS,
+ V4L2_CID_PRIVATE_IRIS_SET_NOTCH_FILTER, /* 0x8000028 */
+ V4L2_CID_PRIVATE_IRIS_SET_AUDIO_PATH, /* TAVARUA specific command */
+ V4L2_CID_PRIVATE_IRIS_DO_CALIBRATION,
+ V4L2_CID_PRIVATE_IRIS_SRCH_ALGORITHM, /* TAVARUA specific command */
+ V4L2_CID_PRIVATE_IRIS_GET_SINR,
+ V4L2_CID_PRIVATE_INTF_LOW_THRESHOLD,
+ V4L2_CID_PRIVATE_INTF_HIGH_THRESHOLD,
+ V4L2_CID_PRIVATE_SINR_THRESHOLD,
+ V4L2_CID_PRIVATE_SINR_SAMPLES,
+ V4L2_CID_PRIVATE_SPUR_FREQ,
+ V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI,
+ V4L2_CID_PRIVATE_SPUR_SELECTION,
+ V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE,
+ V4L2_CID_PRIVATE_VALID_CHANNEL,
+ V4L2_CID_PRIVATE_AF_RMSSI_TH,
+ V4L2_CID_PRIVATE_AF_RMSSI_SAMPLES,
+ V4L2_CID_PRIVATE_GOOD_CH_RMSSI_TH,
+ V4L2_CID_PRIVATE_SRCHALGOTYPE,
+ V4L2_CID_PRIVATE_CF0TH12,
+ V4L2_CID_PRIVATE_SINRFIRSTSTAGE,
+ V4L2_CID_PRIVATE_RMSSIFIRSTSTAGE,
+
+ /*using private CIDs under userclass*/
+ V4L2_CID_PRIVATE_IRIS_READ_DEFAULT = 0x00980928,
+ V4L2_CID_PRIVATE_IRIS_WRITE_DEFAULT,
+ V4L2_CID_PRIVATE_IRIS_SET_CALIBRATION,
+};
+
+enum iris_evt_t {
+ IRIS_EVT_RADIO_READY,
+ IRIS_EVT_TUNE_SUCC,
+ IRIS_EVT_SEEK_COMPLETE,
+ IRIS_EVT_SCAN_NEXT,
+ IRIS_EVT_NEW_RAW_RDS,
+ IRIS_EVT_NEW_RT_RDS,
+ IRIS_EVT_NEW_PS_RDS,
+ IRIS_EVT_ERROR,
+ IRIS_EVT_BELOW_TH,
+ IRIS_EVT_ABOVE_TH,
+ IRIS_EVT_STEREO,
+ IRIS_EVT_MONO,
+ IRIS_EVT_RDS_AVAIL,
+ IRIS_EVT_RDS_NOT_AVAIL,
+ IRIS_EVT_NEW_SRCH_LIST,
+ IRIS_EVT_NEW_AF_LIST,
+ IRIS_EVT_TXRDSDAT,
+ IRIS_EVT_TXRDSDONE,
+ IRIS_EVT_RADIO_DISABLED,
+ IRIS_EVT_NEW_ODA,
+ IRIS_EVT_NEW_RT_PLUS,
+ IRIS_EVT_NEW_ERT,
+};
+#endif /* __RADIO_IRIS_COMMANDS_H */
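
These private CIDs ride on the regular V4L2 control interface, so an FM client sets them with VIDIOC_S_CTRL; a sketch (assumes the exported header is usable from user space and that /dev/radio0 is the tuner node):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/radio-iris-commands.h>

static int iris_set_region(int region)
{
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_PRIVATE_IRIS_REGION,
		.value	= region,
	};
	int fd, rc;

	fd = open("/dev/radio0", O_RDWR);	/* assumed tuner node */
	if (fd < 0)
		return -1;

	rc = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	close(fd);
	return rc;
}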
diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h
index 84789f1..53602c5 100644
--- a/include/media/radio-iris.h
+++ b/include/media/radio-iris.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright (c) 2011-2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013 The Linux Foundation. All rights reserved.
*
* This file is based on include/net/bluetooth/hci_core.h
*
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
+#include "radio-iris-commands.h"
/* ---- HCI Packet structures ---- */
#define RADIO_HCI_COMMAND_HDR_SIZE sizeof(struct radio_hci_command_hdr)
@@ -621,104 +622,6 @@
FM_CALIB
};
-enum v4l2_cid_private_iris_t {
- V4L2_CID_PRIVATE_IRIS_SRCHMODE = (0x08000000 + 1),
- V4L2_CID_PRIVATE_IRIS_SCANDWELL,
- V4L2_CID_PRIVATE_IRIS_SRCHON,
- V4L2_CID_PRIVATE_IRIS_STATE,
- V4L2_CID_PRIVATE_IRIS_TRANSMIT_MODE,
- V4L2_CID_PRIVATE_IRIS_RDSGROUP_MASK,
- V4L2_CID_PRIVATE_IRIS_REGION,
- V4L2_CID_PRIVATE_IRIS_SIGNAL_TH,
- V4L2_CID_PRIVATE_IRIS_SRCH_PTY,
- V4L2_CID_PRIVATE_IRIS_SRCH_PI,
- V4L2_CID_PRIVATE_IRIS_SRCH_CNT,
- V4L2_CID_PRIVATE_IRIS_EMPHASIS,
- V4L2_CID_PRIVATE_IRIS_RDS_STD,
- V4L2_CID_PRIVATE_IRIS_SPACING,
- V4L2_CID_PRIVATE_IRIS_RDSON,
- V4L2_CID_PRIVATE_IRIS_RDSGROUP_PROC,
- V4L2_CID_PRIVATE_IRIS_LP_MODE,
- V4L2_CID_PRIVATE_IRIS_ANTENNA,
- V4L2_CID_PRIVATE_IRIS_RDSD_BUF,
- V4L2_CID_PRIVATE_IRIS_PSALL, /*0x8000014*/
-
- /*v4l2 Tx controls*/
- V4L2_CID_PRIVATE_IRIS_TX_SETPSREPEATCOUNT,
- V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_PS_NAME,
- V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_RT,
- V4L2_CID_PRIVATE_IRIS_IOVERC,
- V4L2_CID_PRIVATE_IRIS_INTDET,
- V4L2_CID_PRIVATE_IRIS_MPX_DCC,
- V4L2_CID_PRIVATE_IRIS_AF_JUMP,
- V4L2_CID_PRIVATE_IRIS_RSSI_DELTA,
- V4L2_CID_PRIVATE_IRIS_HLSI, /*0x800001d*/
-
- /*Diagnostic commands*/
- V4L2_CID_PRIVATE_IRIS_SOFT_MUTE,
- V4L2_CID_PRIVATE_IRIS_RIVA_ACCS_ADDR,
- V4L2_CID_PRIVATE_IRIS_RIVA_ACCS_LEN,
- V4L2_CID_PRIVATE_IRIS_RIVA_PEEK,
- V4L2_CID_PRIVATE_IRIS_RIVA_POKE,
- V4L2_CID_PRIVATE_IRIS_SSBI_ACCS_ADDR,
- V4L2_CID_PRIVATE_IRIS_SSBI_PEEK,
- V4L2_CID_PRIVATE_IRIS_SSBI_POKE,
- V4L2_CID_PRIVATE_IRIS_TX_TONE,
- V4L2_CID_PRIVATE_IRIS_RDS_GRP_COUNTERS,
- V4L2_CID_PRIVATE_IRIS_SET_NOTCH_FILTER, /* 0x8000028 */
- V4L2_CID_PRIVATE_IRIS_SET_AUDIO_PATH, /* TAVARUA specific command */
- V4L2_CID_PRIVATE_IRIS_DO_CALIBRATION,
- V4L2_CID_PRIVATE_IRIS_SRCH_ALGORITHM, /* TAVARUA specific command */
- V4L2_CID_PRIVATE_IRIS_GET_SINR,
- V4L2_CID_PRIVATE_INTF_LOW_THRESHOLD,
- V4L2_CID_PRIVATE_INTF_HIGH_THRESHOLD,
- V4L2_CID_PRIVATE_SINR_THRESHOLD,
- V4L2_CID_PRIVATE_SINR_SAMPLES,
- V4L2_CID_PRIVATE_SPUR_FREQ,
- V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI,
- V4L2_CID_PRIVATE_SPUR_SELECTION,
- V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE,
- V4L2_CID_PRIVATE_VALID_CHANNEL,
- V4L2_CID_PRIVATE_AF_RMSSI_TH,
- V4L2_CID_PRIVATE_AF_RMSSI_SAMPLES,
- V4L2_CID_PRIVATE_GOOD_CH_RMSSI_TH,
- V4L2_CID_PRIVATE_SRCHALGOTYPE,
- V4L2_CID_PRIVATE_CF0TH12,
- V4L2_CID_PRIVATE_SINRFIRSTSTAGE,
- V4L2_CID_PRIVATE_RMSSIFIRSTSTAGE,
-
-
- /*using private CIDs under userclass*/
- V4L2_CID_PRIVATE_IRIS_READ_DEFAULT = 0x00980928,
- V4L2_CID_PRIVATE_IRIS_WRITE_DEFAULT,
- V4L2_CID_PRIVATE_IRIS_SET_CALIBRATION,
-};
-
-
-enum iris_evt_t {
- IRIS_EVT_RADIO_READY,
- IRIS_EVT_TUNE_SUCC,
- IRIS_EVT_SEEK_COMPLETE,
- IRIS_EVT_SCAN_NEXT,
- IRIS_EVT_NEW_RAW_RDS,
- IRIS_EVT_NEW_RT_RDS,
- IRIS_EVT_NEW_PS_RDS,
- IRIS_EVT_ERROR,
- IRIS_EVT_BELOW_TH,
- IRIS_EVT_ABOVE_TH,
- IRIS_EVT_STEREO,
- IRIS_EVT_MONO,
- IRIS_EVT_RDS_AVAIL,
- IRIS_EVT_RDS_NOT_AVAIL,
- IRIS_EVT_NEW_SRCH_LIST,
- IRIS_EVT_NEW_AF_LIST,
- IRIS_EVT_TXRDSDAT,
- IRIS_EVT_TXRDSDONE,
- IRIS_EVT_RADIO_DISABLED,
- IRIS_EVT_NEW_ODA,
- IRIS_EVT_NEW_RT_PLUS,
- IRIS_EVT_NEW_ERT,
-};
enum emphasis_type {
FM_RX_EMP75 = 0x0,
FM_RX_EMP50 = 0x1
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 04e683f..fa4dedc 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -335,6 +335,7 @@
/* Payload an #ADM_CMD_GET_PP_PARAMS_V5 command.
*/
struct adm_cmd_get_pp_params_v5 {
+ struct apr_hdr hdr;
u32 data_payload_addr_lsw;
/* LSW of parameter data payload address.*/
@@ -2593,6 +2594,14 @@
} __packed;
+/* @brief Dolby Digital Plus end point configuration structure
+ */
+struct asm_dec_ddp_endp_param_v2 {
+ struct apr_hdr hdr;
+ struct asm_stream_cmd_set_encdec_param encdec;
+ int endp_param_value;
+} __packed;
+
/* @brief Multichannel PCM encoder configuration structure used
* in the #ASM_STREAM_CMD_OPEN_READ_V2 command.
*/
@@ -6913,4 +6922,7 @@
struct afe_port_cmd_set_param_v2 param;
} __packed;
+/* Dolby DAP topology */
+#define DOLBY_ADM_COPP_TOPOLOGY_ID 0x0001033B
+
#endif /*_APR_AUDIO_V2_H_ */
diff --git a/include/sound/compress_params.h b/include/sound/compress_params.h
index b95fa3c..f5ab179 100644
--- a/include/sound/compress_params.h
+++ b/include/sound/compress_params.h
@@ -89,6 +89,8 @@
#define SND_AUDIOCODEC_PASS_THROUGH ((__u32) 0x00000015)
#define SND_AUDIOCODEC_MP2 ((__u32) 0x00000016)
#define SND_AUDIOCODEC_DTS_LBR_PASS_THROUGH ((__u32) 0x00000017)
+#define SND_AUDIOCODEC_EAC3 ((__u32) 0x00000018)
+#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_EAC3
/*
* Profile and modes are listed with bit masks. This allows for a
* more compact representation of fields that will not evolve
@@ -337,7 +339,12 @@
__u32 modelIdLength;
__u8 *modelId;
};
-
+struct snd_dec_ddp {
+ __u32 params_length;
+ __u8 *params;
+ __u32 params_id[18];
+ __u32 params_value[18];
+};
union snd_codec_options {
struct snd_enc_wma wma;
struct snd_enc_vorbis vorbis;
@@ -345,6 +352,7 @@
struct snd_enc_flac flac;
struct snd_enc_generic generic;
struct snd_dec_dts dts;
+ struct snd_dec_ddp ddp;
};
/** struct snd_codec_desc - description of codec capabilities
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
index 77a805c..4bea1e1 100644
--- a/include/sound/q6adm-v2.h
+++ b/include/sound/q6adm-v2.h
@@ -32,6 +32,12 @@
int adm_open(int port, int path, int rate, int mode, int topology,
bool perf_mode, uint16_t bits_per_sample);
+int adm_dolby_dap_get_params(int port_id, uint32_t module_id, uint32_t param_id,
+ uint32_t params_length, char *params);
+
+int adm_dolby_dap_send_params(int port_id, char *params,
+ uint32_t params_length);
+
int adm_multi_ch_copp_open(int port, int path, int rate, int mode,
int topology, bool perf_mode, uint16_t bits_per_sample);
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
index 9c86e1d..2a740f4 100644
--- a/include/sound/q6afe-v2.h
+++ b/include/sound/q6afe-v2.h
@@ -81,6 +81,8 @@
IDX_AFE_PORT_ID_TERTIARY_MI2S_TX = 39,
IDX_AFE_PORT_ID_PRIMARY_MI2S_RX = 40,
IDX_AFE_PORT_ID_PRIMARY_MI2S_TX = 41,
+ IDX_AFE_PORT_ID_SECONDARY_PCM_RX = 42,
+ IDX_AFE_PORT_ID_SECONDARY_PCM_TX = 43,
IDX_GLOBAL_CFG,
AFE_MAX_PORTS
};
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 0d0670e..0dd14e6 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -41,6 +41,8 @@
#define FORMAT_AMR_WB_PLUS 0x0010
#define FORMAT_MPEG4_MULTI_AAC 0x0011
#define FORMAT_MULTI_CHANNEL_LINEAR_PCM 0x0012
+#define FORMAT_AC3 0x0013
+#define FORMAT_EAC3 0x0014
#define ENCDEC_SBCBITRATE 0x0001
#define ENCDEC_IMMEDIATE_DECODE 0x0002
@@ -295,6 +297,10 @@
int q6asm_media_format_block_amrwbplus(struct audio_client *ac,
struct asm_amrwbplus_cfg *cfg);
+
+int q6asm_ds1_set_endp_params(struct audio_client *ac,
+ int param_id, int param_value);
+
/* PP specific */
int q6asm_equalizer(struct audio_client *ac, void *eq);
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
index ea83664..951e6ca 100644
--- a/include/trace/events/cpufreq_interactive.h
+++ b/include/trace/events/cpufreq_interactive.h
@@ -28,13 +28,7 @@
__entry->actualfreq)
);
-DEFINE_EVENT(set, cpufreq_interactive_up,
- TP_PROTO(u32 cpu_id, unsigned long targfreq,
- unsigned long actualfreq),
- TP_ARGS(cpu_id, targfreq, actualfreq)
-);
-
-DEFINE_EVENT(set, cpufreq_interactive_down,
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
TP_PROTO(u32 cpu_id, unsigned long targfreq,
unsigned long actualfreq),
TP_ARGS(cpu_id, targfreq, actualfreq)
@@ -42,44 +36,50 @@
DECLARE_EVENT_CLASS(loadeval,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq),
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
TP_STRUCT__entry(
__field(unsigned long, cpu_id )
__field(unsigned long, load )
- __field(unsigned long, curfreq )
- __field(unsigned long, targfreq )
+ __field(unsigned long, curtarg )
+ __field(unsigned long, curactual )
+ __field(unsigned long, newtarg )
),
TP_fast_assign(
__entry->cpu_id = cpu_id;
__entry->load = load;
- __entry->curfreq = curfreq;
- __entry->targfreq = targfreq;
+ __entry->curtarg = curtarg;
+ __entry->curactual = curactual;
+ __entry->newtarg = newtarg;
),
- TP_printk("cpu=%lu load=%lu cur=%lu targ=%lu",
- __entry->cpu_id, __entry->load, __entry->curfreq,
- __entry->targfreq)
+ TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+ __entry->cpu_id, __entry->load, __entry->curtarg,
+ __entry->curactual, __entry->newtarg)
);
DEFINE_EVENT(loadeval, cpufreq_interactive_target,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq)
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
DEFINE_EVENT(loadeval, cpufreq_interactive_already,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq)
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq)
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
TRACE_EVENT(cpufreq_interactive_boost,
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index a1da44f..e76e822 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -495,6 +495,70 @@
TP_ARGS(mode)
);
+DECLARE_EVENT_CLASS(ion_alloc_pages,
+
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order),
+
+ TP_STRUCT__entry(
+ __field(gfp_t, gfp_flags)
+ __field(unsigned int, order)
+ ),
+
+ TP_fast_assign(
+ __entry->gfp_flags = gfp_flags;
+ __entry->order = order;
+ ),
+
+ TP_printk("gfp_flags=%s order=%d",
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_start,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_end,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_fail,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_start,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_end,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_fail,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
#endif /* _TRACE_KMEM_H */
/* This part must be outside protection */
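
The new event class gives the ion heaps paired start/end/fail tracepoints around page allocation; a caller would use them roughly as below (the helper name is an assumption):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <trace/events/kmem.h>

/* hypothetical helper: bracket a system-heap allocation with the new events */
static struct page *ion_sys_alloc_pages(gfp_t gfp, unsigned int order)
{
	struct page *page;

	trace_alloc_pages_sys_start(gfp, order);
	page = alloc_pages(gfp, order);
	if (!page) {
		trace_alloc_pages_sys_fail(gfp, order);
		return NULL;
	}
	trace_alloc_pages_sys_end(gfp, order);
	return page;
}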
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
new file mode 100644
index 0000000..37115c4
--- /dev/null
+++ b/include/trace/events/mmc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmc
+
+#if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMC_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(mmc_clk,
+ TP_PROTO(char *print_info),
+
+ TP_ARGS(print_info),
+
+ TP_STRUCT__entry(
+ __string(print_info, print_info)
+ ),
+
+ TP_fast_assign(
+ __assign_str(print_info, print_info);
+ ),
+
+ TP_printk("%s",
+ __get_str(print_info)
+ )
+);
+
+#endif /* if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
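
Emitting the event follows the usual tracepoint pattern: one compilation unit in the host driver defines the points with CREATE_TRACE_POINTS, and call sites pass a preformatted string (a sketch; the helper name is made up):

/* exactly one .c file of the host driver instantiates the events */
#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include <linux/kernel.h>
#include <linux/mmc/host.h>

/* hypothetical helper: log a clock change through the mmc_clk event */
static void msmsdcc_trace_clock(struct mmc_host *host, unsigned int clock)
{
	char buf[64];

	snprintf(buf, sizeof(buf), "%s: clk %u Hz", mmc_hostname(host), clock);
	trace_mmc_clk(buf);
}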
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index edd656c..e6a2e35 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -224,10 +224,13 @@
raw_spin_unlock(&base->cpu_base->lock);
raw_spin_lock(&new_base->cpu_base->lock);
- if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
- cpu = this_cpu;
+ this_cpu = smp_processor_id();
+
+ if (cpu != this_cpu && (hrtimer_check_target(timer, new_base)
+ || !cpu_online(cpu))) {
raw_spin_unlock(&new_base->cpu_base->lock);
raw_spin_lock(&base->cpu_base->lock);
+ cpu = smp_processor_id();
timer->base = base;
goto again;
}
diff --git a/kernel/pid.c b/kernel/pid.c
index 9f08dfa..7acf590 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -430,6 +430,7 @@
{
return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
}
+EXPORT_SYMBOL_GPL(find_task_by_vpid);
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a2bad88..862e172 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4757,6 +4757,7 @@
delayacct_blkio_end();
return ret;
}
+EXPORT_SYMBOL(io_schedule_timeout);
/**
* sys_sched_get_priority_max - return maximum RT priority.
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b175073..7e31770 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2126,11 +2126,11 @@
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ struct cfs_rq *cfs_rq = &rq->cfs;
WARN_ON(task_rq(p) != rq);
- if (cfs_rq->nr_running > 1) {
+ if (cfs_rq->h_nr_running > 1) {
u64 slice = sched_slice(cfs_rq, se);
u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
s64 delta = slice - ran;
@@ -2154,8 +2154,7 @@
/*
* called from enqueue/dequeue and updates the hrtick when the
- * current task is from our class and nr_running is low enough
- * to matter.
+ * current task is from our class.
*/
static void hrtick_update(struct rq *rq)
{
@@ -2164,8 +2163,7 @@
if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
return;
- if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
- hrtick_start_fair(rq, curr);
+ hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
@@ -4626,7 +4624,7 @@
raw_spin_lock(&this_rq->lock);
- if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
+ if (!pulled_task || time_after(jiffies, this_rq->next_balance)) {
/*
* We are going idle. next_balance may be set based on
* a busy processor. So reset next_balance.
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 9cf1b8b..604ee09 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -171,7 +171,7 @@
*
* Returns 0 on success or a -ve errno on failure.
*/
-int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
+int gen_pool_add_virt(struct gen_pool *pool, u64 virt, phys_addr_t phys,
size_t size, int nid)
{
struct gen_pool_chunk *chunk;
@@ -208,7 +208,7 @@
*
* Returns the physical address on success, or -1 on error.
*/
-phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
+phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, u64 addr)
{
struct gen_pool_chunk *chunk;
phys_addr_t paddr = -1;
@@ -273,11 +273,11 @@
* Uses a first-fit algorithm. Can not be used in NMI handler on
* architectures without NMI-safe cmpxchg implementation.
*/
-unsigned long gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
+u64 gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
unsigned alignment_order)
{
struct gen_pool_chunk *chunk;
- unsigned long addr = 0, align_mask = 0;
+ u64 addr = 0, align_mask = 0;
int order = pool->min_alloc_order;
int nbits, start_bit = 0, remain;
@@ -314,7 +314,7 @@
goto retry;
}
- addr = chunk->start_addr + ((unsigned long)start_bit << order);
+ addr = chunk->start_addr + ((u64)start_bit << order);
size = nbits << pool->min_alloc_order;
atomic_sub(size, &chunk->avail);
break;
@@ -334,7 +334,7 @@
* pool. Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
*/
-void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+void gen_pool_free(struct gen_pool *pool, u64 addr, size_t size)
{
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
diff --git a/lib/memory_alloc.c b/lib/memory_alloc.c
index cc7424f..03f1944 100644
--- a/lib/memory_alloc.c
+++ b/lib/memory_alloc.c
@@ -67,7 +67,7 @@
struct rb_node *r = p;
struct alloc *node = rb_entry(r, struct alloc, rb_node);
- seq_printf(m, "0x%lx 0x%p %ld %u %pS\n", node->paddr, node->vaddr,
+ seq_printf(m, "0x%pa 0x%pa %ld %u %pS\n", &node->paddr, &node->vaddr,
node->len, node->mpool->id, node->caller);
return 0;
}
@@ -84,7 +84,7 @@
return seq_open(file, &mempool_op);
}
-static struct alloc *find_alloc(void *addr)
+static struct alloc *find_alloc(phys_addr_t addr)
{
struct rb_root *root = &alloc_root;
struct rb_node *p = root->rb_node;
@@ -126,7 +126,7 @@
else if (node->vaddr > tmp->vaddr)
p = &(*p)->rb_right;
else {
- WARN(1, "memory at %p already allocated", tmp->vaddr);
+ WARN(1, "memory at %pa already allocated", &tmp->vaddr);
mutex_unlock(&alloc_mutex);
return -EINVAL;
}
@@ -149,7 +149,7 @@
return 0;
}
-static struct gen_pool *initialize_gpool(unsigned long start,
+static struct gen_pool *initialize_gpool(phys_addr_t start,
unsigned long size)
{
struct gen_pool *gpool;
@@ -194,7 +194,12 @@
if (!vaddr)
goto out_kfree;
- node->vaddr = vaddr;
+ /*
+ * Just cast to an unsigned long to avoid warnings about casting from a
+ * pointer to an integer of different size. The pointer is only 32-bits
+ * so we lose no data.
+ */
+ node->vaddr = (unsigned long)vaddr;
node->paddr = paddr;
node->len = aligned_size;
node->mpool = mpool;
@@ -216,13 +221,19 @@
static void __free(void *vaddr, bool unmap)
{
- struct alloc *node = find_alloc(vaddr);
+ struct alloc *node = find_alloc((unsigned long)vaddr);
if (!node)
return;
if (unmap)
- iounmap(node->vaddr);
+ /*
+ * We need the double cast because otherwise gcc complains about
+ * cast to pointer of different size. This is technically a down
+ * cast but if unmap is being called, this had better be an
+ * actual 32-bit pointer anyway.
+ */
+ iounmap((void *)(unsigned long)node->vaddr);
gen_pool_free(node->mpool->gpool, node->paddr, node->len);
node->mpool->free += node->len;
@@ -248,7 +259,7 @@
return mpool;
}
-struct mem_pool *initialize_memory_pool(unsigned long start,
+struct mem_pool *initialize_memory_pool(phys_addr_t start,
unsigned long size, int mem_type)
{
int id = mem_type;
@@ -264,8 +275,8 @@
mpools[id].id = id;
mutex_unlock(&mpools[id].pool_mutex);
- pr_info("memory pool %d (start %lx size %lx) initialized\n",
- id, start, size);
+ pr_info("memory pool %d (start %pa size %lx) initialized\n",
+ id, &start, size);
return &mpools[id];
}
EXPORT_SYMBOL_GPL(initialize_memory_pool);
@@ -285,10 +296,10 @@
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory);
-unsigned long _allocate_contiguous_memory_nomap(unsigned long size,
+phys_addr_t _allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align, void *caller)
{
- unsigned long paddr;
+ phys_addr_t paddr;
unsigned long aligned_size;
struct alloc *node;
@@ -317,7 +328,7 @@
* are disjoint, so there won't be any chance of
* a duplicate node->vaddr value.
*/
- node->vaddr = (void *)paddr;
+ node->vaddr = paddr;
node->len = aligned_size;
node->mpool = mpool;
node->caller = caller;
@@ -334,7 +345,7 @@
}
EXPORT_SYMBOL_GPL(_allocate_contiguous_memory_nomap);
-unsigned long allocate_contiguous_memory_nomap(unsigned long size,
+phys_addr_t allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align)
{
return _allocate_contiguous_memory_nomap(size, mem_type, align,
@@ -351,18 +362,18 @@
}
EXPORT_SYMBOL_GPL(free_contiguous_memory);
-void free_contiguous_memory_by_paddr(unsigned long paddr)
+void free_contiguous_memory_by_paddr(phys_addr_t paddr)
{
if (!paddr)
return;
- __free((void *)paddr, false);
+ __free((void *)(unsigned long)paddr, false);
return;
}
EXPORT_SYMBOL_GPL(free_contiguous_memory_by_paddr);
-unsigned long memory_pool_node_paddr(void *vaddr)
+phys_addr_t memory_pool_node_paddr(void *vaddr)
{
- struct alloc *node = find_alloc(vaddr);
+ struct alloc *node = find_alloc((unsigned long)vaddr);
if (!node)
return -EINVAL;
@@ -373,7 +384,7 @@
unsigned long memory_pool_node_len(void *vaddr)
{
- struct alloc *node = find_alloc(vaddr);
+ struct alloc *node = find_alloc((unsigned long)vaddr);
if (!node)
return -EINVAL;
diff --git a/mm/dmapool.c b/mm/dmapool.c
index c5ab33b..da1b0f0 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -50,7 +50,6 @@
size_t allocation;
size_t boundary;
char name[32];
- wait_queue_head_t waitq;
struct list_head pools;
};
@@ -62,8 +61,6 @@
unsigned int offset;
};
-#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
-
static DEFINE_MUTEX(pools_lock);
static ssize_t
@@ -172,7 +169,6 @@
retval->size = size;
retval->boundary = boundary;
retval->allocation = allocation;
- init_waitqueue_head(&retval->waitq);
if (dev) {
int ret;
@@ -227,7 +223,6 @@
memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
pool_initialise_page(pool, page);
- list_add(&page->page_list, &pool->page_list);
page->in_use = 0;
page->offset = 0;
} else {
@@ -315,30 +310,21 @@
might_sleep_if(mem_flags & __GFP_WAIT);
spin_lock_irqsave(&pool->lock, flags);
- restart:
list_for_each_entry(page, &pool->page_list, page_list) {
if (page->offset < pool->allocation)
goto ready;
}
- page = pool_alloc_page(pool, GFP_ATOMIC);
- if (!page) {
- if (mem_flags & __GFP_WAIT) {
- DECLARE_WAITQUEUE(wait, current);
- __set_current_state(TASK_UNINTERRUPTIBLE);
- __add_wait_queue(&pool->waitq, &wait);
- spin_unlock_irqrestore(&pool->lock, flags);
+ /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
+ spin_unlock_irqrestore(&pool->lock, flags);
- schedule_timeout(POOL_TIMEOUT_JIFFIES);
+ page = pool_alloc_page(pool, mem_flags);
+ if (!page)
+ return NULL;
- spin_lock_irqsave(&pool->lock, flags);
- __remove_wait_queue(&pool->waitq, &wait);
- goto restart;
- }
- retval = NULL;
- goto done;
- }
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add(&page->page_list, &pool->page_list);
ready:
page->in_use++;
offset = page->offset;
@@ -348,7 +334,6 @@
#ifdef DMAPOOL_DEBUG
memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
- done:
spin_unlock_irqrestore(&pool->lock, flags);
return retval;
}
@@ -435,8 +420,6 @@
page->in_use--;
*(int *)vaddr = page->offset;
page->offset = offset;
- if (waitqueue_active(&pool->waitq))
- wake_up_locked(&pool->waitq);
/*
* Resist a temptation to do
* if (!is_page_busy(page)) pool_free_page(pool, page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d6dd07a..69b9521 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1128,7 +1128,7 @@
#ifdef CONFIG_CMA
if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
page = __rmqueue_smallest(zone, order, MIGRATE_CMA);
- else
+ if (!page)
#endif
retry_reserve :
page = __rmqueue_smallest(zone, order, migratetype);
diff --git a/mm/readahead.c b/mm/readahead.c
index cbcbb02..728a7a3 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -261,6 +261,8 @@
/*
* Set the initial window size, round to next power of 2 and square
+ * Small size is not dependent on max value - only a one-page read is regarded
+ * as small.
* for small size, x 4 for medium, and x 2 for large
* for 128k (32 page) max ra
* 1-8 page = 32k initial, > 8 page = 128k initial
@@ -269,7 +271,7 @@
{
unsigned long newsize = roundup_pow_of_two(size);
- if (newsize <= max / 32)
+ if (newsize <= 1)
newsize = newsize * 4;
else if (newsize <= max / 4)
newsize = newsize * 2;
diff --git a/scripts/build-all.py b/scripts/build-all.py
index 3cecbe2..4789af7 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -88,7 +88,6 @@
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
- r'msmzinc*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
diff --git a/sound/soc/codecs/wcd9304-tables.c b/sound/soc/codecs/wcd9304-tables.c
index 83c0c1d..7ec0152 100644
--- a/sound/soc/codecs/wcd9304-tables.c
+++ b/sound/soc/codecs/wcd9304-tables.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -235,8 +235,24 @@
[SITAR_A_CDC_RX1_B4_CTL] = SITAR_A_CDC_RX1_B4_CTL__POR,
[SITAR_A_CDC_RX1_B5_CTL] = SITAR_A_CDC_RX1_B5_CTL__POR,
[SITAR_A_CDC_RX1_B6_CTL] = SITAR_A_CDC_RX1_B6_CTL__POR,
+ [SITAR_A_CDC_RX2_B1_CTL] = SITAR_A_CDC_RX2_B1_CTL__POR,
+ [SITAR_A_CDC_RX2_B2_CTL] = SITAR_A_CDC_RX2_B2_CTL__POR,
+ [SITAR_A_CDC_RX2_B3_CTL] = SITAR_A_CDC_RX2_B3_CTL__POR,
+ [SITAR_A_CDC_RX2_B4_CTL] = SITAR_A_CDC_RX2_B4_CTL__POR,
+ [SITAR_A_CDC_RX2_B5_CTL] = SITAR_A_CDC_RX2_B5_CTL__POR,
+ [SITAR_A_CDC_RX2_B6_CTL] = SITAR_A_CDC_RX2_B6_CTL__POR,
+ [SITAR_A_CDC_RX3_B1_CTL] = SITAR_A_CDC_RX3_B1_CTL__POR,
+ [SITAR_A_CDC_RX3_B2_CTL] = SITAR_A_CDC_RX3_B2_CTL__POR,
+ [SITAR_A_CDC_RX3_B3_CTL] = SITAR_A_CDC_RX3_B3_CTL__POR,
+ [SITAR_A_CDC_RX3_B4_CTL] = SITAR_A_CDC_RX3_B4_CTL__POR,
+ [SITAR_A_CDC_RX3_B5_CTL] = SITAR_A_CDC_RX3_B5_CTL__POR,
+ [SITAR_A_CDC_RX3_B6_CTL] = SITAR_A_CDC_RX3_B6_CTL__POR,
[SITAR_A_CDC_RX1_VOL_CTL_B1_CTL] = SITAR_A_CDC_RX1_VOL_CTL_B1_CTL__POR,
[SITAR_A_CDC_RX1_VOL_CTL_B2_CTL] = SITAR_A_CDC_RX1_VOL_CTL_B2_CTL__POR,
+ [SITAR_A_CDC_RX2_VOL_CTL_B1_CTL] = SITAR_A_CDC_RX2_VOL_CTL_B1_CTL__POR,
+ [SITAR_A_CDC_RX2_VOL_CTL_B2_CTL] = SITAR_A_CDC_RX2_VOL_CTL_B2_CTL__POR,
+ [SITAR_A_CDC_RX3_VOL_CTL_B1_CTL] = SITAR_A_CDC_RX3_VOL_CTL_B1_CTL__POR,
+ [SITAR_A_CDC_RX3_VOL_CTL_B2_CTL] = SITAR_A_CDC_RX3_VOL_CTL_B2_CTL__POR,
[SITAR_A_CDC_CLK_ANC_RESET_CTL] = SITAR_A_CDC_CLK_ANC_RESET_CTL__POR,
[SITAR_A_CDC_CLK_RX_RESET_CTL] = SITAR_A_CDC_CLK_RX_RESET_CTL__POR,
[SITAR_A_CDC_CLK_TX_RESET_B1_CTL] =
@@ -322,6 +338,15 @@
[SITAR_A_CDC_COMP1_SHUT_DOWN_STATUS] =
SITAR_A_CDC_COMP1_SHUT_DOWN_STATUS__POR,
[SITAR_A_CDC_COMP1_FS_CFG] = SITAR_A_CDC_COMP1_FS_CFG__POR,
+ [SITAR_A_CDC_COMP2_B1_CTL] = SITAR_A_CDC_COMP2_B1_CTL__POR,
+ [SITAR_A_CDC_COMP2_B2_CTL] = SITAR_A_CDC_COMP2_B2_CTL__POR,
+ [SITAR_A_CDC_COMP2_B3_CTL] = SITAR_A_CDC_COMP2_B3_CTL__POR,
+ [SITAR_A_CDC_COMP2_B4_CTL] = SITAR_A_CDC_COMP2_B4_CTL__POR,
+ [SITAR_A_CDC_COMP2_B5_CTL] = SITAR_A_CDC_COMP2_B5_CTL__POR,
+ [SITAR_A_CDC_COMP2_B6_CTL] = SITAR_A_CDC_COMP2_B6_CTL__POR,
+ [SITAR_A_CDC_COMP2_SHUT_DOWN_STATUS] =
+ SITAR_A_CDC_COMP2_SHUT_DOWN_STATUS__POR,
+ [SITAR_A_CDC_COMP2_FS_CFG] = SITAR_A_CDC_COMP2_FS_CFG__POR,
[SITAR_A_CDC_CONN_RX1_B1_CTL] = SITAR_A_CDC_CONN_RX1_B1_CTL__POR,
[SITAR_A_CDC_CONN_RX1_B2_CTL] = SITAR_A_CDC_CONN_RX1_B2_CTL__POR,
[SITAR_A_CDC_CONN_RX1_B3_CTL] = SITAR_A_CDC_CONN_RX1_B3_CTL__POR,
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index 36d5d6b..616f8d5 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -81,6 +81,11 @@
#define SITAR_OCP_ATTEMPT 1
+#define COMP_DIGITAL_DB_GAIN_APPLY(a, b) \
+ (((a) <= 0) ? ((a) - (b)) : (a))
+/* The wait time value comes from codec HW specification */
+#define COMP_BRINGUP_WAIT_TIME 3000
+
#define SITAR_MCLK_RATE_12288KHZ 12288000
#define SITAR_MCLK_RATE_9600KHZ 9600000
@@ -102,6 +107,12 @@
#define SITAR_MBHC_STATUS_REL_DETECTION 0x0C
#define SITAR_MBHC_GPIO_REL_DEBOUNCE_TIME_MS 200
+#define CUT_OF_FREQ_MASK 0x30
+#define CF_MIN_3DB_4HZ 0x0
+#define CF_MIN_3DB_75HZ 0x01
+#define CF_MIN_3DB_150HZ 0x02
+
+
static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
@@ -142,6 +153,22 @@
BAND_MAX,
};
+enum {
+ COMPANDER_1 = 0,
+ COMPANDER_2,
+ COMPANDER_MAX,
+};
+
+enum {
+ COMPANDER_FS_8KHZ = 0,
+ COMPANDER_FS_16KHZ,
+ COMPANDER_FS_32KHZ,
+ COMPANDER_FS_48KHZ,
+ COMPANDER_FS_96KHZ,
+ COMPANDER_FS_192KHZ,
+ COMPANDER_FS_MAX,
+};
+
/* Flags to track of PA and DAC state.
* PA and DAC should be tracked separately as AUXPGA loopback requires
* only PA to be turned on without DAC being on. */
@@ -152,6 +179,33 @@
SITAR_HPHR_DAC_OFF_ACK
};
+struct comp_sample_dependent_params {
+ u32 peak_det_timeout;
+ u32 rms_meter_div_fact;
+ u32 rms_meter_resamp_fact;
+};
+
+struct comp_dgtl_gain_offset {
+ u8 whole_db_gain;
+ u8 half_db_gain;
+};
+
+static const struct comp_dgtl_gain_offset comp_dgtl_gain[] = {
+ {0, 0},
+ {1, 1},
+ {3, 0},
+ {4, 1},
+ {6, 0},
+ {7, 1},
+ {9, 0},
+ {10, 1},
+ {12, 0},
+ {13, 1},
+ {15, 0},
+ {16, 1},
+ {18, 0},
+};
+
/* Data used by MBHC */
struct mbhc_internal_cal_data {
u16 dce_z;
@@ -194,6 +248,15 @@
0, /* AIF1_CAP */
};
+struct hpf_work {
+ struct sitar_priv *sitar;
+ u32 decimator;
+ u8 tx_hpf_cut_of_freq;
+ struct delayed_work dwork;
+};
+
+static struct hpf_work tx_hpf_work[NUM_DECIMATORS];
+
struct sitar_priv {
struct snd_soc_codec *codec;
u32 mclk_freq;
@@ -258,6 +321,11 @@
/* num of slim ports required */
struct wcd9xxx_codec_dai_data dai[NUM_CODEC_DAIS];
+ /*compander*/
+ int comp_enabled[COMPANDER_MAX];
+ u32 comp_fs[COMPANDER_MAX];
+ u8 comp_gain_offset[NUM_INTERPOLATORS];
+
/* Currently, only used for mbhc purpose, to protect
* concurrent execution of mbhc threaded irq handlers and
* kill race between DAPM and MBHC.But can serve as a
@@ -280,6 +348,47 @@
struct sitar_priv *debug_sitar_priv;
#endif
+static const int comp_rx_path[] = {
+ COMPANDER_2,
+ COMPANDER_1,
+ COMPANDER_1,
+ COMPANDER_MAX,
+};
+
+static const struct comp_sample_dependent_params
+ comp_samp_params[COMPANDER_FS_MAX] = {
+ {
+ .peak_det_timeout = 0x6,
+ .rms_meter_div_fact = 0x9 << 4,
+ .rms_meter_resamp_fact = 0x06,
+ },
+ {
+ .peak_det_timeout = 0x7,
+ .rms_meter_div_fact = 0xA << 4,
+ .rms_meter_resamp_fact = 0x0C,
+ },
+ {
+ .peak_det_timeout = 0x8,
+ .rms_meter_div_fact = 0xB << 4,
+ .rms_meter_resamp_fact = 0x30,
+ },
+ {
+ .peak_det_timeout = 0x9,
+ .rms_meter_div_fact = 0xB << 4,
+ .rms_meter_resamp_fact = 0x28,
+ },
+ {
+ .peak_det_timeout = 0xA,
+ .rms_meter_div_fact = 0xC << 4,
+ .rms_meter_resamp_fact = 0x50,
+ },
+ {
+ .peak_det_timeout = 0xB,
+ .rms_meter_div_fact = 0xC << 4,
+ .rms_meter_resamp_fact = 0x50,
+ },
+};
+
static int sitar_get_anc_slot(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -524,6 +633,268 @@
return 0;
}
+static int sitar_compander_gain_offset(
+ struct snd_soc_codec *codec, u32 enable,
+ unsigned int pa_reg, unsigned int vol_reg,
+ int mask, int event,
+ struct comp_dgtl_gain_offset *gain_offset,
+ int index)
+{
+ unsigned int pa_gain = snd_soc_read(codec, pa_reg);
+ unsigned int digital_vol = snd_soc_read(codec, vol_reg);
+ int pa_mode = pa_gain & mask;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+
+ pr_debug("%s: pa_gain(0x%x=0x%x)digital_vol(0x%x=0x%x)event(0x%x) index(%d)\n",
+ __func__, pa_reg, pa_gain, vol_reg, digital_vol, event, index);
+ if (((pa_gain & 0xF) + 1) > ARRAY_SIZE(comp_dgtl_gain) ||
+ (index >= ARRAY_SIZE(sitar->comp_gain_offset))) {
+ pr_err("%s: Out of array boundary\n", __func__);
+ return -EINVAL;
+ }
+
+ if (SND_SOC_DAPM_EVENT_ON(event) && (enable != 0)) {
+ gain_offset->whole_db_gain = COMP_DIGITAL_DB_GAIN_APPLY(
+ (digital_vol - comp_dgtl_gain[pa_gain & 0xF].whole_db_gain),
+ comp_dgtl_gain[pa_gain & 0xF].half_db_gain);
+ pr_debug("%s: listed whole_db_gain:0x%x, adjusted whole_db_gain:0x%x\n",
+ __func__, comp_dgtl_gain[pa_gain & 0xF].whole_db_gain,
+ gain_offset->whole_db_gain);
+ gain_offset->half_db_gain =
+ comp_dgtl_gain[pa_gain & 0xF].half_db_gain;
+ sitar->comp_gain_offset[index] = digital_vol -
+ gain_offset->whole_db_gain;
+ }
+ if (SND_SOC_DAPM_EVENT_OFF(event) && (pa_mode == 0)) {
+ gain_offset->whole_db_gain = digital_vol +
+ sitar->comp_gain_offset[index];
+ pr_debug("%s: listed whole_db_gain:0x%x, adjusted whole_db_gain:0x%x\n",
+ __func__, comp_dgtl_gain[pa_gain & 0xF].whole_db_gain,
+ gain_offset->whole_db_gain);
+ gain_offset->half_db_gain = 0;
+ }
+
+ pr_debug("%s: half_db_gain(%d)whole_db_gain(0x%x)comp_gain_offset[%d](%d)\n",
+ __func__, gain_offset->half_db_gain,
+ gain_offset->whole_db_gain, index,
+ sitar->comp_gain_offset[index]);
+ return 0;
+}
+
+static int sitar_config_gain_compander(
+ struct snd_soc_codec *codec,
+ u32 compander, u32 enable, int event)
+{
+ int value = 0;
+ int mask = 1 << 4;
+ struct comp_dgtl_gain_offset gain_offset = {0, 0};
+ if (compander >= COMPANDER_MAX) {
+ pr_err("%s: Error, invalid compander channel\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((enable == 0) || SND_SOC_DAPM_EVENT_OFF(event))
+ value = 1 << 4;
+
+ if (compander == COMPANDER_1) {
+ sitar_compander_gain_offset(codec, enable,
+ SITAR_A_RX_HPH_L_GAIN,
+ SITAR_A_CDC_RX2_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 1);
+ snd_soc_update_bits(codec, SITAR_A_RX_HPH_L_GAIN, mask, value);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX2_VOL_CTL_B2_CTL,
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX2_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
+ sitar_compander_gain_offset(codec, enable,
+ SITAR_A_RX_HPH_R_GAIN,
+ SITAR_A_CDC_RX3_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 2);
+ snd_soc_update_bits(codec, SITAR_A_RX_HPH_R_GAIN, mask, value);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX3_VOL_CTL_B2_CTL,
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX3_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
+ } else if (compander == COMPANDER_2) {
+ sitar_compander_gain_offset(codec, enable,
+ SITAR_A_RX_LINE_1_GAIN,
+ SITAR_A_CDC_RX1_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 0);
+ snd_soc_update_bits(codec, SITAR_A_RX_LINE_1_GAIN, mask, value);
+ snd_soc_update_bits(codec, SITAR_A_RX_LINE_2_GAIN, mask, value);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX1_VOL_CTL_B2_CTL,
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX1_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
+ }
+ return 0;
+}
+
+static int sitar_get_compander(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int comp = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->shift;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = sitar->comp_enabled[comp];
+
+ return 0;
+}
+
+static int sitar_set_compander(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+ int comp = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->shift;
+ int value = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: compander #%d enable %d\n",
+ __func__, comp + 1, value);
+ if (value == sitar->comp_enabled[comp]) {
+ pr_debug("%s: compander #%d enable %d no change\n",
+ __func__, comp + 1, value);
+ return 0;
+ }
+ sitar->comp_enabled[comp] = value;
+ return 0;
+}
+
+static int sitar_config_compander(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+ u32 rate = sitar->comp_fs[w->shift];
+ u32 value;
+
+ pr_debug("%s: compander #%d enable %d event %d widget name %s\n",
+ __func__, w->shift + 1,
+ sitar->comp_enabled[w->shift], event, w->name);
+ if (sitar->comp_enabled[w->shift] == 0)
+ goto rtn;
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Update compander sample rate */
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_FS_CFG +
+ w->shift * 8, 0x07, rate);
+ /* Enable compander clock */
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_RX_B2_CTL,
+ 1 << w->shift,
+ 1 << w->shift);
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << w->shift,
+ 1 << w->shift);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << w->shift, 0);
+ sitar_config_gain_compander(codec, w->shift, 1, event);
+ /* Compander enable -> 0x370/0x378 */
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_B1_CTL +
+ w->shift * 8, 0x03, 0x03);
+ /* Update the RMS meter resampling */
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_COMP1_B3_CTL +
+ w->shift * 8, 0xFF, 0x01);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_COMP1_B2_CTL +
+ w->shift * 8, 0xF0, 0x50);
+ usleep_range(COMP_BRINGUP_WAIT_TIME, COMP_BRINGUP_WAIT_TIME);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLSG_CTL,
+ 0x11, 0x00);
+ if (w->shift == COMPANDER_1)
+ value = 0x22;
+ else
+ value = 0x11;
+ snd_soc_write(codec,
+ SITAR_A_CDC_CONN_CLSG_CTL, value);
+
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_B2_CTL +
+ w->shift * 8, 0x0F,
+ comp_samp_params[rate].peak_det_timeout);
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_B2_CTL +
+ w->shift * 8, 0xF0,
+ comp_samp_params[rate].rms_meter_div_fact);
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_B3_CTL +
+ w->shift * 8, 0xFF,
+ comp_samp_params[rate].rms_meter_resamp_fact);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_B1_CTL +
+ w->shift * 8, 0x03, 0x00);
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << w->shift,
+ 1 << w->shift);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << w->shift, 0);
+ /* Disable compander clock */
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_RX_B2_CTL,
+ 1 << w->shift,
+ 0);
+ /* Restore the gain */
+ sitar_config_gain_compander(codec, w->shift,
+ sitar->comp_enabled[w->shift],
+ event);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLSG_CTL,
+ 0x11, 0x11);
+ snd_soc_write(codec,
+ SITAR_A_CDC_CONN_CLSG_CTL, 0x14);
+ break;
+ }
+rtn:
+ return 0;
+}
+
+static int sitar_codec_dem_input_selection(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+ pr_debug("%s: compander#1->enable(%d) compander#2->enable(%d) reg(0x%x = 0x%x) event(%d)\n",
+ __func__, sitar->comp_enabled[COMPANDER_1],
+ sitar->comp_enabled[COMPANDER_2],
+ SITAR_A_CDC_RX1_B6_CTL + w->shift * 8,
+ snd_soc_read(codec, SITAR_A_CDC_RX1_B6_CTL + w->shift * 8),
+ event);
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (sitar->comp_enabled[COMPANDER_1] ||
+ sitar->comp_enabled[COMPANDER_2])
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_RX1_B6_CTL +
+ w->shift * 8,
+ 1 << 5, 0);
+ else
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_RX1_B6_CTL +
+ w->shift * 8,
+ 1 << 5, 0x20);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_RX1_B6_CTL + w->shift * 8,
+ 1 << 5, 0);
+ break;
+ }
+ return 0;
+}
+
static const char * const sitar_ear_pa_gain_text[] = {"POS_6_DB",
"POS_2_DB", "NEG_2P5_DB", "NEG_12_DB"};
@@ -637,6 +1008,10 @@
sitar_get_iir_band_audio_mixer, sitar_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
sitar_get_iir_band_audio_mixer, sitar_put_iir_band_audio_mixer),
+ SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0,
+ sitar_get_compander, sitar_set_compander),
+ SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0,
+ sitar_get_compander, sitar_set_compander),
};
static const char *rx_mix1_text[] = {
@@ -1252,9 +1627,14 @@
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
u16 lineout_gain_reg;
- pr_debug("%s %d %s\n", __func__, event, w->name);
+ pr_debug("%s %d %s comp2 enable %d\n", __func__, event, w->name,
+ sitar->comp_enabled[COMPANDER_2]);
+
+ if (sitar->comp_enabled[COMPANDER_2])
+ goto rtn;
switch (w->shift) {
case 0:
@@ -1296,6 +1676,7 @@
snd_soc_update_bits(codec, lineout_gain_reg, 0x10, 0x00);
break;
}
+rtn:
return 0;
}
@@ -1755,6 +2136,8 @@
snd_soc_update_bits(codec, micb_int_reg, 0x1C, 0x1C);
break;
case SND_SOC_DAPM_POST_PMU:
+
+ usleep_range(20000, 20000);
if (sitar->mbhc_polling_active &&
sitar->mbhc_cfg.micbias == micb_line) {
SITAR_ACQUIRE_LOCK(sitar->codec_resource_lock);
@@ -1780,36 +2163,141 @@
return 0;
}
+static void tx_hpf_corner_freq_callback(struct work_struct *work)
+{
+ struct delayed_work *hpf_delayed_work;
+ struct hpf_work *hpf_work;
+ struct sitar_priv *sitar;
+ struct snd_soc_codec *codec;
+ u16 tx_mux_ctl_reg;
+ u8 hpf_cut_of_freq;
+
+ hpf_delayed_work = to_delayed_work(work);
+ hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork);
+ sitar = hpf_work->sitar;
+ codec = hpf_work->sitar->codec;
+ hpf_cut_of_freq = hpf_work->tx_hpf_cut_of_freq;
+
+ tx_mux_ctl_reg = SITAR_A_CDC_TX1_MUX_CTL +
+ (hpf_work->decimator - 1) * 8;
+
+ pr_debug("%s(): decimator %u hpf_cut_of_freq 0x%x\n", __func__,
+ hpf_work->decimator, (unsigned int)hpf_cut_of_freq);
+
+ snd_soc_update_bits(codec, tx_mux_ctl_reg,
+ CUT_OF_FREQ_MASK, hpf_cut_of_freq << 4);
+}
+
static int sitar_codec_enable_dec(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- u16 dec_reset_reg, gain_reg;
- u8 current_gain;
+ u16 dec_reset_reg, gain_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
+ unsigned int decimator;
+ char *dec_name = NULL;
+ char *widget_name = NULL;
+ char *temp;
+ int ret = 0;
+ u8 dec_hpf_cut_of_freq, current_gain;
pr_debug("%s %d\n", __func__, event);
+ widget_name = kstrndup(w->name, 15, GFP_KERNEL);
+ if (!widget_name)
+ return -ENOMEM;
+ temp = widget_name;
+
+ dec_name = strsep(&widget_name, " ");
+ widget_name = temp;
+ if (!dec_name) {
+ pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
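+ /* Extract the decimator number (1-4) from the widget name */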
+ ret = kstrtouint(strpbrk(dec_name, "1234"), 10, &decimator);
+ if (ret < 0) {
+ pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pr_debug("%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
+ w->name, dec_name, decimator);
+
if (w->reg == SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL)
dec_reset_reg = SITAR_A_CDC_CLK_TX_RESET_B1_CTL;
else {
pr_err("%s: Error, incorrect dec\n", __func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
+ tx_vol_ctl_reg = SITAR_A_CDC_TX1_VOL_CTL_CFG + 8 * (decimator - 1);
+ tx_mux_ctl_reg = SITAR_A_CDC_TX1_MUX_CTL + 8 * (decimator - 1);
+
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ /* Enable TX Digital Mute */
+ snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
+
snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift,
1 << w->shift);
snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
+
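+ /* Save the requested HPF cut-off before forcing it to 150 Hz for power-up */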
+ dec_hpf_cut_of_freq = snd_soc_read(codec, tx_mux_ctl_reg);
+ dec_hpf_cut_of_freq = (dec_hpf_cut_of_freq &
+ CUT_OF_FREQ_MASK) >> 4;
+
+ tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq =
+ dec_hpf_cut_of_freq;
+
+ if (dec_hpf_cut_of_freq != CF_MIN_3DB_150HZ) {
+ /* Set cut off freq to CF_MIN_3DB_150HZ (0x01) */
+ snd_soc_update_bits(codec, tx_mux_ctl_reg,
+ CUT_OF_FREQ_MASK, CF_MIN_3DB_150HZ << 4);
+ }
+
+ /* enable HPF */
+ snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x00);
+
break;
+
case SND_SOC_DAPM_POST_PMU:
+ /* Disable TX Digital Mute */
+ snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
+
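+ /* Restore the saved HPF cut-off 300 ms after the decimator powers up */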
+ if (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq !=
+ CF_MIN_3DB_150HZ) {
+ schedule_delayed_work(&tx_hpf_work[decimator - 1].dwork,
+ msecs_to_jiffies(300));
+ }
+
/* Reprogram the digital gain after power up of Decimator */
gain_reg = SITAR_A_CDC_TX1_VOL_CTL_GAIN + (8 * w->shift);
current_gain = snd_soc_read(codec, gain_reg);
snd_soc_write(codec, gain_reg, current_gain);
break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ /* Enable Digital Mute, Cancel possibly scheduled work */
+ snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
+ cancel_delayed_work_sync(&tx_hpf_work[decimator - 1].dwork);
+
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
+ snd_soc_update_bits(codec, tx_mux_ctl_reg, CUT_OF_FREQ_MASK,
+ (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq) << 4);
+ break;
+
}
- return 0;
+
+out:
+ kfree(widget_name);
+ return ret;
+
}
static int sitar_codec_reset_interpolator(struct snd_soc_dapm_widget *w,
@@ -1888,16 +2376,22 @@
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
- pr_debug("%s %s %d\n", __func__, w->name, event);
+ pr_debug("%s %s %d comp#1 enable %d\n", __func__,
+ w->name, event, sitar->comp_enabled[COMPANDER_1]);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
if (w->reg == SITAR_A_RX_HPH_L_DAC_CTL) {
- snd_soc_update_bits(codec, SITAR_A_CDC_CONN_CLSG_CTL,
- 0x30, 0x20);
- snd_soc_update_bits(codec, SITAR_A_CDC_CONN_CLSG_CTL,
- 0x0C, 0x08);
+ if (!sitar->comp_enabled[COMPANDER_1]) {
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CONN_CLSG_CTL,
+ 0x30, 0x20);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CONN_CLSG_CTL,
+ 0x0C, 0x08);
+ }
}
snd_soc_update_bits(codec, w->reg, 0x40, 0x40);
break;
@@ -2216,9 +2710,15 @@
SND_SOC_DAPM_MUX("DAC4 MUX", SND_SOC_NOPM, 0, 0,
&rx_dac4_mux),
- SND_SOC_DAPM_MIXER("RX1 CHAIN", SITAR_A_CDC_RX1_B6_CTL, 5, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("RX2 CHAIN", SITAR_A_CDC_RX2_B6_CTL, 5, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("RX3 CHAIN", SITAR_A_CDC_RX3_B6_CTL, 5, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER_E("RX1 CHAIN", SND_SOC_NOPM, 0, 0, NULL,
+ 0, sitar_codec_dem_input_selection,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX2 CHAIN", SND_SOC_NOPM, 1, 0, NULL,
+ 0, sitar_codec_dem_input_selection,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX3 CHAIN", SND_SOC_NOPM, 2, 0, NULL,
+ 0, sitar_codec_dem_input_selection,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
&rx_mix1_inp1_mux),
@@ -2278,16 +2778,23 @@
SND_SOC_DAPM_MUX_E("DEC1 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0,
&dec1_mux, sitar_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_MUX_E("DEC2 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0,
&dec2_mux, sitar_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_MUX_E("DEC3 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 2, 0,
&dec3_mux, sitar_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_MUX_E("DEC4 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 3, 0,
&dec4_mux, sitar_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MUX("ANC1 MUX", SND_SOC_NOPM, 0, 0, &anc1_mux),
SND_SOC_DAPM_MUX("ANC2 MUX", SND_SOC_NOPM, 0, 0, &anc2_mux),
@@ -2334,6 +2841,13 @@
sitar_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, COMPANDER_1, 0,
+ sitar_config_compander, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, COMPANDER_2, 0,
+ sitar_config_compander, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
/* Sidetone */
SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
SND_SOC_DAPM_PGA("IIR1", SITAR_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
@@ -2463,6 +2977,10 @@
{"SLIM RX3", NULL, "SLIM RX3 MUX"},
{"SLIM RX4", NULL, "SLIM RX4 MUX"},
+ {"RX1 MIX1", NULL, "COMP2_CLK"},
+ {"RX2 MIX1", NULL, "COMP1_CLK"},
+ {"RX3 MIX1", NULL, "COMP1_CLK"},
+
/* Slimbus port 5 is non functional in Sitar 1.0 */
{"RX1 MIX1 INP1", "RX1", "SLIM RX1"},
{"RX1 MIX1 INP1", "RX2", "SLIM RX2"},
@@ -2604,6 +3122,10 @@
if (reg == SITAR_A_CDC_RX1_VOL_CTL_B2_CTL + (8 * i))
return 1;
}
+
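+ /* Compander shutdown status registers are not cacheable */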
+ if ((reg == SITAR_A_CDC_COMP1_SHUT_DOWN_STATUS) ||
+ (reg == SITAR_A_CDC_COMP2_SHUT_DOWN_STATUS))
+ return 1;
return 0;
}
@@ -3062,6 +3584,7 @@
struct snd_soc_codec *codec = dai->codec;
struct sitar_priv *sitar = snd_soc_codec_get_drvdata(dai->codec);
u8 path, shift;
+ u32 compander_fs;
u16 tx_fs_reg, rx_fs_reg;
u8 tx_fs_rate, rx_fs_rate, rx_state, tx_state;
@@ -3071,18 +3594,32 @@
case 8000:
tx_fs_rate = 0x00;
rx_fs_rate = 0x00;
+ compander_fs = COMPANDER_FS_8KHZ;
break;
case 16000:
tx_fs_rate = 0x01;
rx_fs_rate = 0x20;
+ compander_fs = COMPANDER_FS_16KHZ;
break;
case 32000:
tx_fs_rate = 0x02;
rx_fs_rate = 0x40;
+ compander_fs = COMPANDER_FS_32KHZ;
break;
case 48000:
tx_fs_rate = 0x03;
rx_fs_rate = 0x60;
+ compander_fs = COMPANDER_FS_48KHZ;
+ break;
+ case 96000:
+ tx_fs_rate = 0x04;
+ rx_fs_rate = 0x80;
+ compander_fs = COMPANDER_FS_96KHZ;
+ break;
+ case 192000:
+ tx_fs_rate = 0x05;
+ rx_fs_rate = 0xa0;
+ compander_fs = COMPANDER_FS_192KHZ;
break;
default:
pr_err("%s: Invalid sampling rate %d\n", __func__,
@@ -3156,6 +3693,9 @@
+ (BITS_PER_REG*(path-1));
snd_soc_update_bits(codec, rx_fs_reg,
0xE0, rx_fs_rate);
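+ /* Record the sample rate for the compander mapped to this RX path */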
+ if (comp_rx_path[shift] < COMPANDER_MAX)
+ sitar->comp_fs[comp_rx_path[shift]]
+ = compander_fs;
}
}
if (sitar->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
@@ -4850,8 +5390,10 @@
if (sitar) {
codec = sitar->codec;
- if (sitar->hphlocp_cnt++ < SITAR_OCP_ATTEMPT) {
+ if ((sitar->hphlocp_cnt < SITAR_OCP_ATTEMPT) &&
+ (!sitar->hphrocp_cnt)) {
pr_info("%s: retry\n", __func__);
+ sitar->hphlocp_cnt++;
snd_soc_update_bits(codec, SITAR_A_RX_HPH_OCP_CTL, 0x10,
0x00);
snd_soc_update_bits(codec, SITAR_A_RX_HPH_OCP_CTL, 0x10,
@@ -4859,7 +5401,6 @@
} else {
wcd9xxx_disable_irq(codec->control_data,
WCD9XXX_IRQ_HPH_PA_OCPL_FAULT);
- sitar->hphlocp_cnt = 0;
sitar->hph_status |= SND_JACK_OC_HPHL;
if (sitar->mbhc_cfg.headset_jack)
sitar_snd_soc_jack_report(sitar,
@@ -4883,8 +5424,10 @@
if (sitar) {
codec = sitar->codec;
- if (sitar->hphrocp_cnt++ < SITAR_OCP_ATTEMPT) {
+ if ((sitar->hphrocp_cnt < SITAR_OCP_ATTEMPT) &&
+ (!sitar->hphlocp_cnt)) {
pr_info("%s: retry\n", __func__);
+ sitar->hphrocp_cnt++;
snd_soc_update_bits(codec, SITAR_A_RX_HPH_OCP_CTL, 0x10,
0x00);
snd_soc_update_bits(codec, SITAR_A_RX_HPH_OCP_CTL, 0x10,
@@ -4892,7 +5435,6 @@
} else {
wcd9xxx_disable_irq(codec->control_data,
WCD9XXX_IRQ_HPH_PA_OCPR_FAULT);
- sitar->hphrocp_cnt = 0;
sitar->hph_status |= SND_JACK_OC_HPHR;
if (sitar->mbhc_cfg.headset_jack)
sitar_snd_soc_jack_report(sitar,
@@ -5314,6 +5856,14 @@
return -ENOMEM;
}
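+ /* Set up the delayed work that restores each decimator's HPF cut-off */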
+ for (i = 0; i < NUM_DECIMATORS; i++) {
+ tx_hpf_work[i].sitar = sitar;
+ tx_hpf_work[i].decimator = i + 1;
+ INIT_DELAYED_WORK(&tx_hpf_work[i].dwork,
+ tx_hpf_corner_freq_callback);
+ }
+
/* Make sure mbhc micbias register addresses are zeroed out */
memset(&sitar->mbhc_bias_regs, 0,
sizeof(struct mbhc_micbias_regs));
@@ -5345,6 +5895,11 @@
if (sitar->intf_type == WCD9XXX_INTERFACE_TYPE_I2C)
sitar_i2c_codec_init_reg(codec);
+ for (i = 0; i < COMPANDER_MAX; i++) {
+ sitar->comp_enabled[i] = 0;
+ sitar->comp_fs[i] = COMPANDER_FS_48KHZ;
+ }
+
ret = sitar_handle_pdata(sitar);
if (IS_ERR_VALUE(ret)) {
pr_err("%s: bad pdata\n", __func__);
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index ca8cfaa..67674f3 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -193,6 +193,7 @@
s32 dmic_5_6_clk_cnt;
u32 anc_slot;
+ bool anc_func;
/*track tapan interface type*/
u8 intf_type;
@@ -348,6 +349,58 @@
return 0;
}
+static int tapan_get_anc_func(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = (tapan->anc_func == true ? 1 : 0);
+ return 0;
+}
+
+static int tapan_put_anc_func(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+ mutex_lock(&dapm->codec->mutex);
+ tapan->anc_func = (!ucontrol->value.integer.value[0] ? false : true);
+
+ dev_dbg(codec->dev, "%s: anc_func %x\n", __func__, tapan->anc_func);
+
+ if (tapan->anc_func == true) {
+ pr_info("enable anc virtual widgets");
+ snd_soc_dapm_enable_pin(dapm, "ANC HPHR");
+ snd_soc_dapm_enable_pin(dapm, "ANC HPHL");
+ snd_soc_dapm_enable_pin(dapm, "ANC HEADPHONE");
+ snd_soc_dapm_enable_pin(dapm, "ANC EAR PA");
+ snd_soc_dapm_enable_pin(dapm, "ANC EAR");
+ snd_soc_dapm_disable_pin(dapm, "HPHR");
+ snd_soc_dapm_disable_pin(dapm, "HPHL");
+ snd_soc_dapm_disable_pin(dapm, "HEADPHONE");
+ snd_soc_dapm_disable_pin(dapm, "EAR PA");
+ snd_soc_dapm_disable_pin(dapm, "EAR");
+ } else {
+ pr_info("disable anc virtual widgets");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+ snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE");
+ snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
+ snd_soc_dapm_disable_pin(dapm, "ANC EAR");
+ snd_soc_dapm_enable_pin(dapm, "HPHR");
+ snd_soc_dapm_enable_pin(dapm, "HPHL");
+ snd_soc_dapm_enable_pin(dapm, "HEADPHONE");
+ snd_soc_dapm_enable_pin(dapm, "EAR PA");
+ snd_soc_dapm_enable_pin(dapm, "EAR");
+ }
+ snd_soc_dapm_sync(dapm);
+ mutex_unlock(&dapm->codec->mutex);
+ return 0;
+}
+
static int tapan_pa_gain_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -713,6 +766,10 @@
SOC_ENUM_SINGLE_EXT(2, tapan_ear_pa_gain_text),
};
+static const char *const tapan_anc_func_text[] = {"OFF", "ON"};
+static const struct soc_enum tapan_anc_func_enum =
+ SOC_ENUM_SINGLE_EXT(2, tapan_anc_func_text);
+
/*cut of frequency for high pass filter*/
static const char * const cf_text[] = {
"MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
@@ -770,11 +827,11 @@
SOC_SINGLE_TLV("SPK DRV Volume", TAPAN_A_SPKR_DRV_GAIN, 3, 7, 1,
line_gain),
- SOC_SINGLE_TLV("ADC1 Volume", TAPAN_A_TX_1_EN, 2, 13, 0, analog_gain),
- SOC_SINGLE_TLV("ADC2 Volume", TAPAN_A_TX_2_EN, 2, 13, 0, analog_gain),
- SOC_SINGLE_TLV("ADC3 Volume", TAPAN_A_TX_3_EN, 2, 13, 0, analog_gain),
- SOC_SINGLE_TLV("ADC4 Volume", TAPAN_A_TX_4_EN, 2, 13, 0, analog_gain),
- SOC_SINGLE_TLV("ADC5 Volume", TAPAN_A_TX_5_EN, 2, 13, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC1 Volume", TAPAN_A_TX_1_EN, 2, 19, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC2 Volume", TAPAN_A_TX_2_EN, 2, 19, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC3 Volume", TAPAN_A_TX_3_EN, 2, 19, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC4 Volume", TAPAN_A_TX_4_EN, 2, 19, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC5 Volume", TAPAN_A_TX_5_EN, 2, 19, 0, analog_gain),
SOC_SINGLE_S8_TLV("RX1 Digital Volume", TAPAN_A_CDC_RX1_VOL_CTL_B2_CTL,
-84, 40, digital_gain),
@@ -803,9 +860,10 @@
SOC_SINGLE_S8_TLV("IIR1 INP4 Volume", TAPAN_A_CDC_IIR1_GAIN_B4_CTL, -84,
40, digital_gain),
- SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 0, 100, tapan_get_anc_slot,
+ SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 100, 0, tapan_get_anc_slot,
tapan_put_anc_slot),
-
+ SOC_ENUM_EXT("ANC Function", tapan_anc_func_enum, tapan_get_anc_func,
+ tapan_put_anc_func),
SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
SOC_ENUM("TX3 HPF cut off", cf_dec3_enum),
@@ -943,6 +1001,10 @@
"RSVD", "RSVD"
};
+static const char * const anc1_fb_mux_text[] = {
+ "ZERO", "EAR_HPH_L", "EAR_LINE_1",
+};
+
static const char * const iir1_inp1_text[] = {
"ZERO", "DEC1", "DEC2", "DEC3", "DEC4",
"RX1", "RX2", "RX3", "RX4", "RX5"
@@ -1031,6 +1093,9 @@
static const struct soc_enum anc2_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_ANC_B1_CTL, 4, 15, anc_mux_text);
+static const struct soc_enum anc1_fb_mux_enum =
+ SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_ANC_B2_CTL, 0, 3, anc1_fb_mux_text);
+
static const struct soc_enum iir1_inp1_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_EQ1_B1_CTL, 0, 10, iir1_inp1_text);
@@ -1200,6 +1265,9 @@
static const struct snd_kcontrol_new anc2_mux =
SOC_DAPM_ENUM("ANC2 MUX Mux", anc2_mux_enum);
+static const struct snd_kcontrol_new anc1_fb_mux =
+ SOC_DAPM_ENUM("ANC1 FB MUX Mux", anc1_fb_mux_enum);
+
static const struct snd_kcontrol_new dac1_switch[] = {
SOC_DAPM_SINGLE("Switch", TAPAN_A_RX_EAR_EN, 5, 1, 0)
};
@@ -1322,11 +1390,11 @@
return 0;
}
break;
- default:
- dev_err(codec->dev, "Unknown AIF %d\n", dai_id);
- mutex_unlock(&codec->mutex);
- return -EINVAL;
- }
+ default:
+ dev_err(codec->dev, "Unknown AIF %d\n", dai_id);
+ mutex_unlock(&codec->mutex);
+ return -EINVAL;
+ }
dev_dbg(codec->dev, "%s: name %s sname %s updated value %u shift %d\n",
__func__, widget->name, widget->sname,
widget->value, widget->shift);
@@ -1392,14 +1460,14 @@
break;
case 2:
if (wcd9xxx_rx_vport_validation(port_id + core->num_tx_port,
- &tapan_p->dai[AIF1_PB].wcd9xxx_ch_list))
+ &tapan_p->dai[AIF2_PB].wcd9xxx_ch_list))
goto pr_err;
list_add_tail(&core->rx_chs[port_id].list,
&tapan_p->dai[AIF2_PB].wcd9xxx_ch_list);
break;
case 3:
if (wcd9xxx_rx_vport_validation(port_id + core->num_tx_port,
- &tapan_p->dai[AIF1_PB].wcd9xxx_ch_list))
+ &tapan_p->dai[AIF3_PB].wcd9xxx_ch_list))
goto pr_err;
list_add_tail(&core->rx_chs[port_id].list,
&tapan_p->dai[AIF3_PB].wcd9xxx_ch_list);
@@ -1662,15 +1730,17 @@
int i;
int ret;
int num_anc_slots;
- struct anc_header *anc_head;
+ struct wcd9xxx_anc_header *anc_head;
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
u32 anc_writes_size = 0;
int anc_size_remaining;
u32 *anc_ptr;
u16 reg;
- u8 mask, val;
+ u8 mask, val, old_val;
dev_dbg(codec->dev, "%s %d\n", __func__, event);
+ if (tapan->anc_func == 0)
+ return 0;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -1683,16 +1753,18 @@
return -ENODEV;
}
- if (fw->size < sizeof(struct anc_header)) {
+ if (fw->size < sizeof(struct wcd9xxx_anc_header)) {
dev_err(codec->dev, "Not enough data\n");
release_firmware(fw);
return -ENOMEM;
}
/* First number is the number of register writes */
- anc_head = (struct anc_header *)(fw->data);
- anc_ptr = (u32 *)((u32)fw->data + sizeof(struct anc_header));
- anc_size_remaining = fw->size - sizeof(struct anc_header);
+ anc_head = (struct wcd9xxx_anc_header *)(fw->data);
+ anc_ptr = (u32 *)((u32)fw->data +
+ sizeof(struct wcd9xxx_anc_header));
+ anc_size_remaining = fw->size -
+ sizeof(struct wcd9xxx_anc_header);
num_anc_slots = anc_head->num_anc_slots;
if (tapan->anc_slot >= num_anc_slots) {
@@ -1735,14 +1807,21 @@
for (i = 0; i < anc_writes_size; i++) {
TAPAN_CODEC_UNPACK_ENTRY(anc_ptr[i], reg,
mask, val);
- snd_soc_write(codec, reg, val);
+ old_val = snd_soc_read(codec, reg);
+ snd_soc_write(codec, reg, (old_val & ~mask) |
+ (val & mask));
}
release_firmware(fw);
break;
case SND_SOC_DAPM_POST_PMD:
- snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_RESET_CTL, 0xFF);
+ msleep(40);
+ snd_soc_update_bits(codec, TAPAN_A_CDC_ANC1_B1_CTL, 0x01, 0x00);
+ snd_soc_update_bits(codec, TAPAN_A_CDC_ANC2_B1_CTL, 0x02, 0x00);
+ msleep(20);
+ snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_RESET_CTL, 0x0F);
snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_CLK_EN_CTL, 0);
+ snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_RESET_CTL, 0xFF);
break;
}
return 0;
@@ -2135,12 +2214,12 @@
dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
if (w->shift == 5) {
- e_pre_on = WCD9XXX_EVENT_PRE_HPHL_PA_ON;
- e_post_off = WCD9XXX_EVENT_POST_HPHL_PA_OFF;
- req_clsh_state = WCD9XXX_CLSH_STATE_HPHL;
- } else if (w->shift == 4) {
e_pre_on = WCD9XXX_EVENT_PRE_HPHR_PA_ON;
e_post_off = WCD9XXX_EVENT_POST_HPHR_PA_OFF;
+ req_clsh_state = WCD9XXX_CLSH_STATE_HPHL;
+ } else if (w->shift == 4) {
+ e_pre_on = WCD9XXX_EVENT_PRE_HPHL_PA_ON;
+ e_post_off = WCD9XXX_EVENT_POST_HPHL_PA_OFF;
req_clsh_state = WCD9XXX_CLSH_STATE_HPHR;
} else {
pr_err("%s: Invalid w->shift %d\n", __func__, w->shift);
@@ -2180,6 +2259,46 @@
return 0;
}
+static int tapan_codec_enable_anc_hph(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ ret = tapan_hph_pa_event(w, kcontrol, event);
+ if (w->shift == 4) {
+ ret |= tapan_codec_enable_anc(w, kcontrol, event);
+ msleep(50);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ if (w->shift == 4) {
+ snd_soc_update_bits(codec,
+ TAPAN_A_RX_HPH_CNP_EN, 0x30, 0x30);
+ msleep(30);
+ }
+ ret = tapan_hph_pa_event(w, kcontrol, event);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ if (w->shift == 5) {
+ snd_soc_update_bits(codec,
+ TAPAN_A_RX_HPH_CNP_EN, 0x30, 0x00);
+ msleep(40);
+ }
+ if (w->shift == 5) {
+ snd_soc_update_bits(codec,
+ TAPAN_A_TX_7_MBHC_EN, 0x80, 0x00);
+ ret |= tapan_codec_enable_anc(w, kcontrol, event);
+ }
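+ /* fall through so the PA event handler also runs for PRE_PMD */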
+ case SND_SOC_DAPM_POST_PMD:
+ ret = tapan_hph_pa_event(w, kcontrol, event);
+ break;
+ }
+ return ret;
+}
+
static const struct snd_soc_dapm_widget tapan_dapm_i2s_widgets[] = {
SND_SOC_DAPM_SUPPLY("I2S_CLK", TAPAN_A_CDC_CLK_I2S_CTL,
4, 0, NULL, 0),
@@ -2294,6 +2413,11 @@
{"EAR_PA_MIXER", NULL, "DAC1"},
{"DAC1", NULL, "RX_BIAS"},
+ {"ANC EAR", NULL, "ANC EAR PA"},
+ {"ANC EAR PA", NULL, "EAR_PA_MIXER"},
+ {"ANC1 FB MUX", "EAR_HPH_L", "RX1 MIX2"},
+ {"ANC1 FB MUX", "EAR_LINE_1", "RX2 MIX2"},
+
/* Headset (RX MIX1 and RX MIX2) */
{"HEADPHONE", NULL, "HPHL"},
{"HEADPHONE", NULL, "HPHR"},
@@ -2306,6 +2430,33 @@
{"HPHR_PA_MIXER", NULL, "HPHR DAC"},
{"HPHR DAC", NULL, "RX_BIAS"},
+ {"ANC HEADPHONE", NULL, "ANC HPHL"},
+ {"ANC HEADPHONE", NULL, "ANC HPHR"},
+
+ {"ANC HPHL", NULL, "HPHL_PA_MIXER"},
+ {"ANC HPHR", NULL, "HPHR_PA_MIXER"},
+
+ {"ANC1 MUX", "ADC1", "ADC1"},
+ {"ANC1 MUX", "ADC2", "ADC2"},
+ {"ANC1 MUX", "ADC3", "ADC3"},
+ {"ANC1 MUX", "ADC4", "ADC4"},
+ {"ANC1 MUX", "ADC5", "ADC5"},
+ {"ANC1 MUX", "DMIC1", "DMIC1"},
+ {"ANC1 MUX", "DMIC2", "DMIC2"},
+ {"ANC1 MUX", "DMIC3", "DMIC3"},
+ {"ANC1 MUX", "DMIC4", "DMIC4"},
+ {"ANC2 MUX", "ADC1", "ADC1"},
+ {"ANC2 MUX", "ADC2", "ADC2"},
+ {"ANC2 MUX", "ADC3", "ADC3"},
+ {"ANC2 MUX", "ADC4", "ADC4"},
+ {"ANC2 MUX", "ADC5", "ADC5"},
+ {"ANC2 MUX", "DMIC1", "DMIC1"},
+ {"ANC2 MUX", "DMIC2", "DMIC2"},
+ {"ANC2 MUX", "DMIC3", "DMIC3"},
+ {"ANC2 MUX", "DMIC4", "DMIC4"},
+
+ {"ANC HPHR", NULL, "CDC_CONN"},
+
{"DAC1", "Switch", "CLASS_H_DSM MUX"},
{"HPHL DAC", "Switch", "CLASS_H_DSM MUX"},
{"HPHR DAC", NULL, "RX2 CHAIN"},
@@ -2334,6 +2485,8 @@
{"RX1 CHAIN", NULL, "RX1 MIX2"},
{"RX2 CHAIN", NULL, "RX2 MIX2"},
{"CLASS_H_DSM MUX", "RX_HPHL", "RX1 CHAIN"},
+ {"RX1 MIX2", NULL, "ANC1 MUX"},
+ {"RX2 MIX2", NULL, "ANC2 MUX"},
{"LINEOUT1 DAC", NULL, "RX_BIAS"},
{"LINEOUT2 DAC", NULL, "RX_BIAS"},
@@ -2562,6 +2715,14 @@
(reg <= TAPAN_A_CDC_IIR2_COEF_B2_CTL))
return 1;
+ /* ANC filter registers are not cacheable */
+ if ((reg >= TAPAN_A_CDC_ANC1_IIR_B1_CTL) &&
+ (reg <= TAPAN_A_CDC_ANC1_LPF_B2_CTL))
+ return 1;
+ if ((reg >= TAPAN_A_CDC_ANC2_IIR_B1_CTL) &&
+ (reg <= TAPAN_A_CDC_ANC2_LPF_B2_CTL))
+ return 1;
+
/* Digital gain register is not cacheable so we have to write
* the setting even it is the same
*/
@@ -2848,7 +3009,7 @@
tapan->comp_fs[comp_rx_path[j]]
= compander_fs;
}
- if (j <= 2)
+ if (j <= 1)
rx_mix_1_reg_1 += 3;
else
rx_mix_1_reg_1 += 2;
@@ -3417,6 +3578,33 @@
return 0;
}
+static int tapan_codec_enable_anc_ear(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ ret = tapan_codec_enable_anc(w, kcontrol, event);
+ msleep(50);
+ snd_soc_update_bits(codec, TAPAN_A_RX_EAR_EN, 0x10, 0x10);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ ret = tapan_codec_enable_ear_pa(w, kcontrol, event);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, TAPAN_A_RX_EAR_EN, 0x10, 0x00);
+ msleep(40);
+ ret |= tapan_codec_enable_anc(w, kcontrol, event);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ ret = tapan_codec_enable_ear_pa(w, kcontrol, event);
+ break;
+ }
+ return ret;
+}
+
/* Todo: Have seperate dapm widgets for I2S and Slimbus.
* Might Need to have callbacks registered only for slimbus
@@ -3693,9 +3881,21 @@
SND_SOC_DAPM_MUX("ANC1 MUX", SND_SOC_NOPM, 0, 0, &anc1_mux),
SND_SOC_DAPM_MUX("ANC2 MUX", SND_SOC_NOPM, 0, 0, &anc2_mux),
- SND_SOC_DAPM_MIXER_E("ANC", SND_SOC_NOPM, 0, 0, NULL, 0,
- tapan_codec_enable_anc, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_OUTPUT("ANC HEADPHONE"),
+ SND_SOC_DAPM_PGA_E("ANC HPHL", SND_SOC_NOPM, 5, 0, NULL, 0,
+ tapan_codec_enable_anc_hph,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PGA_E("ANC HPHR", SND_SOC_NOPM, 4, 0, NULL, 0,
+ tapan_codec_enable_anc_hph, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_OUTPUT("ANC EAR"),
+ SND_SOC_DAPM_PGA_E("ANC EAR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+ tapan_codec_enable_anc_ear,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX("ANC1 FB MUX", SND_SOC_NOPM, 0, 0, &anc1_fb_mux),
SND_SOC_DAPM_INPUT("AMIC2"),
SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 External", TAPAN_A_MICB_2_CTL, 7, 0,
@@ -4171,6 +4371,12 @@
return ret;
}
+static void tapan_cleanup_irqs(struct tapan_priv *tapan)
+{
+ struct snd_soc_codec *codec = tapan->codec;
+ wcd9xxx_free_irq(codec->control_data, WCD9XXX_IRQ_SLIMBUS, tapan);
+}
+
int tapan_hs_detect(struct snd_soc_codec *codec,
struct wcd9xxx_mbhc_config *mbhc_cfg)
{
@@ -4226,7 +4432,12 @@
return ret;
}
- /* TODO: wcd9xxx_mbhc_init to enable mbhc */
+ ret = wcd9xxx_mbhc_init(&tapan->mbhc, &tapan->resmgr, codec,
+ WCD9XXX_MBHC_VERSION_TAPAN);
+ if (ret) {
+ pr_err("%s: mbhc init failed %d\n", __func__, ret);
+ return ret;
+ }
tapan->codec = codec;
for (i = 0; i < COMPANDER_MAX; i++) {
@@ -4297,8 +4508,20 @@
(void) tapan_setup_irqs(tapan);
atomic_set(&kp_tapan_priv, (unsigned long)tapan);
+ mutex_lock(&dapm->codec->mutex);
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
+ snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE");
+ snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
+ snd_soc_dapm_disable_pin(dapm, "ANC EAR");
+ snd_soc_dapm_sync(dapm);
+ mutex_unlock(&dapm->codec->mutex);
codec->ignore_pmdown_time = 1;
+
+ if (ret)
+ tapan_cleanup_irqs(tapan);
+
return ret;
err_pdata:
@@ -4319,6 +4542,9 @@
wcd9xxx_resmgr_put_bandgap(&tapan->resmgr,
WCD9XXX_BANDGAP_AUDIO_MODE);
WCD9XXX_BCL_UNLOCK(&tapan->resmgr);
+
+ tapan_cleanup_irqs(tapan);
+
/* cleanup MBHC */
wcd9xxx_mbhc_deinit(&tapan->mbhc);
/* cleanup resmgr */
diff --git a/sound/soc/codecs/wcd9306.h b/sound/soc/codecs/wcd9306.h
index 61d47b5..fdd62d1 100644
--- a/sound/soc/codecs/wcd9306.h
+++ b/sound/soc/codecs/wcd9306.h
@@ -71,11 +71,6 @@
TAPAN_TX_MAX,
};
-struct anc_header {
- u32 reserved[3];
- u32 num_anc_slots;
-};
-
extern int tapan_mclk_enable(struct snd_soc_codec *codec, int mclk_enable,
bool dapm);
extern int tapan_hs_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index b8a4a86..29703b9 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -55,9 +55,12 @@
#define MBHC_FW_READ_ATTEMPTS 15
#define MBHC_FW_READ_TIMEOUT 2000000
#define MBHC_VDDIO_SWITCH_WAIT_MS 10
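+/* Subtract the half dB offset 'b' only when the adjusted gain 'a' is zero or negative */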
+#define COMP_DIGITAL_DB_GAIN_APPLY(a, b) \
+ (((a) <= 0) ? ((a) - (b)) : (a))
#define SLIM_CLOSE_TIMEOUT 1000
-
+/* The wait time value comes from codec HW specification */
+#define COMP_BRINGUP_WAIT_TIME 2000
enum {
MBHC_USE_HPHL_TRIGGER = 1,
MBHC_USE_MB_TRIGGER = 2
@@ -99,9 +102,7 @@
RX_MIX1_INP_SEL_RX6,
RX_MIX1_INP_SEL_RX7,
};
-
-#define TABLA_COMP_DIGITAL_GAIN_HP_OFFSET 3
-#define TABLA_COMP_DIGITAL_GAIN_LINEOUT_OFFSET 6
+#define MAX_PA_GAIN_OPTIONS 13
#define TABLA_MCLK_RATE_12288KHZ 12288000
#define TABLA_MCLK_RATE_9600KHZ 9600000
@@ -220,6 +221,28 @@
u32 shutdown_timeout;
};
+struct comp_dgtl_gain_offset {
+ u8 whole_db_gain;
+ u8 half_db_gain;
+};
+
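+/* Whole/half dB digital gain equivalents, indexed by the PA gain register field */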
+static const struct comp_dgtl_gain_offset
+ comp_dgtl_gain[MAX_PA_GAIN_OPTIONS] = {
+ {0, 0},
+ {1, 1},
+ {3, 0},
+ {4, 1},
+ {6, 0},
+ {7, 1},
+ {9, 0},
+ {10, 1},
+ {12, 0},
+ {13, 1},
+ {15, 0},
+ {16, 1},
+ {18, 0},
+};
+
/* Data used by MBHC */
struct mbhc_internal_cal_data {
u16 dce_z;
@@ -377,6 +400,7 @@
/*compander*/
int comp_enabled[COMPANDER_MAX];
u32 comp_fs[COMPANDER_MAX];
+ u8 comp_gain_offset[TABLA_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS - 1];
/* Maintain the status of AUX PGA */
int aux_pga_cnt;
@@ -547,7 +571,10 @@
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+
+ mutex_lock(&codec->dapm.codec->mutex);
ucontrol->value.integer.value[0] = (tabla->anc_func == true ? 1 : 0);
+ mutex_unlock(&codec->dapm.codec->mutex);
return 0;
}
@@ -802,34 +829,51 @@
static int tabla_compander_gain_offset(
struct snd_soc_codec *codec, u32 enable,
- unsigned int reg, int mask, int event, u32 comp)
+ unsigned int pa_reg, unsigned int vol_reg,
+ int mask, int event,
+ struct comp_dgtl_gain_offset *gain_offset,
+ int index)
{
- int pa_mode = snd_soc_read(codec, reg) & mask;
- int gain_offset = 0;
- /* if PMU && enable is 1-> offset is 3
- * if PMU && enable is 0-> offset is 0
- * if PMD && pa_mode is PA -> offset is 0: PMU compander is off
- * if PMD && pa_mode is comp -> offset is -3: PMU compander is on.
- */
+ unsigned int pa_gain = snd_soc_read(codec, pa_reg);
+ unsigned int digital_vol = snd_soc_read(codec, vol_reg);
+ int pa_mode = pa_gain & mask;
+ struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+
+ pr_debug("%s: pa_gain(0x%x=0x%x)digital_vol(0x%x=0x%x)event(0x%x) index(%d)\n",
+ __func__, pa_reg, pa_gain, vol_reg, digital_vol, event, index);
+ if (((pa_gain & 0xF) + 1) > ARRAY_SIZE(comp_dgtl_gain) ||
+ (index >= ARRAY_SIZE(tabla->comp_gain_offset))) {
+ pr_err("%s: Out of array boundary\n", __func__);
+ return -EINVAL;
+ }
if (SND_SOC_DAPM_EVENT_ON(event) && (enable != 0)) {
- if (comp == COMPANDER_1)
- gain_offset = TABLA_COMP_DIGITAL_GAIN_HP_OFFSET;
- if (comp == COMPANDER_2)
- gain_offset = TABLA_COMP_DIGITAL_GAIN_LINEOUT_OFFSET;
+ gain_offset->whole_db_gain = COMP_DIGITAL_DB_GAIN_APPLY(
+ (digital_vol - comp_dgtl_gain[pa_gain & 0xF].whole_db_gain),
+ comp_dgtl_gain[pa_gain & 0xF].half_db_gain);
+ pr_debug("%s: listed whole_db_gain:0x%x, adjusted whole_db_gain:0x%x\n",
+ __func__, comp_dgtl_gain[pa_gain & 0xF].whole_db_gain,
+ gain_offset->whole_db_gain);
+ gain_offset->half_db_gain =
+ comp_dgtl_gain[pa_gain & 0xF].half_db_gain;
+ tabla->comp_gain_offset[index] = digital_vol -
+ gain_offset->whole_db_gain;
}
if (SND_SOC_DAPM_EVENT_OFF(event) && (pa_mode == 0)) {
- if (comp == COMPANDER_1)
- gain_offset = -TABLA_COMP_DIGITAL_GAIN_HP_OFFSET;
- if (comp == COMPANDER_2)
- gain_offset = -TABLA_COMP_DIGITAL_GAIN_LINEOUT_OFFSET;
-
+ gain_offset->whole_db_gain = digital_vol +
+ tabla->comp_gain_offset[index];
+ pr_debug("%s: listed whole_db_gain:0x%x, adjusted whole_db_gain:0x%x\n",
+ __func__, comp_dgtl_gain[pa_gain & 0xF].whole_db_gain,
+ gain_offset->whole_db_gain);
+ gain_offset->half_db_gain = 0;
}
- pr_debug("%s: compander #%d gain_offset %d\n",
- __func__, comp + 1, gain_offset);
- return gain_offset;
-}
+ pr_debug("%s: half_db_gain(%d)whole_db_gain(%d)comp_gain_offset[%d](%d)\n",
+ __func__, gain_offset->half_db_gain,
+ gain_offset->whole_db_gain, index,
+ tabla->comp_gain_offset[index]);
+ return 0;
+}
static int tabla_config_gain_compander(
struct snd_soc_codec *codec,
@@ -837,8 +881,7 @@
{
int value = 0;
int mask = 1 << 4;
- int gain = 0;
- int gain_offset;
+ struct comp_dgtl_gain_offset gain_offset = {0, 0};
if (compander >= COMPANDER_MAX) {
pr_err("%s: Error, invalid compander channel\n", __func__);
return -EINVAL;
@@ -848,43 +891,61 @@
value = 1 << 4;
if (compander == COMPANDER_1) {
- gain_offset = tabla_compander_gain_offset(codec, enable,
- TABLA_A_RX_HPH_L_GAIN, mask, event, compander);
+ tabla_compander_gain_offset(codec, enable,
+ TABLA_A_RX_HPH_L_GAIN,
+ TABLA_A_CDC_RX1_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 0);
snd_soc_update_bits(codec, TABLA_A_RX_HPH_L_GAIN, mask, value);
- gain = snd_soc_read(codec, TABLA_A_CDC_RX1_VOL_CTL_B2_CTL);
snd_soc_update_bits(codec, TABLA_A_CDC_RX1_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
- gain_offset = tabla_compander_gain_offset(codec, enable,
- TABLA_A_RX_HPH_R_GAIN, mask, event, compander);
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX1_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
+ tabla_compander_gain_offset(codec, enable,
+ TABLA_A_RX_HPH_R_GAIN,
+ TABLA_A_CDC_RX2_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 1);
snd_soc_update_bits(codec, TABLA_A_RX_HPH_R_GAIN, mask, value);
- gain = snd_soc_read(codec, TABLA_A_CDC_RX2_VOL_CTL_B2_CTL);
snd_soc_update_bits(codec, TABLA_A_CDC_RX2_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX2_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
} else if (compander == COMPANDER_2) {
- gain_offset = tabla_compander_gain_offset(codec, enable,
- TABLA_A_RX_LINE_1_GAIN, mask, event, compander);
+ tabla_compander_gain_offset(codec, enable,
+ TABLA_A_RX_LINE_1_GAIN,
+ TABLA_A_CDC_RX3_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 2);
snd_soc_update_bits(codec, TABLA_A_RX_LINE_1_GAIN, mask, value);
- gain = snd_soc_read(codec, TABLA_A_CDC_RX3_VOL_CTL_B2_CTL);
snd_soc_update_bits(codec, TABLA_A_CDC_RX3_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
- gain_offset = tabla_compander_gain_offset(codec, enable,
- TABLA_A_RX_LINE_3_GAIN, mask, event, compander);
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX3_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
+ tabla_compander_gain_offset(codec, enable,
+ TABLA_A_RX_LINE_3_GAIN,
+ TABLA_A_CDC_RX4_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 3);
snd_soc_update_bits(codec, TABLA_A_RX_LINE_3_GAIN, mask, value);
- gain = snd_soc_read(codec, TABLA_A_CDC_RX4_VOL_CTL_B2_CTL);
snd_soc_update_bits(codec, TABLA_A_CDC_RX4_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
- gain_offset = tabla_compander_gain_offset(codec, enable,
- TABLA_A_RX_LINE_2_GAIN, mask, event, compander);
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX4_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
+ tabla_compander_gain_offset(codec, enable,
+ TABLA_A_RX_LINE_2_GAIN,
+ TABLA_A_CDC_RX5_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 4);
snd_soc_update_bits(codec, TABLA_A_RX_LINE_2_GAIN, mask, value);
- gain = snd_soc_read(codec, TABLA_A_CDC_RX5_VOL_CTL_B2_CTL);
snd_soc_update_bits(codec, TABLA_A_CDC_RX5_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
- gain_offset = tabla_compander_gain_offset(codec, enable,
- TABLA_A_RX_LINE_4_GAIN, mask, event, compander);
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX5_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
+ tabla_compander_gain_offset(codec, enable,
+ TABLA_A_RX_LINE_4_GAIN,
+ TABLA_A_CDC_RX6_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 5);
snd_soc_update_bits(codec, TABLA_A_RX_LINE_4_GAIN, mask, value);
- gain = snd_soc_read(codec, TABLA_A_CDC_RX6_VOL_CTL_B2_CTL);
snd_soc_update_bits(codec, TABLA_A_CDC_RX6_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX6_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
}
return 0;
}
@@ -921,7 +982,6 @@
return 0;
}
-
static int tabla_config_compander(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
@@ -929,106 +989,161 @@
struct snd_soc_codec *codec = w->codec;
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
u32 rate = tabla->comp_fs[w->shift];
- u32 status;
- unsigned long timeout;
- pr_debug("%s: compander #%d enable %d event %d\n",
+
+ pr_debug("%s: compander #%d enable %d event %d widget name %s\n",
__func__, w->shift + 1,
- tabla->comp_enabled[w->shift], event);
+ tabla->comp_enabled[w->shift], event, w->name);
+ if (tabla->comp_enabled[w->shift] == 0)
+ goto rtn;
+ if ((w->shift == COMPANDER_1) && (tabla->anc_func)) {
+ pr_debug("%s: ANC is enabled so compander #%d cannot be enabled\n",
+ __func__, w->shift + 1);
+ goto rtn;
+ }
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- if (tabla->comp_enabled[w->shift] != 0) {
- /* Enable both L/R compander clocks */
- snd_soc_update_bits(codec,
- TABLA_A_CDC_CLK_RX_B2_CTL,
- 1 << comp_shift[w->shift],
- 1 << comp_shift[w->shift]);
- /* Clear the HALT for the compander*/
- snd_soc_update_bits(codec,
- TABLA_A_CDC_COMP1_B1_CTL +
- w->shift * 8, 1 << 2, 0);
- /* Toggle compander reset bits*/
- snd_soc_update_bits(codec,
- TABLA_A_CDC_CLK_OTHR_RESET_CTL,
- 1 << comp_shift[w->shift],
- 1 << comp_shift[w->shift]);
- snd_soc_update_bits(codec,
- TABLA_A_CDC_CLK_OTHR_RESET_CTL,
- 1 << comp_shift[w->shift], 0);
- tabla_config_gain_compander(codec, w->shift, 1, event);
- /* Compander enable -> 0x370/0x378*/
- snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B1_CTL +
- w->shift * 8, 0x03, 0x03);
- /* Update the RMS meter resampling*/
- snd_soc_update_bits(codec,
- TABLA_A_CDC_COMP1_B3_CTL +
- w->shift * 8, 0xFF, 0x01);
- snd_soc_update_bits(codec,
- TABLA_A_CDC_COMP1_B2_CTL +
- w->shift * 8, 0xF0, 0x50);
- /* Wait for 1ms*/
- usleep_range(5000, 5000);
- }
+ /* Update compander sample rate */
+ snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_FS_CFG +
+ w->shift * 8, 0x07, rate);
+ /* Enable both L/R compander clocks */
+ snd_soc_update_bits(codec,
+ TABLA_A_CDC_CLK_RX_B2_CTL,
+ 1 << comp_shift[w->shift],
+ 1 << comp_shift[w->shift]);
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec,
+ TABLA_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << comp_shift[w->shift],
+ 1 << comp_shift[w->shift]);
+ snd_soc_update_bits(codec,
+ TABLA_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << comp_shift[w->shift], 0);
+ tabla_config_gain_compander(codec, w->shift, 1, event);
+ /* Compander enable -> 0x370/0x378 */
+ snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B1_CTL +
+ w->shift * 8, 0x03, 0x03);
+ /* Update the RMS meter resampling */
+ snd_soc_update_bits(codec,
+ TABLA_A_CDC_COMP1_B3_CTL +
+ w->shift * 8, 0xFF, 0x01);
+ snd_soc_update_bits(codec,
+ TABLA_A_CDC_COMP1_B2_CTL +
+ w->shift * 8, 0xF0, 0x50);
+ usleep_range(COMP_BRINGUP_WAIT_TIME, COMP_BRINGUP_WAIT_TIME);
break;
case SND_SOC_DAPM_POST_PMU:
- /* Set sample rate dependent paramater*/
- if (tabla->comp_enabled[w->shift] != 0) {
- snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_FS_CFG +
- w->shift * 8, 0x07, rate);
- snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B2_CTL +
- w->shift * 8, 0x0F,
- comp_samp_params[rate].peak_det_timeout);
- snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B2_CTL +
- w->shift * 8, 0xF0,
- comp_samp_params[rate].rms_meter_div_fact);
- snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B3_CTL +
- w->shift * 8, 0xFF,
- comp_samp_params[rate].rms_meter_resamp_fact);
- snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B1_CTL +
- w->shift * 8, 0x38,
- comp_samp_params[rate].shutdown_timeout);
+ /* Set sample rate dependent parameter */
+ if (w->shift == COMPANDER_1) {
+ snd_soc_update_bits(codec,
+ TABLA_A_CDC_CLSG_CTL,
+ 0x11, 0x00);
+ snd_soc_write(codec,
+ TABLA_A_CDC_CONN_CLSG_CTL, 0x11);
}
+ snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B2_CTL +
+ w->shift * 8, 0x0F,
+ comp_samp_params[rate].peak_det_timeout);
+ snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B2_CTL +
+ w->shift * 8, 0xF0,
+ comp_samp_params[rate].rms_meter_div_fact);
+ snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B3_CTL +
+ w->shift * 8, 0xFF,
+ comp_samp_params[rate].rms_meter_resamp_fact);
+ snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B1_CTL +
+ w->shift * 8, 0x38,
+ comp_samp_params[rate].shutdown_timeout);
break;
case SND_SOC_DAPM_PRE_PMD:
- if (tabla->comp_enabled[w->shift] != 0) {
- status = snd_soc_read(codec,
- TABLA_A_CDC_COMP1_SHUT_DOWN_STATUS +
- w->shift * 8);
- pr_debug("%s: compander #%d shutdown status %d in event %d\n",
- __func__, w->shift + 1, status, event);
- /* Halt the compander*/
- snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B1_CTL +
- w->shift * 8, 1 << 2, 1 << 2);
- }
break;
case SND_SOC_DAPM_POST_PMD:
- if (tabla->comp_enabled[w->shift] != 0) {
- /* Wait up to a second for shutdown complete */
- timeout = jiffies + HZ;
- do {
- status = snd_soc_read(codec,
- TABLA_A_CDC_COMP1_SHUT_DOWN_STATUS +
- w->shift * 8);
- if (status == 0x3)
- break;
- usleep_range(5000, 5000);
- } while (!(time_after(jiffies, timeout)));
- /* Restore the gain */
- tabla_config_gain_compander(codec, w->shift,
- tabla->comp_enabled[w->shift],
- event);
- /* Disable the compander*/
- snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B1_CTL +
- w->shift * 8, 0x03, 0x00);
- /* Turn off the clock for compander in pair*/
- snd_soc_update_bits(codec, TABLA_A_CDC_CLK_RX_B2_CTL,
- 0x03 << comp_shift[w->shift], 0);
- /* Clear the HALT for the compander*/
+ /* Disable the compander */
+ snd_soc_update_bits(codec, TABLA_A_CDC_COMP1_B1_CTL +
+ w->shift * 8, 0x03, 0x00);
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec,
+ TABLA_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << comp_shift[w->shift],
+ 1 << comp_shift[w->shift]);
+ snd_soc_update_bits(codec,
+ TABLA_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << comp_shift[w->shift], 0);
+ /* Turn off the clock for compander in pair */
+ snd_soc_update_bits(codec, TABLA_A_CDC_CLK_RX_B2_CTL,
+ 0x03 << comp_shift[w->shift], 0);
+ /* Restore the gain */
+ tabla_config_gain_compander(codec, w->shift,
+ tabla->comp_enabled[w->shift],
+ event);
+ if (w->shift == COMPANDER_1) {
snd_soc_update_bits(codec,
- TABLA_A_CDC_COMP1_B1_CTL +
- w->shift * 8, 1 << 2, 0);
+ TABLA_A_CDC_CLSG_CTL,
+ 0x11, 0x11);
+ snd_soc_write(codec,
+ TABLA_A_CDC_CONN_CLSG_CTL, 0x14);
}
break;
}
+rtn:
+ return 0;
+}
+
+static int tabla_codec_hphr_dem_input_selection(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+
+ pr_debug("%s: compander#1->enable(%d) reg(0x%x = 0x%x) event(%d)\n",
+ __func__, tabla->comp_enabled[COMPANDER_1],
+ TABLA_A_CDC_RX1_B6_CTL,
+ snd_soc_read(codec, TABLA_A_CDC_RX1_B6_CTL), event);
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (tabla->comp_enabled[COMPANDER_1] && !tabla->anc_func)
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX1_B6_CTL,
+ 1 << w->shift, 0);
+ else
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX1_B6_CTL,
+ 1 << w->shift, 1 << w->shift);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX1_B6_CTL,
+ 1 << w->shift, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tabla_codec_hphl_dem_input_selection(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+
+ pr_debug("%s: compander#1->enable(%d) reg(0x%x = 0x%x) event(%d)\n",
+ __func__, tabla->comp_enabled[COMPANDER_1],
+ TABLA_A_CDC_RX2_B6_CTL,
+ snd_soc_read(codec, TABLA_A_CDC_RX2_B6_CTL), event);
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (tabla->comp_enabled[COMPANDER_1] && !tabla->anc_func)
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX2_B6_CTL,
+ 1 << w->shift, 0);
+ else
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX2_B6_CTL,
+ 1 << w->shift, 1 << w->shift);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, TABLA_A_CDC_RX2_B6_CTL,
+ 1 << w->shift, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -5361,8 +5476,12 @@
&rx6_dsm_mux, tabla_codec_reset_interpolator,
SND_SOC_DAPM_PRE_PMU),
- SND_SOC_DAPM_MIXER("RX1 CHAIN", TABLA_A_CDC_RX1_B6_CTL, 5, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("RX2 CHAIN", TABLA_A_CDC_RX2_B6_CTL, 5, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER_E("RX1 CHAIN", SND_SOC_NOPM, 5, 0, NULL,
+ 0, tabla_codec_hphr_dem_input_selection,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX2 CHAIN", SND_SOC_NOPM, 5, 0, NULL,
+ 0, tabla_codec_hphl_dem_input_selection,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
&rx_mix1_inp1_mux),
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 08167ca..43a1042 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -24,6 +24,7 @@
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9xxx/wcd9320_registers.h>
#include <linux/mfd/wcd9xxx/pdata.h>
+#include <linux/regulator/consumer.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -41,6 +42,9 @@
#define TAIKO_MAD_SLIMBUS_TX_PORT 12
#define TAIKO_MAD_AUDIO_FIRMWARE_PATH "wcd9320/wcd9320_mad_audio.bin"
+#define TAIKO_HPH_PA_SETTLE_COMP_ON 3000
+#define TAIKO_HPH_PA_SETTLE_COMP_OFF 13000
+
static atomic_t kp_taiko_priv;
static int spkr_drv_wrnd_param_set(const char *val,
const struct kernel_param *kp);
@@ -394,6 +398,7 @@
u8 aux_r_gain;
bool spkr_pa_widget_on;
+ struct regulator *spkdrv_reg;
struct afe_param_cdc_slimbus_slave_cfg slimbus_slave_cfg;
@@ -404,7 +409,6 @@
/* class h specific data */
struct wcd9xxx_clsh_cdc_data clsh_d;
-
};
static const u32 comp_shift[] = {
@@ -427,39 +431,39 @@
static const struct comp_sample_dependent_params comp_samp_params[] = {
{
/* 8 Khz */
- .peak_det_timeout = 0x02,
+ .peak_det_timeout = 0x06,
.rms_meter_div_fact = 0x09,
.rms_meter_resamp_fact = 0x06,
},
{
/* 16 Khz */
- .peak_det_timeout = 0x03,
+ .peak_det_timeout = 0x07,
.rms_meter_div_fact = 0x0A,
.rms_meter_resamp_fact = 0x0C,
},
{
/* 32 Khz */
- .peak_det_timeout = 0x05,
+ .peak_det_timeout = 0x08,
.rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x1E,
},
{
/* 48 Khz */
- .peak_det_timeout = 0x05,
+ .peak_det_timeout = 0x09,
.rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x28,
},
{
/* 96 Khz */
- .peak_det_timeout = 0x06,
+ .peak_det_timeout = 0x0A,
.rms_meter_div_fact = 0x0C,
.rms_meter_resamp_fact = 0x50,
},
{
/* 192 Khz */
- .peak_det_timeout = 0x07,
- .rms_meter_div_fact = 0xD,
- .rms_meter_resamp_fact = 0xA0,
+ .peak_det_timeout = 0x0B,
+ .rms_meter_div_fact = 0xC,
+ .rms_meter_resamp_fact = 0x50,
},
};
@@ -809,6 +813,36 @@
pr_debug("%s: Compander %d enable current %d, new %d\n",
__func__, comp, taiko->comp_enabled[comp], value);
taiko->comp_enabled[comp] = value;
+
+ if (comp == COMPANDER_1 &&
+ taiko->comp_enabled[comp] == 1) {
+ /* Wavegen to 5 msec */
+ snd_soc_write(codec, TAIKO_A_RX_HPH_CNP_WG_CTL, 0xDA);
+ snd_soc_write(codec, TAIKO_A_RX_HPH_CNP_WG_TIME, 0x15);
+ snd_soc_write(codec, TAIKO_A_RX_HPH_BIAS_WG_OCP, 0x2A);
+
+ /* Enable Chopper */
+ snd_soc_update_bits(codec,
+ TAIKO_A_RX_HPH_CHOP_CTL, 0x80, 0x80);
+
+ snd_soc_write(codec, TAIKO_A_NCP_DTEST, 0x20);
+ pr_debug("%s: Enabled Chopper and set wavegen to 5 msec\n",
+ __func__);
+ } else if (comp == COMPANDER_1 &&
+ taiko->comp_enabled[comp] == 0) {
+ /* Wavegen to 20 msec */
+ snd_soc_write(codec, TAIKO_A_RX_HPH_CNP_WG_CTL, 0xDB);
+ snd_soc_write(codec, TAIKO_A_RX_HPH_CNP_WG_TIME, 0x58);
+ snd_soc_write(codec, TAIKO_A_RX_HPH_BIAS_WG_OCP, 0x1A);
+
+ /* Disable CHOPPER block */
+ snd_soc_update_bits(codec,
+ TAIKO_A_RX_HPH_CHOP_CTL, 0x80, 0x00);
+
+ snd_soc_write(codec, TAIKO_A_NCP_DTEST, 0x10);
+ pr_debug("%s: Disabled Chopper and set wavegen to 20 msec\n",
+ __func__);
+ }
return 0;
}
@@ -848,26 +882,50 @@
static void taiko_discharge_comp(struct snd_soc_codec *codec, int comp)
{
- /* Update RSM to 1, DIVF to 5 */
- snd_soc_write(codec, TAIKO_A_CDC_COMP0_B3_CTL + (comp * 8), 1);
+ /* Level meter DIV Factor to 5 */
snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0,
- 1 << 5);
- /* Wait for 1ms */
- usleep_range(1000, 1000);
+ 0x05 << 4);
+ /* RMS meter Sampling to 0x01 */
+ snd_soc_write(codec, TAIKO_A_CDC_COMP0_B3_CTL + (comp * 8), 0x01);
+
+ /* Worst case timeout for compander CnP sleep timeout */
+ usleep_range(3000, 3000);
+}
+
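+/* Read the configured buck supply voltage (1.8 V or 2.15 V) from the codec platform data */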
+static enum wcd9xxx_buck_volt taiko_codec_get_buck_mv(
+ struct snd_soc_codec *codec)
+{
+ int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
+ struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
+ struct wcd9xxx_pdata *pdata = taiko->resmgr.pdata;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
+ if (!strncmp(pdata->regulator[i].name,
+ WCD9XXX_SUPPLY_BUCK_NAME,
+ sizeof(WCD9XXX_SUPPLY_BUCK_NAME))) {
+ if ((pdata->regulator[i].min_uV ==
+ WCD9XXX_CDC_BUCK_MV_1P8) ||
+ (pdata->regulator[i].min_uV ==
+ WCD9XXX_CDC_BUCK_MV_2P15))
+ buck_volt = pdata->regulator[i].min_uV;
+ break;
+ }
+ }
+ return buck_volt;
}
static int taiko_config_compander(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- int mask, emask;
- bool timedout;
- unsigned long timeout;
+ int mask, enable_mask;
struct snd_soc_codec *codec = w->codec;
struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
const int comp = w->shift;
const u32 rate = taiko->comp_fs[comp];
const struct comp_sample_dependent_params *comp_params =
&comp_samp_params[rate];
+ enum wcd9xxx_buck_volt buck_mv;
pr_debug("%s: %s event %d compander %d, enabled %d", __func__,
w->name, event, comp, taiko->comp_enabled[comp]);
@@ -877,72 +935,73 @@
/* Compander 0 has single channel */
mask = (comp == COMPANDER_0 ? 0x01 : 0x03);
- emask = (comp == COMPANDER_0 ? 0x02 : 0x03);
+ enable_mask = (comp == COMPANDER_0 ? 0x02 : 0x03);
+ buck_mv = taiko_codec_get_buck_mv(codec);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- /* Set gain source to compander */
- taiko_config_gain_compander(codec, comp, true);
- /* Enable RX interpolation path clocks */
+ /* Set compander Sample rate */
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_FS_CFG + (comp * 8),
+ 0x07, rate);
+ /* Set the static gain offset */
+ if (comp == COMPANDER_1
+ && buck_mv == WCD9XXX_CDC_BUCK_MV_2P15) {
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_B4_CTL + (comp * 8),
+ 0x80, 0x80);
+ } else {
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_B4_CTL + (comp * 8),
+ 0x80, 0x00);
+ }
+ /* Enable RX interpolation path compander clocks */
snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RX_B2_CTL,
mask << comp_shift[comp],
mask << comp_shift[comp]);
-
- taiko_discharge_comp(codec, comp);
-
- /* Clear compander halt */
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B1_CTL +
- (comp * 8),
- 1 << 2, 0);
/* Toggle compander reset bits */
snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
mask << comp_shift[comp],
mask << comp_shift[comp]);
snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
mask << comp_shift[comp], 0);
- break;
- case SND_SOC_DAPM_POST_PMU:
+
+ /* Set gain source to compander */
+ taiko_config_gain_compander(codec, comp, true);
+
+ /* Compander enable */
+ snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B1_CTL +
+ (comp * 8), enable_mask, enable_mask);
+
+ taiko_discharge_comp(codec, comp);
+
/* Set sample rate dependent paramater */
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_COMP0_FS_CFG + (comp * 8),
- 0x07, rate);
snd_soc_write(codec, TAIKO_A_CDC_COMP0_B3_CTL + (comp * 8),
comp_params->rms_meter_resamp_fact);
snd_soc_update_bits(codec,
TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8),
- 0x0F, comp_params->peak_det_timeout);
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8),
0xF0, comp_params->rms_meter_div_fact << 4);
- /* Compander enable */
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B1_CTL +
- (comp * 8), emask, emask);
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8),
+ 0x0F, comp_params->peak_det_timeout);
break;
case SND_SOC_DAPM_PRE_PMD:
- /* Halt compander */
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_COMP0_B1_CTL + (comp * 8),
- 1 << 2, 1 << 2);
- /* Wait up to a second for shutdown complete */
- timeout = jiffies + HZ;
- do {
- if ((snd_soc_read(codec,
- TAIKO_A_CDC_COMP0_SHUT_DOWN_STATUS +
- (comp * 8)) & mask) == mask)
- break;
- } while (!(timedout = time_after(jiffies, timeout)));
- pr_debug("%s: Compander %d shutdown %s in %dms\n", __func__,
- comp, timedout ? "timedout" : "completed",
- jiffies_to_msecs(timeout - HZ - jiffies));
- break;
- case SND_SOC_DAPM_POST_PMD:
/* Disable compander */
snd_soc_update_bits(codec,
TAIKO_A_CDC_COMP0_B1_CTL + (comp * 8),
- emask, 0x00);
+ enable_mask, 0x00);
+
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
+ mask << comp_shift[comp],
+ mask << comp_shift[comp]);
+ snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
+ mask << comp_shift[comp], 0);
+
/* Turn off the clock for compander in pair */
snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RX_B2_CTL,
mask << comp_shift[comp], 0);
+
/* Set gain source to register */
taiko_config_gain_compander(codec, comp, false);
break;
@@ -1074,6 +1133,14 @@
40, digital_gain),
SOC_SINGLE_S8_TLV("IIR1 INP4 Volume", TAIKO_A_CDC_IIR1_GAIN_B4_CTL, -84,
40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP1 Volume", TAIKO_A_CDC_IIR2_GAIN_B1_CTL, -84,
+ 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP2 Volume", TAIKO_A_CDC_IIR2_GAIN_B2_CTL, -84,
+ 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP3 Volume", TAIKO_A_CDC_IIR2_GAIN_B3_CTL, -84,
+ 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP4 Volume", TAIKO_A_CDC_IIR2_GAIN_B4_CTL, -84,
+ 40, digital_gain),
SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 100, 0, taiko_get_anc_slot,
taiko_put_anc_slot),
@@ -1388,7 +1455,7 @@
"ZERO", "EAR_HPH_L", "EAR_LINE_1",
};
-static const char * const iir1_inp1_text[] = {
+static const char * const iir_inp1_text[] = {
"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
};
@@ -1536,7 +1603,10 @@
SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_ANC_B2_CTL, 0, 3, anc1_fb_mux_text);
static const struct soc_enum iir1_inp1_mux_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B1_CTL, 0, 18, iir1_inp1_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B1_CTL, 0, 18, iir_inp1_text);
+
+static const struct soc_enum iir2_inp1_mux_enum =
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B1_CTL, 0, 18, iir_inp1_text);
static const struct snd_kcontrol_new rx_mix1_inp1_mux =
SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
@@ -1760,6 +1830,9 @@
static const struct snd_kcontrol_new iir1_inp1_mux =
SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
+static const struct snd_kcontrol_new iir2_inp1_mux =
+ SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
+
static const struct snd_kcontrol_new anc1_mux =
SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum);
@@ -2686,10 +2759,20 @@
int ret = 0;
struct snd_soc_codec *codec = w->codec;
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
+ struct taiko_priv *priv = snd_soc_codec_get_drvdata(codec);
pr_debug("%s: %d %s\n", __func__, event, w->name);
+
+ WARN_ONCE(!priv->spkdrv_reg, "SPKDRV supply %s isn't defined\n",
+ WCD9XXX_VDD_SPKDRV_NAME);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (priv->spkdrv_reg) {
+ ret = regulator_enable(priv->spkdrv_reg);
+ if (ret)
+ pr_err("%s: Failed to enable spkdrv_reg %s\n",
+ __func__, WCD9XXX_VDD_SPKDRV_NAME);
+ }
if (spkr_drv_wrnd > 0) {
WARN_ON(!(snd_soc_read(codec, TAIKO_A_SPKR_DRV_EN) &
0x80));
@@ -2710,6 +2793,12 @@
snd_soc_update_bits(codec, TAIKO_A_SPKR_DRV_EN, 0x80,
0x80);
}
+ if (priv->spkdrv_reg) {
+ ret = regulator_disable(priv->spkdrv_reg);
+ if (ret)
+ pr_err("%s: Failed to disable spkdrv_reg %s\n",
+ __func__, WCD9XXX_VDD_SPKDRV_NAME);
+ }
break;
}
@@ -2834,7 +2923,7 @@
int i;
int ret;
int num_anc_slots;
- struct anc_header *anc_head;
+ struct wcd9xxx_anc_header *anc_head;
struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
u32 anc_writes_size = 0;
int anc_size_remaining;
@@ -2857,16 +2946,18 @@
return -ENODEV;
}
- if (fw->size < sizeof(struct anc_header)) {
+ if (fw->size < sizeof(struct wcd9xxx_anc_header)) {
dev_err(codec->dev, "Not enough data\n");
release_firmware(fw);
return -ENOMEM;
}
/* First number is the number of register writes */
- anc_head = (struct anc_header *)(fw->data);
- anc_ptr = (u32 *)((u32)fw->data + sizeof(struct anc_header));
- anc_size_remaining = fw->size - sizeof(struct anc_header);
+ anc_head = (struct wcd9xxx_anc_header *)(fw->data);
+ anc_ptr = (u32 *)((u32)fw->data +
+ sizeof(struct wcd9xxx_anc_header));
+ anc_size_remaining = fw->size -
+ sizeof(struct wcd9xxx_anc_header);
num_anc_slots = anc_head->num_anc_slots;
if (taiko->anc_slot >= num_anc_slots) {
@@ -2932,6 +3023,7 @@
struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
enum wcd9xxx_notify_event e_pre_on, e_post_off;
u8 req_clsh_state;
+ u32 pa_settle_time = TAIKO_HPH_PA_SETTLE_COMP_OFF;
pr_debug("%s: %s event = %d\n", __func__, w->name, event);
if (w->shift == 5) {
@@ -2947,6 +3039,9 @@
return -EINVAL;
}
+ if (taiko->comp_enabled[COMPANDER_1])
+ pa_settle_time = TAIKO_HPH_PA_SETTLE_COMP_ON;
+
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
/* Let MBHC module know PA is turning on */
@@ -2954,16 +3049,21 @@
break;
case SND_SOC_DAPM_POST_PMU:
+ usleep_range(pa_settle_time, pa_settle_time + 1000);
+ pr_debug("%s: sleep %d us after %s PA enable\n", __func__,
+ pa_settle_time, w->name);
wcd9xxx_clsh_fsm(codec, &taiko->clsh_d,
req_clsh_state,
WCD9XXX_CLSH_REQ_ENABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
-
- usleep_range(5000, 5000);
break;
case SND_SOC_DAPM_POST_PMD:
+ usleep_range(pa_settle_time, pa_settle_time + 1000);
+ pr_debug("%s: sleep %d us after %s PA disable\n", __func__,
+ pa_settle_time, w->name);
+
/* Let MBHC module know PA turned off */
wcd9xxx_resmgr_notifier_call(&taiko->resmgr, e_post_off);
@@ -2972,9 +3072,6 @@
WCD9XXX_CLSH_REQ_DISABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
- pr_debug("%s: sleep 10 ms after %s PA disable.\n", __func__,
- w->name);
- usleep_range(5000, 5000);
break;
}
return 0;
@@ -3372,6 +3469,7 @@
{"RX1 MIX1 INP1", "RX6", "SLIM RX6"},
{"RX1 MIX1 INP1", "RX7", "SLIM RX7"},
{"RX1 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX1 MIX1 INP1", "IIR2", "IIR2"},
{"RX1 MIX1 INP2", "RX1", "SLIM RX1"},
{"RX1 MIX1 INP2", "RX2", "SLIM RX2"},
{"RX1 MIX1 INP2", "RX3", "SLIM RX3"},
@@ -3380,6 +3478,7 @@
{"RX1 MIX1 INP2", "RX6", "SLIM RX6"},
{"RX1 MIX1 INP2", "RX7", "SLIM RX7"},
{"RX1 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX1 MIX1 INP2", "IIR2", "IIR2"},
{"RX1 MIX1 INP3", "RX1", "SLIM RX1"},
{"RX1 MIX1 INP3", "RX2", "SLIM RX2"},
{"RX1 MIX1 INP3", "RX3", "SLIM RX3"},
@@ -3395,6 +3494,7 @@
{"RX2 MIX1 INP1", "RX6", "SLIM RX6"},
{"RX2 MIX1 INP1", "RX7", "SLIM RX7"},
{"RX2 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX2 MIX1 INP1", "IIR2", "IIR2"},
{"RX2 MIX1 INP2", "RX1", "SLIM RX1"},
{"RX2 MIX1 INP2", "RX2", "SLIM RX2"},
{"RX2 MIX1 INP2", "RX3", "SLIM RX3"},
@@ -3403,6 +3503,7 @@
{"RX2 MIX1 INP2", "RX6", "SLIM RX6"},
{"RX2 MIX1 INP2", "RX7", "SLIM RX7"},
{"RX2 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX2 MIX1 INP2", "IIR2", "IIR2"},
{"RX3 MIX1 INP1", "RX1", "SLIM RX1"},
{"RX3 MIX1 INP1", "RX2", "SLIM RX2"},
{"RX3 MIX1 INP1", "RX3", "SLIM RX3"},
@@ -3411,6 +3512,7 @@
{"RX3 MIX1 INP1", "RX6", "SLIM RX6"},
{"RX3 MIX1 INP1", "RX7", "SLIM RX7"},
{"RX3 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX3 MIX1 INP1", "IIR2", "IIR2"},
{"RX3 MIX1 INP2", "RX1", "SLIM RX1"},
{"RX3 MIX1 INP2", "RX2", "SLIM RX2"},
{"RX3 MIX1 INP2", "RX3", "SLIM RX3"},
@@ -3419,6 +3521,7 @@
{"RX3 MIX1 INP2", "RX6", "SLIM RX6"},
{"RX3 MIX1 INP2", "RX7", "SLIM RX7"},
{"RX3 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX3 MIX1 INP2", "IIR2", "IIR2"},
{"RX4 MIX1 INP1", "RX1", "SLIM RX1"},
{"RX4 MIX1 INP1", "RX2", "SLIM RX2"},
{"RX4 MIX1 INP1", "RX3", "SLIM RX3"},
@@ -3427,6 +3530,7 @@
{"RX4 MIX1 INP1", "RX6", "SLIM RX6"},
{"RX4 MIX1 INP1", "RX7", "SLIM RX7"},
{"RX4 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX4 MIX1 INP1", "IIR2", "IIR2"},
{"RX4 MIX1 INP2", "RX1", "SLIM RX1"},
{"RX4 MIX1 INP2", "RX2", "SLIM RX2"},
{"RX4 MIX1 INP2", "RX3", "SLIM RX3"},
@@ -3435,6 +3539,7 @@
{"RX4 MIX1 INP2", "RX6", "SLIM RX6"},
{"RX4 MIX1 INP2", "RX7", "SLIM RX7"},
{"RX4 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX4 MIX1 INP2", "IIR2", "IIR2"},
{"RX5 MIX1 INP1", "RX1", "SLIM RX1"},
{"RX5 MIX1 INP1", "RX2", "SLIM RX2"},
{"RX5 MIX1 INP1", "RX3", "SLIM RX3"},
@@ -3443,6 +3548,7 @@
{"RX5 MIX1 INP1", "RX6", "SLIM RX6"},
{"RX5 MIX1 INP1", "RX7", "SLIM RX7"},
{"RX5 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX5 MIX1 INP1", "IIR2", "IIR2"},
{"RX5 MIX1 INP2", "RX1", "SLIM RX1"},
{"RX5 MIX1 INP2", "RX2", "SLIM RX2"},
{"RX5 MIX1 INP2", "RX3", "SLIM RX3"},
@@ -3451,6 +3557,7 @@
{"RX5 MIX1 INP2", "RX6", "SLIM RX6"},
{"RX5 MIX1 INP2", "RX7", "SLIM RX7"},
{"RX5 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX5 MIX1 INP2", "IIR2", "IIR2"},
{"RX6 MIX1 INP1", "RX1", "SLIM RX1"},
{"RX6 MIX1 INP1", "RX2", "SLIM RX2"},
{"RX6 MIX1 INP1", "RX3", "SLIM RX3"},
@@ -3459,6 +3566,7 @@
{"RX6 MIX1 INP1", "RX6", "SLIM RX6"},
{"RX6 MIX1 INP1", "RX7", "SLIM RX7"},
{"RX6 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX6 MIX1 INP1", "IIR2", "IIR2"},
{"RX6 MIX1 INP2", "RX1", "SLIM RX1"},
{"RX6 MIX1 INP2", "RX2", "SLIM RX2"},
{"RX6 MIX1 INP2", "RX3", "SLIM RX3"},
@@ -3467,6 +3575,7 @@
{"RX6 MIX1 INP2", "RX6", "SLIM RX6"},
{"RX6 MIX1 INP2", "RX7", "SLIM RX7"},
{"RX6 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX6 MIX1 INP2", "IIR2", "IIR2"},
{"RX7 MIX1 INP1", "RX1", "SLIM RX1"},
{"RX7 MIX1 INP1", "RX2", "SLIM RX2"},
{"RX7 MIX1 INP1", "RX3", "SLIM RX3"},
@@ -3475,6 +3584,7 @@
{"RX7 MIX1 INP1", "RX6", "SLIM RX6"},
{"RX7 MIX1 INP1", "RX7", "SLIM RX7"},
{"RX7 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX7 MIX1 INP1", "IIR2", "IIR2"},
{"RX7 MIX1 INP2", "RX1", "SLIM RX1"},
{"RX7 MIX1 INP2", "RX2", "SLIM RX2"},
{"RX7 MIX1 INP2", "RX3", "SLIM RX3"},
@@ -3489,6 +3599,13 @@
{"RX2 MIX2 INP2", "IIR1", "IIR1"},
{"RX7 MIX2 INP1", "IIR1", "IIR1"},
{"RX7 MIX2 INP2", "IIR1", "IIR1"},
+ {"RX7 MIX1 INP2", "IIR2", "IIR2"},
+ {"RX1 MIX2 INP1", "IIR2", "IIR2"},
+ {"RX1 MIX2 INP2", "IIR2", "IIR2"},
+ {"RX2 MIX2 INP1", "IIR2", "IIR2"},
+ {"RX2 MIX2 INP2", "IIR2", "IIR2"},
+ {"RX7 MIX2 INP1", "IIR2", "IIR2"},
+ {"RX7 MIX2 INP2", "IIR2", "IIR2"},
/* Decimator Inputs */
{"DEC1 MUX", "DMIC1", "DMIC1"},
@@ -3561,6 +3678,18 @@
{"IIR1 INP1 MUX", "DEC9", "DEC9 MUX"},
{"IIR1 INP1 MUX", "DEC10", "DEC10 MUX"},
+ {"IIR2", NULL, "IIR2 INP1 MUX"},
+ {"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
+ {"IIR2 INP1 MUX", "DEC3", "DEC3 MUX"},
+ {"IIR2 INP1 MUX", "DEC4", "DEC4 MUX"},
+ {"IIR2 INP1 MUX", "DEC5", "DEC5 MUX"},
+ {"IIR2 INP1 MUX", "DEC6", "DEC6 MUX"},
+ {"IIR2 INP1 MUX", "DEC7", "DEC7 MUX"},
+ {"IIR2 INP1 MUX", "DEC8", "DEC8 MUX"},
+ {"IIR2 INP1 MUX", "DEC9", "DEC9 MUX"},
+ {"IIR2 INP1 MUX", "DEC10", "DEC10 MUX"},
+
{"MIC BIAS1 Internal1", NULL, "LDO_H"},
{"MIC BIAS1 Internal2", NULL, "LDO_H"},
{"MIC BIAS1 External", NULL, "LDO_H"},
@@ -4255,7 +4384,7 @@
.rate_max = 192000,
.rate_min = 8000,
.channels_min = 1,
- .channels_max = 4,
+ .channels_max = 5,
},
.ops = &taiko_dai_ops,
},
@@ -4900,13 +5029,13 @@
SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0,
taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0,
taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0,
taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_INPUT("AMIC1"),
@@ -5114,6 +5243,9 @@
SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
SND_SOC_DAPM_PGA("IIR1", TAIKO_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
+ SND_SOC_DAPM_PGA("IIR2", TAIKO_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0),
+
/* AUX PGA */
SND_SOC_DAPM_ADC_E("AUX_PGA_Left", NULL, TAIKO_A_RX_AUX_SW_CTL, 7, 0,
taiko_codec_enable_aux_pga, SND_SOC_DAPM_PRE_PMU |
@@ -5320,7 +5452,8 @@
}
for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
- if (!strncmp(pdata->regulator[i].name, "CDC_VDDA_RX", 11)) {
+ if (pdata->regulator[i].name &&
+ !strncmp(pdata->regulator[i].name, "CDC_VDDA_RX", 11)) {
if (pdata->regulator[i].min_uV == 1800000 &&
pdata->regulator[i].max_uV == 1800000) {
snd_soc_write(codec, TAIKO_A_BIAS_REF_CTL,
@@ -5681,6 +5814,21 @@
{TAIKO_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F},
{TAIKO_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F},
{TAIKO_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F},
+
+ /*
+ * Set up the wavegen timer to 20 msec and disable the chopper
+ * by default. This corresponds to Compander OFF.
+ */
+ {TAIKO_A_RX_HPH_CNP_WG_CTL, 0xFF, 0xDB},
+ {TAIKO_A_RX_HPH_CNP_WG_TIME, 0xFF, 0x58},
+ {TAIKO_A_RX_HPH_BIAS_WG_OCP, 0xFF, 0x1A},
+ {TAIKO_A_RX_HPH_CHOP_CTL, 0xFF, 0x24},
+
+ /* Choose max non-overlap time for NCP */
+ {TAIKO_A_NCP_CLK, 0xFF, 0xFC},
+
+ /* Program the 0.85 volt VBG_REFERENCE */
+ {TAIKO_A_BIAS_CURR_CTL_2, 0xFF, 0x04},
};
static void taiko_codec_init_reg(struct snd_soc_codec *codec)
@@ -5693,28 +5841,39 @@
taiko_codec_reg_init_val[i].val);
}
-static int taiko_setup_irqs(struct taiko_priv *taiko)
+static void taiko_slim_interface_init_reg(struct snd_soc_codec *codec)
{
int i;
- int ret = 0;
- struct snd_soc_codec *codec = taiko->codec;
-
- ret = wcd9xxx_request_irq(codec->control_data, WCD9XXX_IRQ_SLIMBUS,
- taiko_slimbus_irq, "SLIMBUS Slave", taiko);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- WCD9XXX_IRQ_SLIMBUS);
- goto exit;
- }
for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++)
wcd9xxx_interface_reg_write(codec->control_data,
TAIKO_SLIM_PGD_PORT_INT_EN0 + i,
0xFF);
-exit:
+}
+
+static int taiko_setup_irqs(struct taiko_priv *taiko)
+{
+ int ret = 0;
+ struct snd_soc_codec *codec = taiko->codec;
+
+ ret = wcd9xxx_request_irq(codec->control_data, WCD9XXX_IRQ_SLIMBUS,
+ taiko_slimbus_irq, "SLIMBUS Slave", taiko);
+ if (ret)
+ pr_err("%s: Failed to request irq %d\n", __func__,
+ WCD9XXX_IRQ_SLIMBUS);
+ else
+ taiko_slim_interface_init_reg(codec);
+
return ret;
}
+static void taiko_cleanup_irqs(struct taiko_priv *taiko)
+{
+ struct snd_soc_codec *codec = taiko->codec;
+
+ wcd9xxx_free_irq(codec->control_data, WCD9XXX_IRQ_SLIMBUS, taiko);
+}
+
int taiko_hs_detect(struct snd_soc_codec *codec,
struct wcd9xxx_mbhc_config *mbhc_cfg)
{
@@ -5773,9 +5932,11 @@
pr_err("%s: bad pdata\n", __func__);
taiko_init_slim_slave_cfg(codec);
+ taiko_slim_interface_init_reg(codec);
wcd9xxx_mbhc_deinit(&taiko->mbhc);
- ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec);
+ ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec,
+ WCD9XXX_MBHC_VERSION_TAIKO);
if (ret)
pr_err("%s: mbhc init failed %d\n", __func__, ret);
else
@@ -5818,24 +5979,6 @@
return 0;
}
-static int taiko_codec_get_buck_mv(struct snd_soc_codec *codec)
-{
- int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
- struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_pdata *pdata = taiko->resmgr.pdata;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
- if (!strncmp(pdata->regulator[i].name,
- WCD9XXX_SUPPLY_BUCK_NAME,
- sizeof(WCD9XXX_SUPPLY_BUCK_NAME))) {
- buck_volt = pdata->regulator[i].min_uV;
- break;
- }
- }
- return buck_volt;
-}
-
static const struct snd_soc_dapm_widget taiko_1_dapm_widgets[] = {
SND_SOC_DAPM_ADC_E("ADC1", NULL, TAIKO_A_TX_1_2_EN, 7, 0,
taiko_codec_enable_adc,
@@ -5886,6 +6029,21 @@
SND_SOC_DAPM_POST_PMU),
};
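+/* Find the named supply among the regulators already held by the wcd9xxx core */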
+static struct regulator *taiko_codec_find_regulator(struct snd_soc_codec *codec,
+ const char *name)
+{
+ int i;
+ struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
+
+ for (i = 0; i < core->num_of_supplies; i++) {
+ if (core->supplies[i].supply &&
+ !strcmp(core->supplies[i].supply, name))
+ return core->supplies[i].consumer;
+ }
+
+ return NULL;
+}
+
static int taiko_codec_probe(struct snd_soc_codec *codec)
{
struct wcd9xxx *control;
@@ -5917,10 +6075,8 @@
tx_hpf_corner_freq_callback);
}
-
snd_soc_codec_set_drvdata(codec, taiko);
-
/* codec resmgr module init */
wcd9xxx = codec->control_data;
pdata = dev_get_platdata(codec->dev->parent);
@@ -5928,17 +6084,18 @@
&taiko_reg_address);
if (ret) {
pr_err("%s: wcd9xxx init failed %d\n", __func__, ret);
- return ret;
+ goto err_init;
}
taiko->clsh_d.buck_mv = taiko_codec_get_buck_mv(codec);
wcd9xxx_clsh_init(&taiko->clsh_d, &taiko->resmgr);
/* init and start mbhc */
- ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec);
+ ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec,
+ WCD9XXX_MBHC_VERSION_TAIKO);
if (ret) {
pr_err("%s: mbhc init failed %d\n", __func__, ret);
- return ret;
+ goto err_init;
}
taiko->codec = codec;
@@ -5963,6 +6120,9 @@
goto err_pdata;
}
+ taiko->spkdrv_reg = taiko_codec_find_regulator(codec,
+ WCD9XXX_VDD_SPKDRV_NAME);
+
if (spkr_drv_wrnd > 0) {
WCD9XXX_BCL_LOCK(&taiko->resmgr);
wcd9xxx_resmgr_get_bandgap(&taiko->resmgr,
@@ -6029,7 +6189,11 @@
snd_soc_dapm_sync(dapm);
- (void) taiko_setup_irqs(taiko);
+ ret = taiko_setup_irqs(taiko);
+ if (ret) {
+ pr_err("%s: taiko irq setup failed %d\n", __func__, ret);
+ goto err_irq;
+ }
atomic_set(&kp_taiko_priv, (unsigned long)taiko);
mutex_lock(&dapm->codec->mutex);
@@ -6044,10 +6208,13 @@
codec->ignore_pmdown_time = 1;
return ret;
+err_irq:
+ taiko_cleanup_irqs(taiko);
err_pdata:
kfree(ptr);
err_nomem_slimch:
kfree(taiko);
+err_init:
return ret;
}
static int taiko_codec_remove(struct snd_soc_codec *codec)
@@ -6062,11 +6229,15 @@
WCD9XXX_BANDGAP_AUDIO_MODE);
WCD9XXX_BCL_UNLOCK(&taiko->resmgr);
+ taiko_cleanup_irqs(taiko);
+
/* cleanup MBHC */
wcd9xxx_mbhc_deinit(&taiko->mbhc);
/* cleanup resmgr */
wcd9xxx_resmgr_deinit(&taiko->resmgr);
+ taiko->spkdrv_reg = NULL;
+
kfree(taiko);
return 0;
}
diff --git a/sound/soc/codecs/wcd9320.h b/sound/soc/codecs/wcd9320.h
index 36310e5..a4dbd7a 100644
--- a/sound/soc/codecs/wcd9320.h
+++ b/sound/soc/codecs/wcd9320.h
@@ -90,11 +90,6 @@
TAIKO_TX_MAX,
};
-struct anc_header {
- u32 reserved[3];
- u32 num_anc_slots;
-};
-
struct mad_audio_header {
u32 reserved[3];
u32 num_reg_cfg;
diff --git a/sound/soc/codecs/wcd9xxx-common.h b/sound/soc/codecs/wcd9xxx-common.h
index dc00ec6..6bc581c 100644
--- a/sound/soc/codecs/wcd9xxx-common.h
+++ b/sound/soc/codecs/wcd9xxx-common.h
@@ -55,6 +55,10 @@
struct wcd9xxx_resmgr *resmgr;
};
+struct wcd9xxx_anc_header {
+ u32 reserved[3];
+ u32 num_anc_slots;
+};
enum wcd9xxx_buck_volt {
WCD9XXX_CDC_BUCK_UNSUPPORTED = 0,
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index cbfff1c..daba6d5 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -34,6 +34,7 @@
#include <linux/kernel.h>
#include <linux/gpio.h>
#include "wcd9320.h"
+#include "wcd9306.h"
#include "wcd9xxx-mbhc.h"
#include "wcd9xxx-resmgr.h"
@@ -88,10 +89,13 @@
#define WCD9XXX_MEAS_INVALD_RANGE_LOW_MV 20
#define WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV 80
#define WCD9XXX_GM_SWAP_THRES_MIN_MV 150
-#define WCD9XXX_GM_SWAP_THRES_MAX_MV 500
+#define WCD9XXX_GM_SWAP_THRES_MAX_MV 650
#define WCD9XXX_USLEEP_RANGE_MARGIN_US 1000
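+/* The MBHC jack-switch interrupt line differs between Taiko and Tapan codecs */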
+#define WCD9XXX_IRQ_MBHC_JACK_SWITCH_TAIKO 28
+#define WCD9XXX_IRQ_MBHC_JACK_SWITCH_TAPAN 21
+
static bool detect_use_vddio_switch;
struct wcd9xxx_mbhc_detect {
@@ -140,6 +144,64 @@
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04, on << 2);
}
+static int wcd9xxx_enable_mux_bias_block(struct snd_soc_codec *codec,
+ struct wcd9xxx_mbhc *mbhc)
+{
+ switch (mbhc->mbhc_version) {
+ case WCD9XXX_MBHC_VERSION_TAIKO:
+ snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+ 0x80, 0x80);
+ break;
+ case WCD9XXX_MBHC_VERSION_TAPAN:
+ snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
+ 0x80, 0x00);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int wcd9xxx_put_cfilt_fast_mode(struct snd_soc_codec *codec,
+ struct wcd9xxx_mbhc *mbhc)
+{
+ switch (mbhc->mbhc_version) {
+ case WCD9XXX_MBHC_VERSION_TAIKO:
+ snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
+ 0x70, 0x00);
+ break;
+ case WCD9XXX_MBHC_VERSION_TAPAN:
+ snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
+ 0x70, 0x70);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int wcd9xxx_codec_specific_cal_setup(struct snd_soc_codec *codec,
+ struct wcd9xxx_mbhc *mbhc)
+{
+ switch (mbhc->mbhc_version) {
+ case WCD9XXX_MBHC_VERSION_TAIKO:
+ snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+ 0x04, 0x04);
+ break;
+ case WCD9XXX_MBHC_VERSION_TAPAN:
+ snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+ 0x0C, 0x04);
+ snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0xE0, 0xE0);
+ /* Make sure the calibration is ON */
+ snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_val,
+ 0x02, 0x02);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/* called under codec_resource_lock acquisition */
static void wcd9xxx_pause_hs_polling(struct wcd9xxx_mbhc *mbhc)
{
@@ -161,17 +223,24 @@
{
struct snd_soc_codec *codec = mbhc->codec;
int mbhc_state = mbhc->mbhc_state;
+ int ret;
pr_debug("%s: enter\n", __func__);
if (!mbhc->polling_active) {
pr_debug("Polling is not active, do not start polling\n");
return;
}
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x84);
+
+ snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04);
+ ret = wcd9xxx_enable_mux_bias_block(codec, mbhc);
+ if (ret) {
+ pr_err("%s: Error returned, ret: %d\n", __func__, ret);
+ return;
+ }
if (!mbhc->no_mic_headset_override &&
mbhc_state == MBHC_STATE_POTENTIAL) {
- pr_debug("%s recovering MBHC state macine\n", __func__);
+ pr_debug("%s recovering MBHC state machine\n", __func__);
mbhc->mbhc_state = MBHC_STATE_POTENTIAL_RECOVERY;
/* set to max button press threshold */
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL, 0x7F);
@@ -347,33 +416,65 @@
(mbhc->mbhc_data.v_brl >> 8) & 0xFF);
}
-static void wcd9xxx_codec_switch_cfilt_mode(struct wcd9xxx_mbhc *mbhc,
+static int wcd9xxx_codec_switch_cfilt_mode(struct wcd9xxx_mbhc *mbhc,
bool fast)
{
struct snd_soc_codec *codec = mbhc->codec;
u8 reg_mode_val, cur_mode_val;
- if (fast)
- reg_mode_val = WCD9XXX_CFILT_FAST_MODE;
- else
- reg_mode_val = WCD9XXX_CFILT_SLOW_MODE;
+ switch (mbhc->mbhc_version) {
+ case WCD9XXX_MBHC_VERSION_TAIKO:
+ if (fast)
+ reg_mode_val = WCD9XXX_CFILT_FAST_MODE;
+ else
+ reg_mode_val = WCD9XXX_CFILT_SLOW_MODE;
- cur_mode_val =
- snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl) & 0x40;
+ cur_mode_val =
+ snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl) & 0x40;
- if (cur_mode_val != reg_mode_val) {
- if (mbhc->polling_active)
- wcd9xxx_pause_hs_polling(mbhc);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl, 0x40,
- reg_mode_val);
- if (mbhc->polling_active)
- wcd9xxx_start_hs_polling(mbhc);
- pr_debug("%s: CFILT mode change (%x to %x)\n", __func__,
- cur_mode_val, reg_mode_val);
- } else {
- pr_debug("%s: CFILT Value is already %x\n",
- __func__, cur_mode_val);
+ if (cur_mode_val != reg_mode_val) {
+ if (mbhc->polling_active)
+ wcd9xxx_pause_hs_polling(mbhc);
+ snd_soc_update_bits(codec,
+ mbhc->mbhc_bias_regs.cfilt_ctl,
+ 0x40, reg_mode_val);
+ if (mbhc->polling_active)
+ wcd9xxx_start_hs_polling(mbhc);
+ pr_debug("%s: CFILT mode change (%x to %x)\n", __func__,
+ cur_mode_val, reg_mode_val);
+ } else {
+ pr_debug("%s: CFILT Value is already %x\n",
+ __func__, cur_mode_val);
+ }
+ break;
+ case WCD9XXX_MBHC_VERSION_TAPAN:
+ if (fast)
+ reg_mode_val = WCD9XXX_CFILT_EXT_PRCHG_EN;
+ else
+ reg_mode_val = WCD9XXX_CFILT_EXT_PRCHG_DSBL;
+
+ cur_mode_val =
+ snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl) & 0x70;
+
+ if (cur_mode_val != reg_mode_val) {
+ if (mbhc->polling_active)
+ wcd9xxx_pause_hs_polling(mbhc);
+ snd_soc_update_bits(codec,
+ mbhc->mbhc_bias_regs.cfilt_ctl,
+ 0x70, reg_mode_val);
+ if (mbhc->polling_active)
+ wcd9xxx_start_hs_polling(mbhc);
+ pr_debug("%s: CFILT mode change (%x to %x)\n", __func__,
+ cur_mode_val, reg_mode_val);
+ } else {
+ pr_debug("%s: CFILT Value is already %x\n",
+ __func__, cur_mode_val);
+ }
+ break;
+ default:
+ return -EINVAL;
}
+ return 0;
}
static void wcd9xxx_jack_report(struct wcd9xxx_mbhc *mbhc,
@@ -838,6 +939,7 @@
struct snd_soc_codec *codec = mbhc->codec;
short bias_value;
u8 cfilt_mode;
+ int ret;
WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
@@ -858,10 +960,14 @@
/* Make sure CFILT is in fast mode, save current mode */
cfilt_mode = snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl, 0x70, 0x00);
-
+ ret = wcd9xxx_put_cfilt_fast_mode(codec, mbhc);
+ if (ret)
+ goto gen_err;
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, 0x2);
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x84);
+ snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04);
+ ret = wcd9xxx_enable_mux_bias_block(codec, mbhc);
+ if (ret)
+ goto gen_err;
snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x80, 0x80);
snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x1F, 0x1C);
@@ -878,11 +984,14 @@
/* don't flip override */
bias_value = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl, 0x40,
- cfilt_mode);
+ snd_soc_write(codec, mbhc->mbhc_bias_regs.cfilt_ctl, cfilt_mode);
snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
return bias_value;
+
+gen_err:
+ pr_err("%s: Error returned, ret: %d\n", __func__, ret);
+ return ret;
}
static void wcd9xxx_shutdown_hs_removal_detect(struct wcd9xxx_mbhc *mbhc)
@@ -1273,8 +1382,9 @@
0);
}
- snd_soc_update_bits(codec, mbhc->resmgr->reg_addr->micb_4_mbhc, 0x3,
- mbhc->mbhc_cfg->micbias);
+ if (mbhc->resmgr->reg_addr->micb_4_mbhc)
+ snd_soc_update_bits(codec, mbhc->resmgr->reg_addr->micb_4_mbhc,
+ 0x3, mbhc->mbhc_cfg->micbias);
wcd9xxx_enable_irq(mbhc->resmgr->core, WCD9XXX_IRQ_MBHC_INSERTION);
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x1, 0x1);
@@ -2580,6 +2690,7 @@
{
u8 cfilt_mode;
u16 reg0, reg1;
+ int ret;
struct snd_soc_codec *codec = mbhc->codec;
pr_debug("%s: enter\n", __func__);
@@ -2596,8 +2707,9 @@
* Need to restore defaults once calculation is done.
*/
cfilt_mode = snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl, 0x40, 0x00);
-
+ ret = wcd9xxx_put_cfilt_fast_mode(codec, mbhc);
+ if (ret)
+ goto gen_err;
/*
* Micbias, CFILT, LDOH, MBHC MUX mode settings
* to perform ADC calibration
@@ -2607,8 +2719,9 @@
snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1, 0x60, 0x60);
snd_soc_write(codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0x78);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04, 0x04);
-
+ ret = wcd9xxx_codec_specific_cal_setup(codec, mbhc);
+ if (ret)
+ goto gen_err;
/* Pull down micbias to ground */
reg0 = snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg);
snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 1, 1);
@@ -2616,7 +2729,10 @@
reg1 = snd_soc_read(codec, WCD9XXX_A_MAD_ANA_CTRL);
snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4, 1 << 0);
/* Connect the MUX to micbias */
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x82);
+ snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
+ ret = wcd9xxx_enable_mux_bias_block(codec, mbhc);
+ if (ret)
+ goto gen_err;
usleep_range(WCD9XXX_MUX_SWITCH_READY_WAIT_US,
WCD9XXX_MUX_SWITCH_READY_WAIT_US +
WCD9XXX_USLEEP_RANGE_MARGIN_US);
@@ -2635,7 +2751,10 @@
/* DCE measurment for MB voltage */
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x82);
+ snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
+ ret = wcd9xxx_enable_mux_bias_block(codec, mbhc);
+ if (ret)
+ goto gen_err;
usleep_range(100, 100);
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x04);
usleep_range(mbhc->mbhc_data.t_dce, mbhc->mbhc_data.t_dce);
@@ -2645,7 +2764,10 @@
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x02);
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x82);
+ snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
+ ret = wcd9xxx_enable_mux_bias_block(codec, mbhc);
+ if (ret)
+ goto gen_err;
usleep_range(100, 100);
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x02);
usleep_range(mbhc->mbhc_data.t_sta, mbhc->mbhc_data.t_sta);
@@ -2653,16 +2775,20 @@
/* Restore default settings. */
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04, 0x00);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl, 0x40,
- cfilt_mode);
-
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x84);
+ snd_soc_write(codec, mbhc->mbhc_bias_regs.cfilt_ctl, cfilt_mode);
+ snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04);
+ ret = wcd9xxx_enable_mux_bias_block(codec, mbhc);
+ if (ret)
+ goto gen_err;
usleep_range(100, 100);
wcd9xxx_enable_irq(codec->control_data, WCD9XXX_IRQ_MBHC_POTENTIAL);
wcd9xxx_turn_onoff_rel_detection(codec, true);
pr_debug("%s: leave\n", __func__);
+	return;
+
+gen_err:
+ pr_err("%s: Error returned, ret: %d\n", __func__, ret);
}
static void wcd9xxx_mbhc_setup(struct wcd9xxx_mbhc *mbhc)
@@ -2702,8 +2828,9 @@
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
btn_det->mbhc_nsc << 3);
- snd_soc_update_bits(codec, mbhc->resmgr->reg_addr->micb_4_mbhc, 0x03,
- MBHC_MICBIAS2);
+ if (mbhc->resmgr->reg_addr->micb_4_mbhc)
+ snd_soc_update_bits(codec, mbhc->resmgr->reg_addr->micb_4_mbhc,
+ 0x03, MBHC_MICBIAS2);
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x02, 0x02);
@@ -2720,6 +2847,7 @@
{
int ret = 0;
void *core = mbhc->resmgr->core;
+ int jack_irq;
if (mbhc->mbhc_cfg->gpio) {
ret = request_threaded_irq(mbhc->mbhc_cfg->gpio_irq, NULL,
@@ -2741,13 +2869,25 @@
/* Enable HPHL_10K_SW */
snd_soc_update_bits(mbhc->codec, WCD9XXX_A_RX_HPH_OCP_CTL,
1 << 1, 1 << 1);
- ret = wcd9xxx_request_irq(core, WCD9XXX_IRQ_MBHC_JACK_SWITCH,
+
+ switch (mbhc->mbhc_version) {
+ case WCD9XXX_MBHC_VERSION_TAIKO:
+ jack_irq = WCD9XXX_IRQ_MBHC_JACK_SWITCH_TAIKO;
+ break;
+ case WCD9XXX_MBHC_VERSION_TAPAN:
+ jack_irq = WCD9XXX_IRQ_MBHC_JACK_SWITCH_TAPAN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = wcd9xxx_request_irq(core, jack_irq,
wcd9xxx_mech_plug_detect_irq,
"Jack Detect",
mbhc);
if (ret)
pr_err("%s: Failed to request insert detect irq %d\n",
- __func__, WCD9XXX_IRQ_MBHC_JACK_SWITCH);
+ __func__, jack_irq);
}
return ret;
@@ -2977,7 +3117,7 @@
int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc,
struct wcd9xxx_mbhc_config *mbhc_cfg)
{
- int rc = 0;
+ int rc;
struct snd_soc_codec *codec = mbhc->codec;
pr_debug("%s: enter\n", __func__);
@@ -3001,10 +3141,10 @@
wcd9xxx_get_mbhc_micbias_regs(mbhc, &mbhc->mbhc_bias_regs);
/* Put CFILT in fast mode by default */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
- 0x40, WCD9XXX_CFILT_FAST_MODE);
-
- if (!mbhc->mbhc_cfg->read_fw_bin)
+ rc = wcd9xxx_put_cfilt_fast_mode(codec, mbhc);
+ if (rc)
+ pr_err("%s: Error returned, ret: %d\n", __func__, rc);
+ else if (!mbhc->mbhc_cfg->read_fw_bin)
rc = wcd9xxx_init_and_calibrate(mbhc);
else
schedule_delayed_work(&mbhc->mbhc_firmware_dwork,
@@ -3211,7 +3351,7 @@
* Switch CFILT to slow mode if MBHC CFILT is being
* used.
*/
- wcd9xxx_codec_switch_cfilt_mode(mbhc, false);
+ ret = wcd9xxx_codec_switch_cfilt_mode(mbhc, false);
break;
case WCD9XXX_EVENT_POST_CFILT_1_OFF:
case WCD9XXX_EVENT_POST_CFILT_2_OFF:
@@ -3222,7 +3362,7 @@
* Switch CFILT to fast mode if MBHC CFILT is not
* used anymore.
*/
- wcd9xxx_codec_switch_cfilt_mode(mbhc, true);
+ ret = wcd9xxx_codec_switch_cfilt_mode(mbhc, true);
break;
/* System resume */
case WCD9XXX_EVENT_POST_RESUME:
@@ -3244,7 +3384,7 @@
pr_debug("%s: leave\n", __func__);
- return 0;
+ return ret;
}
/*
@@ -3253,7 +3393,7 @@
* NOTE: mbhc->mbhc_cfg is not YET configure so shouldn't be used
*/
int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr,
- struct snd_soc_codec *codec)
+ struct snd_soc_codec *codec, int version)
{
int ret;
void *core;
@@ -3276,6 +3416,7 @@
mbhc->codec = codec;
mbhc->resmgr = resmgr;
mbhc->resmgr->mbhc = mbhc;
+ mbhc->mbhc_version = version;
if (mbhc->headset_jack.jack == NULL) {
ret = snd_soc_jack_new(codec, "Headset Jack", WCD9XXX_JACK_MASK,
@@ -3393,7 +3534,6 @@
{
void *cdata = mbhc->codec->control_data;
- wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_SLIMBUS, mbhc);
wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_RELEASE, mbhc);
wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_POTENTIAL, mbhc);
wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_REMOVAL, mbhc);
@@ -3402,7 +3542,6 @@
wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_JACK_SWITCH, mbhc);
wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_HPH_PA_OCPL_FAULT, mbhc);
wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_HPH_PA_OCPR_FAULT, mbhc);
- wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_RELEASE, mbhc);
if (mbhc->mbhc_fw)
release_firmware(mbhc->mbhc_fw);
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.h b/sound/soc/codecs/wcd9xxx-mbhc.h
index fb1dfdc..300e34e 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.h
+++ b/sound/soc/codecs/wcd9xxx-mbhc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,8 @@
#define WCD9XXX_CFILT_FAST_MODE 0x00
#define WCD9XXX_CFILT_SLOW_MODE 0x40
+#define WCD9XXX_CFILT_EXT_PRCHG_EN 0x70
+#define WCD9XXX_CFILT_EXT_PRCHG_DSBL 0x40
struct mbhc_micbias_regs {
u16 cfilt_val;
@@ -51,6 +53,12 @@
s16 v_inval_ins_high;
};
+enum wcd9xxx_mbhc_version {
+ WCD9XXX_MBHC_VERSION_UNKNOWN = 0,
+ WCD9XXX_MBHC_VERSION_TAIKO,
+ WCD9XXX_MBHC_VERSION_TAPAN,
+};
+
enum wcd9xxx_mbhc_plug_type {
PLUG_TYPE_INVALID = -1,
PLUG_TYPE_NONE,
@@ -243,6 +251,8 @@
struct notifier_block nblock;
+ enum wcd9xxx_mbhc_version mbhc_version;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_poke;
struct dentry *debugfs_mbhc;
@@ -307,7 +317,7 @@
int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc,
struct wcd9xxx_mbhc_config *mbhc_cfg);
int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr,
- struct snd_soc_codec *codec);
+ struct snd_soc_codec *codec, int version);
void wcd9xxx_mbhc_deinit(struct wcd9xxx_mbhc *mbhc);
void *wcd9xxx_mbhc_cal_btn_det_mp(
const struct wcd9xxx_mbhc_btn_detect_cfg *btn_det,
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.c b/sound/soc/codecs/wcd9xxx-resmgr.c
index 18614d8..60a76a2 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr.c
@@ -391,8 +391,6 @@
} else {
snd_soc_update_bits(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x1, 0);
snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0);
- /* clk source to ext clk and clk buff ref to VBG */
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x0C, 0x04);
}
return 0;
@@ -423,9 +421,14 @@
snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02);
wcd9xxx_resmgr_enable_config_mode(codec, 0);
}
+ /* clk source to ext clk and clk buff ref to VBG */
+ snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x0C, 0x04);
}
snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x01, 0x01);
+ /* sleep time required by codec hardware to enable clock buffer */
+ usleep_range(1000, 1200);
+
snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x00);
/* on MCLK */
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 35a9646..66c475f 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -163,6 +163,16 @@
OCMEM gets exercised for low-power
audio and voice use cases.
+config DOLBY_DAP
+ bool "Enable Dolby DAP"
+ depends on SND_SOC_MSM8974
+ help
+ To add support for Dolby DAP post processing.
+ This option configures the post-processing parameters
+ in the DSP. The configuration includes sending the endpoint
+ device, endpoint-dependent post-processing parameters and
+ the various other post-processing parameters.
+
config SND_SOC_MSM8974
tristate "SoC Machine driver for MSM8974 boards"
depends on ARCH_MSM8974
@@ -173,6 +183,7 @@
select SND_SOC_MSM_HDMI_CODEC_RX
select SND_DYNAMIC_MINORS
select AUDIO_OCMEM
+ select DOLBY_DAP
help
To add support for SoC audio on MSM8974.
This will enable sound soc drivers which
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index c26eafc..ebde90b 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -62,12 +62,13 @@
snd-soc-qdsp6-objs += msm-pcm-lpa.o msm-pcm-afe.o
obj-$(CONFIG_SND_SOC_QDSP6) += snd-soc-qdsp6.o
+snd-soc-hostless-pcm-objs := msm-pcm-hostless.o
+obj-$(CONFIG_SND_SOC_MSM_HOSTLESS_PCM) += snd-soc-hostless-pcm.o
+
snd-soc-msm8960-objs := msm8960.o apq8064.o msm8930.o mpq8064.o apq8064-i2s.o
obj-$(CONFIG_SND_SOC_MSM8960) += snd-soc-msm8960.o
# Generic MSM drivers
-snd-soc-hostless-pcm-objs := msm-pcm-hostless.o
-obj-$(CONFIG_SND_SOC_MSM_HOSTLESS_PCM) += snd-soc-hostless-pcm.o
snd-soc-msm8660-apq-objs := msm8660-apq-wm8903.o
obj-$(CONFIG_SND_SOC_MSM8660_APQ) += snd-soc-msm8660-apq.o
diff --git a/sound/soc/msm/apq8064-i2s.c b/sound/soc/msm/apq8064-i2s.c
index f9e0402..99defcd 100644
--- a/sound/soc/msm/apq8064-i2s.c
+++ b/sound/soc/msm/apq8064-i2s.c
@@ -2636,6 +2636,7 @@
pr_info("%s: Not APQ8064 in I2S mode\n", __func__);
return -ENODEV;
}
+ mutex_init(&cdc_mclk_mutex);
pr_debug("%s: APQ8064 is in I2S mode\n", __func__);
mbhc_cfg.calibration = def_tabla_mbhc_cal();
if (!mbhc_cfg.calibration) {
@@ -2680,7 +2681,6 @@
return ret;
}
- mutex_init(&cdc_mclk_mutex);
atomic_set(&mi2s_rsc_ref, 0);
atomic_set(&auxpcm_rsc_ref, 0);
return ret;
diff --git a/sound/soc/msm/apq8064.c b/sound/soc/msm/apq8064.c
index cafc5c3..7960f13 100644
--- a/sound/soc/msm/apq8064.c
+++ b/sound/soc/msm/apq8064.c
@@ -2187,6 +2187,8 @@
if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917)
bottom_spk_pamp_gpio = PM8921_GPIO_PM_TO_SYS(16);
+ mutex_init(&cdc_mclk_mutex);
+
mbhc_cfg.calibration = def_tabla_mbhc_cal();
if (!mbhc_cfg.calibration) {
pr_err("Calibration data allocation failed\n");
@@ -2208,7 +2210,6 @@
return ret;
}
- mutex_init(&cdc_mclk_mutex);
atomic_set(&auxpcm_rsc_ref, 0);
return ret;
diff --git a/sound/soc/msm/mdm9625.c b/sound/soc/msm/mdm9625.c
index 2bef1b7..f3ccb33 100644
--- a/sound/soc/msm/mdm9625.c
+++ b/sound/soc/msm/mdm9625.c
@@ -761,6 +761,21 @@
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
},
{
+ .name = "MDM9625 Media2",
+ .stream_name = "MultiMedia2",
+ .cpu_dai_name = "MultiMedia2",
+ .platform_name = "msm-pcm-dsp.0",
+ .dynamic = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dai-link has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA2,
+ },
+ {
.name = "MSM VoIP",
.stream_name = "VoIP",
.cpu_dai_name = "VoIP",
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 734bd39..8db13f6 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -25,7 +25,7 @@
/* Conventional and unconventional sample rate supported */
static unsigned int supported_sample_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
- 96000
+ 96000, 192000
};
static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
@@ -53,14 +53,14 @@
.playback = {
.stream_name = "Multimedia1 Playback",
.aif_name = "MM_DL1",
- .rates = (SNDRV_PCM_RATE_8000_96000|
+ .rates = (SNDRV_PCM_RATE_8000_192000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
},
.capture = {
.stream_name = "Multimedia1 Capture",
@@ -80,14 +80,14 @@
.playback = {
.stream_name = "Multimedia2 Playback",
.aif_name = "MM_DL2",
- .rates = (SNDRV_PCM_RATE_8000_96000|
+ .rates = (SNDRV_PCM_RATE_8000_192000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
},
.capture = {
.stream_name = "Multimedia2 Capture",
@@ -157,14 +157,14 @@
.playback = {
.stream_name = "MultiMedia3 Playback",
.aif_name = "MM_DL3",
- .rates = (SNDRV_PCM_RATE_8000_96000 |
+ .rates = (SNDRV_PCM_RATE_8000_192000 |
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 6,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
},
.ops = &msm_fe_Multimedia_dai_ops,
.name = "MultiMedia3",
@@ -173,14 +173,14 @@
.playback = {
.stream_name = "MultiMedia4 Playback",
.aif_name = "MM_DL4",
- .rates = (SNDRV_PCM_RATE_8000_96000 |
+ .rates = (SNDRV_PCM_RATE_8000_192000 |
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
},
.capture = {
.stream_name = "MultiMedia4 Capture",
@@ -200,14 +200,14 @@
.playback = {
.stream_name = "MultiMedia5 Playback",
.aif_name = "MM_DL5",
- .rates = (SNDRV_PCM_RATE_8000_96000 |
+ .rates = (SNDRV_PCM_RATE_8000_192000 |
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
},
.capture = {
.stream_name = "MultiMedia5 Capture",
@@ -227,14 +227,14 @@
.playback = {
.stream_name = "MultiMedia6 Playback",
.aif_name = "MM_DL6",
- .rates = (SNDRV_PCM_RATE_8000_96000 |
+ .rates = (SNDRV_PCM_RATE_8000_192000 |
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
},
.ops = &msm_fe_Multimedia_dai_ops,
.name = "MultiMedia6",
@@ -243,14 +243,14 @@
.playback = {
.stream_name = "MultiMedia7 Playback",
.aif_name = "MM_DL7",
- .rates = (SNDRV_PCM_RATE_8000_96000 |
+ .rates = (SNDRV_PCM_RATE_8000_192000 |
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
},
.ops = &msm_fe_Multimedia_dai_ops,
.name = "MultiMedia7",
@@ -259,14 +259,14 @@
.playback = {
.stream_name = "MultiMedia8 Playback",
.aif_name = "MM_DL8",
- .rates = (SNDRV_PCM_RATE_8000_96000 |
+ .rates = (SNDRV_PCM_RATE_8000_192000 |
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
},
.ops = &msm_fe_Multimedia_dai_ops,
.name = "MultiMedia8",
@@ -276,7 +276,7 @@
.playback = {
.stream_name = "SLIMBUS0 Hostless Playback",
.aif_name = "SLIM0_DL_HL",
- .rates = SNDRV_PCM_RATE_8000_96000,
+ .rates = SNDRV_PCM_RATE_8000_192000,
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
@@ -301,13 +301,13 @@
.playback = {
.stream_name = "SLIMBUS1 Hostless Playback",
.aif_name = "SLIM1_DL_HL",
- .rates = SNDRV_PCM_RATE_8000_96000,
+ .rates = SNDRV_PCM_RATE_8000_192000,
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 2,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
},
.capture = {
.stream_name = "SLIMBUS1 Hostless Capture",
@@ -326,13 +326,13 @@
.playback = {
.stream_name = "SLIMBUS3 Hostless Playback",
.aif_name = "SLIM3_DL_HL",
- .rates = SNDRV_PCM_RATE_8000_48000,
+ .rates = SNDRV_PCM_RATE_8000_192000,
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 2,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 192000,
},
.capture = {
.stream_name = "SLIMBUS3 Hostless Capture",
@@ -351,13 +351,13 @@
.playback = {
.stream_name = "SLIMBUS4 Hostless Playback",
.aif_name = "SLIM4_DL_HL",
- .rates = SNDRV_PCM_RATE_8000_96000,
+ .rates = SNDRV_PCM_RATE_8000_192000,
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
.channels_max = 2,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 192000,
},
.capture = {
.stream_name = "SLIMBUS4 Hostless Capture",
diff --git a/sound/soc/msm/msm-dai-q6.c b/sound/soc/msm/msm-dai-q6.c
index 7381677..d973c17 100644
--- a/sound/soc/msm/msm-dai-q6.c
+++ b/sound/soc/msm/msm-dai-q6.c
@@ -1805,7 +1805,7 @@
SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
- .channels_max = 4,
+ .channels_max = 5,
.rate_min = 8000,
.rate_max = 192000,
},
diff --git a/sound/soc/msm/msm-pcm-lpa.c b/sound/soc/msm/msm-pcm-lpa.c
index ba054bd..95c5cd7 100644
--- a/sound/soc/msm/msm-pcm-lpa.c
+++ b/sound/soc/msm/msm-pcm-lpa.c
@@ -24,6 +24,7 @@
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/control.h>
+#include <sound/pcm_params.h>
#include <asm/dma.h>
#include <linux/dma-mapping.h>
@@ -470,8 +471,8 @@
return -EPERM;
ret = q6asm_audio_client_buf_alloc_contiguous(dir,
prtd->audio_client,
- runtime->hw.period_bytes_min,
- runtime->hw.periods_max);
+ params_period_bytes(params),
+ params_periods(params));
if (ret < 0) {
pr_err("Audio Start: Buffer Allocation failed \
rc = %d\n", ret);
@@ -488,7 +489,7 @@
dma_buf->private_data = NULL;
dma_buf->area = buf[0].data;
dma_buf->addr = buf[0].phys;
- dma_buf->bytes = runtime->hw.buffer_bytes_max;
+ dma_buf->bytes = params_period_bytes(params) * params_periods(params);
if (!dma_buf->area)
return -ENOMEM;
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index c5cb560..e74a0dd 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -93,6 +93,7 @@
static const DECLARE_TLV_DB_LINEAR(compressed3_rx_vol_gain, 0,
INT_RX_VOL_MAX_STEPS);
static int msm_route_ec_ref_rx;
+static int msm_route_ext_ec_ref;
/* Equal to Frontend after last of the MULTIMEDIA SESSIONS */
#define MAX_EQ_SESSIONS MSM_FRONTEND_DAI_CS_VOICE
@@ -1425,6 +1426,57 @@
msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put),
};
+static int msm_routing_ext_ec_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: ext_ec_ref_rx = %x\n", __func__, msm_route_ext_ec_ref);
+
+ mutex_lock(&routing_lock);
+ ucontrol->value.integer.value[0] = msm_route_ext_ec_ref;
+ mutex_unlock(&routing_lock);
+ return 0;
+}
+
+static int msm_routing_ext_ec_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ int mux = ucontrol->value.enumerated.item[0];
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int ret = 0;
+
+ pr_debug("%s: msm_route_ext_ec_ref = %d value = %ld\n",
+ __func__, msm_route_ext_ec_ref,
+ ucontrol->value.integer.value[0]);
+
+ mutex_lock(&routing_lock);
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_route_ext_ec_ref = MI2S_TX;
+ ret = voc_set_ext_ec_ref(msm_route_ext_ec_ref, true);
+ break;
+ default:
+ msm_route_ext_ec_ref = AFE_PORT_INVALID;
+ ret = voc_set_ext_ec_ref(msm_route_ext_ec_ref, false);
+ break;
+ }
+ snd_soc_dapm_mux_update_power(widget, kcontrol, 1, mux, e);
+ mutex_unlock(&routing_lock);
+ return ret;
+}
+
+static const char * const ext_ec_ref_rx[] = {"NONE", "MI2S_TX"};
+
+static const struct soc_enum msm_route_ext_ec_ref_rx_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, ext_ec_ref_rx),
+};
+
+static const struct snd_kcontrol_new voc_ext_ec_mux =
+ SOC_DAPM_ENUM_EXT("VOC_EXT_EC MUX Mux", msm_route_ext_ec_ref_rx_enum[0],
+ msm_routing_ext_ec_get, msm_routing_ext_ec_put);
+
+
static const struct snd_kcontrol_new pri_i2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_I2S_RX ,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -2887,6 +2939,7 @@
/* Virtual Pins to force backends ON atm */
SND_SOC_DAPM_OUTPUT("BE_OUT"),
SND_SOC_DAPM_INPUT("BE_IN"),
+ SND_SOC_DAPM_MUX("VOC_EXT_EC MUX", SND_SOC_NOPM, 0, 0, &voc_ext_ec_mux),
};
@@ -3071,6 +3124,8 @@
{"HDMI", NULL, "HDMI_RX_Voice Mixer"},
{"HDMI", NULL, "HDMI_DL_HL"},
+ {"VOC_EXT_EC MUX", "MI2S_TX", "MI2S_TX"},
+ {"CS-VOICE_UL1", NULL, "VOC_EXT_EC MUX"},
{"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"},
{"Voice_Tx Mixer", "SEC_TX_Voice", "SEC_I2S_TX"},
{"Voice_Tx Mixer", "MI2S_TX_Voice", "MI2S_TX"},
diff --git a/sound/soc/msm/msm8226.c b/sound/soc/msm/msm8226.c
index b97d9dc..08731f6 100644
--- a/sound/soc/msm/msm8226.c
+++ b/sound/soc/msm/msm8226.c
@@ -56,7 +56,7 @@
.mclk_rate = TAPAN_EXT_CLK_RATE,
.gpio = 0,
.gpio_irq = 0,
- .gpio_level_insert = 1,
+ .gpio_level_insert = 0,
.detect_extn_cable = true,
.insert_detect = true,
.swap_gnd_mic = NULL,
@@ -402,11 +402,8 @@
/* start mbhc */
mbhc_cfg.calibration = def_tapan_mbhc_cal();
- if (mbhc_cfg.calibration) {
- pr_info("%s: WCD9306: Headset detection disabled\n",
- __func__);
- }
-
+ if (mbhc_cfg.calibration)
+ err = tapan_hs_detect(codec, &mbhc_cfg);
else
err = -ENOMEM;
@@ -439,8 +436,8 @@
S(t_ldoh, 100);
S(t_bg_fast_settle, 100);
S(t_shutdown_plug_rem, 255);
- S(mbhc_nsa, 4);
- S(mbhc_navg, 4);
+ S(mbhc_nsa, 2);
+ S(mbhc_navg, 128);
#undef S
#define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_DET_PTR(tapan_cal)->X) = (Y))
S(mic_current, TAPAN_PID_MIC_5_UA);
@@ -451,13 +448,13 @@
#undef S
#define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(tapan_cal)->X) = (Y))
S(v_no_mic, 30);
- S(v_hs_max, 2400);
+ S(v_hs_max, 1650);
#undef S
#define S(X, Y) ((WCD9XXX_MBHC_CAL_BTN_DET_PTR(tapan_cal)->X) = (Y))
S(c[0], 62);
S(c[1], 124);
S(nc, 1);
- S(n_meas, 3);
+ S(n_meas, 5);
S(mbhc_nsc, 11);
S(n_btn_meas, 1);
S(n_btn_con, 2);
@@ -487,13 +484,13 @@
btn_high[7] = 330;
n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_READY);
n_ready[0] = 80;
- n_ready[1] = 68;
+ n_ready[1] = 12;
n_cic = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_CIC);
n_cic[0] = 60;
n_cic[1] = 47;
gain = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_GAIN);
gain[0] = 11;
- gain[1] = 9;
+ gain[1] = 14;
return tapan_cal;
}
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index 2bd5c88..737317c 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -63,6 +63,7 @@
static int clk_users;
static int msm8930_headset_gpios_configured;
+static struct mutex cdc_mclk_mutex;
static struct snd_soc_jack hs_jack;
static struct snd_soc_jack button_jack;
@@ -265,35 +266,42 @@
struct snd_soc_codec *codec, int enable,
bool dapm)
{
+ int r = 0;
pr_debug("%s: enable = %d\n", __func__, enable);
+
+ mutex_lock(&cdc_mclk_mutex);
if (enable) {
clk_users++;
pr_debug("%s: clk_users = %d\n", __func__, clk_users);
- if (clk_users != 1)
- return 0;
-
- if (codec_clk) {
- clk_set_rate(codec_clk, SITAR_EXT_CLK_RATE);
- clk_prepare_enable(codec_clk);
- sitar_mclk_enable(codec, 1, dapm);
- } else {
- pr_err("%s: Error setting Sitar MCLK\n", __func__);
- clk_users--;
- return -EINVAL;
+ if (clk_users == 1) {
+ if (codec_clk) {
+ clk_set_rate(codec_clk, SITAR_EXT_CLK_RATE);
+ clk_prepare_enable(codec_clk);
+ sitar_mclk_enable(codec, 1, dapm);
+ } else {
+ pr_err("%s: Error setting Sitar MCLK\n",
+ __func__);
+ clk_users--;
+ r = -EINVAL;
+ }
}
} else {
- pr_debug("%s: clk_users = %d\n", __func__, clk_users);
- if (clk_users == 0)
- return 0;
- clk_users--;
- if (!clk_users) {
- pr_debug("%s: disabling MCLK. clk_users = %d\n",
+ if (clk_users > 0) {
+ clk_users--;
+ pr_debug("%s: clk_users = %d\n", __func__, clk_users);
+ if (clk_users == 0) {
+ pr_debug("%s: disabling MCLK. clk_users = %d\n",
__func__, clk_users);
- sitar_mclk_enable(codec, 0, dapm);
- clk_disable_unprepare(codec_clk);
+ sitar_mclk_enable(codec, 0, dapm);
+ clk_disable_unprepare(codec_clk);
+ }
+ } else {
+ pr_err("%s: Error releasing Sitar MCLK\n", __func__);
+ r = -EINVAL;
}
}
- return 0;
+ mutex_unlock(&cdc_mclk_mutex);
+ return r;
}
static int msm8930_mclk_event(struct snd_soc_dapm_widget *w,
@@ -1378,6 +1386,7 @@
msm8930_headset_gpios_configured = 1;
atomic_set(&auxpcm_rsc_ref, 0);
+ mutex_init(&cdc_mclk_mutex);
return ret;
}
@@ -1392,6 +1401,7 @@
msm8930_free_headset_mic_gpios();
platform_device_unregister(msm8930_snd_device);
kfree(mbhc_cfg.calibration);
+ mutex_destroy(&cdc_mclk_mutex);
}
module_exit(msm8930_audio_exit);
diff --git a/sound/soc/msm/msm8960.c b/sound/soc/msm/msm8960.c
index 65b3a57..c7fbc43 100644
--- a/sound/soc/msm/msm8960.c
+++ b/sound/soc/msm/msm8960.c
@@ -1778,6 +1778,7 @@
return -ENODEV ;
}
+ mutex_init(&cdc_mclk_mutex);
mbhc_cfg.calibration = def_tabla_mbhc_cal();
if (!mbhc_cfg.calibration) {
pr_err("Calibration data allocation failed\n");
@@ -1837,7 +1838,6 @@
__func__);
}
- mutex_init(&cdc_mclk_mutex);
atomic_set(&auxpcm_rsc_ref, 0);
return ret;
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index c305a15..76d6b57 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -48,6 +48,7 @@
#define SAMPLING_RATE_48KHZ 48000
#define SAMPLING_RATE_96KHZ 96000
+#define SAMPLING_RATE_192KHZ 192000
static int msm8974_auxpcm_rate = 8000;
#define LO_1_SPK_AMP 0x1
@@ -142,19 +143,28 @@
u32 mclk_freq;
int us_euro_gpio;
struct msm_auxpcm_ctrl *pri_auxpcm_ctrl;
+ struct msm_auxpcm_ctrl *sec_auxpcm_ctrl;
};
#define GPIO_NAME_INDEX 0
#define DT_PARSE_INDEX 1
-static char *msm_auxpcm_gpio_name[][2] = {
- {"PRIM_AUXPCM_CLK", "prim-auxpcm-gpio-clk"},
- {"PRIM_AUXPCM_SYNC", "prim-auxpcm-gpio-sync"},
- {"PRIM_AUXPCM_DIN", "prim-auxpcm-gpio-din"},
- {"PRIM_AUXPCM_DOUT", "prim-auxpcm-gpio-dout"},
+static char *msm_prim_auxpcm_gpio_name[][2] = {
+ {"PRIM_AUXPCM_CLK", "qcom,prim-auxpcm-gpio-clk"},
+ {"PRIM_AUXPCM_SYNC", "qcom,prim-auxpcm-gpio-sync"},
+ {"PRIM_AUXPCM_DIN", "qcom,prim-auxpcm-gpio-din"},
+ {"PRIM_AUXPCM_DOUT", "qcom,prim-auxpcm-gpio-dout"},
+};
+
+static char *msm_sec_auxpcm_gpio_name[][2] = {
+ {"SEC_AUXPCM_CLK", "qcom,sec-auxpcm-gpio-clk"},
+ {"SEC_AUXPCM_SYNC", "qcom,sec-auxpcm-gpio-sync"},
+ {"SEC_AUXPCM_DIN", "qcom,sec-auxpcm-gpio-din"},
+ {"SEC_AUXPCM_DOUT", "qcom,sec-auxpcm-gpio-dout"},
};
void *lpaif_pri_muxsel_virt_addr;
+void *lpaif_sec_muxsel_virt_addr;
struct msm8974_liquid_dock_dev {
int dock_plug_gpio;
@@ -189,11 +199,13 @@
static int msm_btsco_ch = 1;
static int msm_hdmi_rx_ch = 2;
static int slim0_rx_sample_rate = SAMPLING_RATE_48KHZ;
+static int msm_proxy_rx_ch = 2;
static struct mutex cdc_mclk_mutex;
-static struct q_clkdiv *codec_clk;
+static struct clk *codec_clk;
static int clk_users;
-static atomic_t auxpcm_rsc_ref;
+static atomic_t prim_auxpcm_rsc_ref;
+static atomic_t sec_auxpcm_rsc_ref;
static int msm8974_liquid_ext_spk_power_amp_init(void)
@@ -569,23 +581,24 @@
if (clk_users != 1)
goto exit;
- ret = qpnp_clkdiv_enable(codec_clk);
- if (ret) {
- dev_err(codec->dev, "%s: Error enabling taiko MCLK\n",
- __func__);
- ret = -ENODEV;
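+	/* Enable the external MCLK through the common clock framework */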
+ if (codec_clk) {
+ clk_set_rate(codec_clk, TAIKO_EXT_CLK_RATE);
+ clk_prepare_enable(codec_clk);
+ taiko_mclk_enable(codec, 1, dapm);
+ } else {
+ pr_err("%s: Error setting Taiko MCLK\n", __func__);
+ clk_users--;
goto exit;
}
- taiko_mclk_enable(codec, 1, dapm);
} else {
if (clk_users > 0) {
clk_users--;
if (clk_users == 0) {
taiko_mclk_enable(codec, 0, dapm);
- qpnp_clkdiv_disable(codec_clk);
+ clk_disable_unprepare(codec_clk);
}
} else {
- pr_err("%s: Error releasing Tabla MCLK\n", __func__);
+ pr_err("%s: Error releasing Taiko MCLK\n", __func__);
ret = -EINVAL;
goto exit;
}
@@ -641,17 +654,15 @@
static const char *const spk_function[] = {"Off", "On"};
static const char *const slim0_rx_ch_text[] = {"One", "Two"};
-static const char *const slim0_tx_ch_text[] = {"One", "Two", "Three", "Four"};
+static const char *const slim0_tx_ch_text[] = {"One", "Two", "Three", "Four",
+ "Five"};
static char const *hdmi_rx_ch_text[] = {"Two", "Three", "Four", "Five",
"Six", "Seven", "Eight"};
static char const *rx_bit_format_text[] = {"S16_LE", "S24_LE"};
-static char const *slim0_rx_sample_rate_text[] = {"KHZ_48", "KHZ_96"};
-
-static const struct soc_enum msm_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, spk_function),
- SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
- SOC_ENUM_SINGLE_EXT(4, slim0_tx_ch_text),
-};
+static char const *slim0_rx_sample_rate_text[] = {"KHZ_48", "KHZ_96",
+ "KHZ_192"};
+static const char *const proxy_rx_ch_text[] = {"One", "Two", "Three", "Four",
+ "Five", "Six", "Seven", "Eight"};
static const char *const btsco_rate_text[] = {"8000", "16000"};
static const struct soc_enum msm_btsco_enum[] = {
@@ -664,6 +675,10 @@
int sample_rate_val = 0;
switch (slim0_rx_sample_rate) {
+ case SAMPLING_RATE_192KHZ:
+ sample_rate_val = 2;
+ break;
+
case SAMPLING_RATE_96KHZ:
sample_rate_val = 1;
break;
@@ -688,6 +703,9 @@
ucontrol->value.integer.value[0]);
switch (ucontrol->value.integer.value[0]) {
+ case 2:
+ slim0_rx_sample_rate = SAMPLING_RATE_192KHZ;
+ break;
case 1:
slim0_rx_sample_rate = SAMPLING_RATE_96KHZ;
break;
@@ -909,6 +927,23 @@
}
return 0;
}
+static int msm_proxy_rx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_proxy_rx_ch = %d\n", __func__,
+ msm_proxy_rx_ch);
+ ucontrol->value.integer.value[0] = msm_proxy_rx_ch - 1;
+ return 0;
+}
+
+static int msm_proxy_rx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_proxy_rx_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_proxy_rx_ch = %d\n", __func__,
+ msm_proxy_rx_ch);
+ return 1;
+}
static int msm_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
@@ -925,15 +960,31 @@
return 0;
}
-static int msm_proxy_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params)
+static int msm_proxy_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
{
struct snd_interval *rate = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_RATE);
+ SNDRV_PCM_HW_PARAM_RATE);
- pr_debug("%s()\n", __func__);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ pr_debug("%s: msm_proxy_rx_ch =%d\n", __func__, msm_proxy_rx_ch);
+
+ if (channels->max < 2)
+ channels->min = channels->max = 2;
+ channels->min = channels->max = msm_proxy_rx_ch;
rate->min = rate->max = 48000;
+ return 0;
+}
+static int msm_proxy_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+
+ rate->min = rate->max = 48000;
return 0;
}
@@ -959,30 +1010,13 @@
return 0;
}
-static int msm_aux_pcm_get_gpios(struct snd_pcm_substream *substream)
+static int msm_aux_pcm_get_gpios(struct msm_auxpcm_ctrl *auxpcm_ctrl)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_card *card = rtd->card;
- struct msm8974_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
- struct msm_auxpcm_ctrl *auxpcm_ctrl = NULL;
struct msm_auxpcm_gpio *pin_data = NULL;
int ret = 0;
int i;
int j;
- if (pdata == NULL) {
- pr_err("%s: pdata is NULL\n", __func__);
- ret = -EINVAL;
- goto err;
- }
-
- auxpcm_ctrl = pdata->pri_auxpcm_ctrl;
-
- if (auxpcm_ctrl == NULL || auxpcm_ctrl->pin_data == NULL) {
- pr_err("%s: Ctrl pointers are NULL\n", __func__);
- ret = -EINVAL;
- goto err;
- }
pin_data = auxpcm_ctrl->pin_data;
for (i = 0; i < auxpcm_ctrl->cnt; i++, pin_data++) {
ret = gpio_request(pin_data->gpio_no,
@@ -998,31 +1032,17 @@
/* Release all GPIOs on failure */
for (j = i; j >= 0; j--)
gpio_free(pin_data->gpio_no);
- goto err;
+ return ret;
}
}
-
-err:
- return ret;
+ return 0;
}
-static int msm_aux_pcm_free_gpios(struct snd_pcm_substream *substream)
+static int msm_aux_pcm_free_gpios(struct msm_auxpcm_ctrl *auxpcm_ctrl)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_card *card = rtd->card;
- struct msm8974_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
- struct msm_auxpcm_ctrl *auxpcm_ctrl = NULL;
struct msm_auxpcm_gpio *pin_data = NULL;
- int ret = 0;
int i;
-
- if (pdata == NULL) {
- pr_err("%s: pdata is NULL\n", __func__);
- ret = -EINVAL;
- goto err;
- }
-
- auxpcm_ctrl = pdata->pri_auxpcm_ctrl;
+ int ret = 0;
if (auxpcm_ctrl == NULL || auxpcm_ctrl->pin_data == NULL) {
pr_err("%s: Ctrl pointers are NULL\n", __func__);
@@ -1041,40 +1061,115 @@
return ret;
}
-static int msm_auxpcm_startup(struct snd_pcm_substream *substream)
+static int msm_prim_auxpcm_startup(struct snd_pcm_substream *substream)
{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct msm8974_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_auxpcm_ctrl *auxpcm_ctrl = NULL;
int ret = 0;
- pr_debug("%s(): substream = %s, auxpcm_rsc_ref counter = %d\n",
- __func__, substream->name, atomic_read(&auxpcm_rsc_ref));
+ pr_debug("%s(): substream = %s, prim_auxpcm_rsc_ref counter = %d\n",
+ __func__, substream->name, atomic_read(&prim_auxpcm_rsc_ref));
- if (atomic_inc_return(&auxpcm_rsc_ref) == 1) {
+ auxpcm_ctrl = pdata->pri_auxpcm_ctrl;
+
+ if (auxpcm_ctrl == NULL || auxpcm_ctrl->pin_data == NULL) {
+ pr_err("%s: Ctrl pointers are NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+ if (atomic_inc_return(&prim_auxpcm_rsc_ref) == 1) {
if (lpaif_pri_muxsel_virt_addr != NULL)
iowrite32(I2S_PCM_SEL << I2S_PCM_SEL_OFFSET,
lpaif_pri_muxsel_virt_addr);
else
pr_err("%s lpaif_pri_muxsel_virt_addr is NULL\n",
__func__);
- ret = msm_aux_pcm_get_gpios(substream);
+ ret = msm_aux_pcm_get_gpios(auxpcm_ctrl);
}
if (ret < 0) {
pr_err("%s: Aux PCM GPIO request failed\n", __func__);
return -EINVAL;
}
+err:
return ret;
}
-static void msm_auxpcm_shutdown(struct snd_pcm_substream *substream)
+static void msm_prim_auxpcm_shutdown(struct snd_pcm_substream *substream)
{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct msm8974_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_auxpcm_ctrl *auxpcm_ctrl = NULL;
- pr_debug("%s(): substream = %s, auxpcm_rsc_ref counter = %d\n",
- __func__, substream->name, atomic_read(&auxpcm_rsc_ref));
- if (atomic_dec_return(&auxpcm_rsc_ref) == 0)
- msm_aux_pcm_free_gpios(substream);
+ pr_debug("%s(): substream = %s, prim_auxpcm_rsc_ref counter = %d\n",
+ __func__, substream->name, atomic_read(&prim_auxpcm_rsc_ref));
+
+ auxpcm_ctrl = pdata->pri_auxpcm_ctrl;
+
+ if (atomic_dec_return(&prim_auxpcm_rsc_ref) == 0)
+ msm_aux_pcm_free_gpios(auxpcm_ctrl);
}
-static struct snd_soc_ops msm_auxpcm_be_ops = {
- .startup = msm_auxpcm_startup,
- .shutdown = msm_auxpcm_shutdown,
+static struct snd_soc_ops msm_pri_auxpcm_be_ops = {
+ .startup = msm_prim_auxpcm_startup,
+ .shutdown = msm_prim_auxpcm_shutdown,
+};
+
+static int msm_sec_auxpcm_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct msm8974_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_auxpcm_ctrl *auxpcm_ctrl = NULL;
+ int ret = 0;
+
+ pr_debug("%s(): substream = %s, sec_auxpcm_rsc_ref counter = %d\n",
+ __func__, substream->name, atomic_read(&sec_auxpcm_rsc_ref));
+
+ auxpcm_ctrl = pdata->sec_auxpcm_ctrl;
+
+ if (auxpcm_ctrl == NULL || auxpcm_ctrl->pin_data == NULL) {
+ pr_err("%s: Ctrl pointers are NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+ if (atomic_inc_return(&sec_auxpcm_rsc_ref) == 1) {
+ if (lpaif_sec_muxsel_virt_addr != NULL)
+ iowrite32(I2S_PCM_SEL << I2S_PCM_SEL_OFFSET,
+ lpaif_sec_muxsel_virt_addr);
+ else
+ pr_err("%s lpaif_sec_muxsel_virt_addr is NULL\n",
+ __func__);
+ ret = msm_aux_pcm_get_gpios(auxpcm_ctrl);
+ }
+ if (ret < 0) {
+ pr_err("%s: Aux PCM GPIO request failed\n", __func__);
+ return -EINVAL;
+ }
+err:
+ return ret;
+}
+
+static void msm_sec_auxpcm_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct msm8974_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_auxpcm_ctrl *auxpcm_ctrl = NULL;
+
+ pr_debug("%s(): substream = %s, sec_auxpcm_rsc_ref counter = %d\n",
+ __func__, substream->name, atomic_read(&sec_auxpcm_rsc_ref));
+
+ auxpcm_ctrl = pdata->sec_auxpcm_ctrl;
+
+ if (atomic_dec_return(&sec_auxpcm_rsc_ref) == 0)
+ msm_aux_pcm_free_gpios(auxpcm_ctrl);
+}
+
+static struct snd_soc_ops msm_sec_auxpcm_be_ops = {
+ .startup = msm_sec_auxpcm_startup,
+ .shutdown = msm_sec_auxpcm_shutdown,
};
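
The primary and secondary AUX PCM back ends above share one bring-up pattern: a
per-interface atomic reference count so that only the first active substream
programs the LPAIF mux and requests the PCM GPIOs, and only the last one frees
them. A minimal sketch of that pattern, with rsc_ref and muxsel_addr standing
in for the per-interface symbols used above (this is an illustration, not the
driver code itself):

	static atomic_t rsc_ref = ATOMIC_INIT(0);

	static int auxpcm_like_startup(struct msm_auxpcm_ctrl *ctrl,
				       void __iomem *muxsel_addr)
	{
		int ret = 0;

		if (atomic_inc_return(&rsc_ref) == 1) {
			/* first user: switch shared LPAIF pins to PCM mode */
			if (muxsel_addr)
				iowrite32(I2S_PCM_SEL << I2S_PCM_SEL_OFFSET,
					  muxsel_addr);
			ret = msm_aux_pcm_get_gpios(ctrl);
		}
		return ret;
	}

	static void auxpcm_like_shutdown(struct msm_auxpcm_ctrl *ctrl)
	{
		/* last user releases the pins */
		if (atomic_dec_return(&rsc_ref) == 0)
			msm_aux_pcm_free_gpios(ctrl);
	}
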
static int msm_slim_0_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
@@ -1157,10 +1252,11 @@
static const struct soc_enum msm_snd_enum[] = {
SOC_ENUM_SINGLE_EXT(2, spk_function),
SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
- SOC_ENUM_SINGLE_EXT(4, slim0_tx_ch_text),
+ SOC_ENUM_SINGLE_EXT(5, slim0_tx_ch_text),
SOC_ENUM_SINGLE_EXT(7, hdmi_rx_ch_text),
SOC_ENUM_SINGLE_EXT(2, rx_bit_format_text),
- SOC_ENUM_SINGLE_EXT(2, slim0_rx_sample_rate_text),
+ SOC_ENUM_SINGLE_EXT(3, slim0_rx_sample_rate_text),
+ SOC_ENUM_SINGLE_EXT(8, proxy_rx_ch_text),
};
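
The count passed to SOC_ENUM_SINGLE_EXT must match the number of strings in the
corresponding *_text array, which is what the 4 -> 5, 2 -> 3 and new 8-entry
changes above keep in sync. One way to make that coupling explicit (illustrative
only; the real proxy_rx_ch_text strings are defined earlier in the file and the
"One".."Eight" entries here are placeholders):

	static const char *const proxy_rx_ch_text[] = {
		"One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight"
	};

	static const struct soc_enum proxy_rx_ch_enum =
		SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(proxy_rx_ch_text),
				    proxy_rx_ch_text);
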
static const struct snd_kcontrol_new msm_snd_controls[] = {
@@ -1180,6 +1276,8 @@
slim0_rx_sample_rate_get, slim0_rx_sample_rate_put),
SOC_ENUM_EXT("HDMI_RX Bit Format", msm_snd_enum[4],
hdmi_rx_bit_format_get, hdmi_rx_bit_format_put),
+ SOC_ENUM_EXT("PROXY_RX Channels", msm_snd_enum[6],
+ msm_proxy_rx_ch_get, msm_proxy_rx_ch_put),
};
static bool msm8974_swap_gnd_mic(struct snd_soc_codec *codec)
@@ -1247,6 +1345,8 @@
snd_soc_dapm_sync(dapm);
+ codec_clk = clk_get(cpu_dai->dev, "osr_clk");
+
snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch),
tx_ch, ARRAY_SIZE(rx_ch), rx_ch);
@@ -1256,7 +1356,7 @@
if (err) {
pr_err("%s: Failed to set codec registers config %d\n",
__func__, err);
- return err;
+ goto out;
}
config_data = taiko_get_afe_config(codec, AFE_SLIMBUS_SLAVE_CONFIG);
@@ -1264,7 +1364,7 @@
if (err) {
pr_err("%s: Failed to set slimbus slave config %d\n", __func__,
err);
- return err;
+ goto out;
}
config_data = taiko_get_afe_config(codec, AFE_AANC_VERSION);
@@ -1272,16 +1372,23 @@
if (err) {
pr_err("%s: Failed to set aanc version %d\n",
__func__, err);
- return err;
+ goto out;
}
/* start mbhc */
mbhc_cfg.calibration = def_taiko_mbhc_cal();
- if (mbhc_cfg.calibration)
+ if (mbhc_cfg.calibration) {
err = taiko_hs_detect(codec, &mbhc_cfg);
- else
+ if (err)
+ goto out;
+ else
+ return err;
+ } else {
err = -ENOMEM;
-
+ goto out;
+ }
+out:
+ clk_put(codec_clk);
return err;
}
@@ -1820,7 +1927,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_AFE_PCM_RX,
- .be_hw_params_fixup = msm_proxy_be_hw_params_fixup,
+ .be_hw_params_fixup = msm_proxy_rx_be_hw_params_fixup,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -1834,7 +1941,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_AFE_PCM_TX,
- .be_hw_params_fixup = msm_proxy_be_hw_params_fixup,
+ .be_hw_params_fixup = msm_proxy_tx_be_hw_params_fixup,
.ignore_suspend = 1,
},
/* HDMI Hostless */
@@ -1852,7 +1959,7 @@
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
- /* AUX PCM Backend DAI Links */
+ /* Primary AUX PCM Backend DAI Links */
{
.name = LPASS_BE_AUXPCM_RX,
.stream_name = "AUX PCM Playback",
@@ -1863,7 +1970,7 @@
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_AUXPCM_RX,
.be_hw_params_fixup = msm_auxpcm_be_params_fixup,
- .ops = &msm_auxpcm_be_ops,
+ .ops = &msm_pri_auxpcm_be_ops,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
/* this dainlink has playback support */
@@ -1878,9 +1985,39 @@
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_AUXPCM_TX,
.be_hw_params_fixup = msm_auxpcm_be_params_fixup,
- .ops = &msm_auxpcm_be_ops,
+ .ops = &msm_pri_auxpcm_be_ops,
.ignore_suspend = 1,
},
+ /* Secondary AUX PCM Backend DAI Links */
+ {
+ .name = LPASS_BE_SEC_AUXPCM_RX,
+ .stream_name = "Sec AUX PCM Playback",
+ .cpu_dai_name = "msm-dai-q6.4108",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ .be_hw_params_fixup = msm_auxpcm_be_params_fixup,
+ .ops = &msm_sec_auxpcm_be_ops,
+ .ignore_pmdown_time = 1,
+ .ignore_suspend = 1,
+ /* this dainlink has playback support */
+ },
+ {
+ .name = LPASS_BE_SEC_AUXPCM_TX,
+ .stream_name = "Sec AUX PCM Capture",
+ .cpu_dai_name = "msm-dai-q6.4109",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ .be_hw_params_fixup = msm_auxpcm_be_params_fixup,
+ .ops = &msm_sec_auxpcm_be_ops,
+ .ignore_suspend = 1,
+ },
+
/* Backend DAI Links */
{
.name = LPASS_BE_SLIMBUS_0_RX,
@@ -2098,7 +2235,8 @@
};
static int msm8974_dtparse_auxpcm(struct platform_device *pdev,
- struct msm8974_asoc_mach_data **pdata)
+ struct msm_auxpcm_ctrl **auxpcm_ctrl,
+ char *msm_auxpcm_gpio_name[][2])
{
int ret = 0;
int i = 0;
@@ -2106,7 +2244,7 @@
struct msm_auxpcm_ctrl *ctrl;
unsigned int gpio_no[NUM_OF_AUXPCM_GPIOS];
enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
- int prim_cnt = 0;
+ int auxpcm_cnt = 0;
pin_data = devm_kzalloc(&pdev->dev, (ARRAY_SIZE(gpio_no) *
sizeof(struct msm_auxpcm_gpio)),
@@ -2124,13 +2262,13 @@
if (gpio_no[i] > 0) {
pin_data[i].gpio_name =
- msm_auxpcm_gpio_name[prim_cnt][GPIO_NAME_INDEX];
+ msm_auxpcm_gpio_name[auxpcm_cnt][GPIO_NAME_INDEX];
pin_data[i].gpio_no = gpio_no[i];
dev_dbg(&pdev->dev, "%s:GPIO gpio[%s] =\n"
"0x%x\n", __func__,
pin_data[i].gpio_name,
pin_data[i].gpio_no);
- prim_cnt++;
+ auxpcm_cnt++;
} else {
dev_err(&pdev->dev, "%s:Invalid AUXPCM GPIO[%s]= %x\n",
__func__,
@@ -2150,8 +2288,8 @@
}
ctrl->pin_data = pin_data;
- ctrl->cnt = prim_cnt;
- (*pdata)->pri_auxpcm_ctrl = ctrl;
+ ctrl->cnt = auxpcm_cnt;
+ *auxpcm_ctrl = ctrl;
return ret;
err:
@@ -2174,20 +2312,6 @@
}
}
- codec_clk = qpnp_clkdiv_get(card->dev, "taiko-mclk");
- if (IS_ERR(codec_clk)) {
- dev_err(card->dev,
- "%s: Failed to request taiko mclk from pmic %ld\n",
- __func__, PTR_ERR(codec_clk));
- return -ENODEV ;
- }
-
- ret = qpnp_clkdiv_config(codec_clk, Q_CLKDIV_XO_DIV_2);
- if (ret) {
- dev_err(card->dev, "%s: Failed to set taiko mclk to %u\n",
- __func__, pdata->mclk_gpio);
- return ret;
- }
return 0;
}
@@ -2215,6 +2339,7 @@
struct snd_soc_card *card = &snd_soc_card_msm8974;
struct msm8974_asoc_mach_data *pdata;
int ret;
+ const char *auxpcm_pri_gpio_set = NULL;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No platform supplied from device tree\n");
@@ -2228,10 +2353,21 @@
return -ENOMEM;
}
- ret = msm8974_dtparse_auxpcm(pdev, &pdata);
+ /* Parse Primary AUXPCM info from DT */
+ ret = msm8974_dtparse_auxpcm(pdev, &pdata->pri_auxpcm_ctrl,
+ msm_prim_auxpcm_gpio_name);
if (ret) {
dev_err(&pdev->dev,
- "%s: Auxpcm pin data parse failed\n", __func__);
+ "%s: Primary Auxpcm pin data parse failed\n", __func__);
+ goto err;
+ }
+
+ /* Parse Secondary AUXPCM info from DT */
+ ret = msm8974_dtparse_auxpcm(pdev, &pdata->sec_auxpcm_ctrl,
+ msm_sec_auxpcm_gpio_name);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: Secondary Auxpcm pin data parse failed\n", __func__);
goto err;
}
@@ -2316,7 +2452,8 @@
ret);
mutex_init(&cdc_mclk_mutex);
- atomic_set(&auxpcm_rsc_ref, 0);
+ atomic_set(&prim_auxpcm_rsc_ref, 0);
+ atomic_set(&sec_auxpcm_rsc_ref, 0);
spdev = pdev;
ext_spk_amp_regulator = NULL;
msm8974_liquid_dock_dev = NULL;
@@ -2328,12 +2465,35 @@
goto err;
}
- lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_PRI_MODE_MUXSEL, 4);
+ ret = of_property_read_string(pdev->dev.of_node,
+ "qcom,prim-auxpcm-gpio-set", &auxpcm_pri_gpio_set);
+ if (ret) {
+ dev_err(&pdev->dev, "Looking up %s property in node %s failed",
+ "qcom,prim-auxpcm-gpio-set",
+ pdev->dev.of_node->full_name);
+ goto err;
+ }
+ if (!strcmp(auxpcm_pri_gpio_set, "prim-gpio-prim")) {
+ lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_PRI_MODE_MUXSEL, 4);
+ } else if (!strcmp(auxpcm_pri_gpio_set, "prim-gpio-tert")) {
+ lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_TER_MODE_MUXSEL, 4);
+ } else {
+ dev_err(&pdev->dev, "Invalid value %s for AUXPCM GPIO set\n",
+ auxpcm_pri_gpio_set);
+ ret = -EINVAL;
+ goto err;
+ }
if (lpaif_pri_muxsel_virt_addr == NULL) {
pr_err("%s Pri muxsel virt addr is null\n", __func__);
ret = -EINVAL;
goto err;
}
+ lpaif_sec_muxsel_virt_addr = ioremap(LPAIF_SEC_MODE_MUXSEL, 4);
+ if (lpaif_sec_muxsel_virt_addr == NULL) {
+ pr_err("%s Sec muxsel virt addr is null\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
return 0;
err:
if (pdata->mclk_gpio > 0) {
@@ -2381,6 +2541,7 @@
}
iounmap(lpaif_pri_muxsel_virt_addr);
+ iounmap(lpaif_sec_muxsel_virt_addr);
snd_soc_unregister_card(card);
return 0;
diff --git a/sound/soc/msm/qdsp6/q6voice.c b/sound/soc/msm/qdsp6/q6voice.c
index 17f2d03..bb13695 100644
--- a/sound/soc/msm/qdsp6/q6voice.c
+++ b/sound/soc/msm/qdsp6/q6voice.c
@@ -73,6 +73,7 @@
static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv);
static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv);
static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv);
+static int voice_send_set_device_cmd_v2(struct voice_data *v);
static u16 voice_get_mvm_handle(struct voice_data *v)
{
@@ -1380,6 +1381,80 @@
return -EINVAL;
}
+static int voice_send_set_device_cmd_v2(struct voice_data *v)
+{
+ struct cvp_set_device_cmd_v2 cvp_setdev_cmd_v2;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* set device and wait for response */
+ cvp_setdev_cmd_v2.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_setdev_cmd_v2.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_setdev_cmd_v2) - APR_HDR_SIZE);
+ cvp_setdev_cmd_v2.hdr.src_port = v->session_id;
+ cvp_setdev_cmd_v2.hdr.dest_port = cvp_handle;
+ cvp_setdev_cmd_v2.hdr.token = 0;
+ cvp_setdev_cmd_v2.hdr.opcode = VSS_IVOCPROC_CMD_SET_DEVICE_V2;
+
+ voc_get_tx_rx_topology(v,
+ &cvp_setdev_cmd_v2.cvp_set_device_v2.tx_topology_id,
+ &cvp_setdev_cmd_v2.cvp_set_device_v2.rx_topology_id);
+
+ cvp_setdev_cmd_v2.cvp_set_device_v2.tx_port_id = v->dev_tx.port_id;
+ cvp_setdev_cmd_v2.cvp_set_device_v2.rx_port_id = v->dev_rx.port_id;
+ if (common.ec_ref_ext == true) {
+ cvp_setdev_cmd_v2.cvp_set_device_v2.vocproc_mode =
+ VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING;
+ cvp_setdev_cmd_v2.cvp_set_device_v2.ec_ref_port_id =
+ common.ec_port_id;
+ } else {
+ cvp_setdev_cmd_v2.cvp_set_device_v2.vocproc_mode =
+ VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING;
+ cvp_setdev_cmd_v2.cvp_set_device_v2.ec_ref_port_id =
+ VSS_IVOCPROC_PORT_ID_NONE;
+ }
+ pr_debug("%s:topology=%d , tx_port_id=%d, rx_port_id=%d\n"
+ "ec_ref_port_id = %x\n", __func__,
+ cvp_setdev_cmd_v2.cvp_set_device_v2.tx_topology_id,
+ cvp_setdev_cmd_v2.cvp_set_device_v2.tx_port_id,
+ cvp_setdev_cmd_v2.cvp_set_device_v2.rx_port_id,
+ cvp_setdev_cmd_v2.cvp_set_device_v2.ec_ref_port_id);
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_setdev_cmd_v2);
+ if (ret < 0) {
+ pr_err("Fail in sending VOCPROC_FULL_CONTROL_SESSION\n");
+ goto fail;
+ }
+ pr_debug("wait for cvp create session event\n");
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
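
voice_send_set_device_cmd_v2() above follows the same request/response
handshake every CVP command in this file uses: mark the session state as
failed, fire the packed command over APR, then sleep until the CVP callback
flips the state or the timeout expires. Condensed, with cmd standing for
whichever packed command structure is being sent:

	v->cvp_state = CMD_STATUS_FAIL;		/* callback sets SUCCESS */
	ret = apr_send_pkt(apr_cvp, (uint32_t *)&cmd);
	if (ret < 0)
		return -EINVAL;			/* send itself failed */
	ret = wait_event_timeout(v->cvp_wait,
				 v->cvp_state == CMD_STATUS_SUCCESS,
				 msecs_to_jiffies(TIMEOUT_MS));
	if (!ret)
		return -EINVAL;			/* no response in time */
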
+
static int voice_send_stop_voice_cmd(struct voice_data *v)
{
struct apr_hdr mvm_stop_voice_cmd;
@@ -2277,7 +2352,13 @@
pr_err("%s: wait_event timeout\n", __func__);
goto fail;
}
-
+ if (common.ec_ref_ext == true) {
+ ret = voice_send_set_device_cmd_v2(v);
+ if (ret < 0) {
+ pr_err("%s: set device V2 failed rc =%x\n",
+ __func__, ret);
+ goto fail;
+ }
+ }
/* send cvs cal */
ret = voice_send_cvs_map_memory_cmd(v);
if (!ret)
@@ -3242,7 +3323,8 @@
voice_send_cvp_deregister_cal_cmd(v);
voice_get_cal_paddr_size(v, &paddr, NULL);
voice_send_cvp_unmap_memory_cmd(v, paddr);
-
+ if (common.ec_ref_ext == true)
+ voc_set_ext_ec_ref(AFE_PORT_INVALID, false);
v->voc_state = VOC_CHANGE;
}
@@ -3268,10 +3350,19 @@
mutex_lock(&v->lock);
if (v->voc_state == VOC_CHANGE) {
- ret = voice_send_set_device_cmd(v);
- if (ret < 0) {
- pr_err("%s: set device failed\n", __func__);
- goto fail;
+ if (common.ec_ref_ext == true) {
+ ret = voice_send_set_device_cmd_v2(v);
+ if (ret < 0)
+ pr_err("%s: set device V2 failed\n"
+ "rc =%x\n", __func__, ret);
+ goto fail;
+ } else {
+ ret = voice_send_set_device_cmd(v);
+ if (ret < 0) {
+ pr_err("%s: set device failed rc=%x\n",
+ __func__, ret);
+ goto fail;
+ }
}
/* send cvp and vol cal */
if (!voice_get_cal_paddr_size(v, &cal_paddr, &cal_size) &&
@@ -3675,7 +3766,8 @@
if (ret < 0)
pr_err("%s: destroy voice failed\n", __func__);
voice_destroy_mvm_cvs_session(v);
-
+ if (common.ec_ref_ext == true)
+ voc_set_ext_ec_ref(AFE_PORT_INVALID, false);
v->voc_state = VOC_RELEASE;
}
mutex_unlock(&v->lock);
@@ -3845,6 +3937,28 @@
return ret;
}
+int voc_set_ext_ec_ref(uint16_t port_id, bool state)
+{
+ int ret = 0;
+
+ mutex_lock(&common.common_lock);
+ if (state == true) {
+ if (port_id == AFE_PORT_INVALID) {
+ pr_err("%s: Invalid port id", __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+ common.ec_port_id = port_id;
+ common.ec_ref_ext = true;
+ } else {
+ common.ec_ref_ext = false;
+ common.ec_port_id = port_id;
+ }
+fail:
+ mutex_unlock(&common.common_lock);
+ return ret;
+}
+
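
voc_set_ext_ec_ref() is the switch the rest of this change keys off: once a
caller latches an external EC reference port, device setup goes through
VSS_IVOCPROC_CMD_SET_DEVICE_V2 with EC_EXT_MIXING, and the disable/release
paths above reset it. A hypothetical caller (the routing code that would own
this is not part of this diff, and the port chosen here is only an example)
might drive it like:

	uint16_t ec_port = AFE_PORT_ID_SECONDARY_PCM_TX;	/* example only */

	if (voc_set_ext_ec_ref(ec_port, true))
		pr_err("failed to latch external EC reference port\n");
	/* ... device change now carries ec_port in set-device V2 ... */
	voc_set_ext_ec_ref(AFE_PORT_INVALID, false);	/* back to internal EC */
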
void voc_register_mvs_cb(ul_cb_fn ul_cb,
dl_cb_fn dl_cb,
void *private_data)
@@ -4201,6 +4315,7 @@
v->cvp_state = CMD_STATUS_SUCCESS;
wake_up(&v->cvp_wait);
break;
+ case VSS_IVOCPROC_CMD_SET_DEVICE_V2:
case VSS_IVOCPROC_CMD_SET_DEVICE:
case VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX:
case VSS_IVOCPROC_CMD_ENABLE:
@@ -4517,6 +4632,7 @@
common.default_mute_val = 0; /* default is un-mute */
common.default_vol_val = 0;
common.default_sample_val = 8000;
+ common.ec_ref_ext = false;
/* Initialize MVS info. */
common.mvs_info.network_type = VSS_NETWORK_ID_DEFAULT;
diff --git a/sound/soc/msm/qdsp6/q6voice.h b/sound/soc/msm/qdsp6/q6voice.h
index 0bae384..7463a5f 100644
--- a/sound/soc/msm/qdsp6/q6voice.h
+++ b/sound/soc/msm/qdsp6/q6voice.h
@@ -903,6 +903,8 @@
#define VSS_IVOCPROC_CMD_SET_DEVICE 0x000100C4
+#define VSS_IVOCPROC_CMD_SET_DEVICE_V2 0x000112C6
+
#define VSS_IVOCPROC_CMD_SET_VP3_DATA 0x000110EB
#define VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX 0x000110EE
@@ -958,6 +960,9 @@
#define VOICE_CMD_GET_PARAM 0x00011007
#define VOICE_EVT_GET_PARAM_ACK 0x00011008
+/* Default AFE port ID. Applicable to Tx and Rx. */
+#define VSS_IVOCPROC_PORT_ID_NONE 0xFFFF
+
struct vss_ivocproc_cmd_create_full_control_session_t {
uint16_t direction;
/*
@@ -1027,6 +1032,32 @@
*/
} __packed;
+/* Internal EC */
+#define VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING 0x00010F7C
+
+/* External EC */
+#define VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING 0x00010F7D
+
+struct vss_ivocproc_cmd_set_device_v2_t {
+ uint16_t tx_port_id;
+ /* Tx device port ID to which the vocproc connects. */
+ uint32_t tx_topology_id;
+ /* Tx path topology ID. */
+ uint16_t rx_port_id;
+ /* Rx device port ID to which the vocproc connects. */
+ uint32_t rx_topology_id;
+ /* Rx path topology ID. */
+ uint32_t vocproc_mode;
+ /* Vocproc mode. The supported values:
+ * VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING - 0x00010F7C
+ * VSS_IVOCPROC_VOCPROC_MODE_EC_EXT_MIXING - 0x00010F7D
+ */
+ uint16_t ec_ref_port_id;
+ /* Port ID to which the vocproc connects for receiving
+ * echo cancellation reference signal.
+ */
+} __packed;
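
Since vss_ivocproc_cmd_set_device_v2_t is __packed and mixes 16- and 32-bit
fields, its wire size works out to 18 bytes (2 + 4 + 2 + 4 + 4 + 2). A
compile-time guard along these lines (not part of the patch, placed in any
function that builds the command) would catch accidental padding if the
attribute were ever dropped:

	BUILD_BUG_ON(sizeof(struct vss_ivocproc_cmd_set_device_v2_t) != 18);
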
+
struct vss_ivocproc_cmd_register_calibration_data_t {
uint32_t phys_addr;
/* Phsical address to be registered with vocproc. Calibration data
@@ -1076,6 +1107,11 @@
struct vss_ivocproc_cmd_set_device_t cvp_set_device;
} __packed;
+struct cvp_set_device_cmd_v2 {
+ struct apr_hdr hdr;
+ struct vss_ivocproc_cmd_set_device_v2_t cvp_set_device_v2;
+} __packed;
+
struct cvp_set_vp3_data_cmd {
struct apr_hdr hdr;
} __packed;
@@ -1227,6 +1263,8 @@
uint32_t default_mute_val;
uint32_t default_vol_val;
uint32_t default_sample_val;
+ bool ec_ref_ext;
+ uint16_t ec_port_id;
/* APR to MVM in the Q6 */
void *apr_q6_mvm;
@@ -1325,4 +1363,5 @@
int voc_start_playback(uint32_t set);
int voc_start_record(uint32_t port_id, uint32_t set);
+int voc_set_ext_ec_ref(uint16_t port_id, bool state);
#endif
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index 391b3da..f3dcf95 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -5,6 +5,7 @@
msm-lsm-client.o
obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o msm-pcm-dtmf-v2.o \
msm-dai-stub-v2.o
+obj-$(CONFIG_DOLBY_DAP) += msm-dolby-dap-config.o
obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o q6voice.o q6core.o audio_acdb.o \
rtac.o q6lsm.o
ocmem-audio-objs += audio_ocmem.o
diff --git a/sound/soc/msm/qdsp6v2/audio_acdb.c b/sound/soc/msm/qdsp6v2/audio_acdb.c
index 16d6e81..a2e0b87 100644
--- a/sound/soc/msm/qdsp6v2/audio_acdb.c
+++ b/sound/soc/msm/qdsp6v2/audio_acdb.c
@@ -18,6 +18,7 @@
#include <linux/uaccess.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
+#include <linux/msm_audio_ion.h>
#include "audio_acdb.h"
@@ -870,9 +871,7 @@
kfree(acdb_data.col_data[i]);
acdb_data.col_data[i] = NULL;
}
- ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
- ion_free(acdb_data.ion_client, acdb_data.ion_handle);
- ion_client_destroy(acdb_data.ion_client);
+ msm_audio_ion_free(acdb_data.ion_client, acdb_data.ion_handle);
mutex_unlock(&acdb_data.acdb_mutex);
}
return 0;
@@ -894,34 +893,16 @@
(uint32_t)acdb_data.col_data[i]);
}
- acdb_data.ion_client =
- msm_ion_client_create(UINT_MAX, "audio_acdb_client");
- if (IS_ERR_OR_NULL(acdb_data.ion_client)) {
- pr_err("%s: Could not register ION client!!!\n", __func__);
+ result = msm_audio_ion_import("audio_acdb_client",
+ &acdb_data.ion_client,
+ &acdb_data.ion_handle,
+ atomic_read(&acdb_data.map_handle),
+ NULL, 0,
+ &paddr, (size_t *)&mem_len, &kvptr);
+ if (result) {
+ pr_err("%s: audio ION alloc failed, rc = %d\n",
+ __func__, result);
result = PTR_ERR(acdb_data.ion_client);
- goto err;
- }
-
- acdb_data.ion_handle = ion_import_dma_buf(acdb_data.ion_client,
- atomic_read(&acdb_data.map_handle));
- if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
- pr_err("%s: Could not import map handle!!!\n", __func__);
- result = PTR_ERR(acdb_data.ion_handle);
- goto err_ion_client;
- }
-
- result = ion_phys(acdb_data.ion_client, acdb_data.ion_handle,
- &paddr, (size_t *)&mem_len);
- if (result != 0) {
- pr_err("%s: Could not get phys addr!!!\n", __func__);
- goto err_ion_handle;
- }
-
- kvptr = ion_map_kernel(acdb_data.ion_client,
- acdb_data.ion_handle);
- if (IS_ERR_OR_NULL(kvptr)) {
- pr_err("%s: Could not get kernel virt addr!!!\n", __func__);
- result = PTR_ERR(kvptr);
goto err_ion_handle;
}
kvaddr = (unsigned long)kvptr;
@@ -938,10 +919,8 @@
return result;
err_ion_handle:
- ion_free(acdb_data.ion_client, acdb_data.ion_handle);
-err_ion_client:
- ion_client_destroy(acdb_data.ion_client);
-err:
+ msm_audio_ion_free(acdb_data.ion_client, acdb_data.ion_handle);
+
atomic64_set(&acdb_data.mem_len, 0);
mutex_unlock(&acdb_data.acdb_mutex);
return result;
@@ -1064,7 +1043,7 @@
goto done;
}
- if (size <= 0) {
+ if ((size <= 0) || (size > sizeof(data))) {
pr_err("%s: Invalid size sent to driver: %d\n",
__func__, size);
result = -EFAULT;
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index c14cb74..08d7277 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -20,18 +20,58 @@
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/of_device.h>
+#include <linux/memory_alloc.h>
#include <asm/mach-types.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <mach/ocmem.h>
+#include <mach/subsystem_notif.h>
+#include <mach/subsystem_restart.h>
+#include <mach/msm_memtypes.h>
+#include <mach/ramdump.h>
#include "q6core.h"
#include "audio_ocmem.h"
+
#define AUDIO_OCMEM_BUF_SIZE (512 * SZ_1K)
+/**
+ * Exercise OCMEM Dump if audio OCMEM state is
+ * one of the following. All other states indicate
+ * audio data is not mapped from DDR to OCMEM and
+ * therefore no need of dump.
+ */
+#define _DO_OCMEM_DUMP_BIT_MASK_\
+ ((1 << OCMEM_STATE_MAP_COMPL) |\
+ (1 << OCMEM_STATE_MAP_TRANSITION) |\
+ (1 << OCMEM_STATE_UNMAP_TRANSITION) |\
+ (1 << OCMEM_STATE_SHRINK) |\
+ (1 << OCMEM_STATE_GROW))
+
+/**
+ * Wait for OCMEM driver to process and respond for
+ * ongoing map/unmap request before calling OCMEM dump.
+ */
+#define _WAIT_BFR_DUMP_BIT_MASK_\
+ ((1 << OCMEM_STATE_MAP_COMPL) |\
+ (1 << OCMEM_STATE_UNMAP_COMPL) |\
+ (1 << OCMEM_STATE_MAP_FAIL) |\
+ (1 << OCMEM_STATE_UNMAP_FAIL))
+
+#define _MAP_RESPONSE_BIT_MASK_\
+ ((1 << OCMEM_STATE_MAP_COMPL) |\
+ (1 << OCMEM_STATE_MAP_FAIL))
+
+
+#define _UNMAP_RESPONSE_BIT_MASK_\
+ ((1 << OCMEM_STATE_UNMAP_COMPL) |\
+ (1 << OCMEM_STATE_UNMAP_FAIL))
+
#define _BIT_MASK_\
- ((1 << OCMEM_STATE_EXIT) |\
+ ((1 << OCMEM_STATE_SSR) |\
+ (1 << OCMEM_STATE_EXIT) |\
(1 << OCMEM_STATE_GROW) |\
(1 << OCMEM_STATE_SHRINK))
@@ -85,10 +125,14 @@
atomic_t audio_cond;
atomic_t audio_exit;
spinlock_t audio_lock;
- struct mutex protect_lock;
+ struct mutex state_process_lock;
struct workqueue_struct *audio_ocmem_workqueue;
struct workqueue_struct *voice_ocmem_workqueue;
bool ocmem_en;
+ bool audio_ocmem_running;
+ void *ocmem_ramdump_dev;
+ struct ramdump_segment ocmem_ramdump_segment;
+ unsigned long ocmem_dump_addr;
};
static struct audio_ocmem_prv audio_ocmem_lcl;
@@ -114,7 +158,9 @@
break;
case OCMEM_MAP_FAIL:
pr_debug("%s: map fail\n", __func__);
- atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_FAIL);
+ clear_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_MAP_TRANSITION);
+ set_bit_pos(audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_FAIL);
break;
case OCMEM_UNMAP_DONE:
pr_debug("%s: unmap done\n", __func__);
@@ -125,8 +171,10 @@
break;
case OCMEM_UNMAP_FAIL:
pr_debug("%s: unmap fail\n", __func__);
- atomic_set(&audio_ocmem_lcl.audio_state,
- OCMEM_STATE_UNMAP_FAIL);
+ clear_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_TRANSITION);
+ set_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_FAIL);
break;
case OCMEM_ALLOC_GROW:
rbuf = data;
@@ -170,6 +218,9 @@
} else if (test_bit_pos((*state), OCMEM_STATE_EXIT)) {
pr_debug("%s: returning exit state\n", __func__);
return OCMEM_STATE_EXIT;
+ } else if (test_bit_pos((*state), OCMEM_STATE_SSR)) {
+ pr_debug("%s: returning ssr state\n", __func__);
+ return OCMEM_STATE_SSR;
} else
return -EINVAL;
@@ -201,6 +252,7 @@
if (ret != 0) {
pr_err("%s: get low power segments from DSP failed, rc=%d\n",
__func__, ret);
+ mutex_unlock(&audio_ocmem_lcl.state_process_lock);
goto fail_cmd;
}
}
@@ -222,7 +274,9 @@
buf = ocmem_allocate_nb(cid, AUDIO_OCMEM_BUF_SIZE);
if (IS_ERR_OR_NULL(buf)) {
pr_err("%s: failed: %d\n", __func__, cid);
- return -ENOMEM;
+ ret = -ENOMEM;
+ mutex_unlock(&audio_ocmem_lcl.state_process_lock);
+ goto fail_cmd;
}
set_bit_pos(audio_ocmem_lcl.audio_state, OCMEM_STATE_ALLOC);
@@ -234,7 +288,7 @@
if (!buf->len) {
pr_debug("%s: buf.len is 0, waiting for ocmem region\n",
__func__);
- mutex_unlock(&audio_ocmem_lcl.protect_lock);
+ mutex_unlock(&audio_ocmem_lcl.state_process_lock);
wait_event_interruptible(audio_ocmem_lcl.audio_wait,
(atomic_read(&audio_ocmem_lcl.audio_cond) == 0) ||
(atomic_read(&audio_ocmem_lcl.audio_exit) == 1));
@@ -251,7 +305,7 @@
goto fail_cmd;
}
clear_bit_pos(audio_ocmem_lcl.audio_state, OCMEM_STATE_GROW);
- mutex_trylock(&audio_ocmem_lcl.protect_lock);
+ mutex_trylock(&audio_ocmem_lcl.state_process_lock);
}
pr_debug("%s: buf->len: %ld\n", __func__, (audio_ocmem_lcl.buf)->len);
@@ -278,11 +332,11 @@
}
wait_event_interruptible(audio_ocmem_lcl.audio_wait,
- test_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_MAP_COMPL) != 0);
+ (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _MAP_RESPONSE_BIT_MASK_) != 0);
atomic_set(&audio_ocmem_lcl.audio_cond, 1);
- mutex_unlock(&audio_ocmem_lcl.protect_lock);
+ mutex_unlock(&audio_ocmem_lcl.state_process_lock);
pr_debug("%s: audio_cond[%d] audio_state[0x%x]\n", __func__,
atomic_read(&audio_ocmem_lcl.audio_cond),
atomic_read(&audio_ocmem_lcl.audio_state));
@@ -293,7 +347,7 @@
wait_event_interruptible(audio_ocmem_lcl.audio_wait,
(atomic_read(&audio_ocmem_lcl.audio_state) &
_BIT_MASK_) != 0);
-
+ mutex_lock(&audio_ocmem_lcl.state_process_lock);
state_bit = get_state_to_process(&audio_ocmem_lcl.audio_state);
switch (state_bit) {
case OCMEM_STATE_MAP_COMPL:
@@ -322,8 +376,9 @@
}
wait_event_interruptible(audio_ocmem_lcl.audio_wait,
- test_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_UNMAP_COMPL) != 0);
+ (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _UNMAP_RESPONSE_BIT_MASK_)
+ != 0);
ret = ocmem_shrink(cid, audio_ocmem_lcl.buf, 0);
if (ret) {
pr_err("%s: ocmem_shrink failed, state[%d]\n",
@@ -353,8 +408,8 @@
goto fail_cmd;
}
wait_event_interruptible(audio_ocmem_lcl.audio_wait,
- test_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_MAP_COMPL) != 0);
+ (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _MAP_RESPONSE_BIT_MASK_) != 0);
clear_bit_pos(audio_ocmem_lcl.audio_state,
OCMEM_STATE_GROW);
@@ -377,8 +432,8 @@
}
wait_event_interruptible(
audio_ocmem_lcl.audio_wait,
- test_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_UNMAP_COMPL) != 0);
+ (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _UNMAP_RESPONSE_BIT_MASK_) != 0);
}
if (test_bit_pos(audio_ocmem_lcl.audio_state,
@@ -434,14 +489,15 @@
goto fail_cmd;
}
pr_debug("%s: ocmem_free success\n", __func__);
+ /* Fall through */
+ case OCMEM_STATE_SSR:
msm_bus_scale_client_update_request(
audio_ocmem_lcl.audio_ocmem_bus_client,
0);
set_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_DISABLE);
+ OCMEM_STATE_DISABLE);
break;
-
case -EINVAL:
pr_info("%s: audio_cond[%d] audio_state[0x%x]\n",
__func__,
@@ -449,10 +505,12 @@
atomic_read(&audio_ocmem_lcl.audio_state));
break;
}
+ mutex_unlock(&audio_ocmem_lcl.state_process_lock);
}
ret = 0;
fail_cmd:
pr_debug("%s: exit\n", __func__);
+ audio_ocmem_lcl.audio_ocmem_running = false;
return ret;
}
@@ -470,11 +528,14 @@
pr_debug("%s: audio_cond[0x%x], audio_state[0x%x]\n", __func__,
atomic_read(&audio_ocmem_lcl.audio_cond),
atomic_read(&audio_ocmem_lcl.audio_state));
- set_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_EXIT);
+ if (!test_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_SSR))
+ set_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_EXIT);
+
wake_up(&audio_ocmem_lcl.audio_wait);
- mutex_unlock(&audio_ocmem_lcl.protect_lock);
+ mutex_unlock(&audio_ocmem_lcl.state_process_lock);
pr_debug("%s: exit\n", __func__);
return 0;
}
@@ -597,7 +658,7 @@
container_of(work, struct audio_ocmem_workdata, work);
en = audio_ocm_work->en;
- mutex_lock(&audio_ocmem_lcl.protect_lock);
+ mutex_lock(&audio_ocmem_lcl.state_process_lock);
/* if previous work waiting for ocmem - signal it to exit */
atomic_set(&audio_ocmem_lcl.audio_exit, 1);
pr_debug("%s: acquired mutex for %d\n", __func__, en);
@@ -652,6 +713,7 @@
}
workdata->id = id;
workdata->en = enable;
+ audio_ocmem_lcl.audio_ocmem_running = true;
INIT_WORK(&workdata->work, audio_ocmem_process_workdata);
queue_work(audio_ocmem_lcl.audio_ocmem_workqueue,
@@ -684,12 +746,130 @@
return 0;
}
+
+static void do_ocmem_ramdump(void)
+{
+ int ret = 0;
+ void *virt = NULL;
+
+ virt = ioremap(audio_ocmem_lcl.ocmem_dump_addr, AUDIO_OCMEM_BUF_SIZE);
+ ret = ocmem_dump(OCMEM_LP_AUDIO,
+ audio_ocmem_lcl.buf,
+ (unsigned long)virt);
+ iounmap(virt);
+
+ if (ret)
+ pr_err("%s: ocmem_dump failed\n", __func__);
+
+ audio_ocmem_lcl.ocmem_ramdump_segment.address
+ = (unsigned long)audio_ocmem_lcl.ocmem_dump_addr;
+ audio_ocmem_lcl.ocmem_ramdump_segment.size
+ = AUDIO_OCMEM_BUF_SIZE;
+ ret = do_ramdump(audio_ocmem_lcl.ocmem_ramdump_dev,
+ &audio_ocmem_lcl.ocmem_ramdump_segment,
+ 1);
+ if (ret < 0)
+ pr_err("%s: do_ramdump failed\n", __func__);
+}
+
+static void process_ocmem_dump(void)
+{
+ int ret = 0;
+
+ set_bit_pos(audio_ocmem_lcl.audio_state, OCMEM_STATE_SSR);
+
+ if (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _DO_OCMEM_DUMP_BIT_MASK_) {
+
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _WAIT_BFR_DUMP_BIT_MASK_) != 0);
+
+ if (test_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_MAP_COMPL) ||
+ test_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_FAIL)) {
+
+ if (audio_ocmem_lcl.ocmem_dump_addr &&
+ audio_ocmem_lcl.ocmem_ramdump_dev)
+ do_ocmem_ramdump();
+ else
+ pr_err("%s: Error calling ocmem ramdump\n",
+ __func__);
+
+ ret = ocmem_drop(OCMEM_LP_AUDIO, audio_ocmem_lcl.buf,
+ &audio_ocmem_lcl.mlist);
+ if (ret)
+ pr_err("%s: ocmem_drop failed\n", __func__);
+ }
+ }
+
+ ret = ocmem_free(OCMEM_LP_AUDIO, audio_ocmem_lcl.buf);
+ if (ret)
+ pr_err("%s: ocmem_free failed\n", __func__);
+}
+
+static int lpass_notifier_cb(struct notifier_block *this, unsigned long code,
+ void *_cmd)
+{
+ int ret = NOTIFY_DONE;
+
+ switch (code) {
+ case SUBSYS_BEFORE_SHUTDOWN:
+ pr_debug("AO-Notify: Shutdown started\n");
+ break;
+ case SUBSYS_AFTER_SHUTDOWN:
+ pr_debug("AO-Notify: Shutdown Completed\n");
+ break;
+ case SUBSYS_RAMDUMP_NOTIFICATION:
+ pr_debug("AO-Notify: OCMEM dump\n");
+ if (audio_ocmem_lcl.ocmem_en &&
+ audio_ocmem_lcl.audio_ocmem_running)
+ process_ocmem_dump();
+ pr_debug("AO-Notify: OCMEM dump done\n");
+ break;
+ case SUBSYS_BEFORE_POWERUP:
+ pr_debug("AO-Notify: Powerup started\n");
+ break;
+ case SUBSYS_AFTER_POWERUP:
+ pr_debug("AO-Notify: Powerup completed\n");
+ break;
+ default:
+ pr_err("AO-Notify: Generel: %lu\n", code);
+ break;
+ }
+ return ret;
+}
+
+static struct notifier_block anb = {
+ .notifier_call = lpass_notifier_cb,
+};
+
static int ocmem_audio_client_probe(struct platform_device *pdev)
{
int ret;
struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
pr_debug("%s\n", __func__);
+
+ subsys_notif_register_notifier("adsp", &anb);
+
+ audio_ocmem_lcl.ocmem_dump_addr =
+ allocate_contiguous_memory_nomap(AUDIO_OCMEM_BUF_SIZE,
+ MEMTYPE_EBI1,
+ AUDIO_OCMEM_BUF_SIZE);
+
+ if (audio_ocmem_lcl.ocmem_dump_addr) {
+ audio_ocmem_lcl.ocmem_ramdump_dev =
+ create_ramdump_device("audio-ocmem", &pdev->dev);
+
+ if (!audio_ocmem_lcl.ocmem_ramdump_dev)
+ pr_err("%s: audio-ocmem ramdump device failed\n",
+ __func__);
+ } else {
+ pr_err("%s: ocmem dump memory alloc failed\n", __func__);
+ }
+
audio_ocmem_lcl.audio_ocmem_workqueue =
alloc_workqueue("ocmem_audio_client_driver_audio",
WQ_NON_REENTRANT | WQ_UNBOUND, 0);
@@ -713,8 +893,9 @@
atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_DEFAULT);
atomic_set(&audio_ocmem_lcl.audio_exit, 0);
spin_lock_init(&audio_ocmem_lcl.audio_lock);
- mutex_init(&audio_ocmem_lcl.protect_lock);
+ mutex_init(&audio_ocmem_lcl.state_process_lock);
audio_ocmem_lcl.ocmem_en = true;
+ audio_ocmem_lcl.audio_ocmem_running = false;
/* populate platform data */
ret = audio_ocmem_platform_data_populate(pdev);
@@ -753,6 +934,7 @@
msm_bus_cl_clear_pdata(audio_ocmem_bus_scale_pdata);
ocmem_notifier_unregister(audio_ocmem_lcl.audio_hdl,
&audio_ocmem_client_nb);
+ free_contiguous_memory_by_paddr(audio_ocmem_lcl.ocmem_dump_addr);
return 0;
}
static const struct of_device_id msm_ocmem_audio_dt_match[] = {
@@ -771,11 +953,9 @@
.remove = ocmem_audio_client_remove,
};
-
static int __init ocmem_audio_client_init(void)
{
int rc;
-
rc = platform_driver_register(&audio_ocmem_driver);
if (rc)
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
index 5dc5f96..9359ed7 100644
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -321,6 +321,23 @@
}
}
+static int msm_compr_send_ddp_cfg(struct audio_client *ac,
+ struct snd_dec_ddp *ddp)
+{
+ int i, rc;
+ pr_debug("%s\n", __func__);
+ for (i = 0; i < ddp->params_length/2; i++) {
+ rc = q6asm_ds1_set_endp_params(ac, ddp->params_id[i],
+ ddp->params_value[i]);
+ if (rc) {
+ pr_err("sending params_id: %d failed\n",
+ ddp->params_id[i]);
+ return rc;
+ }
+ }
+ return 0;
+}
+
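
The DDP options reach the driver from user space as one flat array of
interleaved (parameter id, value) integers; the SNDRV_COMPRESS_SET_PARAMS
handler further down splits them into ddp->params_id[] / ddp->params_value[],
and msm_compr_send_ddp_cfg() above then issues one q6asm_ds1_set_endp_params()
call per pair. A sketch of how such an array pairs up (the ids and values here
are placeholders, not real DDP parameter ids):

	int raw[] = { 0x101, 1,		/* params_id[0], params_value[0] */
		      0x102, 48000 };	/* params_id[1], params_value[1] */
	int pairs = ARRAY_SIZE(raw) / 2;	/* == ddp->params_length / 2 */
	int i;

	for (i = 0; i < pairs; i++)
		pr_debug("pair %d: id 0x%x value %d\n",
			 i, raw[2 * i], raw[2 * i + 1]);
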
static int msm_compr_playback_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -358,6 +375,24 @@
if (ret < 0)
pr_err("%s: CMD Format block failed\n", __func__);
break;
+ case SND_AUDIOCODEC_AC3: {
+ struct snd_dec_ddp *ddp =
+ &compr->info.codec_param.codec.options.ddp;
+ pr_debug("%s: SND_AUDIOCODEC_AC3\n", __func__);
+ ret = msm_compr_send_ddp_cfg(prtd->audio_client, ddp);
+ if (ret < 0)
+ pr_err("%s: DDP CMD CFG failed\n", __func__);
+ break;
+ }
+ case SND_AUDIOCODEC_EAC3: {
+ struct snd_dec_ddp *ddp =
+ &compr->info.codec_param.codec.options.ddp;
+ pr_debug("%s: SND_AUDIOCODEC_EAC3\n", __func__);
+ ret = msm_compr_send_ddp_cfg(prtd->audio_client, ddp);
+ if (ret < 0)
+ pr_err("%s: DDP CMD CFG failed\n", __func__);
+ break;
+ }
default:
return -EINVAL;
}
@@ -511,13 +546,15 @@
{
pr_debug("%s\n", __func__);
/* MP3 Block */
- compr->info.compr_cap.num_codecs = 2;
+ compr->info.compr_cap.num_codecs = 4;
compr->info.compr_cap.min_fragment_size = runtime->hw.period_bytes_min;
compr->info.compr_cap.max_fragment_size = runtime->hw.period_bytes_max;
compr->info.compr_cap.min_fragments = runtime->hw.periods_min;
compr->info.compr_cap.max_fragments = runtime->hw.periods_max;
compr->info.compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
compr->info.compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
+ compr->info.compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
+ compr->info.compr_cap.codecs[3] = SND_AUDIOCODEC_EAC3;
/* Add new codecs here */
}
@@ -897,7 +934,7 @@
}
return 0;
case SNDRV_COMPRESS_SET_PARAMS:
- pr_debug("SNDRV_COMPRESS_SET_PARAMS: ");
+ pr_debug("SNDRV_COMPRESS_SET_PARAMS:\n");
if (copy_from_user(&compr->info.codec_param, (void *) arg,
sizeof(struct snd_compr_params))) {
rc = -EFAULT;
@@ -914,6 +951,68 @@
pr_debug("SND_AUDIOCODEC_AAC\n");
compr->codec = FORMAT_MPEG4_AAC;
break;
+ case SND_AUDIOCODEC_AC3: {
+ char params_value[18*2*sizeof(int)];
+ int *params_value_data = (int *)params_value;
+ /* 36 is the max param length for ddp */
+ int i;
+ struct snd_dec_ddp *ddp =
+ &compr->info.codec_param.codec.options.ddp;
+ int params_length = ddp->params_length*sizeof(int);
+ pr_debug("SND_AUDIOCODEC_AC3\n");
+ compr->codec = FORMAT_AC3;
+ if (copy_from_user(params_value, (void *)ddp->params,
+ params_length))
+ pr_err("%s: ERROR: copy ddp params value\n",
+ __func__);
+ pr_debug("params_length: %d\n", ddp->params_length);
+ for (i = 0; i < params_length; i++)
+ pr_debug("params_value[%d]: %x\n", i,
+ params_value_data[i]);
+ for (i = 0; i < ddp->params_length/2; i++) {
+ ddp->params_id[i] = params_value_data[2*i];
+ ddp->params_value[i] = params_value_data[2*i+1];
+ }
+ if (atomic_read(&prtd->start)) {
+ rc = msm_compr_send_ddp_cfg(prtd->audio_client,
+ ddp);
+ if (rc < 0)
+ pr_err("%s: DDP CMD CFG failed\n",
+ __func__);
+ }
+ break;
+ }
+ case SND_AUDIOCODEC_EAC3: {
+ char params_value[18*2*sizeof(int)];
+ int *params_value_data = (int *)params_value;
+ /* 36 is the max param length for ddp */
+ int i;
+ struct snd_dec_ddp *ddp =
+ &compr->info.codec_param.codec.options.ddp;
+ int params_length = ddp->params_length*sizeof(int);
+ pr_debug("SND_AUDIOCODEC_EAC3\n");
+ compr->codec = FORMAT_EAC3;
+ if (copy_from_user(params_value, (void *)ddp->params,
+ params_length))
+ pr_err("%s: ERROR: copy ddp params value\n",
+ __func__);
+ pr_debug("params_length: %d\n", ddp->params_length);
+ for (i = 0; i < ddp->params_length; i++)
+ pr_debug("params_value[%d]: %x\n", i,
+ params_value_data[i]);
+ for (i = 0; i < ddp->params_length/2; i++) {
+ ddp->params_id[i] = params_value_data[2*i];
+ ddp->params_value[i] = params_value_data[2*i+1];
+ }
+ if (atomic_read(&prtd->start)) {
+ rc = msm_compr_send_ddp_cfg(prtd->audio_client,
+ ddp);
+ if (rc < 0)
+ pr_err("%s: DDP CMD CFG failed\n",
+ __func__);
+ }
+ break;
+ }
default:
pr_debug("FORMAT_LINEAR_PCM\n");
compr->codec = FORMAT_LINEAR_PCM;
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 5b18311..8bb3eaf 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -155,6 +155,8 @@
struct afe_clk_cfg *lpass_pcm_src_clk = NULL;
struct afe_clk_cfg lpass_pcm_oe_clk;
struct msm_dai_auxpcm_pdata *auxpcm_pdata = NULL;
+ unsigned int rx_port = 0;
+ unsigned int tx_port = 0;
mutex_lock(&aux_pcm_mutex);
@@ -186,22 +188,32 @@
auxpcm_pdata = (struct msm_dai_auxpcm_pdata *)dai->dev->platform_data;
lpass_pcm_src_clk = (struct afe_clk_cfg *)auxpcm_pdata->clk_cfg;
- rc = afe_close(PCM_RX); /* can block */
+ if (dai->id == AFE_PORT_ID_PRIMARY_PCM_RX
+ || dai->id == AFE_PORT_ID_PRIMARY_PCM_TX) {
+ rx_port = PCM_RX;
+ tx_port = PCM_TX;
+ } else if (dai->id == AFE_PORT_ID_SECONDARY_PCM_RX
+ || dai->id == AFE_PORT_ID_SECONDARY_PCM_TX) {
+ rx_port = AFE_PORT_ID_SECONDARY_PCM_RX;
+ tx_port = AFE_PORT_ID_SECONDARY_PCM_TX;
+ }
+
+ rc = afe_close(rx_port); /* can block */
if (IS_ERR_VALUE(rc))
dev_err(dai->dev, "fail to close PCM_RX AFE port\n");
- rc = afe_close(PCM_TX);
+ rc = afe_close(tx_port);
if (IS_ERR_VALUE(rc))
dev_err(dai->dev, "fail to close AUX PCM TX port\n");
lpass_pcm_src_clk->clk_val1 = 0;
- afe_set_lpass_clock(PCM_TX, lpass_pcm_src_clk);
- afe_set_lpass_clock(PCM_RX, lpass_pcm_src_clk);
+ afe_set_lpass_clock(tx_port, lpass_pcm_src_clk);
+ afe_set_lpass_clock(rx_port, lpass_pcm_src_clk);
memcpy(&lpass_pcm_oe_clk, &lpass_clk_cfg_default,
sizeof(struct afe_clk_cfg));
lpass_pcm_oe_clk.clk_val1 = 0;
- afe_set_lpass_clock(PCM_RX, &lpass_pcm_oe_clk);
+ afe_set_lpass_clock(rx_port, &lpass_pcm_oe_clk);
mutex_unlock(&aux_pcm_mutex);
}
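
The dai->id to (rx, tx) AFE port mapping introduced here repeats again in the
hw_params path and in the DAI remove path further down; a small helper along
these lines (not part of the patch) would keep the three copies from drifting:

	static void msm_dai_q6_auxpcm_ports(struct snd_soc_dai *dai,
					    unsigned int *rx_port,
					    unsigned int *tx_port)
	{
		if (dai->id == AFE_PORT_ID_SECONDARY_PCM_RX ||
		    dai->id == AFE_PORT_ID_SECONDARY_PCM_TX) {
			*rx_port = AFE_PORT_ID_SECONDARY_PCM_RX;
			*tx_port = AFE_PORT_ID_SECONDARY_PCM_TX;
		} else {
			*rx_port = PCM_RX;
			*tx_port = PCM_TX;
		}
	}
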
@@ -215,6 +227,8 @@
unsigned long pcm_clk_rate;
struct afe_clk_cfg lpass_pcm_oe_clk;
struct afe_clk_cfg *lpass_pcm_src_clk = NULL;
+ unsigned int rx_port = 0;
+ unsigned int tx_port = 0;
auxpcm_pdata = dai->dev->platform_data;
lpass_pcm_src_clk = (struct afe_clk_cfg *)auxpcm_pdata->clk_cfg;
@@ -279,30 +293,39 @@
sizeof(struct afe_clk_cfg));
lpass_pcm_oe_clk.clk_val1 = Q6AFE_LPASS_OSR_CLK_12_P288_MHZ;
- rc = afe_set_lpass_clock(PCM_RX, lpass_pcm_src_clk);
+ if (dai->id == AFE_PORT_ID_PRIMARY_PCM_RX ||
+ dai->id == AFE_PORT_ID_PRIMARY_PCM_TX) {
+ rx_port = PCM_RX;
+ tx_port = PCM_TX;
+ } else if (dai->id == AFE_PORT_ID_SECONDARY_PCM_RX ||
+ dai->id == AFE_PORT_ID_SECONDARY_PCM_TX) {
+ rx_port = AFE_PORT_ID_SECONDARY_PCM_RX;
+ tx_port = AFE_PORT_ID_SECONDARY_PCM_TX;
+ }
+
+ rc = afe_set_lpass_clock(rx_port, lpass_pcm_src_clk);
if (rc < 0) {
pr_err("%s:afe_set_lpass_clock on RX pcm_src_clk failed\n",
__func__);
goto fail;
}
- rc = afe_set_lpass_clock(PCM_TX, lpass_pcm_src_clk);
+ rc = afe_set_lpass_clock(tx_port, lpass_pcm_src_clk);
if (rc < 0) {
pr_err("%s:afe_set_lpass_clock on TX pcm_src_clk failed\n",
__func__);
goto fail;
}
- rc = afe_set_lpass_clock(PCM_RX, &lpass_pcm_oe_clk);
+ rc = afe_set_lpass_clock(rx_port, &lpass_pcm_oe_clk);
if (rc < 0) {
pr_err("%s:afe_set_lpass_clock on pcm_oe_clk failed\n",
__func__);
goto fail;
}
- afe_open(PCM_RX, &dai_data->port_config, dai_data->rate);
-
- afe_open(PCM_TX, &dai_data->port_config, dai_data->rate);
+ afe_open(rx_port, &dai_data->port_config, dai_data->rate);
+ afe_open(tx_port, &dai_data->port_config, dai_data->rate);
fail:
mutex_unlock(&aux_pcm_mutex);
@@ -314,7 +337,7 @@
{
int rc = 0;
- pr_debug("%s:port:%d cmd:%d aux_pcm_count= %d",
+ pr_debug("%s:port:%d cmd:%d aux_pcm_count= %d\n",
__func__, dai->id, cmd, aux_pcm_count);
switch (cmd) {
@@ -359,7 +382,7 @@
} else
dev_set_drvdata(dai->dev, dai_data);
- pr_err("%s : probe done for dai->id %d\n", __func__, dai->id);
+ pr_debug("%s : probe done for dai->id %d\n", __func__, dai->id);
return rc;
}
@@ -367,6 +390,8 @@
{
struct msm_dai_q6_dai_data *dai_data;
int rc;
+ unsigned int rx_port = 0;
+ unsigned int tx_port = 0;
dai_data = dev_get_drvdata(dai->dev);
@@ -393,14 +418,22 @@
dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d.closing afe\n",
__func__, dai->id, aux_pcm_count);
- rc = afe_close(PCM_RX); /* can block */
+ if (dai->id == AFE_PORT_ID_PRIMARY_PCM_RX ||
+ dai->id == AFE_PORT_ID_PRIMARY_PCM_TX) {
+ rx_port = PCM_RX;
+ tx_port = PCM_TX;
+ } else if (dai->id == AFE_PORT_ID_SECONDARY_PCM_RX ||
+ dai->id == AFE_PORT_ID_SECONDARY_PCM_TX) {
+ rx_port = AFE_PORT_ID_SECONDARY_PCM_RX;
+ tx_port = AFE_PORT_ID_SECONDARY_PCM_TX;
+ }
+ rc = afe_close(rx_port); /* can block */
if (IS_ERR_VALUE(rc))
dev_err(dai->dev, "fail to close AUX PCM RX AFE port\n");
- rc = afe_close(PCM_TX);
+ rc = afe_close(tx_port);
if (IS_ERR_VALUE(rc))
dev_err(dai->dev, "fail to close AUX PCM TX AFE port\n");
-
done:
kfree(dai_data);
snd_soc_unregister_dai(dai->dev);
@@ -941,12 +974,13 @@
static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_rx_dai = {
.playback = {
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000,
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
.channels_min = 1,
.channels_max = 2,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
},
.ops = &msm_dai_q6_ops,
.probe = msm_dai_q6_dai_probe,
@@ -1084,10 +1118,12 @@
switch (id) {
case AFE_PORT_ID_PRIMARY_PCM_RX:
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
rc = snd_soc_register_dai(&pdev->dev,
&msm_dai_q6_aux_pcm_rx_dai);
break;
case AFE_PORT_ID_PRIMARY_PCM_TX:
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
rc = snd_soc_register_dai(&pdev->dev,
&msm_dai_q6_aux_pcm_tx_dai);
break;
@@ -1828,8 +1864,8 @@
if (ch_cnt) {
dai_data->rx_dai.mi2s_dai_data.port_config.i2s.channel_mode =
- mi2s_pdata->rx_sd_lines;
- dai_data->rx_dai.pdata_mi2s_lines = mi2s_pdata->rx_sd_lines;
+ sd_line;
+ dai_data->rx_dai.pdata_mi2s_lines = sd_line;
dai_driver->playback.channels_min = 1;
dai_driver->playback.channels_max = ch_cnt << 1;
} else {
@@ -1846,8 +1882,8 @@
if (ch_cnt) {
dai_data->tx_dai.mi2s_dai_data.port_config.i2s.channel_mode =
- mi2s_pdata->tx_sd_lines;
- dai_data->tx_dai.pdata_mi2s_lines = mi2s_pdata->tx_sd_lines;
+ sd_line;
+ dai_data->tx_dai.pdata_mi2s_lines = sd_line;
dai_driver->capture.channels_min = 1;
dai_driver->capture.channels_max = ch_cnt << 1;
} else {
@@ -1873,25 +1909,26 @@
u32 rx_line = 0;
u32 mi2s_intf = 0;
struct msm_mi2s_pdata *mi2s_pdata;
- int rc = 0;
-
+ int rc;
+ struct snd_soc_dai_driver *mi2s_dai;
rc = of_property_read_u32(pdev->dev.of_node, q6_mi2s_dev_id,
&mi2s_intf);
if (rc) {
dev_err(&pdev->dev,
"%s: missing %x in dt node\n", __func__, mi2s_intf);
- return rc;
+ goto rtn;
}
dev_dbg(&pdev->dev, "dev name %s dev id %x\n", dev_name(&pdev->dev),
- mi2s_intf);
+ mi2s_intf);
if (mi2s_intf < MSM_PRIM_MI2S || mi2s_intf > MSM_QUAT_MI2S) {
dev_err(&pdev->dev,
"%s: Invalid MI2S ID %u from Device Tree\n",
__func__, mi2s_intf);
- return -ENXIO;
+ rc = -ENXIO;
+ goto rtn;
}
dev_set_name(&pdev->dev, "%s.%d", "msm-dai-q6-mi2s", mi2s_intf);
@@ -1909,7 +1946,7 @@
if (rc) {
dev_err(&pdev->dev, "%s: Rx line from DT file %s\n", __func__,
"qcom,msm-mi2s-rx-lines");
- goto rtn;
+ goto free_pdata;
}
rc = of_property_read_u32(pdev->dev.of_node, "qcom,msm-mi2s-tx-lines",
@@ -1917,36 +1954,53 @@
if (rc) {
dev_err(&pdev->dev, "%s: Tx line from DT file %s\n", __func__,
"qcom,msm-mi2s-tx-lines");
- goto rtn;
+ goto free_pdata;
}
dev_dbg(&pdev->dev, "dev name %s Rx line %x , Tx ine %x\n",
dev_name(&pdev->dev), rx_line, tx_line);
mi2s_pdata->rx_sd_lines = rx_line;
mi2s_pdata->tx_sd_lines = tx_line;
+
dai_data = kzalloc(sizeof(struct msm_dai_q6_mi2s_dai_data),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!dai_data) {
dev_err(&pdev->dev, "fail to allocate dai data\n");
rc = -ENOMEM;
- goto rtn;
+ goto free_pdata;
} else
dev_set_drvdata(&pdev->dev, dai_data);
+
pdev->dev.platform_data = mi2s_pdata;
- rc = msm_dai_q6_mi2s_platform_data_validation(pdev,
- &msm_dai_q6_mi2s_dai);
+
+ mi2s_dai = kzalloc(sizeof(struct snd_soc_dai_driver), GFP_KERNEL);
+ if (!mi2s_dai) {
+ dev_err(&pdev->dev, "fail to allocate for mi2s_dai\n");
+ rc = -ENOMEM;
+ goto free_dai_data;
+ }
+
+ memcpy(mi2s_dai, &msm_dai_q6_mi2s_dai,
+ sizeof(struct snd_soc_dai_driver));
+ rc = msm_dai_q6_mi2s_platform_data_validation(pdev, mi2s_dai);
if (IS_ERR_VALUE(rc))
- goto err_pdata;
+ goto free_dai;
+
dai_data->rate_constraint.count = 1;
dai_data->bitwidth_constraint.count = 1;
- rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_mi2s_dai);
+ rc = snd_soc_register_dai(&pdev->dev, mi2s_dai);
if (IS_ERR_VALUE(rc))
- goto err_pdata;
+ goto err_register;
return 0;
-err_pdata:
+
+err_register:
dev_err(&pdev->dev, "fail to msm_dai_q6_mi2s_dev_probe\n");
+free_dai:
+ kfree(mi2s_dai);
+free_dai_data:
kfree(dai_data);
-rtn:
+free_pdata:
kfree(mi2s_pdata);
+rtn:
return rc;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
new file mode 100644
index 0000000..f77ec0f
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
@@ -0,0 +1,709 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6adm-v2.h>
+#include <sound/q6asm-v2.h>
+#include <sound/q6afe-v2.h>
+
+#include "msm-dolby-dap-config.h"
+
+/* dolby endp based parameters */
+struct dolby_dap_endp_params_s {
+ int device;
+ int device_ch_caps;
+ int dap_device;
+ int params_id[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
+ int params_len[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
+ int params_offset[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
+ int params_val[DOLBY_ENDDEP_PARAM_LENGTH];
+};
+
+const struct dolby_dap_endp_params_s
+ dolby_dap_endp_params[NUM_DOLBY_ENDP_DEVICE] = {
+ {EARPIECE, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {SPEAKER, 2, DOLBY_ENDP_INT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {WIRED_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {WIRED_HEADPHONE, 2, DOLBY_ENDP_HEADPHONES,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {BLUETOOTH_SCO, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {BLUETOOTH_SCO_HEADSET, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {BLUETOOTH_SCO_CARKIT, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {BLUETOOTH_A2DP, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {BLUETOOTH_A2DP_HEADPHONES, 2, DOLBY_ENDP_HEADPHONES,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {BLUETOOTH_A2DP_SPEAKER, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {AUX_DIGITAL, 2, DOLBY_ENDP_HDMI,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-640} },
+ {AUX_DIGITAL, 6, DOLBY_ENDP_HDMI,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-640} },
+ {AUX_DIGITAL, 8, DOLBY_ENDP_HDMI,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-640} },
+ {ANLG_DOCK_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {DGTL_DOCK_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {USB_ACCESSORY, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {USB_DEVICE, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {REMOTE_SUBMIX, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {ANC_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {ANC_HEADPHONE, 2, DOLBY_ENDP_HEADPHONES,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {PROXY, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {FM, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+ {FM_TX, 2, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO}, {DOLBY_ENDDEP_PARAM_DVLO_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET}, {-320} },
+};
+
+/* dolby param ids to/from dsp */
+static uint32_t dolby_dap_params_id[ALL_DOLBY_PARAMS] = {
+ DOLBY_PARAM_ID_VDHE, DOLBY_PARAM_ID_VSPE, DOLBY_PARAM_ID_DSSF,
+ DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLE,
+ DOLBY_PARAM_ID_DVMC, DOLBY_PARAM_ID_DVME, DOLBY_PARAM_ID_IENB,
+ DOLBY_PARAM_ID_IEBF, DOLBY_PARAM_ID_IEON, DOLBY_PARAM_ID_DEON,
+ DOLBY_PARAM_ID_NGON, DOLBY_PARAM_ID_GEON, DOLBY_PARAM_ID_GENB,
+ DOLBY_PARAM_ID_GEBF, DOLBY_PARAM_ID_AONB, DOLBY_PARAM_ID_AOBF,
+ DOLBY_PARAM_ID_AOBG, DOLBY_PARAM_ID_AOON, DOLBY_PARAM_ID_ARNB,
+ DOLBY_PARAM_ID_ARBF, DOLBY_PARAM_ID_PLB, DOLBY_PARAM_ID_PLMD,
+ DOLBY_PARAM_ID_DHSB, DOLBY_PARAM_ID_DHRG, DOLBY_PARAM_ID_DSSB,
+ DOLBY_PARAM_ID_DSSA, DOLBY_PARAM_ID_DVLA, DOLBY_PARAM_ID_IEBT,
+ DOLBY_PARAM_ID_IEA, DOLBY_PARAM_ID_DEA, DOLBY_PARAM_ID_DED,
+ DOLBY_PARAM_ID_GEBG, DOLBY_PARAM_ID_AOCC, DOLBY_PARAM_ID_ARBI,
+ DOLBY_PARAM_ID_ARBL, DOLBY_PARAM_ID_ARBH, DOLBY_PARAM_ID_AROD,
+ DOLBY_PARAM_ID_ARTP, DOLBY_PARAM_ID_VMON, DOLBY_PARAM_ID_VMB,
+ DOLBY_PARAM_ID_VCNB, DOLBY_PARAM_ID_VCBF, DOLBY_PARAM_ID_PREG,
+ DOLBY_PARAM_ID_VEN, DOLBY_PARAM_ID_PSTG, DOLBY_COMMIT_ALL_TO_DSP,
+ DOLBY_COMMIT_TO_DSP, DOLBY_USE_CACHE, DOLBY_AUTO_ENDP,
+ DOLBY_AUTO_ENDDEP_PARAMS
+};
+
+/* modified state: 0x00000000 - not updated
+* > 0x00000000 && < 0x00010000 - updated and not committed to DSP
+* 0x00010001 - updated and committed to DSP
+* > 0x00010001 - committed value modified again
+*/
+static int dolby_dap_params_modified[MAX_DOLBY_PARAMS] = { 0 };
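
The modified-state word packs two facts: the low 16 bits count updates since
the entry was last written, and bit 16 records that the value has been
committed to the DSP at least once. dolby_dap_send_cached_params() below skips
entries that decode to "never set" or, on a commit pass, "committed and not
changed since". Decode helpers matching that encoding (not part of the driver):

	static bool param_never_set(int m)
	{
		return m == 0;
	}

	static bool param_committed(int m)
	{
		return (m & 0x00010000) != 0;
	}

	static bool param_clean(int m)	/* committed and unchanged since */
	{
		return param_committed(m) && (m & 0x0000FFFF) <= 1;
	}
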
+/* param offset */
+static uint32_t dolby_dap_params_offset[MAX_DOLBY_PARAMS] = {
+ DOLBY_PARAM_VDHE_OFFSET, DOLBY_PARAM_VSPE_OFFSET,
+ DOLBY_PARAM_DSSF_OFFSET, DOLBY_PARAM_DVLI_OFFSET,
+ DOLBY_PARAM_DVLO_OFFSET, DOLBY_PARAM_DVLE_OFFSET,
+ DOLBY_PARAM_DVMC_OFFSET, DOLBY_PARAM_DVME_OFFSET,
+ DOLBY_PARAM_IENB_OFFSET, DOLBY_PARAM_IEBF_OFFSET,
+ DOLBY_PARAM_IEON_OFFSET, DOLBY_PARAM_DEON_OFFSET,
+ DOLBY_PARAM_NGON_OFFSET, DOLBY_PARAM_GEON_OFFSET,
+ DOLBY_PARAM_GENB_OFFSET, DOLBY_PARAM_GEBF_OFFSET,
+ DOLBY_PARAM_AONB_OFFSET, DOLBY_PARAM_AOBF_OFFSET,
+ DOLBY_PARAM_AOBG_OFFSET, DOLBY_PARAM_AOON_OFFSET,
+ DOLBY_PARAM_ARNB_OFFSET, DOLBY_PARAM_ARBF_OFFSET,
+ DOLBY_PARAM_PLB_OFFSET, DOLBY_PARAM_PLMD_OFFSET,
+ DOLBY_PARAM_DHSB_OFFSET, DOLBY_PARAM_DHRG_OFFSET,
+ DOLBY_PARAM_DSSB_OFFSET, DOLBY_PARAM_DSSA_OFFSET,
+ DOLBY_PARAM_DVLA_OFFSET, DOLBY_PARAM_IEBT_OFFSET,
+ DOLBY_PARAM_IEA_OFFSET, DOLBY_PARAM_DEA_OFFSET,
+ DOLBY_PARAM_DED_OFFSET, DOLBY_PARAM_GEBG_OFFSET,
+ DOLBY_PARAM_AOCC_OFFSET, DOLBY_PARAM_ARBI_OFFSET,
+ DOLBY_PARAM_ARBL_OFFSET, DOLBY_PARAM_ARBH_OFFSET,
+ DOLBY_PARAM_AROD_OFFSET, DOLBY_PARAM_ARTP_OFFSET,
+ DOLBY_PARAM_VMON_OFFSET, DOLBY_PARAM_VMB_OFFSET,
+ DOLBY_PARAM_VCNB_OFFSET, DOLBY_PARAM_VCBF_OFFSET,
+ DOLBY_PARAM_PREG_OFFSET, DOLBY_PARAM_VEN_OFFSET,
+ DOLBY_PARAM_PSTG_OFFSET
+};
+/* param_length */
+static uint32_t dolby_dap_params_length[MAX_DOLBY_PARAMS] = {
+ DOLBY_PARAM_VDHE_LENGTH, DOLBY_PARAM_VSPE_LENGTH,
+ DOLBY_PARAM_DSSF_LENGTH, DOLBY_PARAM_DVLI_LENGTH,
+ DOLBY_PARAM_DVLO_LENGTH, DOLBY_PARAM_DVLE_LENGTH,
+ DOLBY_PARAM_DVMC_LENGTH, DOLBY_PARAM_DVME_LENGTH,
+ DOLBY_PARAM_IENB_LENGTH, DOLBY_PARAM_IEBF_LENGTH,
+ DOLBY_PARAM_IEON_LENGTH, DOLBY_PARAM_DEON_LENGTH,
+ DOLBY_PARAM_NGON_LENGTH, DOLBY_PARAM_GEON_LENGTH,
+ DOLBY_PARAM_GENB_LENGTH, DOLBY_PARAM_GEBF_LENGTH,
+ DOLBY_PARAM_AONB_LENGTH, DOLBY_PARAM_AOBF_LENGTH,
+ DOLBY_PARAM_AOBG_LENGTH, DOLBY_PARAM_AOON_LENGTH,
+ DOLBY_PARAM_ARNB_LENGTH, DOLBY_PARAM_ARBF_LENGTH,
+ DOLBY_PARAM_PLB_LENGTH, DOLBY_PARAM_PLMD_LENGTH,
+ DOLBY_PARAM_DHSB_LENGTH, DOLBY_PARAM_DHRG_LENGTH,
+ DOLBY_PARAM_DSSB_LENGTH, DOLBY_PARAM_DSSA_LENGTH,
+ DOLBY_PARAM_DVLA_LENGTH, DOLBY_PARAM_IEBT_LENGTH,
+ DOLBY_PARAM_IEA_LENGTH, DOLBY_PARAM_DEA_LENGTH,
+ DOLBY_PARAM_DED_LENGTH, DOLBY_PARAM_GEBG_LENGTH,
+ DOLBY_PARAM_AOCC_LENGTH, DOLBY_PARAM_ARBI_LENGTH,
+ DOLBY_PARAM_ARBL_LENGTH, DOLBY_PARAM_ARBH_LENGTH,
+ DOLBY_PARAM_AROD_LENGTH, DOLBY_PARAM_ARTP_LENGTH,
+ DOLBY_PARAM_VMON_LENGTH, DOLBY_PARAM_VMB_LENGTH,
+ DOLBY_PARAM_VCNB_LENGTH, DOLBY_PARAM_VCBF_LENGTH,
+ DOLBY_PARAM_PREG_LENGTH, DOLBY_PARAM_VEN_LENGTH,
+ DOLBY_PARAM_PSTG_LENGTH
+};
+
+/* param_value */
+static uint32_t dolby_dap_params_value[TOTAL_LENGTH_DOLBY_PARAM] = {0};
+
+struct dolby_dap_params_get_s {
+ int32_t port_id;
+ uint32_t device_id;
+ uint32_t param_id;
+ uint32_t offset;
+ uint32_t length;
+};
+
+struct dolby_dap_params_states_s {
+ bool use_cache;
+ bool auto_endp;
+ bool enddep_params;
+ int port_id;
+ int port_open_count;
+ int port_ids_dolby_can_be_enabled;
+ int device;
+};
+
+static struct dolby_dap_params_get_s dolby_dap_params_get = {-1, DEVICE_OUT_ALL,
+ 0, 0, 0};
+static struct dolby_dap_params_states_s dolby_dap_params_states = { true, true,
+ true, DOLBY_INVALID_PORT_ID,
+ 0, DEVICE_OUT_ALL, 0 };
+/*
+* port_ids_dolby_can_be_enabled is set to 0x7FFFFFFF.
+* This needs to be removed after interface validation.
+*/
+
+static int map_device_to_dolby_endpoint(int device)
+{
+ int i, dolby_dap_device = DOLBY_ENDP_INT_SPEAKERS;
+ for (i = 0; i < NUM_DOLBY_ENDP_DEVICE; i++) {
+ if (dolby_dap_endp_params[i].device == device) {
+ dolby_dap_device = dolby_dap_endp_params[i].dap_device;
+ break;
+ }
+ }
+ /* default the endpoint to speaker if no matching device entry is found */
+ if (i >= NUM_DOLBY_ENDP_DEVICE)
+ dolby_dap_params_states.device = SPEAKER;
+ return dolby_dap_device;
+}
+
+static int dolby_dap_send_end_point(int port_id)
+{
+ int rc = 0;
+ char *params_value;
+ int *update_params_value;
+ uint32_t params_length = (DOLBY_PARAM_INT_ENDP_LENGTH +
+ DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
+
+ pr_debug("%s\n", __func__);
+ params_value = kzalloc(params_length, GFP_KERNEL);
+ if (!params_value) {
+ pr_err("%s, params memory alloc failed", __func__);
+ return -ENOMEM;
+ }
+ update_params_value = (int *)params_value;
+ *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
+ *update_params_value++ = DOLBY_PARAM_ID_INIT_ENDP;
+ *update_params_value++ = DOLBY_PARAM_INT_ENDP_LENGTH * sizeof(uint32_t);
+ *update_params_value++ =
+ map_device_to_dolby_endpoint(dolby_dap_params_states.device);
+ rc = adm_dolby_dap_send_params(port_id, params_value, params_length);
+ if (rc) {
+ pr_err("%s: send dolby params failed\n", __func__);
+ rc = -EINVAL;
+ }
+ kfree(params_value);
+ return rc;
+}
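
Every parameter blob this file hands to adm_dolby_dap_send_params() uses the
same layout: per parameter, a header of module id, parameter id and payload
size in bytes, followed by the payload words, with the total byte count passed
alongside. A sketch of a single one-word parameter, mirroring what
dolby_dap_send_end_point() builds above and assuming, as the sizing there
implies, that DOLBY_PARAM_PAYLOAD_SIZE covers the per-parameter header words
(send_one_dolby_param() is illustrative, not a driver function):

	static int send_one_dolby_param(int port_id, uint32_t param_id,
					uint32_t val)
	{
		uint32_t blob[DOLBY_PARAM_PAYLOAD_SIZE + 1];
		uint32_t *p = blob;

		*p++ = DOLBY_BUNDLE_MODULE_ID;	/* module id           */
		*p++ = param_id;		/* parameter id        */
		*p++ = sizeof(uint32_t);	/* payload size, bytes */
		*p++ = val;			/* single payload word */

		return adm_dolby_dap_send_params(port_id, (char *)blob,
						 sizeof(blob));
	}
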
+
+static int dolby_dap_send_enddep_params(int port_id, int device_channels)
+{
+ int i, j, rc = 0, idx, offset;
+ char *params_value;
+ int *update_params_value;
+ uint32_t params_length = (DOLBY_ENDDEP_PARAM_LENGTH +
+ DOLBY_NUM_ENDP_DEPENDENT_PARAMS *
+ DOLBY_PARAM_PAYLOAD_SIZE) *
+ sizeof(uint32_t);
+
+ pr_debug("%s\n", __func__);
+ params_value = kzalloc(params_length, GFP_KERNEL);
+ if (!params_value) {
+ pr_err("%s, params memory alloc failed", __func__);
+ return -ENOMEM;
+ }
+ update_params_value = (int *)params_value;
+ for (idx = 0; idx < NUM_DOLBY_ENDP_DEVICE; idx++) {
+ if (dolby_dap_endp_params[idx].device ==
+ dolby_dap_params_states.device) {
+ if (dolby_dap_params_states.device == AUX_DIGITAL) {
+ if (dolby_dap_endp_params[idx].device_ch_caps ==
+ device_channels)
+ break;
+ } else {
+ break;
+ }
+ }
+ }
+ if (idx >= NUM_DOLBY_ENDP_DEVICE) {
+ pr_err("%s: device is not set accordingly\n", __func__);
+ kfree(params_value);
+ return -EINVAL;
+ }
+ for (i = 0; i < DOLBY_ENDDEP_PARAM_LENGTH; i++) {
+ *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
+ *update_params_value++ =
+ dolby_dap_endp_params[idx].params_id[i];
+ *update_params_value++ =
+ dolby_dap_endp_params[idx].params_len[i] *
+ sizeof(uint32_t);
+ offset = dolby_dap_endp_params[idx].params_offset[i];
+ for (j = 0; j < dolby_dap_endp_params[idx].params_len[i]; j++)
+ *update_params_value++ =
+ dolby_dap_endp_params[idx].params_val[offset+j];
+ }
+ rc = adm_dolby_dap_send_params(port_id, params_value, params_length);
+ if (rc) {
+ pr_err("%s: send dolby params failed\n", __func__);
+ rc = -EINVAL;
+ }
+ kfree(params_value);
+ return rc;
+}
+
+static int dolby_dap_send_cached_params(int port_id, int commit)
+{
+ char *params_value;
+ int *update_params_value, rc = 0;
+ uint32_t index_offset, i, j;
+ uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
+ MAX_DOLBY_PARAMS * DOLBY_PARAM_PAYLOAD_SIZE) *
+ sizeof(uint32_t);
+
+ params_value = kzalloc(params_length, GFP_KERNEL);
+ if (!params_value) {
+ pr_err("%s, params memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ update_params_value = (int *)params_value;
+ params_length = 0;
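+	/*
+	 * dolby_dap_params_modified[] encoding: the 0x00010000 bit marks a
+	 * parameter that has already been sent to the DSP, while the lower
+	 * 16 bits count cache updates; on a commit, only parameters changed
+	 * since the last send are pushed again.
+	 */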
+ for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
+ if ((dolby_dap_params_modified[i] == 0) ||
+ ((commit) &&
+ ((dolby_dap_params_modified[i] & 0x00010000) &&
+ ((dolby_dap_params_modified[i] & 0x0000FFFF) <= 1))))
+ continue;
+ *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
+ *update_params_value++ = dolby_dap_params_id[i];
+ *update_params_value++ = dolby_dap_params_length[i] *
+ sizeof(uint32_t);
+ index_offset = dolby_dap_params_offset[i];
+ for (j = 0; j < dolby_dap_params_length[i]; j++)
+ *update_params_value++ =
+ dolby_dap_params_value[index_offset+j];
+ params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
+ dolby_dap_params_length[i]) * sizeof(uint32_t);
+ }
+	pr_debug("%s, valid param length: %d\n", __func__, params_length);
+ if (params_length) {
+ rc = adm_dolby_dap_send_params(port_id, params_value,
+ params_length);
+ if (rc) {
+ pr_err("%s: send dolby params failed\n", __func__);
+ kfree(params_value);
+ return -EINVAL;
+ }
+ for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
+ if ((dolby_dap_params_modified[i] == 0) ||
+ ((commit) &&
+ ((dolby_dap_params_modified[i] & 0x00010000) &&
+ ((dolby_dap_params_modified[i] & 0x0000FFFF) <= 1))
+ ))
+ continue;
+ dolby_dap_params_modified[i] = 0x00010001;
+ }
+ }
+ kfree(params_value);
+ return 0;
+}
+
+int dolby_dap_init(int port_id, int channels)
+{
+ int ret = 0;
+ if ((port_id != DOLBY_INVALID_PORT_ID) &&
+ (port_id &
+ dolby_dap_params_states.port_ids_dolby_can_be_enabled)) {
+ dolby_dap_params_states.port_id = port_id;
+ dolby_dap_params_states.port_open_count++;
+ if (dolby_dap_params_states.auto_endp) {
+ ret = dolby_dap_send_end_point(port_id);
+ if (ret) {
+				pr_err("%s: err sending endpoint\n", __func__);
+ return ret;
+ }
+ }
+ if (dolby_dap_params_states.use_cache) {
+ ret = dolby_dap_send_cached_params(port_id, 0);
+ if (ret) {
+ pr_err("%s: err sending cached params\n",
+ __func__);
+ return ret;
+ }
+ }
+ if (dolby_dap_params_states.enddep_params) {
+			ret = dolby_dap_send_enddep_params(port_id,
+							   channels);
+ if (ret) {
+ pr_err("%s: err sending endp dependent params\n",
+ __func__);
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+void dolby_dap_deinit(int port_id)
+{
+ dolby_dap_params_states.port_open_count--;
+ if ((dolby_dap_params_states.port_id == port_id) &&
+ (!dolby_dap_params_states.port_open_count))
+ dolby_dap_params_states.port_id = DOLBY_INVALID_PORT_ID;
+}
+
+static int map_device_to_port_id(int device)
+{
+ int port_id = SLIMBUS_0_RX;
+	/* force the device to DEVICE_OUT_ALL for now; update this once
+	 * routing a single stream to multiple devices is handled
+	 */
+	device = DEVICE_OUT_ALL;
+ if (device == DEVICE_OUT_ALL) {
+ port_id = PRIMARY_I2S_RX | SLIMBUS_0_RX | HDMI_RX |
+ INT_BT_SCO_RX | INT_FM_RX |
+ RT_PROXY_PORT_001_RX | PCM_RX |
+ MI2S_RX | SECONDARY_I2S_RX |
+ SLIMBUS_1_RX | SLIMBUS_4_RX | SLIMBUS_3_RX |
+ AFE_PORT_ID_SECONDARY_MI2S_RX;
+ } else {
+ /* update port_id based on the device */
+ }
+ return port_id;
+}
+
+int msm_routing_get_dolby_dap_param_to_set_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ /* not used while setting the parameters */
+ return 0;
+}
+
+int msm_routing_put_dolby_dap_param_to_set_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ int rc = 0;
+ uint32_t idx, j;
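+	/*
+	 * Mixer control payload from userspace:
+	 * value[0] = device, value[1] = param id, value[2] = offset,
+	 * value[3] = length, value[4..] = parameter data.
+	 */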
+ uint32_t device = ucontrol->value.integer.value[0];
+ uint32_t param_id = ucontrol->value.integer.value[1];
+ uint32_t offset = ucontrol->value.integer.value[2];
+ uint32_t length = ucontrol->value.integer.value[3];
+
+ int port_id = dolby_dap_params_states.port_id;
+
+ dolby_dap_params_states.port_ids_dolby_can_be_enabled =
+ map_device_to_port_id(device);
+ for (idx = 0; idx < ALL_DOLBY_PARAMS; idx++) {
+ /*paramid from user space*/
+ if (param_id == dolby_dap_params_id[idx])
+ break;
+ }
+ if (idx > ALL_DOLBY_PARAMS-1) {
+ pr_err("%s: invalid param id 0x%x to set\n", __func__,
+ param_id);
+ return -EINVAL;
+ }
+ switch (idx) {
+ case DOLBY_COMMIT_ALL_IDX: {
+		/* COMMIT ALL: Send all parameters to DSP */
+ pr_debug("%s: COMMIT_ALL recvd\n", __func__);
+ if (port_id != DOLBY_INVALID_PORT_ID)
+ rc = dolby_dap_send_cached_params(port_id, 0);
+ }
+ break;
+ case DOLBY_COMMIT_IDX: {
+ pr_debug("%s: COMMIT recvd\n", __func__);
+		/* COMMIT: Send only modified parameters to DSP */
+ if (port_id != DOLBY_INVALID_PORT_ID)
+ rc = dolby_dap_send_cached_params(port_id, 1);
+ }
+ break;
+ case DOLBY_USE_CACHE_IDX: {
+ pr_debug("%s: USE CACHE recvd val: %ld\n", __func__,
+ ucontrol->value.integer.value[4]);
+ dolby_dap_params_states.use_cache =
+ ucontrol->value.integer.value[4];
+ }
+ break;
+ case DOLBY_AUTO_ENDP_IDX: {
+ pr_debug("%s: AUTO_ENDP recvd val: %ld\n", __func__,
+ ucontrol->value.integer.value[4]);
+ dolby_dap_params_states.auto_endp =
+ ucontrol->value.integer.value[4];
+ }
+ break;
+ case DOLBY_AUTO_ENDDEP_IDX: {
+ pr_debug("%s: USE_ENDDEP_PARAMS recvd val: %ld\n",
+ __func__, ucontrol->value.integer.value[4]);
+ dolby_dap_params_states.enddep_params =
+ ucontrol->value.integer.value[4];
+ }
+ break;
+ default: {
+ /* cache the parameters */
+ dolby_dap_params_modified[idx] += 1;
+ dolby_dap_params_length[idx] = length;
+ pr_debug("%s: param recvd deviceId=0x%x paramId=0x%x offset=%d length=%d\n",
+ __func__, device, param_id, offset, length);
+ for (j = 0; j < length; j++) {
+ dolby_dap_params_value[
+ dolby_dap_params_offset[idx] +
+ offset + j]
+ = ucontrol->value.integer.value[4+j];
+ pr_debug("value[%d]: %ld\n", j,
+ ucontrol->value.integer.value[4+j]);
+ }
+ }
+ }
+
+ return rc;
+}
+
+int msm_routing_get_dolby_dap_param_to_get_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ int rc = 0, i;
+ char *params_value;
+ int *update_params_value;
+ uint32_t params_length = DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM *
+ sizeof(uint32_t);
+ uint32_t param_payload_len =
+ DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
+ int port_id = dolby_dap_params_states.port_id;
+
+ if (port_id == DOLBY_INVALID_PORT_ID) {
+		pr_err("%s, port_id not set, returning error\n", __func__);
+ return -EINVAL;
+ }
+ params_value = kzalloc(params_length, GFP_KERNEL);
+ if (!params_value) {
+ pr_err("%s, params memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ if (DOLBY_PARAM_ID_VER == dolby_dap_params_get.param_id) {
+ rc = adm_dolby_dap_get_params(dolby_dap_params_get.port_id,
+ DOLBY_BUNDLE_MODULE_ID,
+ DOLBY_PARAM_ID_VER,
+ params_length +
+ param_payload_len,
+ params_value);
+ } else {
+ for (i = 0; i < MAX_DOLBY_PARAMS; i++)
+ if (dolby_dap_params_id[i] ==
+ dolby_dap_params_get.param_id)
+ break;
+ if (i > MAX_DOLBY_PARAMS-1) {
+			pr_err("%s: invalid param id to get\n", __func__);
+ rc = -EINVAL;
+ } else {
+ params_length = (dolby_dap_params_length[i] +
+ DOLBY_PARAM_PAYLOAD_SIZE) *
+ sizeof(uint32_t);
+ rc = adm_dolby_dap_get_params(
+ dolby_dap_params_get.port_id,
+ DOLBY_BUNDLE_MODULE_ID,
+ dolby_dap_params_id[i],
+ params_length +
+ param_payload_len,
+ params_value);
+ }
+ }
+ if (rc) {
+ pr_err("%s: get parameters failed\n", __func__);
+ kfree(params_value);
+ return -EINVAL;
+ }
+ update_params_value = (int *)params_value;
+ ucontrol->value.integer.value[0] = dolby_dap_params_get.device_id;
+ ucontrol->value.integer.value[1] = dolby_dap_params_get.param_id;
+ ucontrol->value.integer.value[2] = dolby_dap_params_get.offset;
+ ucontrol->value.integer.value[3] = dolby_dap_params_get.length;
+
+ pr_debug("%s: FROM DSP value[0] 0x%x value[1] %d value[2] 0x%x\n",
+ __func__, update_params_value[0],
+ update_params_value[1], update_params_value[2]);
+ for (i = 0; i < dolby_dap_params_get.length; i++) {
+ ucontrol->value.integer.value[DOLBY_PARAM_PAYLOAD_SIZE+i] =
+ update_params_value[i];
+ pr_debug("value[%d]:%d\n", i, update_params_value[i]);
+ }
+ pr_debug("%s: Returning param_id=0x%x offset=%d length=%d\n",
+ __func__, dolby_dap_params_get.param_id,
+ dolby_dap_params_get.offset,
+ dolby_dap_params_get.length);
+ kfree(params_value);
+ return 0;
+}
+
+int msm_routing_put_dolby_dap_param_to_get_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ dolby_dap_params_get.device_id = ucontrol->value.integer.value[0];
+ dolby_dap_params_get.port_id =
+ (dolby_dap_params_get.device_id == DEVICE_OUT_ALL) ?
+ dolby_dap_params_states.port_id :
+ map_device_to_port_id(dolby_dap_params_get.device_id);
+ dolby_dap_params_get.param_id = ucontrol->value.integer.value[1];
+ dolby_dap_params_get.offset = ucontrol->value.integer.value[2];
+ dolby_dap_params_get.length = ucontrol->value.integer.value[3];
+ pr_debug("%s: param_id=0x%x offset=%d length=%d\n", __func__,
+ dolby_dap_params_get.param_id, dolby_dap_params_get.offset,
+ dolby_dap_params_get.length);
+ return 0;
+}
+
+int msm_routing_get_dolby_dap_param_visualizer_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ uint32_t length = dolby_dap_params_value[DOLBY_PARAM_VCNB_OFFSET];
+ char *visualizer_data;
+ int i, rc;
+ int *update_visualizer_data;
+ uint32_t offset, params_length =
+ (2*length + DOLBY_VIS_PARAM_HEADER_SIZE)*sizeof(uint32_t);
+ uint32_t param_payload_len =
+ DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
+ int port_id = dolby_dap_params_states.port_id;
+ if (port_id == DOLBY_INVALID_PORT_ID) {
+		pr_err("%s, port_id not set, returning error\n", __func__);
+ ucontrol->value.integer.value[0] = 0;
+ return -EINVAL;
+ }
+ visualizer_data = kzalloc(params_length, GFP_KERNEL);
+ if (!visualizer_data) {
+ pr_err("%s, params memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
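+	/*
+	 * The visualizer output is two blocks of 'length' (the cached VCNB
+	 * value) words each, fetched with DOLBY_PARAM_ID_VCBG and
+	 * DOLBY_PARAM_ID_VCBE and concatenated in visualizer_data[].
+	 */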
+ offset = 0;
+ params_length = length * sizeof(uint32_t);
+ rc = adm_dolby_dap_get_params(dolby_dap_params_states.port_id,
+ DOLBY_BUNDLE_MODULE_ID,
+ DOLBY_PARAM_ID_VCBG,
+ params_length + param_payload_len,
+ visualizer_data + offset);
+ if (rc) {
+ pr_err("%s: get parameters failed\n", __func__);
+ kfree(visualizer_data);
+ return -EINVAL;
+ }
+
+ offset = length * sizeof(uint32_t);
+ rc = adm_dolby_dap_get_params(dolby_dap_params_states.port_id,
+ DOLBY_BUNDLE_MODULE_ID,
+ DOLBY_PARAM_ID_VCBE,
+ params_length + param_payload_len,
+ visualizer_data + offset);
+ if (rc) {
+ pr_err("%s: get parameters failed\n", __func__);
+ kfree(visualizer_data);
+ return -EINVAL;
+ }
+
+ ucontrol->value.integer.value[0] = 2*length;
+ pr_debug("%s: visualizer data length %ld\n", __func__,
+ ucontrol->value.integer.value[0]);
+ update_visualizer_data = (int *)visualizer_data;
+ for (i = 0; i < 2*length; i++) {
+ ucontrol->value.integer.value[1+i] = update_visualizer_data[i];
+ pr_debug("value[%d] %d\n", i, update_visualizer_data[i]);
+ }
+ kfree(visualizer_data);
+ return 0;
+}
+
+int msm_routing_put_dolby_dap_param_visualizer_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ /* not used while getting the visualizer data */
+ return 0;
+}
+
+int msm_routing_get_dolby_dap_endpoint_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ /* not used while setting the endpoint */
+ return 0;
+}
+
+int msm_routing_put_dolby_dap_endpoint_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ int device = ucontrol->value.integer.value[0];
+ dolby_dap_params_states.device = device;
+ return 0;
+}
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
new file mode 100644
index 0000000..58ea36d
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
@@ -0,0 +1,348 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MSM_DOLBY_DAP_CONFIG_H_
+#define _MSM_DOLBY_DAP_CONFIG_H_
+
+#ifdef CONFIG_DOLBY_DAP
+/* DOLBY DAP GUIDs */
+#define DOLBY_ADM_COPP_TOPOLOGY_ID 0x0001033B
+#define DOLBY_BUNDLE_MODULE_ID 0x00010723
+#define DOLBY_VISUALIZER_MODULE_ID 0x0001072B
+
+#define DOLBY_PARAM_ID_VDHE 0x0001074D
+#define DOLBY_PARAM_ID_VSPE 0x00010750
+#define DOLBY_PARAM_ID_DSSF 0x00010753
+#define DOLBY_PARAM_ID_DVLI 0x0001073E
+#define DOLBY_PARAM_ID_DVLO 0x0001073F
+#define DOLBY_PARAM_ID_DVLE 0x0001073C
+#define DOLBY_PARAM_ID_DVMC 0x00010741
+#define DOLBY_PARAM_ID_DVME 0x00010740
+#define DOLBY_PARAM_ID_IENB 0x00010744
+#define DOLBY_PARAM_ID_IEBF 0x00010745
+#define DOLBY_PARAM_ID_IEON 0x00010743
+#define DOLBY_PARAM_ID_DEON 0x00010738
+#define DOLBY_PARAM_ID_NGON 0x00010736
+#define DOLBY_PARAM_ID_GEON 0x00010748
+#define DOLBY_PARAM_ID_GENB 0x00010749
+#define DOLBY_PARAM_ID_GEBF 0x0001074A
+#define DOLBY_PARAM_ID_AONB 0x0001075B
+#define DOLBY_PARAM_ID_AOBF 0x0001075C
+#define DOLBY_PARAM_ID_AOBG 0x0001075D
+#define DOLBY_PARAM_ID_AOON 0x00010759
+#define DOLBY_PARAM_ID_ARNB 0x0001075F
+#define DOLBY_PARAM_ID_ARBF 0x00010760
+#define DOLBY_PARAM_ID_PLB 0x00010768
+#define DOLBY_PARAM_ID_PLMD 0x00010767
+#define DOLBY_PARAM_ID_DHSB 0x0001074E
+#define DOLBY_PARAM_ID_DHRG 0x0001074F
+#define DOLBY_PARAM_ID_DSSB 0x00010751
+#define DOLBY_PARAM_ID_DSSA 0x00010752
+#define DOLBY_PARAM_ID_DVLA 0x0001073D
+#define DOLBY_PARAM_ID_IEBT 0x00010746
+#define DOLBY_PARAM_ID_IEA 0x0001076A
+#define DOLBY_PARAM_ID_DEA 0x00010739
+#define DOLBY_PARAM_ID_DED 0x0001073A
+#define DOLBY_PARAM_ID_GEBG 0x0001074B
+#define DOLBY_PARAM_ID_AOCC 0x0001075A
+#define DOLBY_PARAM_ID_ARBI 0x00010761
+#define DOLBY_PARAM_ID_ARBL 0x00010762
+#define DOLBY_PARAM_ID_ARBH 0x00010763
+#define DOLBY_PARAM_ID_AROD 0x00010764
+#define DOLBY_PARAM_ID_ARTP 0x00010765
+#define DOLBY_PARAM_ID_VMON 0x00010756
+#define DOLBY_PARAM_ID_VMB 0x00010757
+#define DOLBY_PARAM_ID_VCNB 0x00010733
+#define DOLBY_PARAM_ID_VCBF 0x00010734
+#define DOLBY_PARAM_ID_PREG 0x00010728
+#define DOLBY_PARAM_ID_VEN 0x00010732
+#define DOLBY_PARAM_ID_PSTG 0x00010729
+#define DOLBY_PARAM_ID_INIT_ENDP 0x00010727
+
+/* Not Used with Set Param kcontrol, only to query using Get Param */
+#define DOLBY_PARAM_ID_VER 0x00010726
+
+#define DOLBY_PARAM_ID_VCBG 0x00010730
+#define DOLBY_PARAM_ID_VCBE 0x00010731
+
+/* DOLBY DAP control params */
+#define DOLBY_COMMIT_ALL_TO_DSP 0x70000001
+#define DOLBY_COMMIT_TO_DSP 0x70000002
+#define DOLBY_USE_CACHE 0x70000003
+#define DOLBY_AUTO_ENDP 0x70000004
+#define DOLBY_AUTO_ENDDEP_PARAMS 0x70000005
+
+/* DOLBY DAP offsets start */
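+/*
+ * Each parameter below occupies a contiguous slice of the cached
+ * dolby_dap_params_value[] array: every _OFFSET is the previous
+ * parameter's offset plus its length, and the lengths add up to
+ * TOTAL_LENGTH_DOLBY_PARAM.
+ */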
+#define DOLBY_PARAM_VDHE_LENGTH 1
+#define DOLBY_PARAM_VDHE_OFFSET 0
+#define DOLBY_PARAM_VSPE_LENGTH 1
+#define DOLBY_PARAM_VSPE_OFFSET (DOLBY_PARAM_VDHE_OFFSET + \
+ DOLBY_PARAM_VDHE_LENGTH)
+#define DOLBY_PARAM_DSSF_LENGTH 1
+#define DOLBY_PARAM_DSSF_OFFSET (DOLBY_PARAM_VSPE_OFFSET + \
+ DOLBY_PARAM_VSPE_LENGTH)
+#define DOLBY_PARAM_DVLI_LENGTH 1
+#define DOLBY_PARAM_DVLI_OFFSET (DOLBY_PARAM_DSSF_OFFSET + \
+ DOLBY_PARAM_DSSF_LENGTH)
+#define DOLBY_PARAM_DVLO_LENGTH 1
+#define DOLBY_PARAM_DVLO_OFFSET (DOLBY_PARAM_DVLI_OFFSET + \
+ DOLBY_PARAM_DVLI_LENGTH)
+#define DOLBY_PARAM_DVLE_LENGTH 1
+#define DOLBY_PARAM_DVLE_OFFSET (DOLBY_PARAM_DVLO_OFFSET + \
+ DOLBY_PARAM_DVLO_LENGTH)
+#define DOLBY_PARAM_DVMC_LENGTH 1
+#define DOLBY_PARAM_DVMC_OFFSET (DOLBY_PARAM_DVLE_OFFSET + \
+ DOLBY_PARAM_DVLE_LENGTH)
+#define DOLBY_PARAM_DVME_LENGTH 1
+#define DOLBY_PARAM_DVME_OFFSET (DOLBY_PARAM_DVMC_OFFSET + \
+ DOLBY_PARAM_DVMC_LENGTH)
+#define DOLBY_PARAM_IENB_LENGTH 1
+#define DOLBY_PARAM_IENB_OFFSET (DOLBY_PARAM_DVME_OFFSET + \
+ DOLBY_PARAM_DVME_LENGTH)
+#define DOLBY_PARAM_IEBF_LENGTH 40
+#define DOLBY_PARAM_IEBF_OFFSET (DOLBY_PARAM_IENB_OFFSET + \
+ DOLBY_PARAM_IENB_LENGTH)
+#define DOLBY_PARAM_IEON_LENGTH 1
+#define DOLBY_PARAM_IEON_OFFSET (DOLBY_PARAM_IEBF_OFFSET + \
+ DOLBY_PARAM_IEBF_LENGTH)
+#define DOLBY_PARAM_DEON_LENGTH 1
+#define DOLBY_PARAM_DEON_OFFSET (DOLBY_PARAM_IEON_OFFSET + \
+ DOLBY_PARAM_IEON_LENGTH)
+#define DOLBY_PARAM_NGON_LENGTH 1
+#define DOLBY_PARAM_NGON_OFFSET (DOLBY_PARAM_DEON_OFFSET + \
+ DOLBY_PARAM_DEON_LENGTH)
+#define DOLBY_PARAM_GEON_LENGTH 1
+#define DOLBY_PARAM_GEON_OFFSET (DOLBY_PARAM_NGON_OFFSET + \
+ DOLBY_PARAM_NGON_LENGTH)
+#define DOLBY_PARAM_GENB_LENGTH 1
+#define DOLBY_PARAM_GENB_OFFSET (DOLBY_PARAM_GEON_OFFSET + \
+ DOLBY_PARAM_GEON_LENGTH)
+#define DOLBY_PARAM_GEBF_LENGTH 40
+#define DOLBY_PARAM_GEBF_OFFSET (DOLBY_PARAM_GENB_OFFSET + \
+ DOLBY_PARAM_GENB_LENGTH)
+#define DOLBY_PARAM_AONB_LENGTH 1
+#define DOLBY_PARAM_AONB_OFFSET (DOLBY_PARAM_GEBF_OFFSET + \
+ DOLBY_PARAM_GEBF_LENGTH)
+#define DOLBY_PARAM_AOBF_LENGTH 40
+#define DOLBY_PARAM_AOBF_OFFSET (DOLBY_PARAM_AONB_OFFSET + \
+ DOLBY_PARAM_AONB_LENGTH)
+#define DOLBY_PARAM_AOBG_LENGTH 329
+#define DOLBY_PARAM_AOBG_OFFSET (DOLBY_PARAM_AOBF_OFFSET + \
+ DOLBY_PARAM_AOBF_LENGTH)
+#define DOLBY_PARAM_AOON_LENGTH 1
+#define DOLBY_PARAM_AOON_OFFSET (DOLBY_PARAM_AOBG_OFFSET + \
+ DOLBY_PARAM_AOBG_LENGTH)
+#define DOLBY_PARAM_ARNB_LENGTH 1
+#define DOLBY_PARAM_ARNB_OFFSET (DOLBY_PARAM_AOON_OFFSET + \
+ DOLBY_PARAM_AOON_LENGTH)
+#define DOLBY_PARAM_ARBF_LENGTH 40
+#define DOLBY_PARAM_ARBF_OFFSET (DOLBY_PARAM_ARNB_OFFSET + \
+ DOLBY_PARAM_ARNB_LENGTH)
+#define DOLBY_PARAM_PLB_LENGTH 1
+#define DOLBY_PARAM_PLB_OFFSET (DOLBY_PARAM_ARBF_OFFSET + \
+ DOLBY_PARAM_ARBF_LENGTH)
+#define DOLBY_PARAM_PLMD_LENGTH 1
+#define DOLBY_PARAM_PLMD_OFFSET (DOLBY_PARAM_PLB_OFFSET + \
+ DOLBY_PARAM_PLB_LENGTH)
+#define DOLBY_PARAM_DHSB_LENGTH 1
+#define DOLBY_PARAM_DHSB_OFFSET (DOLBY_PARAM_PLMD_OFFSET + \
+ DOLBY_PARAM_PLMD_LENGTH)
+#define DOLBY_PARAM_DHRG_LENGTH 1
+#define DOLBY_PARAM_DHRG_OFFSET (DOLBY_PARAM_DHSB_OFFSET + \
+ DOLBY_PARAM_DHSB_LENGTH)
+#define DOLBY_PARAM_DSSB_LENGTH 1
+#define DOLBY_PARAM_DSSB_OFFSET (DOLBY_PARAM_DHRG_OFFSET + \
+ DOLBY_PARAM_DHRG_LENGTH)
+#define DOLBY_PARAM_DSSA_LENGTH 1
+#define DOLBY_PARAM_DSSA_OFFSET (DOLBY_PARAM_DSSB_OFFSET + \
+ DOLBY_PARAM_DSSB_LENGTH)
+#define DOLBY_PARAM_DVLA_LENGTH 1
+#define DOLBY_PARAM_DVLA_OFFSET (DOLBY_PARAM_DSSA_OFFSET + \
+ DOLBY_PARAM_DSSA_LENGTH)
+#define DOLBY_PARAM_IEBT_LENGTH 40
+#define DOLBY_PARAM_IEBT_OFFSET (DOLBY_PARAM_DVLA_OFFSET + \
+ DOLBY_PARAM_DVLA_LENGTH)
+#define DOLBY_PARAM_IEA_LENGTH 1
+#define DOLBY_PARAM_IEA_OFFSET (DOLBY_PARAM_IEBT_OFFSET + \
+ DOLBY_PARAM_IEBT_LENGTH)
+#define DOLBY_PARAM_DEA_LENGTH 1
+#define DOLBY_PARAM_DEA_OFFSET (DOLBY_PARAM_IEA_OFFSET + \
+ DOLBY_PARAM_IEA_LENGTH)
+#define DOLBY_PARAM_DED_LENGTH 1
+#define DOLBY_PARAM_DED_OFFSET (DOLBY_PARAM_DEA_OFFSET + \
+ DOLBY_PARAM_DEA_LENGTH)
+#define DOLBY_PARAM_GEBG_LENGTH 40
+#define DOLBY_PARAM_GEBG_OFFSET (DOLBY_PARAM_DED_OFFSET + \
+ DOLBY_PARAM_DED_LENGTH)
+#define DOLBY_PARAM_AOCC_LENGTH 1
+#define DOLBY_PARAM_AOCC_OFFSET (DOLBY_PARAM_GEBG_OFFSET + \
+ DOLBY_PARAM_GEBG_LENGTH)
+#define DOLBY_PARAM_ARBI_LENGTH 40
+#define DOLBY_PARAM_ARBI_OFFSET (DOLBY_PARAM_AOCC_OFFSET + \
+ DOLBY_PARAM_AOCC_LENGTH)
+#define DOLBY_PARAM_ARBL_LENGTH 40
+#define DOLBY_PARAM_ARBL_OFFSET (DOLBY_PARAM_ARBI_OFFSET + \
+ DOLBY_PARAM_ARBI_LENGTH)
+#define DOLBY_PARAM_ARBH_LENGTH 40
+#define DOLBY_PARAM_ARBH_OFFSET (DOLBY_PARAM_ARBL_OFFSET + \
+ DOLBY_PARAM_ARBL_LENGTH)
+#define DOLBY_PARAM_AROD_LENGTH 1
+#define DOLBY_PARAM_AROD_OFFSET (DOLBY_PARAM_ARBH_OFFSET + \
+ DOLBY_PARAM_ARBH_LENGTH)
+#define DOLBY_PARAM_ARTP_LENGTH 1
+#define DOLBY_PARAM_ARTP_OFFSET (DOLBY_PARAM_AROD_OFFSET + \
+ DOLBY_PARAM_AROD_LENGTH)
+#define DOLBY_PARAM_VMON_LENGTH 1
+#define DOLBY_PARAM_VMON_OFFSET (DOLBY_PARAM_ARTP_OFFSET + \
+ DOLBY_PARAM_ARTP_LENGTH)
+#define DOLBY_PARAM_VMB_LENGTH 1
+#define DOLBY_PARAM_VMB_OFFSET (DOLBY_PARAM_VMON_OFFSET + \
+ DOLBY_PARAM_VMON_LENGTH)
+#define DOLBY_PARAM_VCNB_LENGTH 1
+#define DOLBY_PARAM_VCNB_OFFSET (DOLBY_PARAM_VMB_OFFSET + \
+ DOLBY_PARAM_VMB_LENGTH)
+#define DOLBY_PARAM_VCBF_LENGTH 20
+#define DOLBY_PARAM_VCBF_OFFSET (DOLBY_PARAM_VCNB_OFFSET + \
+ DOLBY_PARAM_VCNB_LENGTH)
+#define DOLBY_PARAM_PREG_LENGTH 1
+#define DOLBY_PARAM_PREG_OFFSET (DOLBY_PARAM_VCBF_OFFSET + \
+ DOLBY_PARAM_VCBF_LENGTH)
+#define DOLBY_PARAM_VEN_LENGTH 1
+#define DOLBY_PARAM_VEN_OFFSET (DOLBY_PARAM_PREG_OFFSET + \
+ DOLBY_PARAM_PREG_LENGTH)
+#define DOLBY_PARAM_PSTG_LENGTH 1
+#define DOLBY_PARAM_PSTG_OFFSET (DOLBY_PARAM_VEN_OFFSET + \
+ DOLBY_PARAM_VEN_LENGTH)
+
+#define DOLBY_PARAM_INT_ENDP_LENGTH 1
+#define DOLBY_PARAM_PAYLOAD_SIZE 4
+#define DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM 329
+
+#define DOLBY_NUM_ENDP_DEPENDENT_PARAMS 1
+#define DOLBY_ENDDEP_PARAM_DVLO_OFFSET 0
+#define DOLBY_ENDDEP_PARAM_DVLO_LENGTH 1
+#define DOLBY_ENDDEP_PARAM_LENGTH DOLBY_ENDDEP_PARAM_DVLO_LENGTH
+
+#define MAX_DOLBY_PARAMS 47
+#define MAX_DOLBY_CTRL_PARAMS 5
+#define ALL_DOLBY_PARAMS (MAX_DOLBY_PARAMS + \
+ MAX_DOLBY_CTRL_PARAMS)
+#define DOLBY_COMMIT_ALL_IDX MAX_DOLBY_PARAMS
+#define DOLBY_COMMIT_IDX (MAX_DOLBY_PARAMS+1)
+#define DOLBY_USE_CACHE_IDX (MAX_DOLBY_PARAMS+2)
+#define DOLBY_AUTO_ENDP_IDX (MAX_DOLBY_PARAMS+3)
+#define DOLBY_AUTO_ENDDEP_IDX (MAX_DOLBY_PARAMS+4)
+
+#define TOTAL_LENGTH_DOLBY_PARAM 745
+#define NUM_DOLBY_ENDP_DEVICE 23
+#define DOLBY_VIS_PARAM_HEADER_SIZE 25
+
+#define DOLBY_INVALID_PORT_ID -1
+/* DOLBY device definitions */
+enum {
+ DOLBY_ENDP_INT_SPEAKERS = 0,
+ DOLBY_ENDP_EXT_SPEAKERS,
+ DOLBY_ENDP_HEADPHONES,
+ DOLBY_ENDP_HDMI,
+ DOLBY_ENDP_SPDIF,
+ DOLBY_ENDP_DLNA,
+ DOLBY_ENDP_ANALOG,
+};
+
+enum {
+ DEVICE_NONE = 0x0,
+ /* output devices */
+ EARPIECE = 0x1,
+ SPEAKER = 0x2,
+ WIRED_HEADSET = 0x4,
+ WIRED_HEADPHONE = 0x8,
+ BLUETOOTH_SCO = 0x10,
+ BLUETOOTH_SCO_HEADSET = 0x20,
+ BLUETOOTH_SCO_CARKIT = 0x40,
+ BLUETOOTH_A2DP = 0x80,
+ BLUETOOTH_A2DP_HEADPHONES = 0x100,
+ BLUETOOTH_A2DP_SPEAKER = 0x200,
+ AUX_DIGITAL = 0x400,
+ ANLG_DOCK_HEADSET = 0x800,
+ DGTL_DOCK_HEADSET = 0x1000,
+ USB_ACCESSORY = 0x2000,
+ USB_DEVICE = 0x4000,
+ REMOTE_SUBMIX = 0x8000,
+ ANC_HEADSET = 0x10000,
+ ANC_HEADPHONE = 0x20000,
+ PROXY = 0x40000,
+ FM = 0x80000,
+ FM_TX = 0x100000,
+ DEVICE_OUT_ALL = 0x7FFFFFFF,
+};
+/* DOLBY device definitions end */
+
+struct dolby_dap_params {
+ uint32_t value[TOTAL_LENGTH_DOLBY_PARAM + MAX_DOLBY_PARAMS];
+} __packed;
+int dolby_dap_init(int port_id, int channels);
+int msm_routing_get_dolby_dap_param_to_set_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int msm_routing_put_dolby_dap_param_to_set_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int msm_routing_get_dolby_dap_param_to_get_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int msm_routing_put_dolby_dap_param_to_get_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int msm_routing_get_dolby_dap_param_visualizer_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int msm_routing_put_dolby_dap_param_visualizer_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int msm_routing_get_dolby_dap_endpoint_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int msm_routing_put_dolby_dap_endpoint_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+void dolby_dap_deinit(int port_id);
+/* Dolby DAP end */
+#else
+static inline int dolby_dap_init(int port_id, int channels) { return 0; }
+static inline int msm_routing_get_dolby_dap_param_to_set_control(
+	struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol) { return 0; }
+static inline int msm_routing_put_dolby_dap_param_to_set_control(
+	struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol) { return 0; }
+static inline int msm_routing_get_dolby_dap_param_to_get_control(
+	struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol) { return 0; }
+static inline int msm_routing_put_dolby_dap_param_to_get_control(
+	struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol) { return 0; }
+static inline int msm_routing_get_dolby_dap_param_visualizer_control(
+	struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol) { return 0; }
+static inline int msm_routing_put_dolby_dap_param_visualizer_control(
+	struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol) { return 0; }
+static inline int msm_routing_get_dolby_dap_endpoint_control(
+	struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol) { return 0; }
+static inline int msm_routing_put_dolby_dap_endpoint_control(
+	struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol) { return 0; }
+static inline void dolby_dap_deinit(int port_id) { return; }
+#endif
+
+#endif
+
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
index 96ddcf6..2a64ae2 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
@@ -32,16 +32,22 @@
#include <linux/memory_alloc.h>
#include "msm-pcm-afe-v2.h"
-#define MIN_PERIOD_SIZE (128 * 2)
-#define MAX_PERIOD_SIZE (128 * 2 * 2 * 6)
-#define MAX_NUM_PERIODS 384
-#define MIN_NUM_PERIODS 32
-static struct snd_pcm_hardware msm_afe_hardware = {
- .info = (SNDRV_PCM_INFO_MMAP |
+#define MIN_PLAYBACK_PERIOD_SIZE (128 * 2)
+#define MAX_PLAYBACK_PERIOD_SIZE (128 * 2 * 2 * 6)
+#define MIN_PLAYBACK_NUM_PERIODS (32)
+#define MAX_PLAYBACK_NUM_PERIODS (384)
+
+#define MIN_CAPTURE_PERIOD_SIZE (128 * 2 * 4)
+#define MAX_CAPTURE_PERIOD_SIZE (128 * 2 * 2 * 6 * 4)
+#define MIN_CAPTURE_NUM_PERIODS (32)
+#define MAX_CAPTURE_NUM_PERIODS (384)
+
+static struct snd_pcm_hardware msm_afe_hardware_playback = {
+ .info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED),
- .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ .formats = SNDRV_PCM_FMTBIT_S16_LE|
SNDRV_PCM_FMTBIT_S24_LE,
.rates = (SNDRV_PCM_RATE_8000 |
SNDRV_PCM_RATE_16000 |
@@ -50,13 +56,39 @@
.rate_max = 48000,
.channels_min = 1,
.channels_max = 6,
- .buffer_bytes_max = MAX_PERIOD_SIZE * MIN_NUM_PERIODS,
- .period_bytes_min = MIN_PERIOD_SIZE,
- .period_bytes_max = MAX_PERIOD_SIZE,
- .periods_min = MIN_NUM_PERIODS,
- .periods_max = MAX_NUM_PERIODS,
+ .buffer_bytes_max = MAX_PLAYBACK_PERIOD_SIZE *
+ MIN_PLAYBACK_NUM_PERIODS,
+ .period_bytes_min = MIN_PLAYBACK_PERIOD_SIZE,
+ .period_bytes_max = MAX_PLAYBACK_PERIOD_SIZE,
+ .periods_min = MIN_PLAYBACK_NUM_PERIODS,
+ .periods_max = MAX_PLAYBACK_NUM_PERIODS,
.fifo_size = 0,
};
+
+static struct snd_pcm_hardware msm_afe_hardware_capture = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE|
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .rates = (SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 1,
+ .channels_max = 6,
+ .buffer_bytes_max = MAX_CAPTURE_PERIOD_SIZE *
+ MIN_CAPTURE_NUM_PERIODS,
+ .period_bytes_min = MIN_CAPTURE_PERIOD_SIZE,
+ .period_bytes_max = MAX_CAPTURE_PERIOD_SIZE,
+ .periods_min = MIN_CAPTURE_NUM_PERIODS,
+ .periods_max = MAX_CAPTURE_NUM_PERIODS,
+ .fifo_size = 0,
+};
+
static enum hrtimer_restart afe_hrtimer_callback(struct hrtimer *hrt);
static enum hrtimer_restart afe_hrtimer_rec_callback(struct hrtimer *hrt);
@@ -130,6 +162,8 @@
struct snd_pcm_substream *substream = NULL;
struct snd_pcm_runtime *runtime = NULL;
uint16_t event;
+ uint64_t period_bytes;
+ uint64_t bytes_one_sec;
if (prtd == NULL)
return;
@@ -143,12 +177,28 @@
switch (event) {
case AFE_EVENT_RTPORT_START: {
prtd->dsp_cnt = 0;
- prtd->poll_time = ((unsigned long)((
- snd_pcm_lib_period_bytes
- (prtd->substream) *
- 1000 * 1000)/
- (runtime->rate *
- runtime->channels * 2)));
+			/* Calculate the poll time.
+			 * The steps are split to avoid overflow.
+			 * Poll time is the time corresponding to one
+			 * period worth of bytes.
+			 * (Sample rate * channel count * sample size) =
+			 * bytes in 1 sec.
+			 * Poll time =
+			 * (period bytes / bytes in one sec) *
+			 * 1000000 micro seconds.
+			 * The multiplication by 1000000 is done in two
+			 * steps to keep the accuracy of the poll time.
+			 */
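+			/* For example (illustrative values only): at
+			 * 48 kHz, 2 channels, 16-bit, with a 768-byte
+			 * period: bytes_one_sec = 192000 -> 192 after
+			 * the divide by 1000, period_bytes = 768000,
+			 * so poll_time = 768000 / 192 = 4000 us.
+			 */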
+ period_bytes = ((uint64_t)(
+ (snd_pcm_lib_period_bytes(
+ prtd->substream)) *
+ 1000));
+ bytes_one_sec =
+ (runtime->rate * runtime->channels * 2);
+ bytes_one_sec =
+ div_u64(bytes_one_sec, 1000);
+ prtd->poll_time =
+ div_u64(period_bytes, bytes_one_sec);
pr_debug("prtd->poll_time: %d",
prtd->poll_time);
break;
@@ -197,6 +247,8 @@
struct snd_pcm_substream *substream = NULL;
struct snd_pcm_runtime *runtime = NULL;
uint16_t event;
+ uint64_t period_bytes;
+ uint64_t bytes_one_sec;
if (prtd == NULL)
return;
@@ -210,11 +262,22 @@
switch (event) {
case AFE_EVENT_RTPORT_START: {
prtd->dsp_cnt = 0;
- prtd->poll_time = ((unsigned long)((
- snd_pcm_lib_period_bytes(prtd->substream)
- * 1000 * 1000)/(runtime->rate
- * runtime->channels * 2)));
- pr_debug("prtd->poll_time : %d", prtd->poll_time);
+			/* Calculate the poll time; the steps are split to
+			 * avoid overflow.
+			 * Poll time is the time corresponding to one period
+			 * worth of bytes.
+			 * (Sample rate * channel count * sample size) =
+			 * bytes in 1 sec.
+			 * Poll time = (period bytes / bytes in one sec) *
+			 * 1000000 micro seconds.
+			 * The multiplication by 1000000 is done in two steps
+			 * to keep the accuracy of the poll time.
+			 */
+ period_bytes = ((uint64_t)(
+ (snd_pcm_lib_period_bytes(prtd->substream)) *
+ 1000));
+ bytes_one_sec = (runtime->rate * runtime->channels * 2);
+			bytes_one_sec = div_u64(bytes_one_sec, 1000);
+ prtd->poll_time =
+ div_u64(period_bytes, bytes_one_sec);
+ pr_debug("prtd->poll_time : %d\n", prtd->poll_time);
break;
}
case AFE_EVENT_RTPORT_STOP:
@@ -326,7 +389,11 @@
mutex_lock(&prtd->lock);
- runtime->hw = msm_afe_hardware;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ runtime->hw = msm_afe_hardware_playback;
+ else
+ runtime->hw = msm_afe_hardware_capture;
+
prtd->substream = substream;
runtime->private_data = prtd;
prtd->audio_client = q6afe_audio_client_alloc(prtd);
@@ -355,6 +422,18 @@
if (ret < 0)
pr_err("snd_pcm_hw_constraint_integer failed\n");
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ MIN_CAPTURE_NUM_PERIODS * MIN_CAPTURE_PERIOD_SIZE,
+ MAX_CAPTURE_NUM_PERIODS * MAX_CAPTURE_PERIOD_SIZE);
+
+ if (ret < 0) {
+ pr_err("constraint for buffer bytes min max ret = %d\n",
+ ret);
+ }
+ }
+
return 0;
}
@@ -497,10 +576,18 @@
dir = IN;
else
dir = OUT;
+
rc = q6afe_audio_client_buf_alloc_contiguous(dir,
- prtd->audio_client,
- runtime->hw.period_bytes_min,
- runtime->hw.periods_max);
+ prtd->audio_client,
+ (params_buffer_bytes(params) / params_periods(params)),
+ params_periods(params));
+ pr_debug("params_buffer_bytes(params) = %d\n",
+ (params_buffer_bytes(params)));
+ pr_debug("params_periods(params) = %d\n",
+ (params_periods(params)));
+ pr_debug("params_periodsize(params) = %d\n",
+ (params_buffer_bytes(params) / params_periods(params)));
+
if (rc < 0) {
pr_err("Audio Start: Buffer Allocation failed rc = %d\n", rc);
mutex_unlock(&prtd->lock);
@@ -519,14 +606,18 @@
dma_buf->private_data = NULL;
dma_buf->area = buf[0].data;
dma_buf->addr = buf[0].phys;
- dma_buf->bytes = runtime->hw.buffer_bytes_max;
+
+ dma_buf->bytes = params_buffer_bytes(params);
+
if (!dma_buf->area) {
pr_err("%s:MSM AFE physical memory allocation failed\n",
__func__);
mutex_unlock(&prtd->lock);
return -ENOMEM;
}
- memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max);
+
+ memset(dma_buf->area, 0, params_buffer_bytes(params));
+
prtd->dma_addr = (u32) dma_buf->addr;
mutex_unlock(&prtd->lock);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
index c8de460..27b3f56 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -24,6 +24,7 @@
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/control.h>
+#include <sound/pcm_params.h>
#include <asm/dma.h>
#include <linux/dma-mapping.h>
@@ -32,6 +33,7 @@
#include <sound/compress_offload.h>
#include <sound/compress_driver.h>
#include <sound/timer.h>
+#include <sound/pcm_params.h>
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
@@ -54,9 +56,10 @@
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
- .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT,
+ .rates = SNDRV_PCM_RATE_8000_192000 |
+ SNDRV_PCM_RATE_KNOT,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 192000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = 1024 * 1024,
@@ -69,7 +72,8 @@
/* Conventional and unconventional sample rate supported */
static unsigned int supported_sample_rates[] = {
- 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
+ 96000, 192000
};
static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
@@ -276,19 +280,7 @@
static int msm_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
struct msm_audio *prtd;
- struct asm_softpause_params softpause = {
- .enable = SOFT_PAUSE_ENABLE,
- .period = SOFT_PAUSE_PERIOD,
- .step = SOFT_PAUSE_STEP,
- .rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
- };
- struct asm_softvolume_params softvol = {
- .period = SOFT_VOLUME_PERIOD,
- .step = SOFT_VOLUME_STEP,
- .rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
- };
int ret = 0;
pr_debug("%s\n", __func__);
@@ -306,31 +298,9 @@
kfree(prtd);
return -ENOMEM;
}
- prtd->audio_client->perf_mode = false;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM);
- if (ret < 0) {
- pr_err("%s: pcm out open failed\n", __func__);
- q6asm_audio_client_free(prtd->audio_client);
- kfree(prtd);
- return -ENOMEM;
- }
- ret = q6asm_set_io_mode(prtd->audio_client, ASYNC_IO_MODE);
- if (ret < 0) {
- pr_err("%s: Set IO mode failed\n", __func__);
- q6asm_audio_client_free(prtd->audio_client);
- kfree(prtd);
- return -ENOMEM;
- }
- }
/* Capture path */
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
return -EPERM;
- pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
- prtd->session_id = prtd->audio_client->session;
- msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
- prtd->audio_client->perf_mode,
- prtd->session_id, substream->stream);
ret = snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
@@ -349,15 +319,6 @@
atomic_set(&lpa_audio.audio_ocmem_req, 0);
runtime->private_data = prtd;
lpa_audio.prtd = prtd;
- lpa_set_volume(0);
- ret = q6asm_set_softpause(lpa_audio.prtd->audio_client, &softpause);
- if (ret < 0)
- pr_err("%s: Send SoftPause Param failed ret=%d\n",
- __func__, ret);
- ret = q6asm_set_softvolume(lpa_audio.prtd->audio_client, &softvol);
- if (ret < 0)
- pr_err("%s: Send SoftVolume Param failed ret=%d\n",
- __func__, ret);
return 0;
}
@@ -406,22 +367,24 @@
prtd->pcm_irq_pos = 0;
}
- dir = IN;
- atomic_set(&prtd->pending_buffer, 0);
+ if (prtd->audio_client) {
+ dir = IN;
+ atomic_set(&prtd->pending_buffer, 0);
- if (atomic_cmpxchg(&lpa_audio.audio_ocmem_req, 1, 0))
- audio_ocmem_process_req(AUDIO, false);
- lpa_audio.prtd = NULL;
- q6asm_cmd(prtd->audio_client, CMD_CLOSE);
- q6asm_audio_client_buf_free_contiguous(dir,
+ if (atomic_cmpxchg(&lpa_audio.audio_ocmem_req, 1, 0))
+ audio_ocmem_process_req(AUDIO, false);
+ lpa_audio.prtd = NULL;
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+ q6asm_audio_client_buf_free_contiguous(dir,
prtd->audio_client);
- atomic_set(&prtd->stop, 1);
- pr_debug("%s\n", __func__);
+ atomic_set(&prtd->stop, 1);
+ q6asm_audio_client_free(prtd->audio_client);
+ pr_debug("%s\n", __func__);
+ }
msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
SNDRV_PCM_STREAM_PLAYBACK);
pr_debug("%s\n", __func__);
- q6asm_audio_client_free(prtd->audio_client);
kfree(prtd);
return 0;
@@ -483,9 +446,59 @@
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_audio *prtd = runtime->private_data;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
struct audio_buffer *buf;
+ uint16_t bits_per_sample = 16;
int dir, ret;
+ struct asm_softpause_params softpause = {
+ .enable = SOFT_PAUSE_ENABLE,
+ .period = SOFT_PAUSE_PERIOD,
+ .step = SOFT_PAUSE_STEP,
+ .rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
+ };
+ struct asm_softvolume_params softvol = {
+ .period = SOFT_VOLUME_PERIOD,
+ .step = SOFT_VOLUME_STEP,
+ .rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
+ };
+
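+	/*
+	 * The ASM write session is opened here, in hw_params, rather than
+	 * in msm_pcm_open(), since the bit width (16 vs 24) is only known
+	 * once the hw params have been set.
+	 */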
+ prtd->audio_client->perf_mode = false;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (params_format(params) == SNDRV_PCM_FORMAT_S24_LE)
+ bits_per_sample = 24;
+ ret = q6asm_open_write_v2(prtd->audio_client,
+ FORMAT_LINEAR_PCM, bits_per_sample);
+ if (ret < 0) {
+ pr_err("%s: pcm out open failed\n", __func__);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ return -ENOMEM;
+ }
+ ret = q6asm_set_io_mode(prtd->audio_client, ASYNC_IO_MODE);
+ if (ret < 0) {
+ pr_err("%s: Set IO mode failed\n", __func__);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
+ prtd->session_id = prtd->audio_client->session;
+ msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+ prtd->audio_client->perf_mode,
+ prtd->session_id, substream->stream);
+
+ lpa_set_volume(0);
+ ret = q6asm_set_softpause(lpa_audio.prtd->audio_client, &softpause);
+ if (ret < 0)
+ pr_err("%s: Send SoftPause Param failed ret=%d\n",
+ __func__, ret);
+ ret = q6asm_set_softvolume(lpa_audio.prtd->audio_client, &softvol);
+ if (ret < 0)
+ pr_err("%s: Send SoftVolume Param failed ret=%d\n",
+ __func__, ret);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = IN;
@@ -493,8 +506,8 @@
return -EPERM;
ret = q6asm_audio_client_buf_alloc_contiguous(dir,
prtd->audio_client,
- runtime->hw.period_bytes_min,
- runtime->hw.periods_max);
+ params_period_bytes(params),
+ params_periods(params));
if (ret < 0) {
pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
ret);
@@ -511,7 +524,7 @@
dma_buf->private_data = NULL;
dma_buf->area = buf[0].data;
dma_buf->addr = buf[0].phys;
- dma_buf->bytes = runtime->hw.buffer_bytes_max;
+ dma_buf->bytes = params_period_bytes(params) * params_periods(params);
if (!dma_buf->area)
return -ENOMEM;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index ca91fe5..717e63b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -82,9 +82,9 @@
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
- .rates = SNDRV_PCM_RATE_8000_96000,
+ .rates = SNDRV_PCM_RATE_8000_192000,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
.channels_min = 1,
.channels_max = 8,
.buffer_bytes_max = PLAYBACK_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE,
@@ -98,7 +98,7 @@
/* Conventional and unconventional sample rate supported */
static unsigned int supported_sample_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
- 96000
+ 96000, 192000
};
static uint32_t in_frame_info[CAPTURE_NUM_PERIODS][2];
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index d8f2759..c651ec7 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -10,7 +10,6 @@
* GNU General Public License for more details.
*/
-
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -33,7 +32,9 @@
#include <sound/pcm_params.h>
#include "msm-pcm-routing-v2.h"
+#include "msm-dolby-dap-config.h"
#include "q6voice.h"
+#include "q6core.h"
struct msm_pcm_routing_bdai_data {
u16 port_id; /* AFE port ID */
@@ -185,6 +186,20 @@
(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.global);
}
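+/*
+ * Choose the ADM COPP topology for the given path: use the topology
+ * registered for the RX/TX path when one is available, otherwise fall
+ * back to DEFAULT_COPP_TOPOLOGY.
+ */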
+int get_topology(int path_type)
+{
+ int topology_id = 0;
+ if (path_type == ADM_PATH_PLAYBACK)
+ topology_id = get_adm_rx_topology();
+ else
+ topology_id = get_adm_tx_topology();
+
+ if (topology_id == 0)
+ topology_id = DEFAULT_COPP_TOPOLOGY;
+
+ return topology_id;
+}
+
#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
static struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
{ PRIMARY_I2S_RX, 0, 0, 0, 0, 0},
@@ -225,6 +240,8 @@
{ AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, 0, 0, 0},
{ AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, 0, 0, 0},
{ AUDIO_PORT_ID_I2S_RX, 0, 0, 0, 0, 0},
+ { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, 0, 0, 0},
+ { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, 0, 0, 0},
};
@@ -318,7 +335,7 @@
void msm_pcm_routing_reg_phy_stream(int fedai_id, bool perf_mode,
int dspst_id, int stream_type)
{
- int i, session_type, path_type, port_type;
+ int i, session_type, path_type, port_type, port_id, topology;
struct route_payload payload;
u32 channels;
uint16_t bits_per_sample = 16;
@@ -346,6 +363,7 @@
/* re-enable EQ if active */
if (eq_data[fedai_id].enable)
msm_send_eq_values(fedai_id);
+ topology = get_topology(path_type);
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
if (test_bit(fedai_id, &msm_bedais[i].fe_sessions))
msm_bedais[i].perf_mode = perf_mode;
@@ -361,27 +379,31 @@
else if (msm_bedais[i].format ==
SNDRV_PCM_FORMAT_S24_LE)
bits_per_sample = 24;
-
if ((stream_type == SNDRV_PCM_STREAM_PLAYBACK) &&
(channels > 0))
adm_multi_ch_copp_open(msm_bedais[i].port_id,
path_type,
msm_bedais[i].sample_rate,
msm_bedais[i].channel,
- DEFAULT_COPP_TOPOLOGY, msm_bedais[i].perf_mode,
+ topology, msm_bedais[i].perf_mode,
bits_per_sample);
else
adm_open(msm_bedais[i].port_id,
path_type,
msm_bedais[i].sample_rate,
msm_bedais[i].channel,
- DEFAULT_COPP_TOPOLOGY, false,
+ topology, false,
bits_per_sample);
payload.copp_ids[payload.num_copps++] =
msm_bedais[i].port_id;
- srs_port_id = msm_bedais[i].port_id;
+ port_id = srs_port_id = msm_bedais[i].port_id;
srs_send_params(srs_port_id, 1, 0);
+ if (DOLBY_ADM_COPP_TOPOLOGY_ID == topology)
+ if (dolby_dap_init(port_id,
+ msm_bedais[i].channel) < 0)
+ pr_err("%s: Err init dolby dap\n",
+ __func__);
}
}
if (payload.num_copps)
@@ -393,7 +415,7 @@
void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type)
{
- int i, port_type, session_type;
+ int i, port_type, session_type, path_type, topology;
if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
/* bad ID assigned in machine driver */
@@ -404,19 +426,24 @@
if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
port_type = MSM_AFE_PORT_TYPE_RX;
session_type = SESSION_TYPE_RX;
+ path_type = ADM_PATH_PLAYBACK;
} else {
port_type = MSM_AFE_PORT_TYPE_TX;
session_type = SESSION_TYPE_TX;
+ path_type = ADM_PATH_LIVE_REC;
}
mutex_lock(&routing_lock);
-
+ topology = get_topology(path_type);
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
if (!is_be_dai_extproc(i) &&
(afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
(msm_bedais[i].active) &&
- (test_bit(fedai_id, &msm_bedais[i].fe_sessions)))
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
adm_close(msm_bedais[i].port_id);
+ if (DOLBY_ADM_COPP_TOPOLOGY_ID == topology)
+ dolby_dap_deinit(msm_bedais[i].port_id);
+ }
}
fe_dai_map[fedai_id][session_type] = INVALID_SESSION;
@@ -443,7 +470,7 @@
static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
{
- int session_type, path_type;
+ int session_type, path_type, port_id, topology;
u32 channels;
uint16_t bits_per_sample = 16;
@@ -465,7 +492,7 @@
}
mutex_lock(&routing_lock);
-
+ topology = get_topology(path_type);
if (set) {
if (!test_bit(val, &msm_bedais[reg].fe_sessions) &&
(msm_bedais[reg].port_id == VOICE_PLAYBACK_TX))
@@ -484,19 +511,23 @@
path_type,
msm_bedais[reg].sample_rate,
channels,
- DEFAULT_COPP_TOPOLOGY,
+ topology,
msm_bedais[reg].perf_mode,
bits_per_sample);
} else
adm_open(msm_bedais[reg].port_id,
path_type,
msm_bedais[reg].sample_rate, channels,
- DEFAULT_COPP_TOPOLOGY, false, bits_per_sample);
+ topology, false, bits_per_sample);
msm_pcm_routing_build_matrix(val,
fe_dai_map[val][session_type], path_type);
- srs_port_id = msm_bedais[reg].port_id;
+ port_id = srs_port_id = msm_bedais[reg].port_id;
srs_send_params(srs_port_id, 1, 0);
+ if (DOLBY_ADM_COPP_TOPOLOGY_ID == topology)
+ if (dolby_dap_init(port_id, channels) < 0)
+ pr_err("%s: Err init dolby dap\n",
+ __func__);
}
} else {
if (test_bit(val, &msm_bedais[reg].fe_sessions) &&
@@ -506,6 +537,8 @@
if (msm_bedais[reg].active && fe_dai_map[val][session_type] !=
INVALID_SESSION) {
adm_close(msm_bedais[reg].port_id);
+ if (DOLBY_ADM_COPP_TOPOLOGY_ID == topology)
+ dolby_dap_deinit(msm_bedais[reg].port_id);
msm_pcm_routing_build_matrix(val,
fe_dai_map[val][session_type], path_type);
}
@@ -1513,6 +1546,24 @@
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new sec_auxpcm_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
SOC_SINGLE_EXT("PRI_TX", MSM_BACKEND_DAI_PRI_I2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -1532,6 +1583,9 @@
SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -1564,6 +1618,12 @@
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new mmul4_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new mmul5_mixer_controls[] = {
SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
@@ -1583,6 +1643,9 @@
SOC_SINGLE_EXT("AUX_PCM_TX", MSM_BACKEND_DAI_AUXPCM_TX,
MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
@@ -1723,6 +1786,24 @@
msm_routing_put_voice_mixer),
};
+static const struct snd_kcontrol_new sec_aux_pcm_rx_voice_mixer_controls[] = {
+ SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+};
+
static const struct snd_kcontrol_new hdmi_rx_voice_mixer_controls[] = {
SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
@@ -1781,6 +1862,9 @@
SOC_SINGLE_EXT("AUX_PCM_TX_Voice", MSM_BACKEND_DAI_AUXPCM_TX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_TX_Voice", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new tx_voice2_mixer_controls[] = {
@@ -1820,6 +1904,9 @@
SOC_SINGLE_EXT("AUX_PCM_TX_VoLTE", MSM_BACKEND_DAI_AUXPCM_TX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_TX_VoLTE", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("MI2S_TX_VoLTE", MSM_BACKEND_DAI_MI2S_TX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -1844,6 +1931,9 @@
SOC_SINGLE_EXT("AUX_PCM_TX_Voip", MSM_BACKEND_DAI_AUXPCM_TX,
MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_TX_Voip", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new tx_voice_stub_mixer_controls[] = {
@@ -1874,6 +1964,9 @@
SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
msm_routing_put_port_mixer),
@@ -1888,6 +1981,15 @@
msm_routing_put_port_mixer),
};
+static const struct snd_kcontrol_new sec_auxpcm_rx_port_mixer_controls[] = {
+ SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+};
+
static const struct snd_kcontrol_new sbus_1_rx_port_mixer_controls[] = {
SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX,
MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer,
@@ -2090,6 +2192,51 @@
}
};
+int msm_routing_get_dolby_security_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+	/* not used while setting the manufacturer id */
+ return 0;
+}
+
+int msm_routing_put_dolby_security_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ int manufacturer_id = ucontrol->value.integer.value[0];
+ core_set_dolby_manufacturer_id(manufacturer_id);
+ return 0;
+}
+
+static const struct snd_kcontrol_new dolby_security_controls[] = {
+ SOC_SINGLE_MULTI_EXT("DS1 Security", SND_SOC_NOPM, 0,
+ 0xFFFFFFFF, 0, 1, msm_routing_get_dolby_security_control,
+ msm_routing_put_dolby_security_control),
+};
+
+static const struct snd_kcontrol_new dolby_dap_param_to_set_controls[] = {
+ SOC_SINGLE_MULTI_EXT("DS1 DAP Set Param", SND_SOC_NOPM, 0, 0xFFFFFFFF,
+ 0, 128, msm_routing_get_dolby_dap_param_to_set_control,
+ msm_routing_put_dolby_dap_param_to_set_control),
+};
+
+static const struct snd_kcontrol_new dolby_dap_param_to_get_controls[] = {
+ SOC_SINGLE_MULTI_EXT("DS1 DAP Get Param", SND_SOC_NOPM, 0, 0xFFFFFFFF,
+ 0, 128, msm_routing_get_dolby_dap_param_to_get_control,
+ msm_routing_put_dolby_dap_param_to_get_control),
+};
+
+static const struct snd_kcontrol_new dolby_dap_param_visualizer_controls[] = {
+ SOC_SINGLE_MULTI_EXT("DS1 DAP Get Visualizer", SND_SOC_NOPM, 0,
+ 0xFFFFFFFF, 0, 41, msm_routing_get_dolby_dap_param_visualizer_control,
+ msm_routing_put_dolby_dap_param_visualizer_control),
+};
+
+static const struct snd_kcontrol_new dolby_dap_param_end_point_controls[] = {
+ SOC_SINGLE_MULTI_EXT("DS1 DAP Endpoint", SND_SOC_NOPM, 0,
+ 0xFFFFFFFF, 0, 1, msm_routing_get_dolby_dap_endpoint_control,
+ msm_routing_put_dolby_dap_endpoint_control),
+};
+
static const struct snd_kcontrol_new eq_enable_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1 EQ Enable", SND_SOC_NOPM,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_eq_enable_mixer,
@@ -2331,6 +2478,7 @@
SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
@@ -2425,6 +2573,10 @@
SND_SOC_DAPM_AIF_OUT("AUX_PCM_RX", "AUX PCM Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("AUX_PCM_TX", "AUX PCM Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SEC_AUX_PCM_RX", "Sec AUX PCM Playback",
+ 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_AUX_PCM_TX", "Sec AUX PCM Capture",
+ 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("VOICE_STUB_DL", "VOICE_STUB Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("VOICE_STUB_UL", "VOICE_STUB Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("STUB_RX", "Stub Playback", 0, 0, 0, 0),
@@ -2476,10 +2628,14 @@
mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0,
mmul2_mixer_controls, ARRAY_SIZE(mmul2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia5 Mixer", SND_SOC_NOPM, 0, 0,
mmul5_mixer_controls, ARRAY_SIZE(mmul5_mixer_controls)),
SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ sec_auxpcm_rx_mixer_controls, ARRAY_SIZE(sec_auxpcm_rx_mixer_controls)),
/* incall */
SND_SOC_DAPM_MIXER("Incall_Music Audio Mixer", SND_SOC_NOPM, 0, 0,
incall_music_delivery_mixer_controls,
@@ -2511,6 +2667,10 @@
SND_SOC_NOPM, 0, 0,
aux_pcm_rx_voice_mixer_controls,
ARRAY_SIZE(aux_pcm_rx_voice_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX_Voice Mixer",
+ SND_SOC_NOPM, 0, 0,
+ sec_aux_pcm_rx_voice_mixer_controls,
+ ARRAY_SIZE(sec_aux_pcm_rx_voice_mixer_controls)),
SND_SOC_DAPM_MIXER("HDMI_RX_Voice Mixer",
SND_SOC_NOPM, 0, 0,
hdmi_rx_voice_mixer_controls,
@@ -2551,6 +2711,9 @@
SND_SOC_DAPM_MIXER("AUXPCM_RX Port Mixer",
SND_SOC_NOPM, 0, 0, auxpcm_rx_port_mixer_controls,
ARRAY_SIZE(auxpcm_rx_port_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_AUXPCM_RX Port Mixer",
+ SND_SOC_NOPM, 0, 0, sec_auxpcm_rx_port_mixer_controls,
+ ARRAY_SIZE(sec_auxpcm_rx_port_mixer_controls)),
SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Port Mixer", SND_SOC_NOPM, 0, 0,
sbus_1_rx_port_mixer_controls,
ARRAY_SIZE(sbus_1_rx_port_mixer_controls)),
@@ -2623,6 +2786,7 @@
{"MultiMedia1 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
{"MultiMedia1 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
{"MultiMedia1 Mixer", "SLIM_4_TX", "SLIMBUS_4_TX"},
+ {"MultiMedia4 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -2657,6 +2821,8 @@
{"MultiMedia1 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"MultiMedia5 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"},
+ {"MultiMedia1 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+ {"MultiMedia5 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
{"MultiMedia2 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia1 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
@@ -2691,6 +2857,7 @@
{"MM_UL1", NULL, "MultiMedia1 Mixer"},
{"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MM_UL2", NULL, "MultiMedia2 Mixer"},
+ {"MM_UL4", NULL, "MultiMedia4 Mixer"},
{"MM_UL5", NULL, "MultiMedia5 Mixer"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -2700,6 +2867,13 @@
{"AUX_PCM_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
{"AUX_PCM_RX", NULL, "AUX_PCM_RX Audio Mixer"},
+ {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
+ {"SEC_AUX_PCM_RX", NULL, "SEC_AUX_PCM_RX Audio Mixer"},
+
{"MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
@@ -2749,6 +2923,12 @@
{"AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"AUX_PCM_RX", NULL, "AUX_PCM_RX_Voice Mixer"},
+ {"SEC_AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+ {"SEC_AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"SEC_AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
+ {"SEC_AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+ {"SEC_AUX_PCM_RX", NULL, "SEC_AUX_PCM_RX_Voice Mixer"},
+
{"HDMI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"HDMI_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"HDMI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
@@ -2770,6 +2950,7 @@
{"Voice_Tx Mixer", "INTERNAL_BT_SCO_TX_Voice", "INT_BT_SCO_TX"},
{"Voice_Tx Mixer", "AFE_PCM_TX_Voice", "PCM_TX"},
{"Voice_Tx Mixer", "AUX_PCM_TX_Voice", "AUX_PCM_TX"},
+ {"Voice_Tx Mixer", "SEC_AUX_PCM_TX_Voice", "SEC_AUX_PCM_TX"},
{"CS-VOICE_UL1", NULL, "Voice_Tx Mixer"},
{"Voice2_Tx Mixer", "PRI_TX_Voice2", "PRI_I2S_TX"},
@@ -2785,6 +2966,7 @@
{"VoLTE_Tx Mixer", "INTERNAL_BT_SCO_TX_VoLTE", "INT_BT_SCO_TX"},
{"VoLTE_Tx Mixer", "AFE_PCM_TX_VoLTE", "PCM_TX"},
{"VoLTE_Tx Mixer", "AUX_PCM_TX_VoLTE", "AUX_PCM_TX"},
+ {"VoLTE_Tx Mixer", "SEC_AUX_PCM_TX_VoLTE", "SEC_AUX_PCM_TX"},
{"VoLTE_Tx Mixer", "MI2S_TX_VoLTE", "MI2S_TX"},
{"VoLTE_UL", NULL, "VoLTE_Tx Mixer"},
{"Voip_Tx Mixer", "PRI_TX_Voip", "PRI_I2S_TX"},
@@ -2793,6 +2975,7 @@
{"Voip_Tx Mixer", "INTERNAL_BT_SCO_TX_Voip", "INT_BT_SCO_TX"},
{"Voip_Tx Mixer", "AFE_PCM_TX_Voip", "PCM_TX"},
{"Voip_Tx Mixer", "AUX_PCM_TX_Voip", "AUX_PCM_TX"},
+ {"Voip_Tx Mixer", "SEC_AUX_PCM_TX_Voip", "SEC_AUX_PCM_TX"},
{"VOIP_UL", NULL, "Voip_Tx Mixer"},
{"SLIMBUS_DL_HL", "Switch", "SLIM0_DL_HL"},
@@ -2828,6 +3011,7 @@
{"SLIMBUS_0_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"SLIMBUS_0_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"SLIMBUS_0_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+ {"SLIMBUS_0_RX Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
{"SLIMBUS_0_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
{"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Port Mixer"},
{"AFE_PCM_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
@@ -2837,6 +3021,10 @@
{"AUXPCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"AUX_PCM_RX", NULL, "AUXPCM_RX Port Mixer"},
+ {"SEC_AUXPCM_RX Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+ {"SEC_AUXPCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"SEC_AUX_PCM_RX", NULL, "SEC_AUXPCM_RX Port Mixer"},
+
{"Voice Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"},
{"Voice Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
{"Voice Stub Tx Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
@@ -2892,6 +3080,7 @@
{"BE_OUT", NULL, "PCM_RX"},
{"BE_OUT", NULL, "SLIMBUS_3_RX"},
{"BE_OUT", NULL, "AUX_PCM_RX"},
+ {"BE_OUT", NULL, "SEC_AUX_PCM_RX"},
{"PRI_I2S_TX", NULL, "BE_IN"},
{"MI2S_TX", NULL, "BE_IN"},
@@ -2911,6 +3100,8 @@
{"BE_OUT", NULL, "SLIMBUS_3_RX"},
{"BE_OUT", NULL, "AUX_PCM_RX"},
{"AUX_PCM_TX", NULL, "BE_IN"},
+ {"BE_OUT", NULL, "SEC_AUX_PCM_RX"},
+ {"SEC_AUX_PCM_TX", NULL, "BE_IN"},
{"INCALL_RECORD_TX", NULL, "BE_IN"},
{"INCALL_RECORD_RX", NULL, "BE_IN"},
{"BE_OUT", NULL, "VOICE_PLAYBACK_TX"},
@@ -2941,7 +3132,7 @@
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
unsigned int be_id = rtd->dai_link->be_id;
- int i, session_type;
+ int i, session_type, path_type, topology;
struct msm_pcm_routing_bdai_data *bedai;
if (be_id >= MSM_BACKEND_DAI_MAX) {
@@ -2952,13 +3143,20 @@
bedai = &msm_bedais[be_id];
session_type = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
0 : 1);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ path_type = ADM_PATH_PLAYBACK;
+ else
+ path_type = ADM_PATH_LIVE_REC;
mutex_lock(&routing_lock);
-
+ topology = get_topology(path_type);
for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
- if (fe_dai_map[i][session_type] != INVALID_SESSION)
+ if (fe_dai_map[i][session_type] != INVALID_SESSION) {
adm_close(bedai->port_id);
srs_port_id = -1;
+ if (DOLBY_ADM_COPP_TOPOLOGY_ID == topology)
+ dolby_dap_deinit(bedai->port_id);
+ }
}
bedai->active = 0;
@@ -2974,7 +3172,7 @@
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
unsigned int be_id = rtd->dai_link->be_id;
- int i, path_type, session_type;
+ int i, path_type, session_type, port_id, topology;
struct msm_pcm_routing_bdai_data *bedai;
u32 channels;
bool playback, capture;
@@ -2996,7 +3194,7 @@
}
mutex_lock(&routing_lock);
-
+ topology = get_topology(path_type);
if (bedai->active == 1)
goto done; /* Ignore prepare if back-end already active */
@@ -3021,21 +3219,25 @@
path_type,
bedai->sample_rate,
channels,
- DEFAULT_COPP_TOPOLOGY, bedai->perf_mode,
+ topology, bedai->perf_mode,
bits_per_sample);
} else if (capture) {
adm_open(bedai->port_id,
path_type,
bedai->sample_rate,
channels,
- DEFAULT_COPP_TOPOLOGY, false,
+ topology, false,
bits_per_sample);
}
msm_pcm_routing_build_matrix(i,
fe_dai_map[i][session_type], path_type);
- srs_port_id = bedai->port_id;
+ port_id = srs_port_id = bedai->port_id;
srs_send_params(srs_port_id, 1, 0);
+ if (DOLBY_ADM_COPP_TOPOLOGY_ID == topology)
+ if (dolby_dap_init(port_id, channels) < 0)
+ pr_err("%s: Err init dolby dap\n",
+ __func__);
}
}
@@ -3130,6 +3332,27 @@
snd_soc_add_platform_controls(platform,
aanc_slim_0_rx_mux,
ARRAY_SIZE(aanc_slim_0_rx_mux));
+
+ snd_soc_add_platform_controls(platform,
+ dolby_security_controls,
+ ARRAY_SIZE(dolby_security_controls));
+
+ snd_soc_add_platform_controls(platform,
+ dolby_dap_param_to_set_controls,
+ ARRAY_SIZE(dolby_dap_param_to_set_controls));
+
+ snd_soc_add_platform_controls(platform,
+ dolby_dap_param_to_get_controls,
+ ARRAY_SIZE(dolby_dap_param_to_get_controls));
+
+ snd_soc_add_platform_controls(platform,
+ dolby_dap_param_visualizer_controls,
+ ARRAY_SIZE(dolby_dap_param_visualizer_controls));
+
+ snd_soc_add_platform_controls(platform,
+ dolby_dap_param_end_point_controls,
+ ARRAY_SIZE(dolby_dap_param_end_point_controls));
+
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index 1c1029c..4a58369 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -26,6 +26,8 @@
#define LPASS_BE_AFE_PCM_TX "RT_PROXY_DAI_002_TX"
#define LPASS_BE_AUXPCM_RX "AUX_PCM_RX"
#define LPASS_BE_AUXPCM_TX "AUX_PCM_TX"
+#define LPASS_BE_SEC_AUXPCM_RX "SEC_AUX_PCM_RX"
+#define LPASS_BE_SEC_AUXPCM_TX "SEC_AUX_PCM_TX"
#define LPASS_BE_VOICE_PLAYBACK_TX "VOICE_PLAYBACK_TX"
#define LPASS_BE_INCALL_RECORD_RX "INCALL_RECORD_TX"
#define LPASS_BE_INCALL_RECORD_TX "INCALL_RECORD_RX"
@@ -119,6 +121,8 @@
MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
MSM_BACKEND_DAI_AUDIO_I2S_RX,
+ MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_BACKEND_DAI_SEC_AUXPCM_TX,
MSM_BACKEND_DAI_MAX,
};
@@ -141,4 +145,7 @@
int compressed_set_volume(unsigned volume);
+uint32_t get_adm_rx_topology(void);
+
+uint32_t get_adm_tx_topology(void);
#endif /*_MSM_PCM_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
index 4df66d0..25bb72f 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -472,6 +472,7 @@
voc_set_tty_mode(voc_get_session_id(VOICE_SESSION_NAME), tty_mode);
voc_set_tty_mode(voc_get_session_id(VOICE2_SESSION_NAME), tty_mode);
+ voc_set_tty_mode(voc_get_session_id(VOLTE_SESSION_NAME), tty_mode);
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index b1db277..1bd3eac 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -33,6 +33,7 @@
#define RESET_COPP_ID 99
#define INVALID_COPP_ID 0xFF
+#define ADM_GET_PARAMETER_LENGTH 350
struct adm_ctl {
void *apr;
@@ -64,6 +65,8 @@
{0, 0, 0, 0, 0, 0, 0, 0}
};
+static int adm_dolby_get_parameters[ADM_GET_PARAMETER_LENGTH];
+
int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
{
struct adm_cmd_set_pp_params_inband_v5 *adm_params = NULL;
@@ -266,6 +269,134 @@
return ret;
}
+int adm_dolby_dap_send_params(int port_id, char *params, uint32_t params_length)
+{
+ struct adm_cmd_set_pp_params_v5 *adm_params = NULL;
+ int sz, rc = 0, index = afe_get_port_index(port_id);
+
+ pr_debug("%s\n", __func__);
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: invalid port idx %d portid %#x\n",
+ __func__, index, port_id);
+ return -EINVAL;
+ }
+ sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
+ adm_params = kzalloc(sz, GFP_KERNEL);
+ if (!adm_params) {
+ pr_err("%s, adm params memory alloc failed", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
+ params, params_length);
+ adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ adm_params->hdr.pkt_size = sz;
+ adm_params->hdr.src_svc = APR_SVC_ADM;
+ adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+ adm_params->hdr.src_port = port_id;
+ adm_params->hdr.dest_svc = APR_SVC_ADM;
+ adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+ adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
+ adm_params->hdr.token = port_id;
+ adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+ adm_params->payload_addr_lsw = 0;
+ adm_params->payload_addr_msw = 0;
+ adm_params->mem_map_handle = 0;
+ adm_params->payload_size = params_length;
+
+ atomic_set(&this_adm.copp_stat[index], 0);
+ rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+ if (rc < 0) {
+ pr_err("%s: Set params failed port = %#x\n",
+ __func__, port_id);
+ rc = -EINVAL;
+ goto dolby_dap_send_param_return;
+ }
+ /* Wait for the callback */
+ rc = wait_event_timeout(this_adm.wait[index],
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!rc) {
+ pr_err("%s: Set params timed out port = %#x\n",
+ __func__, port_id);
+ rc = -EINVAL;
+ goto dolby_dap_send_param_return;
+ }
+ rc = 0;
+dolby_dap_send_param_return:
+ kfree(adm_params);
+ return rc;
+}
+
+int adm_dolby_dap_get_params(int port_id, uint32_t module_id, uint32_t param_id,
+ uint32_t params_length, char *params)
+{
+ struct adm_cmd_get_pp_params_v5 *adm_params = NULL;
+ int sz, rc = 0, i = 0, index = afe_get_port_index(port_id);
+ int *params_data = (int *)params;
+
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: invalid port idx %d portid %#x\n",
+ __func__, index, port_id);
+ return -EINVAL;
+ }
+ sz = sizeof(struct adm_cmd_get_pp_params_v5) + params_length;
+ adm_params = kzalloc(sz, GFP_KERNEL);
+ if (!adm_params) {
+ pr_err("%s, adm params memory alloc failed", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_get_pp_params_v5)),
+ params, params_length);
+ adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ adm_params->hdr.pkt_size = sz;
+ adm_params->hdr.src_svc = APR_SVC_ADM;
+ adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+ adm_params->hdr.src_port = port_id;
+ adm_params->hdr.dest_svc = APR_SVC_ADM;
+ adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+ adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
+ adm_params->hdr.token = port_id;
+ adm_params->hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
+ adm_params->data_payload_addr_lsw = 0;
+ adm_params->data_payload_addr_msw = 0;
+ adm_params->mem_map_handle = 0;
+ adm_params->module_id = module_id;
+ adm_params->param_id = param_id;
+ adm_params->param_max_size = params_length;
+ adm_params->reserved = 0;
+
+ atomic_set(&this_adm.copp_stat[index], 0);
+ rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+ if (rc < 0) {
+ pr_err("%s: Failed to Get DOLBY Params on port %d\n", __func__,
+ port_id);
+ rc = -EINVAL;
+ goto dolby_dap_get_param_return;
+ }
+ /* Wait for the callback with copp id */
+ rc = wait_event_timeout(this_adm.wait[index],
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!rc) {
+ pr_err("%s: DOLBY get params timed out port = %d\n", __func__,
+ port_id);
+ rc = -EINVAL;
+ goto dolby_dap_get_param_return;
+ }
+ if (params_data) {
+ for (i = 0; i < adm_dolby_get_parameters[0]; i++)
+ params_data[i] = adm_dolby_get_parameters[1+i];
+ }
+ rc = 0;
+dolby_dap_get_param_return:
+ kfree(adm_params);
+ return rc;
+}
+
static void adm_callback_debug_print(struct apr_client_data *data)
{
uint32_t *payload;
@@ -428,6 +559,13 @@
__func__, payload[0]);
rtac_make_adm_callback(payload,
data->payload_size);
+ adm_dolby_get_parameters[0] = payload[3];
+ pr_debug("GET_PP PARAM:received parameter length: %x\n",
+ adm_dolby_get_parameters[0]);
+ for (i = 0; i < payload[3]; i++)
+ adm_dolby_get_parameters[1+i] = payload[4+i];
+ atomic_set(&this_adm.copp_stat[index], 1);
+ wake_up(&this_adm.wait[index]);
break;
case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS:
pr_debug("%s: ADM_CMDRSP_SHARED_MEM_MAP_REGIONS\n",
@@ -807,20 +945,10 @@
open.endpoint_id_1 = tmp_port;
open.endpoint_id_2 = 0xFFFF;
- /* convert path to acdb path */
- if (path == ADM_PATH_PLAYBACK)
- open.topology_id = get_adm_rx_topology();
- else {
- open.topology_id = get_adm_tx_topology();
- if ((open.topology_id ==
- VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
- (open.topology_id ==
- VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
+ open.topology_id = topology;
+ if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
+ (open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
rate = 16000;
- }
-
- if (open.topology_id == 0)
- open.topology_id = topology;
open.dev_num_channel = channel_mode & 0x00FF;
open.bit_width = bits_per_sample;
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index fed0d81..2b0d155 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -17,7 +17,7 @@
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
-#include <linux/msm_ion.h>
+#include <linux/msm_audio_ion.h>
#include <sound/apr_audio-v2.h>
#include <sound/q6afe-v2.h>
#include <sound/q6audio-v2.h>
@@ -56,6 +56,10 @@
#define TIMEOUT_MS 1000
#define Q6AFE_MAX_VOLUME 0x3FFF
+static int pcm_afe_instance[2];
+static int proxy_afe_instance[2];
+bool afe_close_done[2] = {true, true};
+
#define SIZEOF_CFG_CMD(y) \
(sizeof(struct apr_hdr) + sizeof(u16) + (sizeof(struct y)))
@@ -76,6 +80,8 @@
static int32_t afe_callback(struct apr_client_data *data, void *priv)
{
+ int i;
+
if (!data) {
pr_err("%s: Invalid param data\n", __func__);
return -EINVAL;
@@ -83,6 +89,12 @@
if (data->opcode == RESET_EVENTS) {
pr_debug("q6afe: reset event = %d %d apr[%p]\n",
data->reset_event, data->reset_proc, this_afe.apr);
+
+ for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
+ this_afe.afe_cal_addr[i].cal_paddr = 0;
+ this_afe.afe_cal_addr[i].cal_size = 0;
+ }
+
if (this_afe.apr) {
apr_reset(this_afe.apr);
atomic_set(&this_afe.state, 0);
@@ -224,6 +236,7 @@
case AFE_PORT_ID_SECONDARY_MI2S_RX:
case AFE_PORT_ID_TERTIARY_MI2S_RX:
case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
ret = MSM_AFE_PORT_TYPE_RX;
break;
@@ -247,6 +260,7 @@
case AFE_PORT_ID_SECONDARY_MI2S_TX:
case AFE_PORT_ID_TERTIARY_MI2S_TX:
case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
ret = MSM_AFE_PORT_TYPE_TX;
break;
@@ -295,6 +309,8 @@
break;
case PCM_RX:
case PCM_TX:
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
default:
ret_size = SIZEOF_CFG_CMD(afe_param_id_pcm_cfg);
break;
@@ -939,8 +955,8 @@
int i;
i = port_id - SLIMBUS_0_RX;
- if (i < 0 || i > ARRAY_SIZE(afe_ports_mad_type)) {
- pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+ if (i < 0 || i >= ARRAY_SIZE(afe_ports_mad_type)) {
+ pr_debug("%s: Non Slimbus port_id 0x%x\n", __func__, port_id);
return MAD_HW_NONE;
}
return (enum afe_mad_type) atomic_read(&afe_ports_mad_type[i]);
@@ -1057,11 +1073,30 @@
}
if ((port_id == RT_PROXY_DAI_001_RX) ||
- (port_id == RT_PROXY_DAI_002_TX))
- return 0;
- if ((port_id == RT_PROXY_DAI_002_RX) ||
- (port_id == RT_PROXY_DAI_001_TX))
+ (port_id == RT_PROXY_DAI_002_TX)) {
+ pr_debug("%s: before incrementing pcm_afe_instance %d"\
+ " port_id %d\n", __func__,
+ pcm_afe_instance[port_id & 0x1], port_id);
port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ pcm_afe_instance[port_id & 0x1]++;
+ return 0;
+ }
+ if ((port_id == RT_PROXY_DAI_002_RX) ||
+ (port_id == RT_PROXY_DAI_001_TX)) {
+ pr_debug("%s: before incrementing proxy_afe_instance %d"\
+ " port_id %d\n", __func__,
+ proxy_afe_instance[port_id & 0x1], port_id);
+
+ if (!afe_close_done[port_id & 0x1]) {
+ /*close pcm dai corresponding to the proxy dai*/
+ afe_close(port_id - 0x10);
+ pcm_afe_instance[port_id & 0x1]++;
+ pr_debug("%s: reconfigure afe port again\n", __func__);
+ }
+ proxy_afe_instance[port_id & 0x1]++;
+ afe_close_done[port_id & 0x1] = false;
+ port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ }
pr_debug("%s: port id: %#x\n", __func__, port_id);
@@ -1120,7 +1155,9 @@
break;
case PCM_RX:
case PCM_TX:
- cfg_type = AFE_PARAM_ID_HDMI_CONFIG;
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
+ cfg_type = AFE_PARAM_ID_PCM_CONFIG;
break;
case SECONDARY_I2S_RX:
case SECONDARY_I2S_TX:
@@ -1215,6 +1252,10 @@
case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX;
case PCM_RX: return IDX_PCM_RX;
case PCM_TX: return IDX_PCM_TX;
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
+ return IDX_AFE_PORT_ID_SECONDARY_PCM_RX;
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
+ return IDX_AFE_PORT_ID_SECONDARY_PCM_TX;
case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX;
case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX;
case MI2S_RX: return IDX_MI2S_RX;
@@ -1310,6 +1351,8 @@
break;
case PCM_RX:
case PCM_TX:
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
cfg_type = AFE_PARAM_ID_PCM_CONFIG;
break;
case SECONDARY_I2S_RX:
@@ -1707,35 +1750,13 @@
ac->port[dir].buf = buf;
- buf[0].client = msm_ion_client_create(UINT_MAX, "audio_client");
- if (IS_ERR_OR_NULL((void *)buf[0].client)) {
- pr_err("%s: ION create client for AUDIO failed\n", __func__);
- goto fail;
- }
- buf[0].handle = ion_alloc(buf[0].client, bufsz * bufcnt, SZ_4K,
- (0x1 << ION_AUDIO_HEAP_ID), 0);
- if (IS_ERR_OR_NULL((void *) buf[0].handle)) {
- pr_err("%s: ION memory allocation for AUDIO failed\n",
- __func__);
- goto fail;
- }
-
- rc = ion_phys(buf[0].client, buf[0].handle,
- (ion_phys_addr_t *)&buf[0].phys, (size_t *)&len);
+ rc = msm_audio_ion_alloc("audio_client", &buf[0].client,
+ &buf[0].handle, bufsz*bufcnt,
+ (ion_phys_addr_t *)&buf[0].phys, (size_t *)&len,
+ &buf[0].data);
if (rc) {
- pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ pr_err("%s: audio ION alloc failed, rc = %d\n",
__func__, rc);
- goto fail;
- }
-
- buf[0].data = ion_map_kernel(buf[0].client, buf[0].handle);
- if (IS_ERR_OR_NULL((void *) buf[0].data)) {
- pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
- goto fail;
- }
- memset((void *)buf[0].data, 0, (bufsz * bufcnt));
- if (!buf[0].data) {
- pr_err("%s:invalid vaddr, iomap failed\n", __func__);
mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -1944,9 +1965,7 @@
cnt = port->max_buf_cnt - 1;
if (port->buf[0].data) {
- ion_unmap_kernel(port->buf[0].client, port->buf[0].handle);
- ion_free(port->buf[0].client, port->buf[0].handle);
- ion_client_destroy(port->buf[0].client);
+ msm_audio_ion_free(port->buf[0].client, port->buf[0].handle);
pr_debug("%s:data[%p]phys[%p][%p] , client[%p] handle[%p]\n",
__func__,
(void *)port->buf[0].data,
@@ -2521,6 +2540,8 @@
case PRIMARY_I2S_TX:
case PCM_RX:
case PCM_TX:
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
case SECONDARY_I2S_RX:
case SECONDARY_I2S_TX:
case MI2S_RX:
@@ -2628,6 +2649,31 @@
goto fail_cmd;
}
pr_debug("%s: port_id=%d\n", __func__, port_id);
+ if ((port_id == RT_PROXY_DAI_001_RX) ||
+ (port_id == RT_PROXY_DAI_002_TX)) {
+ pr_debug("%s: before decrementing pcm_afe_instance %d\n",
+ __func__, pcm_afe_instance[port_id & 0x1]);
+ port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ pcm_afe_instance[port_id & 0x1]--;
+ if (!(pcm_afe_instance[port_id & 0x1] == 0 &&
+ proxy_afe_instance[port_id & 0x1] == 0))
+ return 0;
+ else
+ afe_close_done[port_id & 0x1] = true;
+ }
+
+ if ((port_id == RT_PROXY_DAI_002_RX) ||
+ (port_id == RT_PROXY_DAI_001_TX)) {
+ pr_debug("%s: before decrementing proxy_afe_instance %d\n",
+ __func__, proxy_afe_instance[port_id & 0x1]);
+ port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ proxy_afe_instance[port_id & 0x1]--;
+ if (!(pcm_afe_instance[port_id & 0x1] == 0 &&
+ proxy_afe_instance[port_id & 0x1] == 0))
+ return 0;
+ else
+ afe_close_done[port_id & 0x1] = true;
+ }
port_id = q6audio_convert_virtual_to_portid(port_id);
index = q6audio_get_port_index(port_id);
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 3dbe49a..59d4de2 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -29,6 +29,7 @@
#include <linux/debugfs.h>
#include <linux/time.h>
#include <linux/atomic.h>
+#include <linux/msm_audio_ion.h>
#include <asm/ioctls.h>
@@ -455,11 +456,8 @@
while (cnt >= 0) {
if (port->buf[cnt].data) {
- ion_unmap_kernel(port->buf[cnt].client,
- port->buf[cnt].handle);
- ion_free(port->buf[cnt].client,
- port->buf[cnt].handle);
- ion_client_destroy(port->buf[cnt].client);
+ msm_audio_ion_free(port->buf[cnt].client,
+ port->buf[cnt].handle);
port->buf[cnt].data = NULL;
port->buf[cnt].phys = 0;
--(port->max_buf_cnt);
@@ -496,9 +494,7 @@
}
if (port->buf[0].data) {
- ion_unmap_kernel(port->buf[0].client, port->buf[0].handle);
- ion_free(port->buf[0].client, port->buf[0].handle);
- ion_client_destroy(port->buf[0].client);
+ msm_audio_ion_free(port->buf[0].client, port->buf[0].handle);
pr_debug("%s:data[%p]phys[%p][%p] , client[%p] handle[%p]\n",
__func__,
(void *)port->buf[0].data,
@@ -723,44 +719,19 @@
while (cnt < bufcnt) {
if (bufsz > 0) {
if (!buf[cnt].data) {
- buf[cnt].client = msm_ion_client_create
- (UINT_MAX, "audio_client");
- if (IS_ERR_OR_NULL((void *)
- buf[cnt].client)) {
- pr_err("%s: ION create client for AUDIO failed\n",
- __func__);
- goto fail;
- }
- buf[cnt].handle = ion_alloc
- (buf[cnt].client, bufsz, SZ_4K,
- (0x1 << ION_AUDIO_HEAP_ID), 0);
- if (IS_ERR_OR_NULL((void *)
- buf[cnt].handle)) {
- pr_err("%s: ION memory allocation for AUDIO failed\n",
- __func__);
- goto fail;
- }
-
- rc = ion_phys(buf[cnt].client,
- buf[cnt].handle,
- (ion_phys_addr_t *)
- &buf[cnt].phys,
- (size_t *)&len);
+ msm_audio_ion_alloc("audio_client",
+ &buf[cnt].client, &buf[cnt].handle,
+ bufsz,
+ (ion_phys_addr_t *)&buf[cnt].phys,
+ (size_t *)&len,
+ &buf[cnt].data);
if (rc) {
pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
__func__, rc);
- goto fail;
+ mutex_unlock(&ac->cmd_lock);
+ goto fail;
}
- buf[cnt].data = ion_map_kernel
- (buf[cnt].client, buf[cnt].handle);
- if (IS_ERR_OR_NULL((void *)
- buf[cnt].data)) {
- pr_err("%s: ION memory mapping for AUDIO failed\n",
- __func__);
- goto fail;
- }
- memset((void *)buf[cnt].data, 0, bufsz);
buf[cnt].used = 1;
buf[cnt].size = bufsz;
buf[cnt].actual_size = bufsz;
@@ -823,35 +794,13 @@
ac->port[dir].buf = buf;
- buf[0].client = msm_ion_client_create(UINT_MAX, "audio_client");
- if (IS_ERR_OR_NULL((void *)buf[0].client)) {
- pr_err("%s: ION create client for AUDIO failed\n", __func__);
- goto fail;
- }
- buf[0].handle = ion_alloc(buf[0].client, bufsz * bufcnt, SZ_4K,
- (0x1 << ION_AUDIO_HEAP_ID), 0);
- if (IS_ERR_OR_NULL((void *) buf[0].handle)) {
- pr_err("%s: ION memory allocation for AUDIO failed\n",
- __func__);
- goto fail;
- }
-
- rc = ion_phys(buf[0].client, buf[0].handle,
- (ion_phys_addr_t *)&buf[0].phys, (size_t *)&len);
+ rc = msm_audio_ion_alloc("audio_client", &buf[0].client, &buf[0].handle,
+ bufsz*bufcnt,
+ (ion_phys_addr_t *)&buf[0].phys, (size_t *)&len,
+ &buf[0].data);
if (rc) {
- pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ pr_err("%s: Audio ION alloc is failed, rc = %d\n",
__func__, rc);
- goto fail;
- }
-
- buf[0].data = ion_map_kernel(buf[0].client, buf[0].handle);
- if (IS_ERR_OR_NULL((void *) buf[0].data)) {
- pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
- goto fail;
- }
- memset((void *)buf[0].data, 0, (bufsz * bufcnt));
- if (!buf[0].data) {
- pr_err("%s:invalid vaddr, iomap failed\n", __func__);
mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -917,7 +866,6 @@
data->reset_proc,
this_mmap.apr);
apr_reset(this_mmap.apr);
- atomic_set(&this_mmap.ref_cnt, 0);
this_mmap.apr = NULL;
reset_custom_topology_flags();
set_custom_topology = 1;
@@ -1569,6 +1517,12 @@
case FORMAT_MP3:
open.dec_fmt_id = ASM_MEDIA_FMT_MP3;
break;
+ case FORMAT_AC3:
+ open.dec_fmt_id = ASM_MEDIA_FMT_EAC3_DEC;
+ break;
+ case FORMAT_EAC3:
+ open.dec_fmt_id = ASM_MEDIA_FMT_EAC3_DEC;
+ break;
default:
pr_err("%s: Invalid format[%d]\n", __func__, format);
goto fail_cmd;
@@ -2570,6 +2524,40 @@
return -EINVAL;
}
+int q6asm_ds1_set_endp_params(struct audio_client *ac,
+ int param_id, int param_value)
+{
+ struct asm_dec_ddp_endp_param_v2 ddp_cfg;
+ int rc = 0;
+
+ pr_debug("%s: session[%d]param_id[%d]param_value[%d]", __func__,
+ ac->session, param_id, param_value);
+ q6asm_add_hdr(ac, &ddp_cfg.hdr, sizeof(ddp_cfg), TRUE);
+ ddp_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ ddp_cfg.encdec.param_id = param_id;
+ ddp_cfg.encdec.param_size = sizeof(struct asm_dec_ddp_endp_param_v2) -
+ (sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_encdec_param));
+ ddp_cfg.endp_param_value = param_value;
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &ddp_cfg);
+ if (rc < 0) {
+ pr_err("%s:Command opcode[0x%x] failed\n",
+ __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout opcode[0x%x]\n", __func__,
+ ddp_cfg.hdr.opcode);
+ rc = -ETIMEDOUT;
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return rc;
+}
+
int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add, int dir,
uint32_t bufsz, uint32_t bufcnt)
{
@@ -2877,6 +2865,7 @@
if (buf_node->buf_addr_lsw == buf_add) {
list_del(&buf_node->list);
kfree(buf_node);
+ break;
}
}
rc = 0;
diff --git a/sound/soc/msm/qdsp6v2/q6audio-v2.c b/sound/soc/msm/qdsp6v2/q6audio-v2.c
index d3d335d..faf5f35 100644
--- a/sound/soc/msm/qdsp6v2/q6audio-v2.c
+++ b/sound/soc/msm/qdsp6v2/q6audio-v2.c
@@ -26,6 +26,10 @@
case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX;
case PCM_RX: return IDX_PCM_RX;
case PCM_TX: return IDX_PCM_TX;
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
+ return IDX_AFE_PORT_ID_SECONDARY_PCM_RX;
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
+ return IDX_AFE_PORT_ID_SECONDARY_PCM_TX;
case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX;
case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX;
case MI2S_RX: return IDX_MI2S_RX;
@@ -74,6 +78,10 @@
case PRIMARY_I2S_TX: return AFE_PORT_ID_PRIMARY_MI2S_TX;
case PCM_RX: return AFE_PORT_ID_PRIMARY_PCM_RX;
case PCM_TX: return AFE_PORT_ID_PRIMARY_PCM_TX;
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
+ return AFE_PORT_ID_SECONDARY_PCM_RX;
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
+ return AFE_PORT_ID_SECONDARY_PCM_TX;
case SECONDARY_I2S_RX: return AFE_PORT_ID_SECONDARY_MI2S_RX;
case SECONDARY_I2S_TX: return AFE_PORT_ID_SECONDARY_MI2S_TX;
case MI2S_RX: return AFE_PORT_ID_PRIMARY_MI2S_RX;
@@ -146,6 +154,8 @@
case PRIMARY_I2S_TX:
case PCM_RX:
case PCM_TX:
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
case SECONDARY_I2S_RX:
case SECONDARY_I2S_TX:
case MI2S_RX:
@@ -171,6 +181,8 @@
case PRIMARY_I2S_TX:
case PCM_RX:
case PCM_TX:
+ case AFE_PORT_ID_SECONDARY_PCM_RX:
+ case AFE_PORT_ID_SECONDARY_PCM_TX:
case SECONDARY_I2S_RX:
case SECONDARY_I2S_TX:
case MI2S_RX:
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index 557b326..42cbcd1 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,7 +32,7 @@
struct avcs_cmd_rsp_get_low_power_segments_info_t *lp_ocm_payload;
};
-struct q6core_str q6core_lcl;
+static struct q6core_str q6core_lcl;
static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
{
@@ -40,7 +40,7 @@
uint32_t nseg;
int i, j;
- pr_info("core msg: payload len = %u, apr resp opcode = 0x%X\n",
+ pr_debug("core msg: payload len = %u, apr resp opcode = 0x%X\n",
data->payload_size, data->opcode);
switch (data->opcode) {
@@ -121,6 +121,33 @@
pr_err("%s: Unable to register CORE\n", __func__);
}
+uint32_t core_set_dolby_manufacturer_id(int manufacturer_id)
+{
+ struct adsp_dolby_manufacturer_id payload;
+ int rc = 0;
+ pr_debug("%s manufacturer_id :%d\n", __func__, manufacturer_id);
+ ocm_core_open();
+ if (q6core_lcl.core_handle_q) {
+ payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ payload.hdr.pkt_size =
+ sizeof(struct adsp_dolby_manufacturer_id);
+ payload.hdr.src_port = 0;
+ payload.hdr.dest_port = 0;
+ payload.hdr.token = 0;
+ payload.hdr.opcode = ADSP_CMD_SET_DOLBY_MANUFACTURER_ID;
+ payload.manufacturer_id = manufacturer_id;
+ pr_debug("Send Dolby security opcode=%x manufacturer ID = %d\n",
+ payload.hdr.opcode, payload.manufacturer_id);
+ rc = apr_send_pkt(q6core_lcl.core_handle_q,
+ (uint32_t *)&payload);
+ if (rc < 0)
+ pr_err("%s: SET_DOLBY_MANUFACTURER_ID failed op[0x%x]rc[%d]\n",
+ __func__, payload.hdr.opcode, rc);
+ }
+ return rc;
+}
+
int core_get_low_power_segments(
struct avcs_cmd_rsp_get_low_power_segments_info_t **lp_memseg)
{
diff --git a/sound/soc/msm/qdsp6v2/q6core.h b/sound/soc/msm/qdsp6v2/q6core.h
index ff611d5..39bf4ab 100644
--- a/sound/soc/msm/qdsp6v2/q6core.h
+++ b/sound/soc/msm/qdsp6v2/q6core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -90,4 +90,13 @@
int core_get_low_power_segments(
struct avcs_cmd_rsp_get_low_power_segments_info_t **);
+#define ADSP_CMD_SET_DOLBY_MANUFACTURER_ID 0x00012918
+
+struct adsp_dolby_manufacturer_id {
+ struct apr_hdr hdr;
+ int manufacturer_id;
+};
+
+uint32_t core_set_dolby_manufacturer_id(int manufacturer_id);
+
#endif /* __Q6CORE_H__ */
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 754a4fa..e9d0a7e 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -16,6 +16,7 @@
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/mutex.h>
+#include <linux/msm_audio_ion.h>
#include <asm/mach-types.h>
#include <mach/qdsp6v2/rtac.h>
@@ -2532,14 +2533,8 @@
mvm_set_voice_timing.hdr.opcode = VSS_ICOMMON_CMD_SET_VOICE_TIMING;
mvm_set_voice_timing.timing.mode = 0;
mvm_set_voice_timing.timing.enc_offset = 8000;
- if ((machine_is_apq8064_sim()) || (machine_is_msm8974_sim())) {
- pr_debug("%s: Machine is MSM8974 sim\n", __func__);
- mvm_set_voice_timing.timing.dec_req_offset = 0;
- mvm_set_voice_timing.timing.dec_offset = 18000;
- } else {
- mvm_set_voice_timing.timing.dec_req_offset = 3300;
- mvm_set_voice_timing.timing.dec_offset = 8300;
- }
+ mvm_set_voice_timing.timing.dec_req_offset = 3300;
+ mvm_set_voice_timing.timing.dec_offset = 8300;
v->mvm_state = CMD_STATUS_FAIL;
@@ -2968,6 +2963,7 @@
cvp_mute_cmd.hdr.opcode = VSS_IVOLUME_CMD_MUTE_V2;
cvp_mute_cmd.cvp_set_mute.direction = VSS_IVOLUME_DIRECTION_RX;
cvp_mute_cmd.cvp_set_mute.mute_flag = v->dev_rx.mute;
+ cvp_mute_cmd.cvp_set_mute.ramp_duration_ms = DEFAULT_MUTE_RAMP_DURATION;
v->cvp_state = CMD_STATUS_FAIL;
ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_mute_cmd);
@@ -4121,6 +4117,11 @@
if (v != NULL)
v->voc_state = VOC_ERROR;
+ session_id = voc_get_session_id(VOICE2_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
session_id = voc_get_session_id(VOLTE_SESSION_NAME);
v = voice_get_session(session_id);
if (v != NULL)
@@ -4255,6 +4256,11 @@
if (v != NULL)
v->voc_state = VOC_ERROR;
+ session_id = voc_get_session_id(VOICE2_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
session_id = voc_get_session_id(VOLTE_SESSION_NAME);
v = voice_get_session(session_id);
if (v != NULL)
@@ -4520,6 +4526,11 @@
if (v != NULL)
v->voc_state = VOC_ERROR;
+ session_id = voc_get_session_id(VOICE2_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
session_id = voc_get_session_id(VOLTE_SESSION_NAME);
v = voice_get_session(session_id);
if (v != NULL)
@@ -4639,35 +4650,16 @@
pr_err("%s: v is NULL\n", __func__);
return -EINVAL;
}
- v->shmem_info.sh_buf.client = msm_ion_client_create(UINT_MAX,
- "voip_client");
- if (IS_ERR_OR_NULL((void *)v->shmem_info.sh_buf.client)) {
- pr_err("%s: ION create client failed\n", __func__);
- goto err;
- }
- v->shmem_info.sh_buf.handle = ion_alloc(v->shmem_info.sh_buf.client,
- bufsz * bufcnt, SZ_4K,
- (0x1 << ION_AUDIO_HEAP_ID), 0);
- if (IS_ERR_OR_NULL((void *)v->shmem_info.sh_buf.handle)) {
- pr_err("%s: ION memory allocation failed\n",
- __func__);
- goto err_ion_client;
- }
-
- rc = ion_phys(v->shmem_info.sh_buf.client, v->shmem_info.sh_buf.handle,
- (ion_phys_addr_t *)&phys, (size_t *)&len);
+ rc = msm_audio_ion_alloc("voip_client", &(v->shmem_info.sh_buf.client),
+ &(v->shmem_info.sh_buf.handle),
+ bufsz*bufcnt,
+ (ion_phys_addr_t *)&phys, (size_t *)&len,
+ &mem_addr);
if (rc) {
- pr_err("%s: ION Get Physical failed, rc = %d\n",
+ pr_err("%s: audio ION alloc failed, rc = %d\n",
__func__, rc);
- goto err_ion_handle;
- }
-
- mem_addr = ion_map_kernel(v->shmem_info.sh_buf.client,
- v->shmem_info.sh_buf.handle);
- if (IS_ERR_OR_NULL(mem_addr)) {
- pr_err("%s: ION memory mapping failed\n", __func__);
- goto err_ion_handle;
+ return -EINVAL;
}
while (cnt < bufcnt) {
@@ -4691,13 +4683,6 @@
memset((void *)v->shmem_info.sh_buf.buf[0].data, 0, (bufsz * bufcnt));
return 0;
-
-err_ion_handle:
- ion_free(v->shmem_info.sh_buf.client, v->shmem_info.sh_buf.handle);
-err_ion_client:
- ion_client_destroy(v->shmem_info.sh_buf.client);
-err:
- return -EINVAL;
}
static int voice_alloc_oob_mem_table(void)
@@ -4711,41 +4696,19 @@
pr_err("%s: v is NULL\n", __func__);
return -EINVAL;
}
- v->shmem_info.memtbl.client = msm_ion_client_create(UINT_MAX,
- "voip_client");
- if (IS_ERR_OR_NULL((void *)v->shmem_info.memtbl.client)) {
- pr_err("%s: ION create client for memtbl failed\n", __func__);
- goto err;
- }
- v->shmem_info.memtbl.handle = ion_alloc(v->shmem_info.memtbl.client,
- sizeof(struct vss_imemory_table_t), SZ_4K,
- (0x1 << ION_AUDIO_HEAP_ID), 0);
- if (IS_ERR_OR_NULL((void *) v->shmem_info.memtbl.handle)) {
- pr_err("%s: ION memory allocation for memtbl failed\n",
- __func__);
- goto err_ion_client;
- }
-
- rc = ion_phys(v->shmem_info.memtbl.client, v->shmem_info.memtbl.handle,
- (ion_phys_addr_t *)&v->shmem_info.memtbl.phys, (size_t *)&len);
+ rc = msm_audio_ion_alloc("voip_client", &(v->shmem_info.memtbl.client),
+ &(v->shmem_info.memtbl.handle),
+ sizeof(struct vss_imemory_table_t),
+ (ion_phys_addr_t *)&v->shmem_info.memtbl.phys,
+ (size_t *)&len,
+ &(v->shmem_info.memtbl.data));
if (rc) {
- pr_err("%s: ION Get Physical for memtbl failed, rc = %d\n",
+ pr_err("%s: audio ION alloc failed, rc = %d\n",
__func__, rc);
- goto err_ion_handle;
+ return -EINVAL;
}
- v->shmem_info.memtbl.data = ion_map_kernel(v->shmem_info.memtbl.client,
- v->shmem_info.memtbl.handle);
- if (IS_ERR_OR_NULL((void *)v->shmem_info.memtbl.data)) {
- pr_err("%s: ION memory mapping for memtbl failed\n",
- __func__);
- goto err_ion_handle;
- }
-
- memset(v->shmem_info.memtbl.data, 0,
- sizeof(struct vss_imemory_table_t));
-
v->shmem_info.memtbl.size = sizeof(struct vss_imemory_table_t);
pr_debug("%s data[%p]phys[%p][%p]\n", __func__,
@@ -4755,12 +4718,6 @@
return 0;
-err_ion_handle:
- ion_free(v->shmem_info.memtbl.client, v->shmem_info.memtbl.handle);
-err_ion_client:
- ion_client_destroy(v->shmem_info.memtbl.client);
-err:
- return -EINVAL;
}
static int voice_alloc_cal_mem_map_table(void)
@@ -4768,67 +4725,25 @@
int ret = 0;
int len;
- common.cal_mem_map_table.client = msm_ion_client_create(UINT_MAX,
- "voc_client");
-
- if (IS_ERR_OR_NULL((void *) common.cal_mem_map_table.client)) {
- pr_err("%s: ION create client for cal mem map table failed\n",
- __func__);
-
- goto err;
- }
-
- common.cal_mem_map_table.handle =
- ion_alloc(common.cal_mem_map_table.client,
- sizeof(struct vss_imemory_table_t),
- SZ_4K, (0x1 << ION_AUDIO_HEAP_ID), 0);
- if (IS_ERR_OR_NULL((void *) common.cal_mem_map_table.handle)) {
- pr_err("%s: ION memory alloc for cal mem map table failed\n",
- __func__);
-
- goto err_ion_client;
- }
-
- ret = ion_phys(common.cal_mem_map_table.client,
- common.cal_mem_map_table.handle,
- (ion_phys_addr_t *) &common.cal_mem_map_table.phys,
- (size_t *) &len);
+ ret = msm_audio_ion_alloc("voip_client",
+ &(common.cal_mem_map_table.client),
+ &(common.cal_mem_map_table.handle),
+ sizeof(struct vss_imemory_table_t),
+ (ion_phys_addr_t *)&common.cal_mem_map_table.phys,
+ (size_t *) &len,
+ &(common.cal_mem_map_table.data));
if (ret) {
- pr_err("%s: Phy addr for cal mem map table failed %d\n",
- __func__, ret);
-
- goto err_ion_handle;
+ pr_err("%s: audio ION alloc failed, rc = %d\n",
+ __func__, ret);
+ return -EINVAL;
}
- common.cal_mem_map_table.data =
- ion_map_kernel(common.cal_mem_map_table.client,
- common.cal_mem_map_table.handle);
- if (IS_ERR_OR_NULL((void *) common.cal_mem_map_table.data)) {
- pr_err("%s: Virtual addr for cal memory map table failed\n",
- __func__);
-
- goto err_ion_handle;
- }
-
- memset(common.cal_mem_map_table.data, 0,
- sizeof(struct vss_imemory_table_t));
-
common.cal_mem_map_table.size = sizeof(struct vss_imemory_table_t);
-
pr_debug("%s: data 0x%x phys 0x%x\n", __func__,
(unsigned int) common.cal_mem_map_table.data,
common.cal_mem_map_table.phys);
return 0;
-
-err_ion_handle:
- ion_free(common.cal_mem_map_table.client,
- common.cal_mem_map_table.handle);
-err_ion_client:
- ion_client_destroy(common.cal_mem_map_table.client);
- memset(&common.cal_mem_map_table, 0, sizeof(common.cal_mem_map_table));
-err:
- return -EINVAL;
}
static int __init voice_init(void)
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 40595bf..519f325 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1767,7 +1767,12 @@
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+ !((be->dpcm[stream].state == SND_SOC_DPCM_STATE_START) &&
+ ((fe->dpcm[stream].state != SND_SOC_DPCM_STATE_START) &&
+ (fe->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
+ (fe->dpcm[stream].state !=
+ SND_SOC_DPCM_STATE_SUSPEND))))
continue;
dev_dbg(be->dev, "dpcm: hw_free BE %s\n",