Merge "input: atmel_mxt_ts: Disabling secure touch for power events."
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index eb3986e..7851d53 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -6,7 +6,7 @@
The timer is attached to a GIC to deliver its two per-processor
interrupts (one for the secure mode, one for the non-secure mode).
-** Timer node properties:
+** CP15 Timer node properties:
- compatible : Should be "arm,armv7-timer"
@@ -21,3 +21,52 @@
interrupts = <1 13 0xf08 1 14 0xf08>;
clock-frequency = <100000000>;
};
+
+** Memory mapped timer node properties:
+
+- compatible : Should at least contain "arm,armv7-timer-mem".
+
+- clock-frequency : The frequency of the main counter, in Hz. Optional.
+
+- reg : The control frame base address.
+
+Note that #address-cells, #size-cells, and ranges shall be present to ensure
+the CPU can address the frame's registers.
+
+Each timer node has up to 8 frame sub-nodes with the following properties:
+
+- frame-number: 0 to 7.
+
+- interrupts : Interrupt list for physical and virtual timers in that order.
+ The virtual timer interrupt is optional.
+
+- reg : The first and second view base addresses in that order. The second view
+ base address is optional.
+
+- status : "disabled" indicates the frame is not available for use. Optional.
+
+Example:
+
+ timer@f0000000 {
+ compatible = "arm,armv7-timer-mem";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xf0000000 0x1000>;
+ clock-frequency = <50000000>;
+
+ frame0@f0001000 {
+			frame-number = <0>;
+ interrupts = <0 13 0x8>,
+ <0 14 0x8>;
+ reg = <0xf0001000 0x1000>,
+ <0xf0002000 0x1000>;
+ };
+
+ frame1@f0003000 {
+			frame-number = <1>;
+ interrupts = <0 15 0x8>;
+ reg = <0xf0003000 0x1000>;
+ status = "disabled";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
index 4d441ba..6b2f962 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
@@ -87,6 +87,10 @@
(In Bytes).
qcom,prio-rd: Read priority for a BIMC bus master (Can be 0/1/2)
qcom,prio-wr: Write priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio0: Priority low signal for a NoC bus master
+ (Can be 0/1/2).
+qcom,prio1: Priority high signal for a NoC bus master
+		(Can be 0/1/2).
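+
+For illustration only (the values are placeholders and the surrounding master
+description follows the layout documented earlier in this file), a NoC master
+that needs both priority signals set would add:
+
+	qcom,prio0 = <1>;
+	qcom,prio1 = <2>;
+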
Example:
@@ -149,7 +153,7 @@
- qcom,msm-bus,name: String representing the client-name
- qcom,msm-bus,num-cases: Total number of usecases
-- qcom,msm-bus,active-only: Context flag for requests in active or
+- qcom,msm-bus,active-only: Boolean context flag for requests in active or
			dual (active & sleep) context
- qcom,msm-bus,num-paths: Total number of master-slave pairs
- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing:
@@ -160,7 +164,7 @@
qcom,msm-bus,name = "client-name";
qcom,msm-bus,num-cases = <3>;
- qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,active-only;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors =
<22 512 0 0>, <26 512 0 0>,
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt b/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
index 5d1fafb..31600ca 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
@@ -19,9 +19,55 @@
Typically the sensor closest to CPU0.
- qcom,poll-ms: Sampling interval to read sensor, in ms.
- qcom,limit-temp: Threshold temperature to start stepping CPU down, in degC.
-- qcom,temp-hysteresis: Degrees below threshold temperature to step CPU up.
+- qcom,temp-hysteresis: Degrees C below threshold temperature to step CPU up.
- qcom,freq-step: Number of frequency steps to take on each CPU mitigation.
+Optional properties:
+
+- qcom,core-limit-temp: Threshold temperature to start shutting down cores,
+			in degC.
+- qcom,core-temp-hysterisis: Degrees C below the core limit temperature at
+			which the cores will be brought back online in sequence.
+- qcom,core-control-mask: The cpu mask that will be used to determine if a
+ core can be controlled or not. A mask of 0 indicates
+ the feature is disabled.
+- qcom,vdd-restriction-temp: Threshold temperature, in degC, below which vdd
+			restriction is enabled, setting a higher voltage on
+			key voltage rails.
+- qcom,vdd-restriction-temp-hysteresis: Threshold temperature, in degC, above
+			which vdd restriction on the key rails is disabled.
+- qcom,pmic-sw-mode-temp: Threshold temperature, in degC, above which auto
+			mode is disabled on the rail. If this property exists,
+			qcom,pmic-sw-mode-temp-hysteresis and
+			qcom,pmic-sw-mode-regs must also exist, otherwise an
+			error is returned.
+- qcom,pmic-sw-mode-temp-hysteresis: Temperature, in degC (below
+			qcom,pmic-sw-mode-temp), at which auto mode is
+			re-enabled on the rail. If this property exists,
+			qcom,pmic-sw-mode-temp and qcom,pmic-sw-mode-regs must
+			also exist, otherwise an error is returned.
+- qcom,pmic-sw-mode-regs: Array of regulator names whose auto mode is
+			disabled/enabled based on the thresholds above. If this
+			property exists, qcom,pmic-sw-mode-temp and
+			qcom,pmic-sw-mode-temp-hysteresis must also exist,
+			otherwise an error is returned. If this property is
+			defined, <consumer_supply_name>-supply =
+			<&phandle_of_regulator> must also be defined.
+- <consumer_supply_name>-supply = <&phandle_of_regulator>: consumer_supply_name
+			is the name defined in the thermal driver;
+			phandle_of_regulator is defined by the regulator's
+			device tree node.
+
+Optional child nodes:
+- qcom,<vdd restriction child node name>: Defines the name of the child node.
+			If this node exists, qcom,vdd-rstr-reg, qcom,levels,
+			qcom,min-level and qcom,freq-req must also exist,
+			otherwise an error is returned.
+- qcom,vdd-rstr-reg: Name of the rail
+- qcom,levels: Array of the level values. Unit is corner voltage for voltage request
+ or kHz for frequency request.
+- qcom,min-level: Request this level as minimum level when disabling voltage
+ restriction. Unit is corner voltage for voltage request
+ or kHz for frequency request.
+- qcom,freq-req: Boolean flag indicating that frequency, rather than voltage,
+			should be restricted on this rail.
+
Example:
qcom,msm-thermal {
@@ -31,4 +77,20 @@
qcom,limit-temp = <60>;
qcom,temp-hysteresis = <10>;
qcom,freq-step = <2>;
+ qcom,core-limit-temp = <90>;
+ qcom,core-temp-hysterisis = <10>;
+ qcom,core-control-mask = <7>;
+ qcom,pmic-sw-mode-temp = <90>;
+ qcom,pmic-sw-mode-temp-hysteresis = <80>;
+ qcom,pmic-sw-mode-regs = "vdd_dig";
+ qcom,vdd-restriction-temp = <5>;
+ qcom,vdd-restriction-temp-hysteresis = <10>;
+		vdd_dig-supply = <&pm8841_s2_floor_corner>;
+
+		qcom,vdd-dig-rstr {
+ qcom,vdd-rstr-reg = "vdd_dig";
+ qcom,levels = <5 7 7>; /* Nominal, Super Turbo, Super Turbo */
+ qcom,min-level = <1>; /* No Request */
+ };
};
+
diff --git a/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt b/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
index c741514..82e7e2a 100644
--- a/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
+++ b/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
@@ -25,6 +25,7 @@
- qcom,saw-turns-off-pll: Version of SAW2.1 or can turn off the HFPLL, when
doing power collapse and so the core need to switch to Global PLL before
PC.
+- qcom,pc-resets-timer: Indicates that the timer gets reset during power collapse.
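+
+	This is a boolean flag; a platform whose timer contents are lost across
+	power collapse simply adds the bare property to the node described
+	above, for example:
+
+		qcom,pc-resets-timer;
+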
Example:
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
index b9a71f6..7eb65d2 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -9,7 +9,7 @@
- qcom,ce-hw-instance : should contain crypto HW instance.
- qcom,msm_bus,name: Should be "qcedev-noc"
- qcom,msm_bus,num_cases: Depends on the use cases for bus scaling
- - qcom,msm_bus,active-only: Default vector index
+ - qcom,msm_bus,active-only: Boolean flag for context of request (active/dual)
- qcom,msm_bus,num_paths: The paths for source and destination ports
- qcom,msm_bus,vectors: Vectors for bus topology.
@@ -31,7 +31,6 @@
qcom,ce-hw-shared;
qcom,msm-bus,name = "qcedev-noc";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<56 512 0 0>,
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 59f9879..79dc287 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -9,7 +9,7 @@
- qcom,ce-hw-instance : should contain crypto HW instance.
- qcom,msm_bus,name: Should be "qcrypto-noc"
- qcom,msm_bus,num_cases: Depends on the use cases for bus scaling
- - qcom,msm_bus,active-only: Default vector index
+ - qcom,msm_bus,active-only: Boolean flag for context of request (active/dual)
- qcom,msm_bus,num_paths: The paths for source and destination ports
- qcom,msm_bus,vectors: Vectors for bus topology.
@@ -30,7 +30,6 @@
qcom,ce-hw-shared;
qcom,msm-bus,name = "qcrypto-noc";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<56 512 0 0>,
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
index 5c426f2..ce4972a 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
@@ -17,12 +17,6 @@
- label: A string used to describe the controller used.
- qcom,supply-names: A list of strings that lists the names of the
regulator supplies.
-- qcom,supply-type: A list of strings that list the type of supply(ies)
- mentioned above. This list maps in the order of
- the supply names listed above.
- regulator = supply with controlled output
- switch = supply without controlled output. i.e.
- voltage switch
- qcom,supply-min-voltage-level: A list that specifies minimum voltage level
of supply(ies) mentioned above. This list maps
in the order of the supply names listed above.
@@ -44,7 +38,6 @@
vddio-supply = <&pm8226_l8>;
vdda-supply = <&pm8226_l4>;
qcom,supply-names = "vdd", "vddio", "vdda";
- qcom,supply-type = "regulator", "regulator", "regulator";
qcom,supply-min-voltage-level = <2800000 1800000 1200000>;
qcom,supply-max-voltage-level = <2800000 1800000 1200000>;
qcom,supply-peak-current = <150000 100000 100000>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
index 0422b57..bb19768 100644
--- a/Documentation/devicetree/bindings/fb/mdss-mdp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
@@ -108,7 +108,10 @@
- qcom,mdss-rot-block-size: The size of a memory block (in pixels) to be used
by the rotator. If this property is not specified,
then a default value of 128 pixels would be used.
-
+- qcom,mdss-has-bwc: Boolean property to indicate the presence of the bandwidth
+			compression feature in the rotator.
+- qcom,mdss-has-decimation: Boolean property to indicate the presence of the
+			decimation feature in the fetch path.
Optional subnodes:
Child nodes representing the frame buffer virtual devices.
@@ -141,6 +144,8 @@
qcom,mdss-pipe-dma-fetch-id = <10 13>;
qcom,mdss-smp-data = <22 4096>;
qcom,mdss-rot-block-size = <64>;
+ qcom,mdss-has-bwc;
+ qcom,mdss-has-decimation;
qcom,mdss-ctl-off = <0x00000600 0x00000700 0x00000800
0x00000900 0x0000A00>;
diff --git a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
index a2b66f7..8579ec0 100644
--- a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
+++ b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
@@ -12,15 +12,12 @@
- core-vcc-supply: phandle to the HDMI vcc regulator device tree node.
- qcom,hdmi-tx-supply-names: a list of strings that map in order
to the list of supplies.
-- qcom,hdmi-tx-supply-type: a type of supply(ies) mentioned above.
- 0 = supply with controlled output
- 1 = supply without controlled output. i.e. voltage switch
- qcom,hdmi-tx-min-voltage-level: specifies minimum voltage level
of supply(ies) mentioned above.
- qcom,hdmi-tx-max-voltage-level: specifies maximum voltage level
of supply(ies) mentioned above.
-- qcom,hdmi-tx-op-mode: specifies optimum operating mode of
- supply(ies) mentioned above.
+- qcom,hdmi-tx-peak-current: specifies the peak current that will be
+ drawn from the supply(ies) mentioned above.
- qcom,hdmi-tx-cec: gpio for Consumer Electronics Control (cec) line.
- qcom,hdmi-tx-ddc-clk: gpio for Display Data Channel (ddc) clock line.
@@ -56,10 +53,9 @@
core-vdda-supply = <&pm8941_l12>;
core-vcc-supply = <&pm8941_s3>;
qcom,hdmi-tx-supply-names = "hpd-gdsc", "hpd-5v", "core-vdda", "core-vcc";
- qcom,hdmi-tx-supply-type = <1 1 0 0>;
qcom,hdmi-tx-min-voltage-level = <0 0 1800000 1800000>;
qcom,hdmi-tx-max-voltage-level = <0 0 1800000 1800000>;
- qcom,hdmi-tx-op-mode = <0 0 1800000 0>;
+ qcom,hdmi-tx-peak-current = <0 0 1800000 0>;
qcom,hdmi-tx-cec = <&msmgpio 31 0>;
qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 0004302..436dfc7 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -97,7 +97,6 @@
/* Bus Scale Settings */
qcom,msm-bus,name = "grp3d";
qcom,msm-bus,num-cases = <6>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>, <89 604 0 0>,
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
index fbe8ffa..418447d 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
@@ -16,7 +16,10 @@
- interrupt-names : Should contain "eoc-int-en-set".
- qcom,adc-bit-resolution : Bit resolution of the ADC.
- qcom,adc-vdd-reference : Voltage reference used by the ADC.
-- qcom,rsense : Internal rsense resistor used for current measurements.
+
+Optional properties:
+- qcom,rsense : Use this property when an external rsense should be used
+		  for the current calculation; the value is specified in nano-ohms.
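+
+	For example, a board with an external 10 mOhm sense resistor (value
+	chosen purely for illustration) would specify:
+
+		qcom,rsense = <10000000>;	/* 10 mOhm in nano-ohms */
+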
Channel node
NOTE: At least one Channel node is required.
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index 2caa959..ac60e38 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -6,6 +6,8 @@
- qcom,hfi : supported Host-Firmware Interface, one of:
- "venus"
- "q6"
+- qcom,max-hw-load: The maximum load the hardware can support expressed in units
+ of macroblocks per second.
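+
+	As a rough illustration, hardware rated for 1080p decode at 60 frames
+	per second corresponds to (1920/16) * (1088/16) * 60 = 489600
+	macroblocks per second, so such a target could specify:
+
+		qcom,max-hw-load = <489600>;	/* illustrative value */
+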
Optional properties:
- reg : offset and length of the register set for the device.
@@ -40,8 +42,6 @@
(enum hal_buffer) to its corresponding TZ usage. The TZ usages are defined
as "enum cp_mem_usage" in include/linux/msm_ion.h
- qcom,has-ocmem: indicate the target has ocmem if this property exists
-- qcom,max-hw-load: The maximum load the hardware can support expressed in units
- of macroblocks per second.
Example:
diff --git a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
index b99b716..caead84 100644
--- a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
+++ b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
@@ -92,7 +92,6 @@
qcom,msm-bus,name = "sdcc2";
qcom,msm-bus,num-cases = <7>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
<81 512 6656 13312>, /* 13 MB/s*/
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 87281f7..013d56e 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -155,7 +155,6 @@
qcom,cpu-dma-latency-us = <200>;
qcom,msm-bus,name = "sdhc2";
qcom,msm-bus,num-cases = <7>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
<81 512 6656 13312>, /* 13 MB/s*/
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
index 70f8b55..ac8ea73 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
@@ -14,6 +14,8 @@
- interrupts: The lpass watchdog interrupt
- vdd_cx-supply: Reference to the regulator that supplies the vdd_cx domain.
- qcom,firmware-name: Base name of the firmware image. Ex. "lpass"
+- qcom,gpio-err-fatal: GPIO used by the lpass to indicate a fatal error to the apps.
+- qcom,gpio-force-stop: GPIO used by the apps to force the lpass to shut down.
Optional properties:
- vdd_pll-supply: Reference to the regulator that supplies the PLL's rail.
@@ -29,4 +31,10 @@
interrupts = <0 194 1>;
vdd_cx-supply = <&pm8841_s2>;
qcom,firmware-name = "lpass";
+
+ /* GPIO input from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
};
diff --git a/Documentation/devicetree/bindings/power/qpnp-charger.txt b/Documentation/devicetree/bindings/power/qpnp-charger.txt
index f5465a4..fced0d7 100644
--- a/Documentation/devicetree/bindings/power/qpnp-charger.txt
+++ b/Documentation/devicetree/bindings/power/qpnp-charger.txt
@@ -7,12 +7,12 @@
Each of these peripherals are implemented as subnodes in the example at the
end of this file.
-- qcom,chg-chgr: Supports charging control and status
+- qcom,chgr: Supports charging control and status
reporting.
-- qcom,chg-bat-if: Battery status reporting such as presence,
+- qcom,bat-if: Battery status reporting such as presence,
temperature reporting and voltage collapse
protection.
-- qcom,chg-buck: Charger buck configuration and status
+- qcom,buck: Charger buck configuration and status
reporting with regards to several regulation
loops such as vdd, ibat etc.
- qcom,usb-chgpth: USB charge path detection and input current
@@ -23,38 +23,39 @@
settings, comparator override features etc.
Parent node required properties:
-- qcom,chg-vddmax-mv: Target voltage of battery in mV.
-- qcom,chg-vddsafe-mv: Maximum Vdd voltage in mV.
-- qcom,chg-vinmin-mv: Minimum input voltage in mV.
-- qcom,chg-ibatmax-ma: Maximum battery charge current in mA
-- qcom,chg-ibatsafe-ma: Safety battery current setting
-- qcom,chg-thermal-mitigation: Array of ibatmax values for different
+- qcom,vddmax-mv: Target voltage of battery in mV.
+- qcom,vddsafe-mv: Maximum Vdd voltage in mV.
+- qcom,vinmin-mv: Minimum input voltage in mV.
+- qcom,ibatmax-ma: Maximum battery charge current in mA
+- qcom,ibatsafe-ma: Safety battery current setting
+- qcom,thermal-mitigation: Array of ibatmax values for different
system thermal mitigation level.
Parent node optional properties:
-- qcom,chg-ibatterm-ma: Current at which charging is terminated when
+- qcom,ibatterm-ma: Current at which charging is terminated when
the analog end of charge option is selected.
-- qcom,chg-maxinput-usb-ma: Maximum input current USB.
-- qcom,chg-maxinput-dc-ma: Maximum input current DC.
-- qcom,chg-vbatdet-delta-mv: Battery charging resume delta.
-- qcom,chg-charging-disabled: Set this property to disable charging
+- qcom,maxinput-usb-ma: Maximum input current USB.
+- qcom,maxinput-dc-ma: Maximum input current DC.
+- qcom,vbatdet-delta-mv: Battery charging resume delta.
+- qcom,charging-disabled: Set this property to disable charging
				by default. This can then be overridden by
				writing the module parameter
"charging_disabled".
-- qcom,chg-use-default-batt-values: Set this flag to force reporting of
+- qcom,use-default-batt-values: Set this flag to force reporting of
battery temperature of 250 decidegree
Celsius, state of charge to be 50%
and disable charging.
-- qcom,chg-warm-bat-decidegc: Warm battery temperature in decidegC.
-- qcom,chg-cool-bat-decidegc: Cool battery temperature in decidegC.
+- qcom,warm-bat-decidegc: Warm battery temperature in decidegC.
+- qcom,cool-bat-decidegc: Cool battery temperature in decidegC.
Note that if both warm and cool battery
temperatures are set, the corresponding
ibatmax and bat-mv properties are
required to be set.
-- qcom,chg-ibatmax-cool-ma: Maximum cool battery charge current.
-- qcom,chg-ibatmax-warm-ma: Maximum warm battery charge current.
-- qcom,chg-warm-bat-mv: Warm temperature battery target voltage.
-- qcom,chg-cool-bat-mv: Cool temperature battery target voltage.
+- qcom,ibatmax-cool-ma: Maximum cool battery charge current.
+- qcom,ibatmax-warm-ma: Maximum warm battery charge current.
+- qcom,warm-bat-mv: Warm temperature battery target voltage.
+- qcom,cool-bat-mv: Cool temperature battery target voltage.
+- qcom,tchg-mins: Maximum total software-initialized charge time, in minutes.
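+				For reference, the pm8226/pm8941 dtsi updates
+				in this series use 150 minutes, i.e.
+				qcom,tchg-mins = <150>;
+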
Sub node required structure:
- A qcom,chg node must be a child of an SPMI node that has specified
@@ -77,13 +78,13 @@
qcom,usb-chgpth:
- usbin-valid
- qcom,chg-chgr:
+ qcom,chgr:
- chg-done
- chg-failed
The following interrupts are available:
- qcom,chg-chgr:
+ qcom,chgr:
- chg-done: Triggers on charge completion.
- chg-failed: Notifies of charge failures.
- fast-chg-on: Notifies of fast charging state.
@@ -98,7 +99,7 @@
setting, can be used as
battery alarm.
- qcom,chg-buck:
+ qcom,buck:
- vdd-loop: VDD loop change interrupt.
- ibat-loop: Ibat loop change interrupt.
- ichg-loop: Charge current loop change.
@@ -107,7 +108,7 @@
- vref-ov: Reference overvoltage interrupt.
- vbat-ov: Battery overvoltage interrupt.
- qcom,chg-bat-if:
+ qcom,bat-if:
- psi: PMIC serial interface interrupt.
- vcp-on: Voltage collapse protection
status interrupt.
@@ -140,22 +141,22 @@
#address-cells = <1>;
#size-cells = <1>;
- qcom,chg-vddmax-mv = <4200>;
- qcom,chg-vddsafe-mv = <4200>;
- qcom,chg-vinmin-mv = <4200>;
- qcom,chg-ibatmax-ma = <1500>;
- qcom,chg-ibatterm-ma = <200>;
- qcom,chg-ibatsafe-ma = <1500>;
- qcom,chg-thermal-mitigation = <1500 700 600 325>;
- qcom,chg-cool-bat-degc = <10>;
- qcom,chg-cool-bat-mv = <4100>;
- qcom,chg-ibatmax-warm-ma = <350>;
- qcom,chg-warm-bat-degc = <45>;
- qcom,chg-warm-bat-mv = <4100>;
- qcom,chg-ibatmax-cool-ma = <350>;
- qcom,chg-vbatdet-delta-mv = <60>;
+ qcom,vddmax-mv = <4200>;
+ qcom,vddsafe-mv = <4200>;
+ qcom,vinmin-mv = <4200>;
+ qcom,ibatmax-ma = <1500>;
+ qcom,ibatterm-ma = <200>;
+ qcom,ibatsafe-ma = <1500>;
+ qcom,thermal-mitigation = <1500 700 600 325>;
+ qcom,cool-bat-degc = <10>;
+ qcom,cool-bat-mv = <4100>;
+ qcom,ibatmax-warm-ma = <350>;
+ qcom,warm-bat-degc = <45>;
+ qcom,warm-bat-mv = <4100>;
+ qcom,ibatmax-cool-ma = <350>;
+ qcom,vbatdet-delta-mv = <60>;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
reg = <0x1000 0x100>;
interrupts = <0x0 0x10 0x0>,
<0x0 0x10 0x1>,
@@ -176,7 +177,7 @@
"vbat-det-lo";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
reg = <0x1100 0x100>;
interrupts = <0x0 0x11 0x0>,
<0x0 0x11 0x1>,
@@ -195,7 +196,7 @@
"vbat-ov";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
reg = <0x1200 0x100>;
interrupts = <0x0 0x12 0x0>,
<0x0 0x12 0x1>,
@@ -210,7 +211,7 @@
"batt-pres";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
reg = <0x1300 0x100>;
interrupts = <0 0x13 0x0>,
<0 0x13 0x1>,
@@ -221,7 +222,7 @@
"chg-gone";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
reg = <0x1400 0x100>;
interrupts = <0x0 0x14 0x0>,
<0x0 0x14 0x1>;
@@ -230,7 +231,7 @@
"coarse-det-dc";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
reg = <0x1500 0x100>;
interrupts = <0x0 0x15 0x0>,
<0x0 0x15 0x1>;
@@ -239,7 +240,7 @@
"boost-pwr-ok";
};
- qcom,chg-misc@1600 {
+ qcom,misc@1600 {
reg = <0x1600 0x100>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index fe3d62f..65de56f 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -443,7 +443,6 @@
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source.
- qcom,cdc-mclk-gpios : GPIO on which the mclk signal is coming.
-- taiko-mclk-clk : phandle to PMIC8941 clkdiv1 node.
- qcom,taiko-mclk-clk-freq : Taiko mclk Freq in Hz. currently only 9600000Hz
is supported.
- qcom,prim-auxpcm-gpio-clk : GPIO on which Primary AUXPCM clk signal is coming.
@@ -495,7 +494,6 @@
"MIC BIAS4 External", "Digital Mic6";
qcom,cdc-mclk-gpios = <&pm8941_gpios 15 0>;
- taiko-mclk-clk = <&pm8941_clkdiv1>;
qcom,taiko-mclk-clk-freq = <9600000>;
qcom,us-euro-gpios = <&pm8941_gpios 20 0>;
diff --git a/Documentation/devicetree/bindings/sound/taiko_codec.txt b/Documentation/devicetree/bindings/sound/taiko_codec.txt
index ffea58f..777933a 100644
--- a/Documentation/devicetree/bindings/sound/taiko_codec.txt
+++ b/Documentation/devicetree/bindings/sound/taiko_codec.txt
@@ -35,6 +35,10 @@
- qcom,cdc-vddcx-2-voltage: cx-2 supply's voltage level min and max in mV.
- qcom,cdc-vddcx-2-current: cx-2 supply's max current in mA.
+ - qcom,cdc-static-supplies: List of supplies to be enabled prior to codec
+				hardware probe. Supplies in this list will
+				stay enabled.
+
- qcom,cdc-micbias-ldoh-v - LDOH output in volts (should be 1.95 V and 3.00 V).
- qcom,cdc-micbias-cfilt1-mv - cfilt1 output voltage in milli volts.
@@ -67,6 +71,11 @@
values for 9.6MHZ mclk can be 2400000 Hz, 3200000 Hz
and 4800000 Hz. The values for 12.288MHz mclk can be
3072200 Hz, 4096000 Hz and 6144000 Hz.
+
+ - qcom,cdc-on-demand-supplies: List of supplies which can be enabled
+ dynamically.
+ Supplies in this list are off by default.
+
Example:
taiko_codec {
@@ -103,6 +112,16 @@
qcom,cdc-vddcx-2-voltage = <1225000 1225000>;
qcom,cdc-vddcx-2-current = <5000>;
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1",
+ "cdc-vdd-a-1p2v",
+ "cdc-vddcx-1",
+ "cdc-vddcx-2";
+
+	qcom,cdc-on-demand-supplies = "cdc-vdd-spkdrv";
+
qcom,cdc-micbias-ldoh-v = <0x3>;
qcom,cdc-micbias-cfilt1-mv = <1800>;
qcom,cdc-micbias-cfilt2-mv = <2700>;
@@ -155,6 +174,10 @@
- qcom,cdc-vddcx-2-voltage: cx-2 supply's voltage level min and max in mV.
- qcom,cdc-vddcx-2-current: cx-2 supply's max current in mA.
+ - qcom,cdc-static-supplies: List of supplies to be enabled prior to codec
+				hardware probe. Supplies in this list will
+				stay enabled.
+
- qcom,cdc-micbias-ldoh-v - LDOH output in volts (should be 1.95 V and 3.00 V).
- qcom,cdc-micbias-cfilt1-mv - cfilt1 output voltage in milli volts.
@@ -179,6 +202,20 @@
- qcom,cdc-mclk-clk-rate - Specifies the master clock rate in Hz required for
codec.
+Optional properties:
+
+ - cdc-vdd-spkdrv-supply: phandle of spkdrv supply's regulator device tree node.
+ - qcom,cdc-vdd-spkdrv-voltage: spkdrv supply voltage level min and max in mV.
+ - qcom,cdc-vdd-spkdrv-current: spkdrv supply max current in mA.
+
+ - qcom,cdc-on-demand-supplies: List of supplies which can be enabled
+ dynamically.
+ Supplies in this list are off by default.
+
Example:
i2c@f9925000 {
cell-index = <3>;
@@ -228,6 +265,16 @@
qcom,cdc-vddcx-2-voltage = <1200000 1200000>;
qcom,cdc-vddcx-2-current = <10000>;
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1",
+ "cdc-vdd-a-1p2v",
+ "cdc-vddcx-1",
+ "cdc-vddcx-2";
+
+	qcom,cdc-on-demand-supplies = "cdc-vdd-spkdrv";
+
qcom,cdc-micbias-ldoh-v = <0x3>;
qcom,cdc-micbias-cfilt1-mv = <1800>;
qcom,cdc-micbias-cfilt2-mv = <2700>;
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
index 1388b7d..9b0f97b 100644
--- a/Documentation/devicetree/bindings/thermal/tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -43,6 +43,9 @@
no need to re-initialize them. The control registers are also
under a secure domain which can prevent them from being initialized
locally.
+- qcom,sensor-id : If present, map the TSENS sensors based on the remote
+	sensors that are enabled in HW. The number of mapped sensors must not
+	exceed the number of supported sensors.
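+
+	For example, a target that only enables a subset of the remote sensors
+	could map them as follows (the sensor list is illustrative only):
+
+		qcom,sensor-id = <0 1 2 3 7 8 9>;
+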
Example:
tsens@fc4a8000 {
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
index 5861eea..9754c2e 100644
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
+++ b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
@@ -77,7 +77,6 @@
qcom,msm-bus,name = "serial_uart0";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<84 512 0 0>,
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial_hs.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial_hs.txt
index c597536..96c9486 100644
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial_hs.txt
+++ b/Documentation/devicetree/bindings/tty/serial/msm_serial_hs.txt
@@ -93,7 +93,6 @@
qcom,msm-bus,name = "uart7";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<84 512 0 0>,
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index fd5b93e..6d54f7e 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -12,6 +12,7 @@
Optional properties:
- tx-fifo-resize: determines if the FIFO *has* to be reallocated.
+ - host-only-mode: if present, dwc3 is run in host-only mode.
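+
+   A host-only port would simply add the bare flag next to the other optional
+   properties of its dwc3 node, for example:
+
+	tx-fifo-resize;
+	host-only-mode;
+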
This is usually a subnode to DWC3 glue to which it is connected.
diff --git a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
index 6ea9e62..8ce31d9 100644
--- a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
@@ -74,7 +74,6 @@
qcom,msm-bus,name = "hsic";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<85 512 0 0>,
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index c1d4a05..6d06e99 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -7,7 +7,8 @@
- regs : offset and length of the register set in the memory map
- interrupts: IRQ line
- interrupt-names: OTG interrupt name(s) referenced in interrupts above
- HSUSB OTG expects "core_irq" and optionally "async_irq".
+			HSUSB OTG expects "core_irq", which is the IRQ line from the
+			core; optional interrupts are described in the next section.
- qcom,hsusb-otg-phy-type: PHY type can be one of
1 - Chipidea 45nm PHY
2 - Synopsis 28nm PHY
@@ -28,6 +29,9 @@
"HSUSB_1p8-supply" and "HSUSB_3p3-supply".
Optional properties :
+- interrupt-names : Optional interrupt resource entries are:
+ "async_irq" : Interrupt from HSPHY for asynchronous wakeup events in LPM.
+ "pmic_id_irq" : Interrupt from PMIC for external ID pin notification.
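+
+	For example, a controller using both optional interrupts alongside
+	"core_irq" would list (the IRQ specifiers below are placeholders):
+
+		interrupts = <0 134 0>, <0 140 0>, <0 136 0>;
+		interrupt-names = "core_irq", "async_irq", "pmic_id_irq";
+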
- qcom,hsusb-otg-disable-reset: If present then core is RESET only during
init, otherwise core is RESET for every cable disconnect as well
- qcom,hsusb-otg-pnoc-errata-fix: If present then workaround for PNOC
@@ -43,7 +47,6 @@
- qcom,hsusb-otg-power-budget: VBUS power budget in mA
0 will be treated as 500mA
- qcom,hsusb-otg-pclk-src-name: The source of pclk
-- qcom,hsusb-otg-pmic-id-irq: ID, routed to PMIC IRQ number
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
below optional properties:
- qcom,msm_bus,name
@@ -66,6 +69,8 @@
- qcom,dp-manual-pullup: If present, vbus is not routed to USB controller/phy
and controller driver therefore enables pull-up explicitly before
starting controller using usbcmd run/stop bit.
+- qcom,usb2-enable-hsphy2: If present, the USB2 controller is connected to the
+			second HSPHY.
Example HSUSB OTG controller device node :
usb@f9690000 {
@@ -83,7 +88,6 @@
qcom,hsusb-otg-phy-init-seq = <0x01 0x90 0xffffffff>;
qcom,hsusb-otg-power-budget = <500>;
qcom,hsusb-otg-pclk-src-name = "dfab_usb_clk";
- qcom,hsusb-otg-pmic-id-irq = <47>
qcom,hsusb-otg-lpm-on-dev-suspend;
qcom,hsusb-otg-clk-always-on-workaround;
hsusb_vdd_dig-supply = <&pm8226_s1_corner>;
diff --git a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
index e394b56..c130b26 100644
--- a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
+++ b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
@@ -7,8 +7,8 @@
Required properties:
- compatible: "wcnss_wlan"
-- reg: offset and length of the register set for the device. The pair
- corresponds to PRONTO.
+- reg: physical address and length of the register set for the device.
+- reg-names: "wcnss_mmio", "wcnss_fiq"
- interrupts: Pronto to Apps interrupts for tx done and rx pending.
- qcom,pronto-vddmx-supply: regulator to supply pronto pll.
- qcom,pronto-vddcx-supply: regulator to supply WLAN/BT/FM digital module.
@@ -25,8 +25,9 @@
qcom,wcnss-wlan@fb000000 {
compatible = "qcom,wcnss_wlan";
- reg = <0xfb000000 0x280000>;
- reg-names = "wcnss_mmio";
+ reg = <0xfb000000 0x280000>,
+ <0xf9011008 0x04>;
+ reg-names = "wcnss_mmio", "wcnss_fiq";
interrupts = <0 145 0 0 146 0>;
interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 17a44b3..8226e43 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -213,6 +213,9 @@
config ARCH_MTD_XIP
bool
+config ARCH_WANT_KMAP_ATOMIC_FLUSH
+ bool
+
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
diff --git a/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
index c52b7dd..c0c9107 100644
--- a/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
@@ -38,7 +38,7 @@
qcom,mdss-pan-dsi-stream = <0>;
qcom,mdss-pan-dsi-mdp-tr = <0x0>;
qcom,mdss-pan-dsi-dma-tr = <0x04>;
- qcom,mdss-pan-frame-rate = <60>;
+ qcom,mdss-pan-dsi-frame-rate = <60>;
qcom,panel-phy-regulatorSettings = [07 09 03 00 /* Regualotor settings */
20 00 01];
qcom,panel-phy-timingSettings = [7d 25 1d 00 37 33
diff --git a/arch/arm/boot/dts/dsi-panel-orise-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-orise-720p-video.dtsi
index 7bd95e7..448d357 100644
--- a/arch/arm/boot/dts/dsi-panel-orise-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-orise-720p-video.dtsi
@@ -36,7 +36,7 @@
qcom,mdss-pan-dsi-stream = <0>;
qcom,mdss-pan-dsi-mdp-tr = <0x0>;
qcom,mdss-pan-dsi-dma-tr = <0x04>;
- qcom,mdss-pan-frame-rate = <60>;
+ qcom,mdss-pan-dsi-frame-rate = <60>;
qcom,panel-phy-regulatorSettings = [03 01 01 00 /* Regualotor settings */
20 00 01];
qcom,panel-phy-timingSettings = [69 29 1f 00 55 55
diff --git a/arch/arm/boot/dts/dsi-panel-sharp-qhd-video.dtsi b/arch/arm/boot/dts/dsi-panel-sharp-qhd-video.dtsi
new file mode 100644
index 0000000..f853285
--- /dev/null
+++ b/arch/arm/boot/dts/dsi-panel-sharp-qhd-video.dtsi
@@ -0,0 +1,67 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ qcom,mdss_dsi_sharp_qhd_video {
+ compatible = "qcom,mdss-dsi-panel";
+ label = "sharp QHD LS043T1LE01 video mode dsi panel";
+ status = "disable";
+ qcom,dsi-ctrl-phandle = <&mdss_dsi0>;
+ qcom,enable-gpio = <&msmgpio 58 0>;
+ qcom,rst-gpio = <&pm8941_gpios 19 0>;
+ qcom,mdss-pan-res = <540 960>;
+ qcom,mdss-pan-bpp = <24>;
+ qcom,mdss-pan-dest = "display_1";
+ qcom,mdss-pan-porch-values = <80 32 48 15 10 3>; /* HBP, HPW, HFP, VBP, VPW, VFP */
+ qcom,mdss-pan-underflow-clr = <0xff>;
+ qcom,mdss-pan-bl-ctrl = "bl_ctrl_wled";
+ qcom,mdss-pan-bl-levels = <1 4095>;
+ qcom,mdss-pan-dsi-mode = <0>;
+ qcom,mdss-pan-dsi-h-pulse-mode = <1>;
+ qcom,mdss-pan-dsi-h-power-stop = <0 0 0>;
+ qcom,mdss-pan-dsi-bllp-power-stop = <1 1>;
+ qcom,mdss-pan-dsi-traffic-mode = <0>;
+ qcom,mdss-pan-dsi-dst-format = <3>;
+ qcom,mdss-pan-dsi-vc = <0>;
+ qcom,mdss-pan-dsi-rgb-swap = <2>;
+ qcom,mdss-pan-dsi-data-lanes = <1 1 0 0>;
+ qcom,mdss-pan-dsi-dlane-swap = <0>;
+ qcom,mdss-pan-dsi-t-clk = <0x1c 0x04>;
+ qcom,mdss-pan-dsi-stream = <0>;
+ qcom,mdss-pan-dsi-mdp-tr = <0x04>;
+ qcom,mdss-pan-dsi-dma-tr = <0x04>;
+		qcom,mdss-pan-dsi-frame-rate = <60>;
+ qcom,panel-phy-regulatorSettings = [07 09 03 00 /* Regulator settings */
+ 20 00 01];
+ qcom,panel-phy-timingSettings = [46 1d 20 00 39 3a
+ 21 21 32 03 04 00];
+ qcom,panel-phy-strengthCtrl = [ff 06];
+ qcom,panel-phy-bistCtrl = [00 00 b1 ff /* BIST Ctrl settings */
+ 00 00];
+ qcom,panel-phy-laneConfig = [00 00 00 00 00 00 00 01 97 /* lane0 config */
+ 00 00 00 00 05 00 00 01 97 /* lane1 config */
+ 00 00 00 00 0a 00 00 01 97 /* lane2 config */
+ 00 00 00 00 0f 00 00 01 97 /* lane3 config */
+ 00 c0 00 00 00 00 00 01 bb]; /* Clk ln config */
+ qcom,panel-on-cmds = [05 01 00 00 32 02 01 00 /* sw reset */
+ 05 01 00 00 0a 02 11 00 /* exit sleep */
+ 15 01 00 00 0a 02 53 2c /* backlight on */
+ 15 01 00 00 0a 02 51 ff /* brightness max */
+ 05 01 00 00 0a 02 29 00 /* display on */
+ 15 01 00 00 0a 02 ae 03 /* set num of lanes */
+ 15 01 00 00 0a 02 3a 77 /* rgb_888 */];
+ qcom,on-cmds-dsi-state = "DSI_LP_MODE";
+ qcom,panel-off-cmds = [05 01 00 00 0a 02 28 00 /* display off */
+ 05 01 00 00 78 02 10 00 /* enter sleep */];
+ qcom,off-cmds-dsi-state = "DSI_HS_MODE";
+ };
+};
diff --git a/arch/arm/boot/dts/dsi-panel-sim-video.dtsi b/arch/arm/boot/dts/dsi-panel-sim-video.dtsi
index 98074c8..9a734a0 100644
--- a/arch/arm/boot/dts/dsi-panel-sim-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-sim-video.dtsi
@@ -37,7 +37,7 @@
qcom,mdss-pan-dsi-stream = <0>;
qcom,mdss-pan-dsi-mdp-tr = <0x04>;
qcom,mdss-pan-dsi-dma-tr = <0x04>;
- qcom,mdss-pan-frame-rate = <60>;
+ qcom,mdss-pan-dsi-frame-rate = <60>;
qcom,panel-on-cmds = [32 01 00 00 00 02 00 00];
qcom,on-cmds-dsi-state = "DSI_LP_MODE";
qcom,panel-off-cmds = [22 01 00 00 00 02 00 00];
diff --git a/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi
index 42f6033..2937cde 100644
--- a/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi
@@ -40,7 +40,7 @@
qcom,mdss-pan-dsi-stream = <0>;
qcom,mdss-pan-dsi-mdp-tr = <0x0>;
qcom,mdss-pan-dsi-dma-tr = <0x04>;
- qcom,mdss-pan-frame-rate = <60>;
+ qcom,mdss-pan-dsi-frame-rate = <60>;
qcom,panel-phy-regulatorSettings = [07 09 03 00 /* Regualotor settings */
20 00 01];
qcom,panel-phy-timingSettings = [b0 23 1b 00 94 93
diff --git a/arch/arm/boot/dts/msm-pm8110-rpm-regulator.dtsi b/arch/arm/boot/dts/msm-pm8110-rpm-regulator.dtsi
new file mode 100644
index 0000000..0de72b0
--- /dev/null
+++ b/arch/arm/boot/dts/msm-pm8110-rpm-regulator.dtsi
@@ -0,0 +1,381 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&rpm_bus {
+ rpm-regulator-smpa1 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ status = "disabled";
+
+ regulator-s1 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_s1";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-smpa3 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <3>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ status = "disabled";
+
+ regulator-s3 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_s3";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-smpa4 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <4>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ status = "disabled";
+
+ regulator-s4 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_s4";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa1 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l1 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l1";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa2 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <2>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l2 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l2";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa3 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <3>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l3 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l3";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa4 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <4>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l4 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l4";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa5 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <5>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l5 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l5";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa6 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <6>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l6 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l6";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa7 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <7>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l7 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l7";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa8 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <8>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <5000>;
+ status = "disabled";
+
+ regulator-l8 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l8";
+ qcom,set = <1>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa9 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <9>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l9 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l9";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa10 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <10>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l10 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l10";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa12 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <12>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l12 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l12";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa14 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <14>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l14 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l14";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa15 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <15>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l15 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l15";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa16 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <16>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l16 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l16";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa17 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <17>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l17 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l17";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa18 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <18>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l18 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l18";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa19 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <19>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l19 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l19";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa20 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <20>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <5000>;
+ status = "disabled";
+
+ regulator-l20 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l20";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa21 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <21>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l21 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l21";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa22 {
+ compatible = "qcom,rpm-regulator-smd-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <22>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l22 {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l22";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm-pm8110.dtsi b/arch/arm/boot/dts/msm-pm8110.dtsi
index ec42cfc..9ebfb56 100644
--- a/arch/arm/boot/dts/msm-pm8110.dtsi
+++ b/arch/arm/boot/dts/msm-pm8110.dtsi
@@ -22,6 +22,98 @@
#address-cells = <1>;
#size-cells = <1>;
+ pm8110_chg: qcom,charger {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-charger";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ qcom,chg-vddmax-mv = <4200>;
+ qcom,chg-vddsafe-mv = <4200>;
+ qcom,chg-vinmin-mv = <4200>;
+ qcom,chg-vbatdet-mv = <4100>;
+ qcom,chg-ibatmax-ma = <1500>;
+ qcom,chg-ibatterm-ma = <200>;
+ qcom,chg-ibatsafe-ma = <1500>;
+ qcom,chg-thermal-mitigation = <1500 700 600 325>;
+
+ qcom,chg-chgr@1000 {
+ status = "disabled";
+ reg = <0x1000 0x100>;
+ interrupts = <0x0 0x10 0x0>,
+ <0x0 0x10 0x1>,
+ <0x0 0x10 0x2>,
+ <0x0 0x10 0x3>,
+ <0x0 0x10 0x4>,
+ <0x0 0x10 0x5>,
+ <0x0 0x10 0x6>,
+ <0x0 0x10 0x7>;
+
+ interrupt-names = "vbat-det-lo",
+ "vbat-det-hi",
+ "chgwdog",
+ "state-change",
+ "trkl-chg-on",
+ "fast-chg-on",
+ "chg-failed",
+ "chg-done";
+ };
+
+ qcom,chg-buck@1100 {
+ status = "disabled";
+ reg = <0x1100 0x100>;
+ interrupts = <0x0 0x11 0x0>,
+ <0x0 0x11 0x1>,
+ <0x0 0x11 0x2>,
+ <0x0 0x11 0x3>,
+ <0x0 0x11 0x4>,
+ <0x0 0x11 0x5>,
+ <0x0 0x11 0x6>;
+
+ interrupt-names = "vbat-ov",
+ "vreg-ov",
+ "overtemp",
+ "vchg-loop",
+ "ichg-loop",
+ "ibat-loop",
+ "vdd-loop";
+ };
+
+ qcom,chg-bat-if@1200 {
+ status = "disabled";
+ reg = <0x1200 0x100>;
+ interrupts = <0x0 0x12 0x0>,
+ <0x0 0x12 0x1>,
+ <0x0 0x12 0x2>,
+ <0x0 0x12 0x3>,
+ <0x0 0x12 0x4>;
+
+ interrupt-names = "batt-pres",
+ "bat-temp-ok",
+ "bat-fet-on",
+ "vcp-on",
+ "psi";
+ };
+
+ qcom,chg-usb-chgpth@1300 {
+ status = "disabled";
+ reg = <0x1300 0x100>;
+ interrupts = <0 0x13 0x0>,
+ <0 0x13 0x1>,
+ <0x0 0x13 0x2>;
+
+ interrupt-names = "coarse-det-usb",
+ "usbin-valid",
+ "chg-gone";
+ };
+
+ qcom,chg-misc@1600 {
+ status = "disabled";
+ reg = <0x1600 0x100>;
+ };
+ };
+
pm8110_vadc: vadc@3100 {
compatible = "qcom,qpnp-vadc";
reg = <0x3100 0x100>;
@@ -75,7 +167,6 @@
interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <16>;
qcom,adc-vdd-reference = <1800>;
- qcom,rsense = <1500>;
chan@0 {
label = "internal_rsense";
@@ -88,6 +179,24 @@
qcom,fast-avg-setup = <0>;
};
};
+
+ qcom,pm8110_rtc {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-rtc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ qcom,qpnp-rtc-write = <0>;
+ qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+ qcom,pm8110_rtc_rw@6000 {
+ reg = <0x6000 0x100>;
+ };
+
+ qcom,pm8110_rtc_alarm@6100 {
+ reg = <0x6100 0x100>;
+ interrupts = <0x0 0x61 0x1>;
+ };
+ };
};
qcom,pm8110@1 {
diff --git a/arch/arm/boot/dts/msm-pm8226.dtsi b/arch/arm/boot/dts/msm-pm8226.dtsi
index f702abb..99410b3 100644
--- a/arch/arm/boot/dts/msm-pm8226.dtsi
+++ b/arch/arm/boot/dts/msm-pm8226.dtsi
@@ -56,16 +56,17 @@
#size-cells = <1>;
status = "disabled";
- qcom,chg-vddmax-mv = <4200>;
- qcom,chg-vddsafe-mv = <4200>;
- qcom,chg-vinmin-mv = <4200>;
- qcom,chg-vbatdet-mv = <4100>;
- qcom,chg-ibatmax-ma = <1500>;
- qcom,chg-ibatterm-ma = <200>;
- qcom,chg-ibatsafe-ma = <1500>;
- qcom,chg-thermal-mitigation = <1500 700 600 325>;
+ qcom,vddmax-mv = <4200>;
+ qcom,vddsafe-mv = <4200>;
+ qcom,vinmin-mv = <4200>;
+ qcom,vbatdet-delta-mv = <150>;
+ qcom,ibatmax-ma = <1500>;
+ qcom,ibatterm-ma = <200>;
+ qcom,ibatsafe-ma = <1500>;
+ qcom,thermal-mitigation = <1500 700 600 325>;
+ qcom,tchg-mins = <150>;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "disabled";
reg = <0x1000 0x100>;
interrupts = <0x0 0x10 0x0>,
@@ -87,7 +88,7 @@
"chg-done";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "disabled";
reg = <0x1100 0x100>;
interrupts = <0x0 0x11 0x0>,
@@ -107,7 +108,7 @@
"vdd-loop";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
status = "disabled";
reg = <0x1200 0x100>;
interrupts = <0x0 0x12 0x0>,
@@ -124,7 +125,7 @@
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "disabled";
reg = <0x1300 0x100>;
interrupts = <0 0x13 0x0>,
@@ -136,7 +137,7 @@
"chg-gone";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "disabled";
reg = <0x1500 0x100>;
interrupts = <0x0 0x15 0x0>,
@@ -367,7 +368,6 @@
interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <16>;
qcom,adc-vdd-reference = <1800>;
- qcom,rsense = <1500>;
chan@0 {
label = "internal_rsense";
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index d712e5f..43b7d03 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -171,21 +171,22 @@
#size-cells = <1>;
status = "disabled";
- qcom,chg-vddmax-mv = <4200>;
- qcom,chg-vddsafe-mv = <4200>;
- qcom,chg-vinmin-mv = <4200>;
- qcom,chg-ibatmax-ma = <1500>;
- qcom,chg-ibatsafe-ma = <1500>;
- qcom,chg-thermal-mitigation = <1500 700 600 325>;
- qcom,chg-cool-bat-decidegc = <100>;
- qcom,chg-cool-bat-mv = <4100>;
- qcom,chg-ibatmax-warm-ma = <350>;
- qcom,chg-warm-bat-decidegc = <450>;
- qcom,chg-warm-bat-mv = <4100>;
- qcom,chg-ibatmax-cool-ma = <350>;
- qcom,chg-vbatdet-delta-mv = <350>;
+ qcom,vddmax-mv = <4200>;
+ qcom,vddsafe-mv = <4200>;
+ qcom,vinmin-mv = <4200>;
+ qcom,ibatmax-ma = <1500>;
+ qcom,ibatsafe-ma = <1500>;
+ qcom,thermal-mitigation = <1500 700 600 325>;
+ qcom,cool-bat-decidegc = <100>;
+ qcom,cool-bat-mv = <4100>;
+ qcom,ibatmax-warm-ma = <350>;
+ qcom,warm-bat-decidegc = <450>;
+ qcom,warm-bat-mv = <4100>;
+ qcom,ibatmax-cool-ma = <350>;
+ qcom,vbatdet-delta-mv = <350>;
+ qcom,tchg-mins = <150>;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "disabled";
reg = <0x1000 0x100>;
interrupts = <0x0 0x10 0x0>,
@@ -207,7 +208,7 @@
"chg-done";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "disabled";
reg = <0x1100 0x100>;
interrupts = <0x0 0x11 0x0>,
@@ -227,7 +228,7 @@
"vdd-loop";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
status = "disabled";
reg = <0x1200 0x100>;
interrupts = <0x0 0x12 0x0>,
@@ -244,7 +245,7 @@
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "disabled";
reg = <0x1300 0x100>;
interrupts = <0 0x13 0x0>,
@@ -256,7 +257,7 @@
"chg-gone";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
status = "disabled";
reg = <0x1400 0x100>;
interrupts = <0x0 0x14 0x0>,
@@ -266,7 +267,7 @@
"dcin-valid";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "disabled";
reg = <0x1500 0x100>;
interrupts = <0x0 0x15 0x0>,
@@ -768,6 +769,17 @@
qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
+
+ chan@39 {
+ label = "usb_id_nopull";
+ reg = <0x39>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
};
iadc@3600 {
@@ -779,7 +791,6 @@
interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <16>;
qcom,adc-vdd-reference = <1800>;
- qcom,rsense = <1500>;
chan@0 {
label = "internal_rsense";
diff --git a/arch/arm/boot/dts/msm8226-cdp.dts b/arch/arm/boot/dts/msm8226-cdp.dts
index 0186e54..e4700a1 100644
--- a/arch/arm/boot/dts/msm8226-cdp.dts
+++ b/arch/arm/boot/dts/msm8226-cdp.dts
@@ -99,9 +99,9 @@
"MIC BIAS1 Internal1", "Handset Mic",
"AMIC2", "MIC BIAS2 External",
"MIC BIAS2 External", "Headset Mic",
- "AMIC3", "MIC BIAS2 External",
- "MIC BIAS2 External", "ANCRight Headset Mic",
"AMIC4", "MIC BIAS2 External",
+ "MIC BIAS2 External", "ANCRight Headset Mic",
+ "AMIC5", "MIC BIAS2 External",
"MIC BIAS2 External", "ANCLeft Headset Mic",
"DMIC1", "MIC BIAS1 External",
"MIC BIAS1 External", "Digital Mic1",
@@ -140,6 +140,30 @@
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
qcom,nonremovable;
+ status = "disabled";
+};
+
+&sdhc_1 {
+ vdd-supply = <&pm8226_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <800 500000>;
+
+ vdd-io-supply = <&pm8226_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <250 154000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
status = "ok";
};
@@ -177,6 +201,38 @@
interrupt-names = "core_irq", "bam_irq", "status_irq";
cd-gpios = <&msmgpio 38 0x1>;
+ status = "disabled";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8226_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
+
+ vdd-io-supply = <&pm8226_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msmgpio 38 0x3>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
status = "ok";
};
@@ -210,7 +266,7 @@
qcom,mode = <1>; /* Digital output */
qcom,output-type = <0>; /* CMOS logic */
qcom,pull = <5>; /* QPNP_PIN_PULL_NO*/
- qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,vin-sel = <3>; /* QPNP_PIN_VIN3 */
qcom,out-strength = <3>;/* QPNP_PIN_OUT_STRENGTH_HIGH */
qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
qcom,master-en = <1>; /* Enable GPIO */
@@ -220,7 +276,7 @@
qcom,mode = <1>;
qcom,output-type = <0>;
qcom,pull = <5>;
- qcom,vin-sel = <2>;
+ qcom,vin-sel = <3>;
qcom,out-strength = <3>;
qcom,src-sel = <2>;
qcom,master-en = <1>;
@@ -272,6 +328,6 @@
};
&pm8226_chg {
- qcom,chg-charging-disabled;
- qcom,chg-use-default-batt-values;
+ qcom,charging-disabled;
+ qcom,use-default-batt-values;
};
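
The sdhc_2 node added to this board file above acts as its own interrupt nexus: with #address-cells = <0> and #interrupt-cells = <1>, the three logical interrupts 0/1/2 are fanned out through interrupt-map to two different parents, so the host controller can request hc_irq and pwr_irq from the GIC and the card-detect line from a GPIO controller by index. A minimal sketch of the same pattern, with the specifiers copied from the entries above and everything else about the node omitted (the example_host label is hypothetical):

	example_host: sdhci@f98a4900 {
		#address-cells = <0>;
		#interrupt-cells = <1>;
		interrupt-parent = <&example_host>;
		interrupts = <0 1 2>;
		interrupt-map-mask = <0xffffffff>;
		interrupt-map = <0 &intc 0 125 0>,	/* hc_irq  -> GIC SPI 125 */
				<1 &intc 0 221 0>,	/* pwr_irq -> GIC SPI 221 */
				<2 &msmgpio 38 0x3>;	/* card detect -> msmgpio 38 */
		interrupt-names = "hc_irq", "pwr_irq", "status_irq";
	};
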
diff --git a/arch/arm/boot/dts/msm8226-gpu.dtsi b/arch/arm/boot/dts/msm8226-gpu.dtsi
index 6a8ba3a..bb2f0d4 100644
--- a/arch/arm/boot/dts/msm8226-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8226-gpu.dtsi
@@ -33,7 +33,6 @@
/* Bus Scale Settings */
qcom,msm-bus,name = "grp3d";
qcom,msm-bus,num-cases = <4>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>, <89 604 0 0>,
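
This hunk, like the other active-only removals later in the patch, drops the integer form of qcom,msm-bus,active-only from clients that vote in the dual (active and sleep) context. A hedged sketch of a hypothetical client that does want active-set-only voting, on the assumption that the property is now a bare boolean flag like the other flag properties in this patch:

	qcom,example-bus-client {			/* hypothetical client node */
		qcom,msm-bus,name = "example";
		qcom,msm-bus,num-cases = <2>;
		qcom,msm-bus,active-only;		/* vote in the active set only */
		qcom,msm-bus,num-paths = <1>;
		qcom,msm-bus,vectors-KBps =
			<26 512 0 0>,
			<26 512 40000 160000>;
	};
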
diff --git a/arch/arm/boot/dts/msm8226-mdss.dtsi b/arch/arm/boot/dts/msm8226-mdss.dtsi
index 7ab76f1..5aa39d3 100644
--- a/arch/arm/boot/dts/msm8226-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8226-mdss.dtsi
@@ -65,7 +65,6 @@
vddio-supply = <&pm8226_l8>;
vdda-supply = <&pm8226_l4>;
qcom,supply-names = "vdd", "vddio", "vdda";
- qcom,supply-type = "regulator", "regulator", "regulator";
qcom,supply-min-voltage-level = <2800000 1800000 1200000>;
qcom,supply-max-voltage-level = <2800000 1800000 1200000>;
qcom,supply-peak-current = <150000 100000 100000>;
diff --git a/arch/arm/boot/dts/msm8226-mtp.dts b/arch/arm/boot/dts/msm8226-mtp.dts
index b0a4a3d..478d064 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dts
+++ b/arch/arm/boot/dts/msm8226-mtp.dts
@@ -132,6 +132,30 @@
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
qcom,nonremovable;
+ status = "disabled";
+};
+
+&sdhc_1 {
+ vdd-supply = <&pm8226_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <800 500000>;
+
+ vdd-io-supply = <&pm8226_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <250 154000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
status = "ok";
};
@@ -166,6 +190,38 @@
interrupt-names = "core_irq", "bam_irq", "status_irq";
cd-gpios = <&msmgpio 38 0x1>;
+ status = "disabled";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8226_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
+
+ vdd-io-supply = <&pm8226_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msmgpio 38 0x3>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
status = "ok";
};
@@ -203,7 +259,7 @@
qcom,mode = <1>; /* Digital output */
qcom,output-type = <0>; /* CMOS logic */
qcom,pull = <5>; /* QPNP_PIN_PULL_NO*/
- qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,vin-sel = <3>; /* QPNP_PIN_VIN3 */
qcom,out-strength = <3>;/* QPNP_PIN_OUT_STRENGTH_HIGH */
qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
qcom,master-en = <1>; /* Enable GPIO */
@@ -213,7 +269,7 @@
qcom,mode = <1>;
qcom,output-type = <0>;
qcom,pull = <5>;
- qcom,vin-sel = <2>;
+ qcom,vin-sel = <3>;
qcom,out-strength = <3>;
qcom,src-sel = <2>;
qcom,master-en = <1>;
@@ -303,3 +359,7 @@
&pm8226_bms {
status = "ok";
};
+
+&pm8226_chg {
+ qcom,charging-disabled;
+};
diff --git a/arch/arm/boot/dts/msm8226-pm.dtsi b/arch/arm/boot/dts/msm8226-pm.dtsi
index 2613e11..97b22aa 100644
--- a/arch/arm/boot/dts/msm8226-pm.dtsi
+++ b/arch/arm/boot/dts/msm8226-pm.dtsi
@@ -366,6 +366,7 @@
reg = <0xfe805664 0x40>;
qcom,pc-mode = "tz_l2_int";
qcom,use-sync-timer;
+ qcom,pc-resets-timer;
};
qcom,rpm-log@fc19dc00 {
diff --git a/arch/arm/boot/dts/msm8226-qrd.dts b/arch/arm/boot/dts/msm8226-qrd.dts
index 7d4f0d5..ecb3b5a 100644
--- a/arch/arm/boot/dts/msm8226-qrd.dts
+++ b/arch/arm/boot/dts/msm8226-qrd.dts
@@ -132,6 +132,30 @@
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
qcom,nonremovable;
+ status = "disabled";
+};
+
+&sdhc_1 {
+ vdd-supply = <&pm8226_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <800 500000>;
+
+ vdd-io-supply = <&pm8226_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <250 154000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
status = "ok";
};
@@ -148,7 +172,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -169,6 +193,38 @@
interrupt-names = "core_irq", "bam_irq", "status_irq";
cd-gpios = <&msmgpio 38 0x1>;
+ status = "disabled";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8226_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
+
+ vdd-io-supply = <&pm8226_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msmgpio 38 0x3>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
status = "ok";
};
@@ -213,7 +269,7 @@
qcom,mode = <1>; /* Digital output */
qcom,output-type = <0>; /* CMOS logic */
qcom,pull = <5>; /* QPNP_PIN_PULL_NO*/
- qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,vin-sel = <3>; /* QPNP_PIN_VIN3 */
qcom,out-strength = <3>;/* QPNP_PIN_OUT_STRENGTH_HIGH */
qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
qcom,master-en = <1>; /* Enable GPIO */
@@ -223,7 +279,7 @@
qcom,mode = <1>;
qcom,output-type = <0>;
qcom,pull = <5>;
- qcom,vin-sel = <2>;
+ qcom,vin-sel = <3>;
qcom,out-strength = <3>;
qcom,src-sel = <2>;
qcom,master-en = <1>;
diff --git a/arch/arm/boot/dts/msm8226-smp2p.dtsi b/arch/arm/boot/dts/msm8226-smp2p.dtsi
index 91029e2..079e4ca 100644
--- a/arch/arm/boot/dts/msm8226-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm8226-smp2p.dtsi
@@ -148,6 +148,29 @@
gpios = <&smp2pgpio_smp2p_2_out 0 0>;
};
+ /* SMP2P SSR Driver for inbound entry from lpass. */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* SMP2P SSR Driver for outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
smp2pgpio_smp2p_4_in: qcom,smp2pgpio-smp2p-4-in {
compatible = "qcom,smp2pgpio";
qcom,entry-name = "smp2p";
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 4748ecb..b949d3b 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -49,6 +49,8 @@
aliases {
spi0 = &spi_0;
+ sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
+ sdhc2 = &sdhc_2; /* SDC2 SD card slot */
};
memory {
@@ -65,6 +67,65 @@
clock-frequency = <19200000>;
};
+ timer@f9020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xf9020000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@f9021000 {
+ frame-number = <0>;
+ interrupts = <0 8 0x4>,
+ <0 7 0x4>;
+ reg = <0xf9021000 0x1000>,
+ <0xf9022000 0x1000>;
+ };
+
+ frame@f9023000 {
+ frame-number = <1>;
+ interrupts = <0 9 0x4>;
+ reg = <0xf9023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9024000 {
+ frame-number = <2>;
+ interrupts = <0 10 0x4>;
+ reg = <0xf9024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9025000 {
+ frame-number = <3>;
+ interrupts = <0 11 0x4>;
+ reg = <0xf9025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9026000 {
+ frame-number = <4>;
+ interrupts = <0 12 0x4>;
+ reg = <0xf9026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9027000 {
+ frame-number = <5>;
+ interrupts = <0 13 0x4>;
+ reg = <0xf9027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9028000 {
+ frame-number = <6>;
+ interrupts = <0 14 0x4>;
+ reg = <0xf9028000 0x1000>;
+ status = "disabled";
+ };
+ };
+
qcom,vidc@fdc00000 {
compatible = "qcom,msm-vidc";
reg = <0xfdc00000 0xff000>;
@@ -176,7 +237,6 @@
qcom,msm-bus,name = "usb2";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<87 512 0 0>,
@@ -235,6 +295,12 @@
qcom,cdc-vdd-cx-voltage = <1200000 1200000>;
qcom,cdc-vdd-cx-current = <10000>;
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-vdd-h",
+ "cdc-vdd-px",
+ "cdc-vdd-a-1p2v",
+ "cdc-vdd-cx";
+
qcom,cdc-micbias-ldoh-v = <0x3>;
qcom,cdc-micbias-cfilt1-mv = <1800>;
qcom,cdc-micbias-cfilt2-mv = <1800>;
@@ -413,8 +479,9 @@
qcom,wcnss-wlan@fb000000 {
compatible = "qcom,wcnss_wlan";
- reg = <0xfb000000 0x280000>;
- reg-names = "wcnss_mmio";
+ reg = <0xfb000000 0x280000>,
+ <0xf9011008 0x04>;
+ reg-names = "wcnss_mmio", "wcnss_fiq";
interrupts = <0 145 0 0 146 0>;
interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
@@ -536,6 +603,18 @@
status = "disabled";
};
+ sdhc_1: sdhci@f9824900 {
+ compatible = "qcom,sdhci-msm";
+ reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 123 0>, <0 138 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <8>;
+ status = "disabled";
+ };
+
sdcc2: qcom,sdcc@f98a4000 {
cell-index = <2>; /* SDC2 SD card slot */
compatible = "qcom,msm-sdcc";
@@ -551,6 +630,18 @@
status = "disabled";
};
+ sdhc_2: sdhci@f98a4900 {
+ compatible = "qcom,sdhci-msm";
+ reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 0>, <0 221 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <4>;
+ status = "disabled";
+ };
+
spmi_bus: qcom,spmi@fc4c0000 {
cell-index = <0>;
compatible = "qcom,spmi-pmic-arb";
@@ -664,6 +755,12 @@
interrupts = <0 162 1>;
qcom,firmware-name = "adsp";
+
+ /* GPIO input from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
};
qcom,mss@fc880000 {
@@ -936,28 +1033,38 @@
qcom,fast-avg-setup = <0>;
};
+ chan@39 {
+ label = "usb_id_nopull";
+ reg = <0x39>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
};
&pm8226_chg {
status = "ok";
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "ok";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "ok";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
status = "ok";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "ok";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "ok";
};
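
The memory-mapped timer node added to msm8226.dtsi above registers frame 0 and leaves frames 1 through 6 disabled. A minimal sketch of how a board dts that includes this file could bring up one of the spare frames by re-opening the node path (the path is taken from the nodes above; whether a given target actually owns that frame is an assumption):

	/ {
		timer@f9020000 {
			frame@f9023000 {
				status = "ok";	/* enable frame 1 for this board */
			};
		};
	};
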
diff --git a/arch/arm/boot/dts/msm8610-cdp.dts b/arch/arm/boot/dts/msm8610-cdp.dts
index 5b0eb33..533ad53 100644
--- a/arch/arm/boot/dts/msm8610-cdp.dts
+++ b/arch/arm/boot/dts/msm8610-cdp.dts
@@ -38,7 +38,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -61,7 +61,7 @@
qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
- qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
+ qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
@@ -79,3 +79,25 @@
status = "ok";
};
+
+&pm8110_chg {
+ status = "ok";
+ qcom,chg-charging-disabled;
+ qcom,chg-use-default-batt-values;
+
+ qcom,chg-chgr@1000 {
+ status = "ok";
+ };
+
+ qcom,chg-buck@1100 {
+ status = "ok";
+ };
+
+ qcom,chg-usb-chgpth@1300 {
+ status = "ok";
+ };
+
+ qcom,chg-misc@1600 {
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/msm8610-gpu.dtsi b/arch/arm/boot/dts/msm8610-gpu.dtsi
index f3a8259..5e57430 100644
--- a/arch/arm/boot/dts/msm8610-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8610-gpu.dtsi
@@ -33,7 +33,6 @@
/* Bus Scale Settings */
qcom,msm-bus,name = "grp3d";
qcom,msm-bus,num-cases = <4>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>,
diff --git a/arch/arm/boot/dts/msm8610-ion.dtsi b/arch/arm/boot/dts/msm8610-ion.dtsi
index 848a6f5..41b58da 100644
--- a/arch/arm/boot/dts/msm8610-ion.dtsi
+++ b/arch/arm/boot/dts/msm8610-ion.dtsi
@@ -35,14 +35,6 @@
qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
qcom,memory-reservation-size = <0x100000>;
};
-
- qcom,ion-heap@28 { /* AUDIO HEAP */
- compatible = "qcom,msm-ion-reserve";
- reg = <28>;
- qcom,heap-align = <0x1000>;
- qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
- qcom,memory-reservation-size = <0x314000>;
- };
};
};
diff --git a/arch/arm/boot/dts/msm8610-mtp.dts b/arch/arm/boot/dts/msm8610-mtp.dts
index e3eed72..c906e89 100644
--- a/arch/arm/boot/dts/msm8610-mtp.dts
+++ b/arch/arm/boot/dts/msm8610-mtp.dts
@@ -79,3 +79,28 @@
status = "ok";
};
+
+&pm8110_chg {
+ status = "ok";
+ qcom,chg-charging-disabled;
+
+ qcom,chg-chgr@1000 {
+ status = "ok";
+ };
+
+ qcom,chg-buck@1100 {
+ status = "ok";
+ };
+
+ qcom,chg-bat-if@1200 {
+ status = "ok";
+ };
+
+ qcom,chg-usb-chgpth@1300 {
+ status = "ok";
+ };
+
+ qcom,chg-misc@1600 {
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/msm8610-pm.dtsi b/arch/arm/boot/dts/msm8610-pm.dtsi
index 08a3758..e8849f6 100644
--- a/arch/arm/boot/dts/msm8610-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-pm.dtsi
@@ -368,6 +368,7 @@
reg = <0xfe805664 0x40>;
qcom,pc-mode = "tz_l2_int";
qcom,use-sync-timer;
+ qcom,pc-resets-timer;
};
qcom,rpm-log@fc19dc00 {
diff --git a/arch/arm/boot/dts/msm8610-regulator.dtsi b/arch/arm/boot/dts/msm8610-regulator.dtsi
index d50902c..67eee5c 100644
--- a/arch/arm/boot/dts/msm8610-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8610-regulator.dtsi
@@ -10,19 +10,6 @@
* GNU General Public License for more details.
*/
- /* Stub Regulators */
-
-/ {
- pm8110_s1_corner: regulator-s1-corner {
- compatible = "qcom,stub-regulator";
- regulator-name = "8110_s1_corner";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <1>;
- regulator-max-microvolt = <7>;
- qcom,consumer-supplies = "vdd_dig", "", "vdd_sr2_dig", "";
- };
-};
-
/* SPM controlled regulators */
&spmi_bus {
@@ -60,195 +47,274 @@
};
};
-/* QPNP controlled regulators: */
+/* RPM controlled regulators: */
-&spmi_bus {
+&rpm_bus {
- qcom,pm8110@1 {
-
- pm8110_s1: regulator@1400 {
+ rpm-regulator-smpa1 {
+ status = "okay";
+ pm8110_s1: regulator-s1 {
status = "okay";
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- qcom,enable-time = <500>;
- qcom,system-load = <100000>;
- regulator-always-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1275000>;
};
- pm8110_s3: regulator@1a00 {
- status = "okay";
- regulator-min-microvolt = <1350000>;
+ pm8110_s1_corner: regulator-s1-corner {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_s1_corner";
+ qcom,set = <3>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-corner;
+ qcom,consumer-supplies = "vdd_dig", "", "vdd_sr2_dig", "";
+ };
+
+ pm8110_s1_corner_ao: regulator-s1-corner-ao {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_s1_corner_ao";
+ qcom,set = <1>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-corner;
+ };
+ };
+
+ rpm-regulator-smpa3 {
+ status = "okay";
+ pm8110_s3: regulator-s3 {
+ regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1350000>;
- qcom,enable-time = <500>;
- qcom,system-load = <100000>;
- regulator-always-on;
- };
-
- pm8110_s4: regulator@1d00 {
+ qcom,init-voltage = <1200000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-smpa4 {
+ status = "okay";
+ pm8110_s4: regulator-s4 {
regulator-min-microvolt = <2150000>;
regulator-max-microvolt = <2150000>;
- qcom,enable-time = <500>;
- qcom,system-load = <100000>;
- regulator-always-on;
- };
-
- pm8110_l1: regulator@4000 {
+ qcom,init-voltage = <2150000>;
status = "okay";
- parent-supply = <&pm8110_s3>;
+ };
+ };
+
+ rpm-regulator-ldoa1 {
+ status = "okay";
+ pm8110_l1: regulator-l1 {
regulator-min-microvolt = <1225000>;
regulator-max-microvolt = <1225000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l2: regulator@4100 {
+ qcom,init-voltage = <1225000>;
status = "okay";
- parent-supply = <&pm8110_s3>;
+ };
+ };
+
+ rpm-regulator-ldoa2 {
+ status = "okay";
+ pm8110_l2: regulator-l2 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
- qcom,enable-time = <200>;
- qcom,system-load = <10000>;
- regulator-always-on;
+ qcom,init-voltage = <1200000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa3 {
+ status = "okay";
+ pm8110_l3: regulator-l3 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1275000>;
+ status = "okay";
};
- pm8110_l3: regulator@4200 {
+ pm8110_l3_ao: regulator-l3-ao {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l3_ao";
+ qcom,set = <1>;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1275000>;
status = "okay";
- parent-supply = <&pm8110_s3>;
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- qcom,enable-time = <200>;
- qcom,system-load = <10000>;
- regulator-always-on;
};
- pm8110_l4: regulator@4300 {
+ pm8110_l3_so: regulator-l3-so {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8110_l3_so";
+ qcom,set = <2>;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1275000>;
+ qcom,init-voltage = <750000>;
status = "okay";
- parent-supply = <&pm8110_s3>;
+ };
+ };
+
+ rpm-regulator-ldoa4 {
+ status = "okay";
+ pm8110_l4: regulator-l4 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l5: regulator@4400 {
+ qcom,init-voltage = <1200000>;
status = "okay";
- parent-supply = <&pm8110_s3>;
+ };
+ };
+
+ rpm-regulator-ldoa5 {
+ status = "okay";
+ pm8110_l5: regulator-l5 {
regulator-min-microvolt = <1300000>;
regulator-max-microvolt = <1300000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l6: regulator@4500 {
+ qcom,init-voltage = <1300000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa6 {
+ status = "okay";
+ pm8110_l6: regulator-l6 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- qcom,enable-time = <200>;
- qcom,system-load = <10000>;
- regulator-always-on;
- };
-
- pm8110_l7: regulator@4600 {
+ qcom,init-voltage = <1800000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa7 {
+ status = "okay";
+ pm8110_l7: regulator-l7 {
regulator-min-microvolt = <2050000>;
regulator-max-microvolt = <2050000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l8: regulator@4700 {
+ qcom,init-voltage = <2050000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa8 {
+ status = "okay";
+ pm8110_l8: regulator-l8 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l9: regulator@4800 {
+ qcom,init-voltage = <1800000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa9 {
+ status = "okay";
+ pm8110_l9: regulator-l9 {
regulator-min-microvolt = <2050000>;
regulator-max-microvolt = <2050000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l10: regulator@4900 {
+ qcom,init-voltage = <2050000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa10 {
+ status = "okay";
+ pm8110_l10: regulator-l10 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- qcom,enable-time = <200>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
qcom,consumer-supplies = "vdd_sr2_pll", "";
};
+ };
- pm8110_l12: regulator@4b00 {
- status = "okay";
+ rpm-regulator-ldoa12 {
+ status = "okay";
+ pm8110_l12: regulator-l12 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l14: regulator@4d00 {
+ qcom,init-voltage = <3300000>;
status = "okay";
- parent-supply = <&pm8110_s4>;
+ };
+ };
+
+ rpm-regulator-ldoa14 {
+ status = "okay";
+ pm8110_l14: regulator-l14 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l15: regulator@4e00 {
+ qcom,init-voltage = <1800000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa15 {
+ status = "okay";
+ pm8110_l15: regulator-l15 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l16: regulator@4f00 {
+ qcom,init-voltage = <3300000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa16 {
+ status = "okay";
+ pm8110_l16: regulator-l16 {
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l17: regulator@5000 {
+ qcom,init-voltage = <3000000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa17 {
+ status = "okay";
+ pm8110_l17: regulator-l17 {
regulator-min-microvolt = <2900000>;
regulator-max-microvolt = <2900000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l18: regulator@5100 {
+ qcom,init-voltage = <2900000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa18 {
+ status = "okay";
+ pm8110_l18: regulator-l18 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2950000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l19: regulator@5200 {
+ qcom,init-voltage = <2950000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa19 {
+ status = "okay";
+ pm8110_l19: regulator-l19 {
regulator-min-microvolt = <2850000>;
regulator-max-microvolt = <2850000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l20: regulator@5300 {
+ qcom,init-voltage = <2850000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa20 {
+ status = "okay";
+ pm8110_l20: regulator-l20 {
regulator-min-microvolt = <3075000>;
regulator-max-microvolt = <3075000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l21: regulator@5400 {
+ qcom,init-voltage = <3075000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa21 {
+ status = "okay";
+ pm8110_l21: regulator-l21 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2950000>;
- qcom,enable-time = <200>;
- };
-
- pm8110_l22: regulator@5500 {
+ qcom,init-voltage = <2950000>;
status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa22 {
+ status = "okay";
+ pm8110_l22: regulator-l22 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- qcom,enable-time = <200>;
+ qcom,init-voltage = <3300000>;
+ status = "okay";
};
};
};
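
With this conversion the PM8110 rails are requested through rpm_bus child nodes instead of the QPNP regulator nodes, and several rails gain per-set handles selected by qcom,set (by convention 1 = active set, 2 = sleep set, 3 = both; the convention itself is an assumption not spelled out in this hunk), e.g. pm8110_s1_corner_ao and pm8110_l3_ao for active-set-only votes. A consumer that only needs its rail while the Apps processor is awake points its supply at the _ao handle, as the a7_mem-supply change in msm8610.dtsi later in this patch does. A hedged sketch with a hypothetical consumer node:

	qcom,example-consumer {				/* hypothetical consumer */
		/* active-set-only vote: released when the Apps sleep set applies */
		example-mem-supply = <&pm8110_l3_ao>;
		/* corner vote held across both RPM sets */
		example-cx-supply = <&pm8110_s1_corner>;
	};
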
diff --git a/arch/arm/boot/dts/msm8610-smp2p.dtsi b/arch/arm/boot/dts/msm8610-smp2p.dtsi
index 91029e2..079e4ca 100644
--- a/arch/arm/boot/dts/msm8610-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm8610-smp2p.dtsi
@@ -148,6 +148,29 @@
gpios = <&smp2pgpio_smp2p_2_out 0 0>;
};
+ /* SMP2P SSR Driver for inbound entry from lpass. */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* SMP2P SSR Driver for outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
smp2pgpio_smp2p_4_in: qcom,smp2pgpio-smp2p-4-in {
compatible = "qcom,smp2pgpio";
qcom,entry-name = "smp2p";
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index 595fabd..ad8b24b 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -57,6 +57,65 @@
clock-frequency = <19200000>;
};
+ timer@f9020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xf9020000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@f9021000 {
+ frame-number = <0>;
+ interrupts = <0 8 0x4>,
+ <0 7 0x4>;
+ reg = <0xf9021000 0x1000>,
+ <0xf9022000 0x1000>;
+ };
+
+ frame@f9023000 {
+ frame-number = <1>;
+ interrupts = <0 9 0x4>;
+ reg = <0xf9023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9024000 {
+ frame-number = <2>;
+ interrupts = <0 10 0x4>;
+ reg = <0xf9024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9025000 {
+ frame-number = <3>;
+ interrupts = <0 11 0x4>;
+ reg = <0xf9025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9026000 {
+ frame-number = <4>;
+ interrupts = <0 12 0x4>;
+ reg = <0xf9026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9027000 {
+ frame-number = <5>;
+ interrupts = <0 13 0x4>;
+ reg = <0xf9027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9028000 {
+ frame-number = <6>;
+ interrupts = <0 14 0x4>;
+ reg = <0xf9028000 0x1000>;
+ status = "disabled";
+ };
+ };
+
qcom,msm-adsp-loader {
compatible = "qcom,adsp-loader";
qcom,adsp-state = <0>;
@@ -92,6 +151,35 @@
qcom,max-hw-load = <97200>; /* FWVGA @ 30 * 2 */
};
+ qcom,usbbam@f9a44000 {
+ compatible = "qcom,usb-bam-msm";
+ reg = <0xf9a44000 0x11000>;
+ reg-names = "hsusb";
+ interrupts = <0 135 0>;
+ interrupt-names = "hsusb";
+ qcom,usb-bam-num-pipes = <16>;
+ qcom,usb-bam-fifo-baseaddr = <0xfe803000>;
+ qcom,ignore-core-reset-ack;
+ qcom,disable-clk-gating;
+
+ qcom,pipe0 {
+ label = "hsusb-qdss-in-0";
+ qcom,usb-bam-mem-type = <3>;
+ qcom,bam-type = <1>;
+ qcom,dir = <1>;
+ qcom,pipe-num = <0>;
+ qcom,peer-bam = <1>;
+ qcom,src-bam-physical-address = <0xfc37c000>;
+ qcom,src-bam-pipe-index = <0>;
+ qcom,dst-bam-physical-address = <0xf9a44000>;
+ qcom,dst-bam-pipe-index = <2>;
+ qcom,data-fifo-offset = <0x0>;
+ qcom,data-fifo-size = <0x600>;
+ qcom,descriptor-fifo-offset = <0x600>;
+ qcom,descriptor-fifo-size = <0x200>;
+ };
+ };
+
usb@f9a55000 {
compatible = "qcom,hsusb-otg";
reg = <0xf9a55000 0x400>;
@@ -313,7 +401,7 @@
reg = <0xf9011050 0x8>;
reg-names = "rcg_base";
a7_cpu-supply = <&apc_vreg_corner>;
- a7_mem-supply = <&pm8110_l3>;
+ a7_mem-supply = <&pm8110_l3_ao>;
};
spmi_bus: qcom,spmi@fc4c0000 {
@@ -495,6 +583,25 @@
compatible = "qcom,msm-pcm-hostless";
};
+ qcom,wcnss-wlan@fb000000 {
+ compatible = "qcom,wcnss_wlan";
+ reg = <0xfb000000 0x280000>;
+ reg-names = "wcnss_mmio";
+ interrupts = <0 145 0>, <0 146 0>;
+ interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
+
+ qcom,pronto-vddmx-supply = <&pm8110_l3>;
+ qcom,pronto-vddcx-supply = <&pm8110_s1>;
+ qcom,pronto-vddpx-supply = <&pm8110_l6>;
+ qcom,iris-vddxo-supply = <&pm8110_l10>;
+ qcom,iris-vddrfa-supply = <&pm8110_l5>;
+ qcom,iris-vddpa-supply = <&pm8110_l16>;
+ qcom,iris-vdddig-supply = <&pm8110_l5>;
+
+ gpios = <&msmgpio 23 0>, <&msmgpio 24 0>, <&msmgpio 25 0>, <&msmgpio 26 0>, <&msmgpio 27 0>;
+ qcom,has_pronto_hw;
+ };
+
qcom,mss@fc880000 {
compatible = "qcom,pil-q6v5-mss";
reg = <0xfc880000 0x100>,
@@ -532,6 +639,12 @@
interrupts = <0 162 1>;
vdd_cx-supply = <&pm8110_s1_corner>;
qcom,firmware-name = "adsp";
+
+ /* GPIO input from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
};
tsens: tsens@fc4a8000 {
@@ -545,6 +658,7 @@
qcom,calib-mode = "fuse_map3";
qcom,calibration-less-mode;
qcom,tsens-local-init;
+ qcom,sensor-id = <0 5>;
};
qcom,msm-thermal {
@@ -561,6 +675,58 @@
reg = <0xfd484000 0x400>;
qcom,num-locks = <8>;
};
+
+ qcom,bam_dmux@fc834000 {
+ compatible = "qcom,bam_dmux";
+ reg = <0xfc834000 0x7000>;
+ interrupts = <0 29 1>;
+ };
+
+ qcom,qseecom@7B00000 {
+ compatible = "qcom,qseecom";
+ reg = <0x7B00000 0x500000>;
+ reg-names = "secapp-region";
+ qcom,disk-encrypt-pipe-pair = <2>;
+ qcom,hlos-ce-hw-instance = <0>;
+ qcom,qsee-ce-hw-instance = <0>;
+ qcom,msm-bus,name = "qseecom-noc";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <55 512 0 0>,
+ <55 512 3936000 393600>,
+ <55 512 3936000 393600>,
+ <55 512 3936000 393600>;
+ };
+
+ jtag_mm0: jtagmm@fc34c000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc34c000 0x1000>,
+ <0xfc340000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ jtag_mm1: jtagmm@fc34d000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc34d000 0x1000>,
+ <0xfc342000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ jtag_mm2: jtagmm@fc34e000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc34e000 0x1000>,
+ <0xfc344000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
+
+ jtag_mm3: jtagmm@fc34f000 {
+ compatible = "qcom,jtag-mm";
+ reg = <0xfc34f000 0x1000>,
+ <0xfc346000 0x1000>;
+ reg-names = "etm-base","debug-base";
+ };
};
&gdsc_vfe {
@@ -597,6 +763,7 @@
/include/ "msm8610-iommu-domains.dtsi"
+/include/ "msm-pm8110-rpm-regulator.dtsi"
/include/ "msm-pm8110.dtsi"
/include/ "msm8610-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm8974-bus.dtsi b/arch/arm/boot/dts/msm8974-bus.dtsi
index cebb907..bb4b48e 100644
--- a/arch/arm/boot/dts/msm8974-bus.dtsi
+++ b/arch/arm/boot/dts/msm8974-bus.dtsi
@@ -284,8 +284,8 @@
qcom,qport = <0>;
qcom,mas-hw-id = <18>;
qcom,mode = "Fixed";
- qcom,prio-rd = <2>;
- qcom,prio-wr = <2>;
+ qcom,prio1 = <2>;
+ qcom,prio0 = <2>;
};
mas-qdss-bam {
@@ -296,8 +296,8 @@
qcom,mode = "Fixed";
qcom,qport = <1>;
qcom,mas-hw-id = <19>;
- qcom,prio-rd = <1>;
- qcom,prio-wr = <1>;
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
qcom,hw-sel = "NoC";
};
@@ -342,8 +342,8 @@
qcom,mas-hw-id = <29>;
qcom,slv-hw-id = <28>;
qcom,mode = "Fixed";
- qcom,prio-rd = <2>;
- qcom,prio-wr = <2>;
+ qcom,prio1 = <2>;
+ qcom,prio0 = <2>;
};
fab-ovnoc {
@@ -364,8 +364,8 @@
qcom,qport = <2>;
qcom,mas-hw-id = <23>;
qcom,hw-sel = "NoC";
- qcom,prio-rd = <1>;
- qcom,prio-wr = <1>;
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
};
mas-crypto-core1 {
@@ -377,8 +377,8 @@
qcom,qport = <3>;
qcom,mas-hw-id = <24>;
qcom,hw-sel = "NoC";
- qcom,prio-rd = <1>;
- qcom,prio-wr = <1>;
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
};
mas-lpass-proc {
@@ -389,8 +389,8 @@
qcom,qport = <4>;
qcom,mas-hw-id = <25>;
qcom,mode = "Fixed";
- qcom,prio-rd = <2>;
- qcom,prio-wr = <2>;
+ qcom,prio1 = <2>;
+ qcom,prio0 = <2>;
};
mas-mss {
@@ -435,8 +435,8 @@
qcom,qport = <10>;
qcom,mode = "Fixed";
qcom,mas-hw-id = <31>;
- qcom,prio-rd = <1>;
- qcom,prio-wr = <1>;
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
qcom,hw-sel = "NoC";
};
@@ -448,8 +448,8 @@
qcom,mode = "Fixed";
qcom,qport = <11>;
qcom,mas-hw-id = <32>;
- qcom,prio-rd = <1>;
- qcom,prio-wr = <1>;
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
qcom,hw-sel = "NoC";
qcom,iface-clk-node = "msm_usb3";
};
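
These hunks replace the read/write priority pair with the NoC low/high priority signals on masters that select the NoC (qcom,hw-sel = "NoC" in the surrounding context). A hedged sketch of a hypothetical fixed-mode NoC master, using only properties that appear in the hunks above; the port and hardware ids are placeholders:

	mas-example-noc {			/* hypothetical NoC master */
		qcom,qport = <9>;		/* placeholder port number */
		qcom,mas-hw-id = <99>;		/* placeholder hardware id */
		qcom,mode = "Fixed";
		qcom,hw-sel = "NoC";
		qcom,prio0 = <1>;		/* priority low signal */
		qcom,prio1 = <1>;		/* priority high signal */
	};
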
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index 41e3783..0319128 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -229,13 +229,25 @@
qcom,msm-bus,name = "hsic";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<85 512 0 0>,
<85 512 40000 160000>;
};
+ wlan0: qca,wlan {
+ compatible = "qca,ar6004-hsic";
+ qcom,msm-bus,name = "wlan";
+ qcom,msm-bus,num-cases = <5>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <85 512 0 0>,
+ <85 512 40000 160000>,
+ <85 512 40000 320000>,
+ <85 512 40000 480000>,
+ <85 512 40000 640000>;
+ };
};
&spmi_bus {
@@ -382,23 +394,23 @@
&pm8941_chg {
status = "ok";
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "ok";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "ok";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "ok";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
status = "ok";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index de370e7..25d0885 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -353,29 +353,29 @@
&pm8941_chg {
status = "ok";
- qcom,chg-charging-disabled;
+ qcom,charging-disabled;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "ok";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "ok";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
status = "ok";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "ok";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
status = "ok";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8974-gpu.dtsi b/arch/arm/boot/dts/msm8974-gpu.dtsi
index 28d1d61..3779dbd 100644
--- a/arch/arm/boot/dts/msm8974-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8974-gpu.dtsi
@@ -33,7 +33,6 @@
/* Bus Scale Settings */
qcom,msm-bus,name = "grp3d";
qcom,msm-bus,num-cases = <6>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>, <89 604 0 0>,
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index 0f38e44..be890b1 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -382,7 +382,6 @@
qcom,msm-bus,name = "hsic";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<85 512 0 0>,
@@ -737,25 +736,25 @@
&pm8941_chg {
status = "ok";
- qcom,chg-charging-disabled;
+ qcom,charging-disabled;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "ok";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "ok";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "ok";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
status = "ok";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8974-mdss.dtsi b/arch/arm/boot/dts/msm8974-mdss.dtsi
index 88641f9..86f8141 100644
--- a/arch/arm/boot/dts/msm8974-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8974-mdss.dtsi
@@ -111,10 +111,9 @@
core-vdda-supply = <&pm8941_l12>;
core-vcc-supply = <&pm8941_s3>;
qcom,hdmi-tx-supply-names = "hpd-gdsc", "hpd-5v", "core-vdda", "core-vcc";
- qcom,hdmi-tx-supply-type = <1 1 0 0>;
qcom,hdmi-tx-min-voltage-level = <0 0 1800000 1800000>;
qcom,hdmi-tx-max-voltage-level = <0 0 1800000 1800000>;
- qcom,hdmi-tx-op-mode = <0 0 1800000 0>;
+ qcom,hdmi-tx-peak-current = <0 0 1800000 0>;
qcom,hdmi-tx-cec = <&msmgpio 31 0>;
qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index a81fc20..8b9ef87 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -336,29 +336,29 @@
&pm8941_chg {
status = "ok";
- qcom,chg-charging-disabled;
+ qcom,charging-disabled;
- qcom,chg-chgr@1000 {
+ qcom,chgr@1000 {
status = "ok";
};
- qcom,chg-buck@1100 {
+ qcom,buck@1100 {
status = "ok";
};
- qcom,chg-bat-if@1200 {
+ qcom,bat-if@1200 {
status = "ok";
};
- qcom,chg-usb-chgpth@1300 {
+ qcom,usb-chgpth@1300 {
status = "ok";
};
- qcom,chg-dc-chgpth@1400 {
+ qcom,dc-chgpth@1400 {
status = "ok";
};
- qcom,chg-boost@1500 {
+ qcom,boost@1500 {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8974-smp2p.dtsi b/arch/arm/boot/dts/msm8974-smp2p.dtsi
index 91029e2..079e4ca 100644
--- a/arch/arm/boot/dts/msm8974-smp2p.dtsi
+++ b/arch/arm/boot/dts/msm8974-smp2p.dtsi
@@ -148,6 +148,29 @@
gpios = <&smp2pgpio_smp2p_2_out 0 0>;
};
+ /* SMP2P SSR Driver for inbound entry from lpass. */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* SMP2P SSR Driver for outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
smp2pgpio_smp2p_4_in: qcom,smp2pgpio-smp2p-4-in {
compatible = "qcom,smp2pgpio";
qcom,entry-name = "smp2p";
diff --git a/arch/arm/boot/dts/msm8974-v1-fluid.dts b/arch/arm/boot/dts/msm8974-v1-fluid.dts
index 8f2ef31..8ab24df 100644
--- a/arch/arm/boot/dts/msm8974-v1-fluid.dts
+++ b/arch/arm/boot/dts/msm8974-v1-fluid.dts
@@ -23,7 +23,7 @@
};
&pm8941_chg {
- qcom,chg-charging-disabled;
+ qcom,charging-disabled;
};
&sdcc1 {
diff --git a/arch/arm/boot/dts/msm8974-v1-mtp.dts b/arch/arm/boot/dts/msm8974-v1-mtp.dts
index 6cb9f09..09ea84b 100644
--- a/arch/arm/boot/dts/msm8974-v1-mtp.dts
+++ b/arch/arm/boot/dts/msm8974-v1-mtp.dts
@@ -22,5 +22,5 @@
};
&pm8941_chg {
- qcom,chg-charging-disabled;
+ qcom,charging-disabled;
};
diff --git a/arch/arm/boot/dts/msm8974-v1-pm.dtsi b/arch/arm/boot/dts/msm8974-v1-pm.dtsi
index a0b9be6..ec6b14a 100644
--- a/arch/arm/boot/dts/msm8974-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1-pm.dtsi
@@ -188,7 +188,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <1>;
qcom,ss-power = <784>;
qcom,energy-overhead = <190000>;
@@ -205,7 +205,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <75>;
qcom,ss-power = <735>;
qcom,energy-overhead = <77341>;
@@ -223,7 +223,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <95>;
qcom,ss-power = <725>;
qcom,energy-overhead = <99500>;
@@ -240,7 +240,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <2000>;
qcom,ss-power = <138>;
qcom,energy-overhead = <1208400>;
@@ -257,7 +257,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <3000>;
qcom,ss-power = <110>;
qcom,energy-overhead = <1250300>;
@@ -446,9 +446,9 @@
qcom,offset-page-indices = <56>;
};
- qcom,rpm-stats@0xfc19dbd0{
+ qcom,rpm-stats@fc19dba0 {
compatible = "qcom,rpm-stats";
- reg = <0xfc19dbd0 0x1000>;
+ reg = <0xfc19dba0 0x1000>;
reg-names = "phys_addr_base";
qcom,sleep-stats-version = <2>;
};
diff --git a/arch/arm/boot/dts/msm8974-v2-pm.dtsi b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
index 24b68b5..41837c1 100644
--- a/arch/arm/boot/dts/msm8974-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
@@ -188,7 +188,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <1>;
qcom,ss-power = <784>;
qcom,energy-overhead = <190000>;
@@ -205,7 +205,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <75>;
qcom,ss-power = <735>;
qcom,energy-overhead = <77341>;
@@ -223,7 +223,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <95>;
qcom,ss-power = <725>;
qcom,energy-overhead = <99500>;
@@ -240,7 +240,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <2000>;
qcom,ss-power = <138>;
qcom,energy-overhead = <1208400>;
@@ -257,7 +257,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <3000>;
qcom,ss-power = <110>;
qcom,energy-overhead = <1250300>;
@@ -446,9 +446,9 @@
qcom,offset-page-indices = <56>;
};
- qcom,rpm-stats@0xfc19dbd0{
+ qcom,rpm-stats@fc19dba0 {
compatible = "qcom,rpm-stats";
- reg = <0xfc19dbd0 0x1000>;
+ reg = <0xfc19dba0 0x1000>;
reg-names = "phys_addr_base";
qcom,sleep-stats-version = <2>;
};
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index 777d26c..494b12c 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -63,6 +63,8 @@
qcom,mdss-intf-off = <0x00012500 0x00012700
0x00012900 0x00012b00>;
qcom,mdss-pingpong-off = <0x00012D00 0x00012E00 0x00012F00>;
+ qcom,mdss-has-bwc;
+ qcom,mdss-has-decimation;
};
&mdss_hdmi_tx {
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 2f958aeeb1..a9685cc 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -84,6 +84,66 @@
clock-frequency = <19200000>;
};
+ timer@f9020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xf9020000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@f9021000 {
+ frame-number = <0>;
+ interrupts = <0 8 0x4>,
+ <0 7 0x4>;
+ reg = <0xf9021000 0x1000>,
+ <0xf9022000 0x1000>;
+ };
+
+ frame@f9023000 {
+ frame-number = <1>;
+ interrupts = <0 9 0x4>;
+ reg = <0xf9023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9024000 {
+ frame-number = <2>;
+ interrupts = <0 10 0x4>;
+ reg = <0xf9024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9025000 {
+ frame-number = <3>;
+ interrupts = <0 11 0x4>;
+ reg = <0xf9025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9026000 {
+ frame-number = <4>;
+ interrupts = <0 12 0x4>;
+ reg = <0xf9026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9027000 {
+ frame-number = <5>;
+ interrupts = <0 13 0x4>;
+ reg = <0xf9027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9028000 {
+ frame-number = <6>;
+ interrupts = <0 14 0x4>;
+ reg = <0xf9028000 0x1000>;
+ status = "disabled";
+ };
+ };
+
qcom,mpm2-sleep-counter@fc4a3000 {
compatible = "qcom,mpm2-sleep-counter";
reg = <0xfc4a3000 0x1000>;
@@ -102,6 +162,7 @@
qcom,vidc {
compatible = "qcom,msm-vidc";
qcom,hfi = "q6";
+ qcom,max-hw-load = <108000>; /* 720p @ 30 */
};
qcom,wfd {
@@ -130,7 +191,6 @@
qcom,msm-bus,name = "serial_uart2";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<84 512 0 0>,
@@ -157,7 +217,6 @@
qcom,msm-bus,name = "usb2";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<87 512 0 0>,
@@ -198,7 +257,6 @@
qcom,msm-bus,name = "sdcc1";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
<78 512 1600 3200>, /* 400 KB/s*/
@@ -245,7 +303,6 @@
qcom,msm-bus,name = "sdcc2";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
<81 512 1600 3200>, /* 400 KB/s*/
@@ -292,7 +349,6 @@
qcom,msm-bus,name = "sdcc3";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <79 512 0 0>, /* No vote */
<79 512 1600 3200>, /* 400 KB/s*/
@@ -338,7 +394,6 @@
qcom,msm-bus,name = "sdcc4";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <80 512 0 0>, /* No vote */
<80 512 1600 3200>, /* 400 KB/s*/
@@ -365,7 +420,6 @@
qcom,msm-bus,name = "sdhc1";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
<78 512 1600 3200>, /* 400 KB/s*/
@@ -392,7 +446,6 @@
qcom,msm-bus,name = "sdhc2";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
<81 512 1600 3200>, /* 400 KB/s*/
@@ -426,7 +479,6 @@
qcom,msm-bus,name = "sdhc3";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <79 512 0 0>, /* No vote */
<79 512 1600 3200>, /* 400 KB/s*/
@@ -460,7 +512,6 @@
qcom,msm-bus,name = "sdhc4";
qcom,msm-bus,num-cases = <8>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <80 512 0 0>, /* No vote */
<80 512 1600 3200>, /* 400 KB/s*/
@@ -582,6 +633,14 @@
qcom,cdc-vddcx-2-voltage = <1225000 1225000>;
qcom,cdc-vddcx-2-current = <10000>;
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1",
+ "cdc-vdd-a-1p2v",
+ "cdc-vddcx-1",
+ "cdc-vddcx-2";
+
qcom,cdc-micbias-ldoh-v = <0x3>;
qcom,cdc-micbias-cfilt1-mv = <1800>;
qcom,cdc-micbias-cfilt2-mv = <2700>;
@@ -627,7 +686,6 @@
"MIC BIAS4 External", "Digital Mic6";
qcom,cdc-mclk-gpios = <&pm8941_gpios 15 0>;
- taiko-mclk-clk = <&pm8941_clkdiv1>;
qcom,taiko-mclk-clk-freq = <9600000>;
qcom,prim-auxpcm-gpio-clk = <&msmgpio 65 0>;
qcom,prim-auxpcm-gpio-sync = <&msmgpio 66 0>;
@@ -734,7 +792,6 @@
qcom,msm-bus,name = "usb3";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<61 512 0 0>,
@@ -776,6 +833,12 @@
interrupts = <0 162 1>;
qcom,firmware-name = "adsp";
+
+ /* GPIO input from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
};
qcom,msm-adsp-loader {
@@ -1002,7 +1065,6 @@
compatible = "qcom,msm-ocmem-audio";
qcom,msm-bus,name = "audio-ocmem";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<11 604 0 0>,
@@ -1069,8 +1131,9 @@
qcom,wcnss-wlan@fb000000 {
compatible = "qcom,wcnss_wlan";
- reg = <0xfb000000 0x280000>;
- reg-names = "wcnss_mmio";
+ reg = <0xfb000000 0x280000>,
+ <0xf9011008 0x04>;
+ reg-names = "wcnss_mmio", "wcnss_fiq";
interrupts = <0 145 0 0 146 0>;
interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
@@ -1143,7 +1206,6 @@
qcom,qsee-ce-hw-instance = <0>;
qcom,msm-bus,name = "qseecom-noc";
qcom,msm-bus,num-cases = <4>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<55 512 0 0>,
@@ -1224,7 +1286,6 @@
qcom,ce-hw-instance = <1>;
qcom,msm-bus,name = "qcedev-noc";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<56 512 0 0>,
@@ -1241,7 +1302,6 @@
qcom,ce-hw-instance = <1>;
qcom,msm-bus,name = "qcrypto-noc";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<56 512 0 0>,
@@ -1304,6 +1364,29 @@
qcom,limit-temp = <60>;
qcom,temp-hysteresis = <10>;
qcom,freq-step = <2>;
+ qcom,core-limit-temp = <80>;
+ qcom,core-temp-hysteresis = <10>;
+ qcom,core-control-mask = <0xe>;
+ qcom,vdd-restriction-temp = <5>;
+ qcom,vdd-restriction-temp-hysteresis = <10>;
+ qcom,pmic-sw-mode-temp = <90>;
+ qcom,pmic-sw-mode-temp-hysteresis = <70>;
+ qcom,pmic-sw-mode-regs = "vdd_dig";
+ vdd_dig-supply = <&pm8841_s2_floor_corner>;
+ vdd_gfx-supply = <&pm8841_s4_floor_corner>;
+
+ qcom,vdd-dig-rstr{
+ qcom,vdd-rstr-reg = "vdd_dig";
+ qcom,levels = <5 7 7>; /* Nominal, Super Turbo, Super Turbo */
+ qcom,min-level = <1>; /* No Request */
+ };
+
+ qcom,vdd-gfx-rstr{
+ qcom,vdd-rstr-reg = "vdd_gfx";
+ qcom,levels = <5 7 7>; /* Nominal, Super Turbo, Super Turbo */
+ qcom,min-level = <1>; /* No Request */
+ };
+
};
qcom,bam_dmux@fc834000 {
@@ -1331,7 +1414,6 @@
qcom,bam-rx-ep-pipe-index = <1>;
qcom,msm-bus,name = "uart7";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<84 512 0 0>,
diff --git a/arch/arm/boot/dts/msm9625-v2-1-cdp.dts b/arch/arm/boot/dts/msm9625-cdp.dtsi
similarity index 79%
rename from arch/arm/boot/dts/msm9625-v2-1-cdp.dts
rename to arch/arm/boot/dts/msm9625-cdp.dtsi
index da07100..1f9cbb0 100644
--- a/arch/arm/boot/dts/msm9625-v2-1-cdp.dts
+++ b/arch/arm/boot/dts/msm9625-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,17 +10,10 @@
* GNU General Public License for more details.
*/
-/dts-v1/;
-
-/include/ "msm9625-v2-1.dtsi"
+/include/ "msm9625-display.dtsi"
+/include/ "qpic-panel-ili-qvga.dtsi"
/ {
- model = "Qualcomm MSM 9625V2.1 CDP";
- compatible = "qcom,msm9625-cdp", "qcom,msm9625", "qcom,cdp";
- qcom,msm-id = <134 1 0x20001>, <152 1 0x20001>, <149 1 0x20001>,
- <150 1 0x20001>, <151 1 0x20001>, <148 1 0x20001>,
- <173 1 0x20001>, <174 1 0x20001>, <175 1 0x20001>;
-
i2c@f9925000 {
charger@57 {
compatible = "summit,smb137c";
@@ -42,10 +35,18 @@
wlan0: qca,wlan {
cell-index = <0>;
- compatible = "qca,ar6004-sdio";
+ compatible = "qca,ar6004-hsic";
qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,ar6004-vdd-io-supply = <&pm8019_l11>;
+ qca,vdd-io-supply = <&pm8019_l11>;
+ };
+
+ qca,wlan_ar6003 {
+ cell-index = <0>;
+ compatible = "qca,ar6003-sdio";
+ qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
+ qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
+ qca,vdd-io-supply = <&pm8019_l11>;
};
};
diff --git a/arch/arm/boot/dts/msm9625-v2-1-cdp.dts b/arch/arm/boot/dts/msm9625-mtp.dtsi
similarity index 73%
copy from arch/arm/boot/dts/msm9625-v2-1-cdp.dts
copy to arch/arm/boot/dts/msm9625-mtp.dtsi
index da07100..cc0bf5e 100644
--- a/arch/arm/boot/dts/msm9625-v2-1-cdp.dts
+++ b/arch/arm/boot/dts/msm9625-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,17 +10,7 @@
* GNU General Public License for more details.
*/
-/dts-v1/;
-
-/include/ "msm9625-v2-1.dtsi"
-
/ {
- model = "Qualcomm MSM 9625V2.1 CDP";
- compatible = "qcom,msm9625-cdp", "qcom,msm9625", "qcom,cdp";
- qcom,msm-id = <134 1 0x20001>, <152 1 0x20001>, <149 1 0x20001>,
- <150 1 0x20001>, <151 1 0x20001>, <148 1 0x20001>,
- <173 1 0x20001>, <174 1 0x20001>, <175 1 0x20001>;
-
i2c@f9925000 {
charger@57 {
compatible = "summit,smb137c";
@@ -42,10 +32,18 @@
wlan0: qca,wlan {
cell-index = <0>;
- compatible = "qca,ar6004-sdio";
+ compatible = "qca,ar6004-hsic";
qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,ar6004-vdd-io-supply = <&pm8019_l11>;
+ qca,vdd-io-supply = <&pm8019_l11>;
+ };
+
+ qca,wlan_ar6003 {
+ cell-index = <0>;
+ compatible = "qca,ar6003-sdio";
+ qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
+ qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
+ qca,vdd-io-supply = <&pm8019_l11>;
};
};
@@ -89,11 +87,23 @@
};
mpp@a300 { /* MPP 4 */
+ /* VADC channel 19 */
+ qcom,mode = <4>;
+ qcom,ain-route = <3>; /* AMUX 8 */
+ qcom,master-en = <1>;
+ qcom,src-sel = <0>; /* Function constant */
+ qcom,invert = <1>;
};
mpp@a400 { /* MPP 5 */
};
mpp@a500 { /* MPP 6 */
+ /* channel 21 */
+ qcom,mode = <4>;
+ qcom,ain-route = <1>; /* AMUX 6 */
+ qcom,master-en = <1>;
+ qcom,src-sel = <0>; /* Function constant */
+ qcom,invert = <1>;
};
};
diff --git a/arch/arm/boot/dts/msm9625-pm.dtsi b/arch/arm/boot/dts/msm9625-pm.dtsi
index 51a3faa..3e421a8 100644
--- a/arch/arm/boot/dts/msm9625-pm.dtsi
+++ b/arch/arm/boot/dts/msm9625-pm.dtsi
@@ -80,7 +80,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <100>;
qcom,ss-power = <8000>;
qcom,energy-overhead = <100000>;
@@ -97,7 +97,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <2000>;
qcom,ss-power = <5000>;
qcom,energy-overhead = <60100000>;
@@ -114,7 +114,7 @@
qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <3500>;
qcom,ss-power = <5000>;
qcom,energy-overhead = <60350000>;
@@ -131,7 +131,7 @@
qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
qcom,irqs-detectable;
- qcom.gpios-detectable;
+ qcom,gpio-detectable;
qcom,latency-us = <4500>;
qcom,ss-power = <5000>;
qcom,energy-overhead = <60350000>;
@@ -289,9 +289,9 @@
qcom,offset-page-indices = <56>;
};
- qcom,rpm-stats@fc19dbd0 {
+ qcom,rpm-stats@fc19dba0 {
compatible = "qcom,rpm-stats";
- reg = <0xfc19dbd0 0x1000>;
+ reg = <0xfc19dba0 0x1000>;
reg-names = "phys_addr_base";
qcom,sleep-stats-version = <2>;
};
diff --git a/arch/arm/boot/dts/msm9625-v1-cdp.dts b/arch/arm/boot/dts/msm9625-v1-cdp.dts
index cf17c69..d7537eb 100644
--- a/arch/arm/boot/dts/msm9625-v1-cdp.dts
+++ b/arch/arm/boot/dts/msm9625-v1-cdp.dts
@@ -13,6 +13,7 @@
/dts-v1/;
/include/ "msm9625-v1.dtsi"
+/include/ "msm9625-cdp.dtsi"
/ {
model = "Qualcomm MSM 9625V1 CDP";
@@ -20,88 +21,4 @@
qcom,msm-id = <134 1 0>, <152 1 0>, <149 1 0>, <150 1 0>,
<151 1 0>, <148 1 0>, <173 1 0>, <174 1 0>,
<175 1 0>;
-
- i2c@f9925000 {
- charger@57 {
- compatible = "summit,smb137c";
- reg = <0x57>;
- summit,chg-current-ma = <1500>;
- summit,term-current-ma = <50>;
- summit,pre-chg-current-ma = <100>;
- summit,float-voltage-mv = <4200>;
- summit,thresh-voltage-mv = <3000>;
- summit,recharge-thresh-mv = <75>;
- summit,system-voltage-mv = <4250>;
- summit,charging-timeout = <382>;
- summit,pre-charge-timeout = <48>;
- summit,therm-current-ua = <10>;
- summit,temperature-min = <4>; /* 0 C */
- summit,temperature-max = <3>; /* 45 C */
- };
- };
-
- wlan0: qca,wlan {
- cell-index = <0>;
- compatible = "qca,ar6004-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-
- qca,wlan_ar6003 {
- cell-index = <0>;
- compatible = "qca,ar6003-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-};
-
-/* PM8019 GPIO and MPP configuration */
-&pm8019_gpios {
- gpio@c000 { /* GPIO 1 */
- };
-
- gpio@c100 { /* GPIO 2 */
- };
-
- gpio@c200 { /* GPIO 3 */
- };
-
- gpio@c300 { /* GPIO 4 */
- /* ext_2p95v regulator enable config */
- qcom,mode = <1>; /* Digital output */
- qcom,output-type = <0>; /* CMOS */
- qcom,invert = <0>; /* Output low */
- qcom,out-strength = <1>; /* Low */
- qcom,vin-sel = <2>; /* PM8019 L11 - 1.8V */
- qcom,src-sel = <0>; /* Constant */
- qcom,master-en = <1>; /* Enable GPIO */
- };
-
- gpio@c400 { /* GPIO 5 */
- };
-
- gpio@c500 { /* GPIO 6 */
- };
-};
-
-&pm8019_mpps {
- mpp@a000 { /* MPP 1 */
- };
-
- mpp@a100 { /* MPP 2 */
- };
-
- mpp@a200 { /* MPP 3 */
- };
-
- mpp@a300 { /* MPP 4 */
- };
-
- mpp@a400 { /* MPP 5 */
- };
-
- mpp@a500 { /* MPP 6 */
- };
};
diff --git a/arch/arm/boot/dts/msm9625-v1-mtp.dts b/arch/arm/boot/dts/msm9625-v1-mtp.dts
index 24aa3af..a70ec1a 100644
--- a/arch/arm/boot/dts/msm9625-v1-mtp.dts
+++ b/arch/arm/boot/dts/msm9625-v1-mtp.dts
@@ -13,6 +13,7 @@
/dts-v1/;
/include/ "msm9625-v1.dtsi"
+/include/ "msm9625-mtp.dtsi"
/ {
model = "Qualcomm MSM 9625V1 MTP";
@@ -20,100 +21,4 @@
qcom,msm-id = <134 7 0>, <152 7 0>, <149 7 0>, <150 7 0>,
<151 7 0>, <148 7 0>, <173 7 0>, <174 7 0>,
<175 7 0>;
-
- i2c@f9925000 {
- charger@57 {
- compatible = "summit,smb137c";
- reg = <0x57>;
- summit,chg-current-ma = <1500>;
- summit,term-current-ma = <50>;
- summit,pre-chg-current-ma = <100>;
- summit,float-voltage-mv = <4200>;
- summit,thresh-voltage-mv = <3000>;
- summit,recharge-thresh-mv = <75>;
- summit,system-voltage-mv = <4250>;
- summit,charging-timeout = <382>;
- summit,pre-charge-timeout = <48>;
- summit,therm-current-ua = <10>;
- summit,temperature-min = <4>; /* 0 C */
- summit,temperature-max = <3>; /* 45 C */
- };
- };
-
- wlan0: qca,wlan {
- cell-index = <0>;
- compatible = "qca,ar6004-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-
- qca,wlan_ar6003 {
- cell-index = <0>;
- compatible = "qca,ar6003-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-};
-
-/* PM8019 GPIO and MPP configuration */
-&pm8019_gpios {
- gpio@c000 { /* GPIO 1 */
- };
-
- gpio@c100 { /* GPIO 2 */
- };
-
- gpio@c200 { /* GPIO 3 */
- };
-
- gpio@c300 { /* GPIO 4 */
- /* ext_2p95v regulator enable config */
- qcom,mode = <1>; /* Digital output */
- qcom,output-type = <0>; /* CMOS */
- qcom,invert = <0>; /* Output low */
- qcom,out-strength = <1>; /* Low */
- qcom,vin-sel = <2>; /* PM8019 L11 - 1.8V */
- qcom,src-sel = <0>; /* Constant */
- qcom,master-en = <1>; /* Enable GPIO */
- };
-
- gpio@c400 { /* GPIO 5 */
- };
-
- gpio@c500 { /* GPIO 6 */
- };
-};
-
-&pm8019_mpps {
- mpp@a000 { /* MPP 1 */
- };
-
- mpp@a100 { /* MPP 2 */
- };
-
- mpp@a200 { /* MPP 3 */
- };
-
- mpp@a300 { /* MPP 4 */
- /* VADC channel 19 */
- qcom,mode = <4>;
- qcom,ain-route = <3>; /* AMUX 8 */
- qcom,master-en = <1>;
- qcom,src-sel = <0>; /* Function constant */
- qcom,invert = <1>;
- };
-
- mpp@a400 { /* MPP 5 */
- };
-
- mpp@a500 { /* MPP 6 */
- /* VADC channel 21 */
- qcom,mode = <4>;
- qcom,ain-route = <1>; /* AMUX 6 */
- qcom,master-en = <1>;
- qcom,src-sel = <0>; /* Function constant */
- qcom,invert = <1>;
- };
};
diff --git a/arch/arm/boot/dts/msm9625-v2-1-mtp.dts b/arch/arm/boot/dts/msm9625-v2-1-mtp.dts
deleted file mode 100644
index 1e0f3c0..0000000
--- a/arch/arm/boot/dts/msm9625-v2-1-mtp.dts
+++ /dev/null
@@ -1,99 +0,0 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-
-/include/ "msm9625-v2-1.dtsi"
-
-/ {
- model = "Qualcomm MSM 9625V2.1 MTP";
- compatible = "qcom,msm9625-mtp", "qcom,msm9625", "qcom,mtp";
- qcom,msm-id = <134 7 0x20001>, <152 7 0x20001>, <149 7 0x20001>,
- <150 7 0x20001>, <151 7 0x20001>, <148 7 0x20001>,
- <173 7 0x20001>, <174 7 0x20001>, <175 7 0x20001>;
-
- i2c@f9925000 {
- charger@57 {
- compatible = "summit,smb137c";
- reg = <0x57>;
- summit,chg-current-ma = <1500>;
- summit,term-current-ma = <50>;
- summit,pre-chg-current-ma = <100>;
- summit,float-voltage-mv = <4200>;
- summit,thresh-voltage-mv = <3000>;
- summit,recharge-thresh-mv = <75>;
- summit,system-voltage-mv = <4250>;
- summit,charging-timeout = <382>;
- summit,pre-charge-timeout = <48>;
- summit,therm-current-ua = <10>;
- summit,temperature-min = <4>; /* 0 C */
- summit,temperature-max = <3>; /* 45 C */
- };
- };
-
- wlan0: qca,wlan {
- cell-index = <0>;
- compatible = "qca,ar6004-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,ar6004-vdd-io-supply = <&pm8019_l11>;
- };
-};
-
-/* PM8019 GPIO and MPP configuration */
-&pm8019_gpios {
- gpio@c000 { /* GPIO 1 */
- };
-
- gpio@c100 { /* GPIO 2 */
- };
-
- gpio@c200 { /* GPIO 3 */
- };
-
- gpio@c300 { /* GPIO 4 */
- /* ext_2p95v regulator enable config */
- qcom,mode = <1>; /* Digital output */
- qcom,output-type = <0>; /* CMOS */
- qcom,invert = <0>; /* Output low */
- qcom,out-strength = <1>; /* Low */
- qcom,vin-sel = <2>; /* PM8019 L11 - 1.8V */
- qcom,src-sel = <0>; /* Constant */
- qcom,master-en = <1>; /* Enable GPIO */
- };
-
- gpio@c400 { /* GPIO 5 */
- };
-
- gpio@c500 { /* GPIO 6 */
- };
-};
-
-&pm8019_mpps {
- mpp@a000 { /* MPP 1 */
- };
-
- mpp@a100 { /* MPP 2 */
- };
-
- mpp@a200 { /* MPP 3 */
- };
-
- mpp@a300 { /* MPP 4 */
- };
-
- mpp@a400 { /* MPP 5 */
- };
-
- mpp@a500 { /* MPP 6 */
- };
-};
diff --git a/arch/arm/boot/dts/msm9625-v2-cdp.dts b/arch/arm/boot/dts/msm9625-v2-cdp.dts
index 660bdbd..9fbe5ec 100644
--- a/arch/arm/boot/dts/msm9625-v2-cdp.dts
+++ b/arch/arm/boot/dts/msm9625-v2-cdp.dts
@@ -13,8 +13,7 @@
/dts-v1/;
/include/ "msm9625-v2.dtsi"
-/include/ "msm9625-display.dtsi"
-/include/ "qpic-panel-ili-qvga.dtsi"
+/include/ "msm9625-cdp.dtsi"
/ {
model = "Qualcomm MSM 9625V2 CDP";
@@ -22,88 +21,4 @@
qcom,msm-id = <134 1 0x20000>, <152 1 0x20000>, <149 1 0x20000>,
<150 1 0x20000>, <151 1 0x20000>, <148 1 0x20000>,
<173 1 0x20000>, <174 1 0x20000>, <175 1 0x20000>;
-
- i2c@f9925000 {
- charger@57 {
- compatible = "summit,smb137c";
- reg = <0x57>;
- summit,chg-current-ma = <1500>;
- summit,term-current-ma = <50>;
- summit,pre-chg-current-ma = <100>;
- summit,float-voltage-mv = <4200>;
- summit,thresh-voltage-mv = <3000>;
- summit,recharge-thresh-mv = <75>;
- summit,system-voltage-mv = <4250>;
- summit,charging-timeout = <382>;
- summit,pre-charge-timeout = <48>;
- summit,therm-current-ua = <10>;
- summit,temperature-min = <4>; /* 0 C */
- summit,temperature-max = <3>; /* 45 C */
- };
- };
-
- wlan0: qca,wlan {
- cell-index = <0>;
- compatible = "qca,ar6004-hsic";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-
- qca,wlan_ar6003 {
- cell-index = <0>;
- compatible = "qca,ar6003-sdio";
- qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
- qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
- qca,vdd-io-supply = <&pm8019_l11>;
- };
-};
-
-/* PM8019 GPIO and MPP configuration */
-&pm8019_gpios {
- gpio@c000 { /* GPIO 1 */
- };
-
- gpio@c100 { /* GPIO 2 */
- };
-
- gpio@c200 { /* GPIO 3 */
- };
-
- gpio@c300 { /* GPIO 4 */
- /* ext_2p95v regulator enable config */
- qcom,mode = <1>; /* Digital output */
- qcom,output-type = <0>; /* CMOS */
- qcom,invert = <0>; /* Output low */
- qcom,out-strength = <1>; /* Low */
- qcom,vin-sel = <2>; /* PM8019 L11 - 1.8V */
- qcom,src-sel = <0>; /* Constant */
- qcom,master-en = <1>; /* Enable GPIO */
- };
-
- gpio@c400 { /* GPIO 5 */
- };
-
- gpio@c500 { /* GPIO 6 */
- };
-};
-
-&pm8019_mpps {
- mpp@a000 { /* MPP 1 */
- };
-
- mpp@a100 { /* MPP 2 */
- };
-
- mpp@a200 { /* MPP 3 */
- };
-
- mpp@a300 { /* MPP 4 */
- };
-
- mpp@a400 { /* MPP 5 */
- };
-
- mpp@a500 { /* MPP 6 */
- };
};
diff --git a/arch/arm/boot/dts/msm9625-v2-mtp.dts b/arch/arm/boot/dts/msm9625-v2-mtp.dts
index c9e54be..5324e2c 100644
--- a/arch/arm/boot/dts/msm9625-v2-mtp.dts
+++ b/arch/arm/boot/dts/msm9625-v2-mtp.dts
@@ -13,6 +13,7 @@
/dts-v1/;
/include/ "msm9625-v2.dtsi"
+/include/ "msm9625-mtp.dtsi"
/ {
model = "Qualcomm MSM 9625V2 MTP";
diff --git a/arch/arm/boot/dts/msm9625-v2.1-cdp.dts b/arch/arm/boot/dts/msm9625-v2.1-cdp.dts
new file mode 100644
index 0000000..b643593
--- /dev/null
+++ b/arch/arm/boot/dts/msm9625-v2.1-cdp.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm9625-v2.1.dtsi"
+/include/ "msm9625-cdp.dtsi"
+
+/ {
+ model = "Qualcomm MSM 9625V2.1 CDP";
+ compatible = "qcom,msm9625-cdp", "qcom,msm9625", "qcom,cdp";
+ qcom,msm-id = <134 1 0x20001>, <152 1 0x20001>, <149 1 0x20001>,
+ <150 1 0x20001>, <151 1 0x20001>, <148 1 0x20001>,
+ <173 1 0x20001>, <174 1 0x20001>, <175 1 0x20001>;
+};
diff --git a/arch/arm/boot/dts/msm9625-v2.1-mtp.dts b/arch/arm/boot/dts/msm9625-v2.1-mtp.dts
new file mode 100644
index 0000000..8bbcc0d
--- /dev/null
+++ b/arch/arm/boot/dts/msm9625-v2.1-mtp.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm9625-v2.1.dtsi"
+/include/ "msm9625-mtp.dtsi"
+
+/ {
+ model = "Qualcomm MSM 9625V2.1 MTP";
+ compatible = "qcom,msm9625-mtp", "qcom,msm9625", "qcom,mtp";
+ qcom,msm-id = <134 7 0x20001>, <152 7 0x20001>, <149 7 0x20001>,
+ <150 7 0x20001>, <151 7 0x20001>, <148 7 0x20001>,
+ <173 7 0x20001>, <174 7 0x20001>, <175 7 0x20001>;
+};
diff --git a/arch/arm/boot/dts/msm9625-v2-1.dtsi b/arch/arm/boot/dts/msm9625-v2.1.dtsi
similarity index 100%
rename from arch/arm/boot/dts/msm9625-v2-1.dtsi
rename to arch/arm/boot/dts/msm9625-v2.1.dtsi
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index ee61dc3..348e8c9 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -58,12 +58,70 @@
clock-frequency = <32768>;
};
- timer: msm-qtimer@f9021000 {
- compatible = "arm,armv7-timer";
- reg = <0xF9021000 0x1000>;
- interrupts = <0 7 0>;
- irq-is-not-percpu;
+ timer@f9020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xf9020000 0x1000>;
clock-frequency = <19200000>;
+
+ frame@f9021000 {
+ frame-number = <0>;
+ interrupts = <0 7 0x4>,
+ <0 6 0x4>;
+ reg = <0xf9021000 0x1000>,
+ <0xf9022000 0x1000>;
+ };
+
+ frame@f9023000 {
+ frame-number = <1>;
+ interrupts = <0 8 0x4>;
+ reg = <0xf9023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9024000 {
+ frame-number = <2>;
+ interrupts = <0 9 0x4>;
+ reg = <0xf9024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9025000 {
+ frame-number = <3>;
+ interrupts = <0 10 0x4>;
+ reg = <0xf9025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9026000 {
+ frame-number = <4>;
+ interrupts = <0 11 0x4>;
+ reg = <0xf9026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9027000 {
+ frame-number = <5>;
+ interrupts = <0 12 0x4>;
+ reg = <0xf9027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9028000 {
+ frame-number = <6>;
+ interrupts = <0 13 0x4>;
+ reg = <0xf9028000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@f9029000 {
+ frame-number = <7>;
+ interrupts = <0 14 0x4>;
+ reg = <0xf9029000 0x1000>;
+ status = "disabled";
+ };
};
qcom,sps@f9980000 {
@@ -101,7 +159,6 @@
qcom,msm-bus,name = "usb2";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<87 512 0 0>,
@@ -118,7 +175,6 @@
qcom,msm-bus,name = "hsic";
qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<85 512 0 0>,
@@ -507,6 +563,14 @@
qcom,cdc-vddcx-2-voltage = <1200000 1200000>;
qcom,cdc-vddcx-2-current = <10000>;
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1",
+ "cdc-vdd-a-1p2v",
+ "cdc-vddcx-1",
+ "cdc-vddcx-2";
+
qcom,cdc-micbias-ldoh-v = <0x3>;
qcom,cdc-micbias-cfilt1-mv = <1800>;
qcom,cdc-micbias-cfilt2-mv = <2700>;
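
The qcom,cdc-static-supplies entry added above is a plain string array naming the codec's always-on regulators. As a rough, hypothetical illustration (not the actual WCD codec driver code), a consumer would typically walk such a property with the standard OF string helpers and request each named supply; the helper name below is invented for the example:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>

/*
 * Illustrative only: iterate a string-array property such as
 * "qcom,cdc-static-supplies" and request each named regulator.
 * The real codec driver may differ; this helper is hypothetical.
 */
static int example_get_static_supplies(struct device *dev)
{
	struct device_node *np = dev->of_node;
	int i, count;

	count = of_property_count_strings(np, "qcom,cdc-static-supplies");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		const char *name;
		struct regulator *reg;

		if (of_property_read_string_index(np,
				"qcom,cdc-static-supplies", i, &name))
			return -EINVAL;

		/* e.g. name == "cdc-vdd-buck" -> "cdc-vdd-buck-supply" */
		reg = devm_regulator_get(dev, name);
		if (IS_ERR(reg))
			return PTR_ERR(reg);
	}
	return 0;
}
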
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index f23a096..3291919 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -267,6 +267,8 @@
CONFIG_MSMB_CAMERA=y
CONFIG_OV9724=y
CONFIG_MSMB_JPEG=y
+CONFIG_SWITCH=y
+CONFIG_MSM_WFD=y
CONFIG_MSM_VIDC_V4L2=y
CONFIG_VIDEOBUF2_MSM_MEM=y
CONFIG_V4L_PLATFORM_DRIVERS=y
@@ -327,7 +329,6 @@
CONFIG_QPNP_PWM=y
CONFIG_QPNP_POWER_ON=y
CONFIG_MSM_IOMMU=y
-CONFIG_MSM_IOMMU_PMON=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_TMC=y
CONFIG_CORESIGHT_TPIU=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index f67cb0d..aa2c028 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -98,6 +98,7 @@
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index b5e67fd..c9f068a 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -102,6 +102,7 @@
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
diff --git a/arch/arm/configs/msm9625-perf_defconfig b/arch/arm/configs/msm9625-perf_defconfig
index 42acd99..ae73bad 100644
--- a/arch/arm/configs/msm9625-perf_defconfig
+++ b/arch/arm/configs/msm9625-perf_defconfig
@@ -230,9 +230,6 @@
CONFIG_REGULATOR_QPNP=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_QPIC_PANEL_DETECT=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 041e89a..f7c3bff 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -231,9 +231,6 @@
CONFIG_REGULATOR_QPNP=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_QPIC_PANEL_DETECT=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 366debb..ac4c7a3 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/export.h>
+#include <linux/slab.h>
#include <asm/cputype.h>
#include <asm/delay.h>
@@ -33,46 +34,13 @@
#include <asm/system_info.h>
static unsigned long arch_timer_rate;
+static int arch_timer_spi;
static int arch_timer_ppi;
static int arch_timer_ppi2;
-static int is_irq_percpu;
static struct clock_event_device __percpu **arch_timer_evt;
static void __iomem *timer_base;
-static u32 timer_reg_read_cp15(int reg);
-static void timer_reg_write_cp15(int reg, u32 val);
-static inline cycle_t counter_get_cntpct_cp15(void);
-static inline cycle_t counter_get_cntvct_cp15(void);
-
-static u32 timer_reg_read_mem(int reg);
-static void timer_reg_write_mem(int reg, u32 val);
-static inline cycle_t counter_get_cntpct_mem(void);
-static inline cycle_t counter_get_cntvct_mem(void);
-
-struct arch_timer_operations {
- void (*reg_write)(int, u32);
- u32 (*reg_read)(int);
- cycle_t (*get_cntpct)(void);
- cycle_t (*get_cntvct)(void);
-};
-
-static struct arch_timer_operations arch_timer_ops_cp15 = {
- .reg_read = &timer_reg_read_cp15,
- .reg_write = &timer_reg_write_cp15,
- .get_cntpct = &counter_get_cntpct_cp15,
- .get_cntvct = &counter_get_cntvct_cp15,
-};
-
-static struct arch_timer_operations arch_timer_ops_mem = {
- .reg_read = &timer_reg_read_mem,
- .reg_write = &timer_reg_write_mem,
- .get_cntpct = &counter_get_cntpct_mem,
- .get_cntvct = &counter_get_cntvct_mem,
-};
-
-static struct arch_timer_operations *arch_specific_timer = &arch_timer_ops_cp15;
-
static struct delay_timer arch_delay_timer;
/*
@@ -97,7 +65,7 @@
#define QTIMER_CNTP_TVAL_REG 0x028
#define QTIMER_CNTV_TVAL_REG 0x038
-static void timer_reg_write_mem(int reg, u32 val)
+static inline void timer_reg_write_mem(int reg, u32 val)
{
switch (reg) {
case ARCH_TIMER_REG_CTRL:
@@ -109,7 +77,7 @@
}
}
-static void timer_reg_write_cp15(int reg, u32 val)
+static inline void timer_reg_write_cp15(int reg, u32 val)
{
switch (reg) {
case ARCH_TIMER_REG_CTRL:
@@ -123,7 +91,15 @@
isb();
}
-static u32 timer_reg_read_mem(int reg)
+static inline void arch_timer_reg_write(int cp15, int reg, u32 val)
+{
+ if (cp15)
+ timer_reg_write_cp15(reg, val);
+ else
+ timer_reg_write_mem(reg, val);
+}
+
+static inline u32 timer_reg_read_mem(int reg)
{
u32 val;
@@ -144,7 +120,7 @@
return val;
}
-static u32 timer_reg_read_cp15(int reg)
+static inline u32 timer_reg_read_cp15(int reg)
{
u32 val;
@@ -165,17 +141,23 @@
return val;
}
-static irqreturn_t arch_timer_handler(int irq, void *dev_id)
+static inline u32 arch_timer_reg_read(int cp15, int reg)
{
- struct clock_event_device *evt;
+ if (cp15)
+ return timer_reg_read_cp15(reg);
+ else
+ return timer_reg_read_mem(reg);
+}
+
+static inline irqreturn_t arch_timer_handler(int cp15,
+ struct clock_event_device *evt)
+{
unsigned long ctrl;
- ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(cp15, ARCH_TIMER_REG_CTRL);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
- arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL,
- ctrl);
- evt = *__this_cpu_ptr(arch_timer_evt);
+ arch_timer_reg_write(cp15, ARCH_TIMER_REG_CTRL, ctrl);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -183,16 +165,18 @@
return IRQ_NONE;
}
-static void arch_timer_disable(void)
+static irqreturn_t arch_timer_handler_cp15(int irq, void *dev_id)
{
- unsigned long ctrl;
-
- ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+ struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+ return arch_timer_handler(1, evt);
}
-static void arch_timer_set_mode(enum clock_event_mode mode,
+static irqreturn_t arch_timer_handler_mem(int irq, void *dev_id)
+{
+ return arch_timer_handler(0, dev_id);
+}
+
+static inline void arch_timer_set_mode(int cp15, enum clock_event_mode mode,
struct clock_event_device *clk)
{
unsigned long ctrl;
@@ -200,46 +184,72 @@
switch (mode) {
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
- arch_timer_disable();
+ ctrl = arch_timer_reg_read(cp15, ARCH_TIMER_REG_CTRL);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_reg_write(cp15, ARCH_TIMER_REG_CTRL, ctrl);
break;
case CLOCK_EVT_MODE_ONESHOT:
- ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(cp15, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
- arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+ arch_timer_reg_write(cp15, ARCH_TIMER_REG_CTRL, ctrl);
default:
break;
}
}
-static int arch_timer_set_next_event(unsigned long evt,
+static void arch_timer_set_mode_cp15(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ arch_timer_set_mode(1, mode, clk);
+}
+
+static void arch_timer_set_mode_mem(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ arch_timer_set_mode(0, mode, clk);
+}
+
+static int arch_timer_set_next_event(int cp15, unsigned long evt,
struct clock_event_device *unused)
{
unsigned long ctrl;
- ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(cp15, ARCH_TIMER_REG_CTRL);
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
- arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);
- arch_specific_timer->reg_write(ARCH_TIMER_REG_TVAL, evt);
+ arch_timer_reg_write(cp15, ARCH_TIMER_REG_CTRL, ctrl);
+ arch_timer_reg_write(cp15, ARCH_TIMER_REG_TVAL, evt);
return 0;
}
+static int arch_timer_set_next_event_cp15(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ return arch_timer_set_next_event(1, evt, unused);
+}
+
+static int arch_timer_set_next_event_mem(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ return arch_timer_set_next_event(0, evt, unused);
+}
+
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
/* setup clock event only once for CPU 0 */
if (!smp_processor_id() && clk->irq == arch_timer_ppi)
return 0;
- /* Be safe... */
- arch_timer_disable();
-
- clk->features = CLOCK_EVT_FEAT_ONESHOT;
+ clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
- clk->set_mode = arch_timer_set_mode;
- clk->set_next_event = arch_timer_set_next_event;
+ clk->set_mode = arch_timer_set_mode_cp15;
+ clk->set_next_event = arch_timer_set_next_event_cp15;
clk->irq = arch_timer_ppi;
+ /* Be safe... */
+ clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);
+
clockevents_config_and_register(clk, arch_timer_rate,
0xf, 0x7fffffff);
@@ -264,8 +274,8 @@
unsigned long freq;
if (arch_timer_rate == 0) {
- arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, 0);
- freq = arch_specific_timer->reg_read(ARCH_TIMER_REG_FREQ);
+ arch_timer_reg_write(1, ARCH_TIMER_REG_CTRL, 0);
+ freq = arch_timer_reg_read(1, ARCH_TIMER_REG_FREQ);
/* Check the timer frequency. */
if (freq == 0) {
@@ -323,9 +333,12 @@
return ((cycle_t) cvalh << 32) | cvall;
}
+static cycle_t (*get_cntpct_func)(void) = counter_get_cntpct_cp15;
+static cycle_t (*get_cntvct_func)(void) = counter_get_cntvct_cp15;
+
cycle_t arch_counter_get_cntpct(void)
{
- return arch_specific_timer->get_cntpct();
+ return get_cntpct_func();
}
EXPORT_SYMBOL(arch_counter_get_cntpct);
@@ -351,7 +364,7 @@
{
cycle_t cntvct;
- cntvct = arch_specific_timer->get_cntvct();
+ cntvct = get_cntvct_func();
/*
* The sched_clock infrastructure only knows about counters
@@ -373,7 +386,7 @@
disable_percpu_irq(clk->irq);
if (arch_timer_ppi2)
disable_percpu_irq(arch_timer_ppi2);
- arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+ clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}
static struct local_timer_ops arch_timer_ops __cpuinitdata = {
@@ -383,13 +396,23 @@
static struct clock_event_device arch_timer_global_evt;
+static void __init arch_timer_counter_init(void)
+{
+ clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+
+ setup_sched_clock(arch_timer_update_sched_clock, 32, arch_timer_rate);
+
+ /* Use the architected timer for the delay loop. */
+ arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
+ arch_delay_timer.freq = arch_timer_rate;
+ register_current_timer_delay(&arch_delay_timer);
+}
+
static int __init arch_timer_common_register(void)
{
int err;
- if (timer_base)
- arch_specific_timer = &arch_timer_ops_mem;
- else if (!local_timer_is_architected())
+ if (!local_timer_is_architected())
return -ENXIO;
err = arch_timer_available();
@@ -400,16 +423,8 @@
if (!arch_timer_evt)
return -ENOMEM;
- clocksource_register_hz(&clocksource_counter, arch_timer_rate);
-
- setup_sched_clock(arch_timer_update_sched_clock, 32, arch_timer_rate);
-
- if (is_irq_percpu)
- err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
- "arch_timer", arch_timer_evt);
- else
- err = request_irq(arch_timer_ppi, arch_timer_handler, 0,
- "arch_timer", arch_timer_evt);
+ err = request_percpu_irq(arch_timer_ppi, arch_timer_handler_cp15,
+ "arch_timer", arch_timer_evt);
if (err) {
pr_err("arch_timer: can't register interrupt %d (%d)\n",
arch_timer_ppi, err);
@@ -417,13 +432,9 @@
}
if (arch_timer_ppi2) {
- if (is_irq_percpu)
- err = request_percpu_irq(arch_timer_ppi2,
- arch_timer_handler, "arch_timer",
- arch_timer_evt);
- else
- err = request_irq(arch_timer_ppi2, arch_timer_handler,
- 0, "arch_timer", arch_timer_evt);
+ err = request_percpu_irq(arch_timer_ppi2,
+ arch_timer_handler_cp15,
+ "arch_timer", arch_timer_evt);
if (err) {
pr_err("arch_timer: can't register interrupt %d (%d)\n",
arch_timer_ppi2, err);
@@ -447,10 +458,6 @@
if (err)
goto out_free_irq;
- /* Use the architected timer for the delay loop. */
- arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
- arch_delay_timer.freq = arch_timer_rate;
- register_current_timer_delay(&arch_delay_timer);
return 0;
out_free_irq:
@@ -464,6 +471,34 @@
return err;
}
+static int __init arch_timer_mem_register(void)
+{
+ int err;
+ struct clock_event_device *clk;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+
+ clk->features = CLOCK_EVT_FEAT_ONESHOT;
+ clk->name = "arch_mem_timer";
+ clk->rating = 400;
+ clk->set_mode = arch_timer_set_mode_mem;
+ clk->set_next_event = arch_timer_set_next_event_mem;
+ clk->irq = arch_timer_spi;
+ clk->cpumask = cpu_all_mask;
+
+ clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);
+
+ clockevents_config_and_register(clk, arch_timer_rate,
+ 0xf, 0x7fffffff);
+
+ err = request_irq(arch_timer_spi, arch_timer_handler_mem, 0,
+ "arch_timer", clk);
+
+ return err;
+}
+
int __init arch_timer_register(struct arch_timer *at)
{
if (at->res[0].start <= 0 || !(at->res[0].flags & IORESOURCE_IRQ))
@@ -493,48 +528,86 @@
{},
};
+static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
+ { .compatible = "arm,armv7-timer-mem", },
+ {},
+};
+
int __init arch_timer_of_register(void)
{
- struct device_node *np;
+ struct device_node *np, *frame;
u32 freq;
int ret;
+ int has_cp15 = false, has_mem = false;
np = of_find_matching_node(NULL, arch_timer_of_match);
- if (!np) {
- pr_err("arch_timer: can't find DT node\n");
- return -ENODEV;
+ if (np) {
+ has_cp15 = true;
+ /*
+ * Try to determine the frequency from the device tree
+ */
+ if (!of_property_read_u32(np, "clock-frequency", &freq))
+ arch_timer_rate = freq;
+
+ ret = irq_of_parse_and_map(np, 0);
+ if (ret <= 0) {
+ pr_err("arch_timer: interrupt not specified in timer node\n");
+ return -ENODEV;
+ }
+ arch_timer_ppi = ret;
+ ret = irq_of_parse_and_map(np, 1);
+ if (ret > 0)
+ arch_timer_ppi2 = ret;
+
+ ret = arch_timer_common_register();
+ if (ret)
+ return ret;
}
- /* Try to determine the frequency from the device tree or CNTFRQ */
- if (!of_property_read_u32(np, "clock-frequency", &freq))
- arch_timer_rate = freq;
+ np = of_find_matching_node(NULL, arch_timer_mem_of_match);
+ if (np) {
+ has_mem = true;
- ret = irq_of_parse_and_map(np, 0);
- if (ret <= 0) {
- pr_err("arch_timer: interrupt not specified in timer node\n");
- return -ENODEV;
- }
+ if (!has_cp15) {
+ get_cntpct_func = counter_get_cntpct_mem;
+ get_cntvct_func = counter_get_cntvct_mem;
+ }
+ /*
+ * Try to determine the frequency from the device tree
+ */
+ if (!of_property_read_u32(np, "clock-frequency", &freq))
+ arch_timer_rate = freq;
- if (of_get_address(np, 0, NULL, NULL)) {
- timer_base = of_iomap(np, 0);
+ frame = of_get_next_child(np, NULL);
+ if (!frame) {
+ pr_err("arch_timer: no child frame\n");
+ return -EINVAL;
+ }
+
+ timer_base = of_iomap(frame, 0);
if (!timer_base) {
pr_err("arch_timer: cant map timer base\n");
return -ENOMEM;
}
+
+ arch_timer_spi = irq_of_parse_and_map(frame, 0);
+ if (!arch_timer_spi) {
+ pr_err("arch_timer: no physical timer irq\n");
+ return -EINVAL;
+ }
+
+ ret = arch_timer_mem_register();
+ if (ret)
+ return ret;
}
- if (of_get_property(np, "irq-is-not-percpu", NULL))
- is_irq_percpu = 0;
- else
- is_irq_percpu = 1;
+ if (!has_cp15 && !has_mem) {
+ pr_err("arch_timer: can't find DT node\n");
+ return -ENODEV;
+ }
- arch_timer_ppi = ret;
- ret = irq_of_parse_and_map(np, 1);
- if (ret > 0)
- arch_timer_ppi2 = ret;
- pr_info("arch_timer: found %s irqs %d %d\n",
- np->name, arch_timer_ppi, arch_timer_ppi2);
+ arch_timer_counter_init();
- return arch_timer_common_register();
+ return 0;
}
#endif
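
The msm9625.dtsi timer node above and this arch_timer.c rework go together: the driver now probes an "arm,armv7-timer-mem" node, maps the first frame's registers and its physical-timer SPI, and registers an "arch_mem_timer" broadcast clockevent alongside the per-CPU CP15 timer. A condensed, non-authoritative sketch of that memory-mapped probe path, with error paths trimmed and names taken from the patch:

/*
 * Condensed sketch of the memory-mapped probe path added above; not a
 * drop-in replacement for arch_timer_of_register(). The CP15 branch and
 * most error handling are omitted, and the statics (timer_base,
 * arch_timer_spi, arch_timer_mem_of_match) come from this file.
 */
static int __init example_timer_mem_probe(void)
{
	struct device_node *np, *frame;

	np = of_find_matching_node(NULL, arch_timer_mem_of_match);
	if (!np)
		return -ENODEV;

	/* Only the first frame sub-node is used, e.g. frame@f9021000. */
	frame = of_get_next_child(np, NULL);
	if (!frame)
		return -EINVAL;

	timer_base = of_iomap(frame, 0);		/* first view registers */
	if (!timer_base)
		return -ENOMEM;

	arch_timer_spi = irq_of_parse_and_map(frame, 0);/* physical timer SPI */
	if (!arch_timer_spi)
		return -EINVAL;

	/* Registers the "arch_mem_timer" clockevent driven by that SPI. */
	return arch_timer_mem_register();
}
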
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index c510889..a6e6914 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -284,6 +284,7 @@
select MSM_ULTRASOUND_B
select MSM_LPM_TEST
select MSM_RPM_LOG
+ select ARCH_WANT_KMAP_ATOMIC_FLUSH
config ARCH_MSMZINC
bool "MSMZINC"
@@ -302,6 +303,7 @@
select REGULATOR
select ARM_HAS_SG_CHAIN
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
+ select ARCH_WANT_KMAP_ATOMIC_FLUSH
config ARCH_MPQ8092
bool "MPQ8092"
@@ -485,6 +487,7 @@
select MSM_CPR_REGULATOR
select MSM_RPM_LOG
select MSM_RPM_STATS_LOG
+ select ARCH_WANT_KMAP_ATOMIC_FLUSH
endmenu
choice
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index f20f6ae..8c366da 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -76,8 +76,8 @@
dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v1-rumi.dtb
dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2-cdp.dtb
dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2-mtp.dtb
- dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2-1-mtp.dtb
- dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2-1-cdp.dtb
+ dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2.1-mtp.dtb
+ dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2.1-cdp.dtb
# MSM8226
zreladdr-$(CONFIG_ARCH_MSM8226) := 0x00008000
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index a0644e6..2f09162 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -913,8 +913,10 @@
if (!bam_is_connected) {
read_unlock(&ul_wakeup_lock);
ul_wakeup();
- if (unlikely(in_global_reset == 1))
+ if (unlikely(in_global_reset == 1)) {
+ kfree(hdr);
return -EFAULT;
+ }
read_lock(&ul_wakeup_lock);
notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
}
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 0fe94d5..f969e31 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -437,7 +437,7 @@
if (fixed_position != NOT_FIXED)
fixed_size += heap->size;
- else
+ else if (!use_cma)
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
if (fixed_position == FIXED_LOW) {
@@ -3585,6 +3585,18 @@
if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917)
apq8064_pm8917_pdata_fixup();
platform_device_register(&msm_gpio_device);
+ if (cpu_is_apq8064ab())
+ apq8064ab_update_krait_spm();
+ if (cpu_is_krait_v3()) {
+ struct msm_pm_init_data_type *pdata =
+ msm8064_pm_8x60.dev.platform_data;
+ pdata->retention_calls_tz = false;
+ apq8064ab_update_retention_spm();
+ }
+ platform_device_register(&msm8064_pm_8x60);
+
+ msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
+ msm_spm_l2_init(msm_spm_l2_data);
msm_tsens_early_init(&apq_tsens_pdata);
msm_thermal_init(&msm_thermal_pdata);
if (socinfo_init() < 0)
@@ -3699,18 +3711,6 @@
apq8064_init_dsps();
platform_device_register(&msm_8960_riva);
}
- if (cpu_is_apq8064ab())
- apq8064ab_update_krait_spm();
- if (cpu_is_krait_v3()) {
- struct msm_pm_init_data_type *pdata =
- msm8064_pm_8x60.dev.platform_data;
- pdata->retention_calls_tz = false;
- apq8064ab_update_retention_spm();
- }
- platform_device_register(&msm8064_pm_8x60);
-
- msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
- msm_spm_l2_init(msm_spm_l2_data);
BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
apq8064_epm_adc_init();
}
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 3582914..a892e32 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -73,6 +73,10 @@
"msm_sdcc.1", NULL),
OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \
"msm_sdcc.2", NULL),
+ OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF9824900, \
+ "msm_sdcc.1", NULL),
+ OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF98A4900, \
+ "msm_sdcc.2", NULL),
{}
};
diff --git a/arch/arm/mach-msm/board-8610-gpiomux.c b/arch/arm/mach-msm/board-8610-gpiomux.c
index 0fdb298..4b435de 100644
--- a/arch/arm/mach-msm/board-8610-gpiomux.c
+++ b/arch/arm/mach-msm/board-8610-gpiomux.c
@@ -47,6 +47,36 @@
.pull = GPIOMUX_PULL_DOWN,
};
+static struct gpiomux_setting lcd_en_act_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+ .dir = GPIOMUX_OUT_HIGH,
+};
+
+static struct gpiomux_setting lcd_en_sus_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct msm_gpiomux_config msm_lcd_configs[] __initdata = {
+ {
+ .gpio = 41,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &lcd_en_act_cfg,
+ [GPIOMUX_SUSPENDED] = &lcd_en_sus_cfg,
+ },
+ },
+ {
+ .gpio = 7,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &lcd_en_act_cfg,
+ [GPIOMUX_SUSPENDED] = &lcd_en_sus_cfg,
+ },
+ },
+};
+
static struct msm_gpiomux_config msm_blsp_configs[] __initdata = {
{
.gpio = 10, /* BLSP1 QUP3 I2C_SDA */
@@ -137,4 +167,5 @@
msm_gpiomux_install(msm_blsp_configs, ARRAY_SIZE(msm_blsp_configs));
msm_gpiomux_install(wcnss_5wire_interface,
ARRAY_SIZE(wcnss_5wire_interface));
+ msm_gpiomux_install(msm_lcd_configs, ARRAY_SIZE(msm_lcd_configs));
}
diff --git a/arch/arm/mach-msm/board-8610.c b/arch/arm/mach-msm/board-8610.c
index 67334d5..2cd7134 100644
--- a/arch/arm/mach-msm/board-8610.c
+++ b/arch/arm/mach-msm/board-8610.c
@@ -44,6 +44,7 @@
#include <mach/clk-provider.h>
#include <mach/msm_smd.h>
#include <mach/rpm-smd.h>
+#include <mach/rpm-regulator-smd.h>
#include <linux/msm_thermal.h>
#include "board-dt.h"
#include "clock.h"
@@ -105,6 +106,7 @@
msm_rpm_driver_init();
msm_lpmrs_module_init();
msm_spm_device_init();
+ rpm_regulator_smd_driver_init();
qpnp_regulator_init();
tsens_tm_init_driver();
msm_thermal_device_init();
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 01b8a4c..6ccaba6 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -477,7 +477,7 @@
if (fixed_position != NOT_FIXED)
fixed_size += heap->size;
- else
+ else if (!use_cma)
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
if (fixed_position == FIXED_LOW) {
@@ -957,7 +957,7 @@
},
{
ARRAY_SIZE(qseecom_enable_dfab_vectors),
- qseecom_enable_sfpb_vectors,
+ qseecom_enable_dfab_vectors,
},
{
ARRAY_SIZE(qseecom_enable_sfpb_vectors),
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index c5fc418..5d96389 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -532,7 +532,7 @@
if (fixed_position != NOT_FIXED)
fixed_size += heap->size;
- else
+ else if (!use_cma)
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
if (fixed_position == FIXED_LOW) {
diff --git a/arch/arm/mach-msm/board-8974-gpiomux.c b/arch/arm/mach-msm/board-8974-gpiomux.c
index e30d0ba..705275c 100644
--- a/arch/arm/mach-msm/board-8974-gpiomux.c
+++ b/arch/arm/mach-msm/board-8974-gpiomux.c
@@ -743,7 +743,8 @@
.pull = GPIOMUX_PULL_DOWN,
};
-static struct msm_gpiomux_config msm8974_pri_auxpcm_configs[] __initdata = {
+/* Primary AUXPCM port sharing GPIO lines with Primary MI2S */
+static struct msm_gpiomux_config msm8974_pri_pri_auxpcm_configs[] __initdata = {
{
.gpio = 65,
.settings = {
@@ -774,6 +775,38 @@
},
};
+/* Primary AUXPCM port sharing GPIO lines with Tertiary MI2S */
+static struct msm_gpiomux_config msm8974_pri_ter_auxpcm_configs[] __initdata = {
+ {
+ .gpio = 74,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+ {
+ .gpio = 75,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+ {
+ .gpio = 76,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+ {
+ .gpio = 77,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &auxpcm_sus_cfg,
+ [GPIOMUX_ACTIVE] = &auxpcm_act_cfg,
+ },
+ },
+};
+
static struct msm_gpiomux_config msm8974_sec_auxpcm_configs[] __initdata = {
{
.gpio = 79,
@@ -1064,8 +1097,9 @@
ARRAY_SIZE(msm_blsp2_uart7_configs));
msm_gpiomux_install(wcnss_5wire_interface,
ARRAY_SIZE(wcnss_5wire_interface));
- msm_gpiomux_install_nowrite(ath_gpio_configs,
- ARRAY_SIZE(ath_gpio_configs));
+ if (of_board_is_liquid())
+ msm_gpiomux_install_nowrite(ath_gpio_configs,
+ ARRAY_SIZE(ath_gpio_configs));
msm_gpiomux_install(msm8974_slimbus_config,
ARRAY_SIZE(msm8974_slimbus_config));
@@ -1090,8 +1124,13 @@
msm_gpiomux_install(msm_mhl_configs,
ARRAY_SIZE(msm_mhl_configs));
- msm_gpiomux_install(msm8974_pri_auxpcm_configs,
- ARRAY_SIZE(msm8974_pri_auxpcm_configs));
+ if (of_board_is_liquid())
+ msm_gpiomux_install(msm8974_pri_ter_auxpcm_configs,
+ ARRAY_SIZE(msm8974_pri_ter_auxpcm_configs));
+ else
+ msm_gpiomux_install(msm8974_pri_pri_auxpcm_configs,
+ ARRAY_SIZE(msm8974_pri_pri_auxpcm_configs));
+
msm_gpiomux_install(msm8974_sec_auxpcm_configs,
ARRAY_SIZE(msm8974_sec_auxpcm_configs));
diff --git a/arch/arm/mach-msm/board-9625-gpiomux.c b/arch/arm/mach-msm/board-9625-gpiomux.c
index 75aaaec..a6ac986 100644
--- a/arch/arm/mach-msm/board-9625-gpiomux.c
+++ b/arch/arm/mach-msm/board-9625-gpiomux.c
@@ -276,6 +276,7 @@
},
};
+#ifdef CONFIG_FB_MSM_QPIC
static struct gpiomux_setting qpic_lcdc_a_d = {
.func = GPIOMUX_FUNC_1,
.drv = GPIOMUX_DRV_10MA,
@@ -327,6 +328,17 @@
},
};
+static void msm9625_disp_init_gpiomux(void)
+{
+ msm_gpiomux_install(msm9625_qpic_lcdc_configs,
+ ARRAY_SIZE(msm9625_qpic_lcdc_configs));
+}
+#else
+static void msm9625_disp_init_gpiomux(void)
+{
+}
+#endif /* CONFIG_FB_MSM_QPIC */
+
void __init msm9625_init_gpiomux(void)
{
int rc;
@@ -347,7 +359,5 @@
ARRAY_SIZE(mdm9625_cdc_reset_config));
msm_gpiomux_install(sdc2_card_det_config,
ARRAY_SIZE(sdc2_card_det_config));
- msm_gpiomux_install(msm9625_qpic_lcdc_configs,
- ARRAY_SIZE(msm9625_qpic_lcdc_configs));
-
+ msm9625_disp_init_gpiomux();
}
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 80907c8..4eb3d29 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -2776,6 +2776,7 @@
},
.base = &virt_bases[APCS_PLL_BASE],
.c = {
+ .parent = &xo_a_clk.c,
.dbg_name = "a7sspll",
.ops = &clk_ops_sr2_pll,
.vdd_class = &vdd_sr2_pll,
@@ -3059,7 +3060,7 @@
/* WCNSS CLOCKS */
CLK_LOOKUP("xo", cxo_wlan_clk.c, "fb000000.qcom,wcnss-wlan"),
- CLK_LOOKUP("rf_clk", cxo_a2.c, "fb000000.qcom,wcnss-wlan"),
+ CLK_LOOKUP("rf_clk", cxo_a1.c, "fb000000.qcom,wcnss-wlan"),
/* BUS DRIVER */
CLK_LOOKUP("bus_clk", cnoc_msmbus_clk.c, "msm_config_noc"),
@@ -3529,17 +3530,6 @@
panic("clock-8226: Unable to get the vdd_sr2_dig regulator!");
/*
- * These regulators are used at boot. Ensure they stay on
- * while the clock framework comes online.
- */
- vote_vdd_level(&vdd_sr2_pll, VDD_SR2_PLL_TUR);
- regulator_enable(vdd_sr2_pll.regulator[0]);
- regulator_enable(vdd_sr2_pll.regulator[1]);
-
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- regulator_enable(vdd_dig.regulator[0]);
-
- /*
* Hold an active set vote at a rate of 40MHz for the MMSS NOC AHB
* source. Sleep set vote is 0.
* RPM will also turn on gcc_mmss_noc_cfg_ahb_clk, which is needed to
@@ -3561,17 +3551,9 @@
mdss_clk_ctrl_pre_init(&mdss_ahb_clk.c);
}
-static int __init msm8226_clock_late_init(void)
-{
- unvote_vdd_level(&vdd_sr2_pll, VDD_SR2_PLL_TUR);
- unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- return 0;
-}
-
struct clock_init_data msm8226_clock_init_data __initdata = {
.table = msm_clocks_8226,
.size = ARRAY_SIZE(msm_clocks_8226),
.pre_init = msm8226_clock_pre_init,
.post_init = msm8226_clock_post_init,
- .late_init = msm8226_clock_late_init,
};
diff --git a/arch/arm/mach-msm/clock-8610.c b/arch/arm/mach-msm/clock-8610.c
index c9c71f0..8880661 100644
--- a/arch/arm/mach-msm/clock-8610.c
+++ b/arch/arm/mach-msm/clock-8610.c
@@ -143,6 +143,7 @@
#define CE1_AHB_CBCR 0x104C
#define COPSS_SMMU_AHB_CBCR 0x015C
#define LPSS_SMMU_AHB_CBCR 0x0158
+#define BIMC_SMMU_CBCR 0x1120
#define LPASS_Q6_AXI_CBCR 0x11C0
#define APCS_GPLL_ENA_VOTE 0x1480
#define APCS_CLOCK_BRANCH_ENA_VOTE 0x1484
@@ -461,9 +462,9 @@
#define D0_ID 1
#define D1_ID 2
-#define A0_ID 3
-#define A1_ID 4
-#define A2_ID 5
+#define A0_ID 4
+#define A1_ID 5
+#define A2_ID 6
#define DIFF_CLK_ID 7
#define DIV_CLK_ID 11
@@ -564,6 +565,7 @@
},
.base = &virt_bases[APCS_PLL_BASE],
.c = {
+ .parent = &gcc_xo_a_clk_src.c,
.dbg_name = "a7sspll",
.ops = &clk_ops_sr2_pll,
.vdd_class = &vdd_sr2_pll,
@@ -1499,6 +1501,17 @@
},
};
+static struct branch_clk gcc_bimc_smmu_clk = {
+ .cbcr_reg = BIMC_SMMU_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_bimc_smmu_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_bimc_smmu_clk.c),
+ },
+};
+
static struct clk_freq_tbl ftbl_csi0_1_clk[] = {
F_MM(100000000, gpll0, 6, 0, 0),
F_MM(200000000, mmpll0, 4, 0, 0),
@@ -1794,6 +1807,8 @@
.dbg_name = "bimc_gfx_clk",
.ops = &clk_ops_branch,
CLK_INIT(bimc_gfx_clk.c),
+ /* FIXME: Remove once kgsl votes on the depends clock. */
+ .depends = &gcc_bimc_smmu_clk.c,
},
};
@@ -2289,6 +2304,7 @@
{ &gcc_ce1_ahb_clk.c, GCC_BASE, 0x013a},
{ &gcc_xo_clk_src.c, GCC_BASE, 0x0149},
{ &bimc_clk.c, GCC_BASE, 0x0154},
+ { &gcc_bimc_smmu_clk.c, GCC_BASE, 0x015e},
{ &gcc_lpass_q6_axi_clk.c, GCC_BASE, 0x0160},
{ &mmssnoc_ahb_clk.c, MMSS_BASE, 0x0001},
@@ -2549,6 +2565,8 @@
CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991f000.serial"),
CLK_LOOKUP("core_clk", gcc_blsp1_uart3_apps_clk.c, "f991f000.serial"),
+ CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991e000.serial"),
+ CLK_LOOKUP("core_clk", gcc_blsp1_uart2_apps_clk.c, "f991e000.serial"),
CLK_LOOKUP("dfab_clk", pnoc_sps_clk.c, "msm_sps"),
CLK_LOOKUP("bus_clk", pnoc_qseecom_clk.c, "qseecom"),
@@ -2602,6 +2620,10 @@
CLK_LOOKUP("core_clk", qdss_clk.c, "fc352000.cti"),
CLK_LOOKUP("core_clk", qdss_clk.c, "fc353000.cti"),
CLK_LOOKUP("core_clk", qdss_clk.c, "fc354000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc34c000.jtagmm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc34d000.jtagmm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc34e000.jtagmm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc34f000.jtagmm"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc326000.tmc"),
@@ -2631,6 +2653,10 @@
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc352000.cti"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc353000.cti"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc354000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc34c000.jtagmm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc34d000.jtagmm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc34e000.jtagmm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc34f000.jtagmm"),
@@ -2752,6 +2778,8 @@
CLK_LOOKUP("iface_clk", oxili_ahb_clk.c, "fdc00000.qcom,kgsl-3d0"),
CLK_LOOKUP("mem_iface_clk", bimc_gfx_clk.c, "fdc00000.qcom,kgsl-3d0"),
CLK_LOOKUP("mem_clk", gmem_gfx3d_clk.c, "fdc00000.qcom,kgsl-3d0"),
+ CLK_LOOKUP("alt_mem_iface_clk", gcc_bimc_smmu_clk.c,
+ "fdc00000.qcom,kgsl-3d0"),
CLK_LOOKUP("iface_clk", vfe_ahb_clk.c, "fd890000.qcom,iommu"),
CLK_LOOKUP("core_clk", vfe_axi_clk.c, "fd890000.qcom,iommu"),
@@ -2761,6 +2789,7 @@
CLK_LOOKUP("core_clk", mdp_axi_clk.c, "fd870000.qcom,iommu"),
CLK_LOOKUP("iface_clk", oxili_ahb_clk.c, "fd880000.qcom,iommu"),
CLK_LOOKUP("core_clk", bimc_gfx_clk.c, "fd880000.qcom,iommu"),
+ CLK_LOOKUP("alt_core_clk", gcc_bimc_smmu_clk.c, "fd880000.qcom,iommu"),
CLK_LOOKUP("iface_clk", gcc_lpss_smmu_ahb_clk.c, "fd000000.qcom,iommu"),
CLK_LOOKUP("core_clk", gcc_lpass_q6_axi_clk.c, "fd000000.qcom,iommu"),
CLK_LOOKUP("iface_clk", gcc_copss_smmu_ahb_clk.c,
@@ -2783,17 +2812,24 @@
CLK_LOOKUP("measure_clk", l2_m_clk, ""),
CLK_LOOKUP("xo", gcc_xo_clk_src.c, "fb000000.qcom,wcnss-wlan"),
- CLK_LOOKUP("rf_clk", cxo_a2.c, "fb000000.qcom,wcnss-wlan"),
+ CLK_LOOKUP("rf_clk", cxo_a1.c, "fb000000.qcom,wcnss-wlan"),
CLK_LOOKUP("iface_clk", mdp_ahb_clk.c, "fd900000.qcom,mdss_mdp"),
CLK_LOOKUP("core_clk", mdp_axi_clk.c, "fd900000.qcom,mdss_mdp"),
CLK_LOOKUP("lcdc_clk", mdp_lcdc_clk.c, "fd900000.qcom,mdss_mdp"),
CLK_LOOKUP("vsync_clk", mdp_vsync_clk.c, "fd900000.qcom,mdss_mdp"),
+ CLK_LOOKUP("dsi_clk", mdp_dsi_clk.c, "fd900000.qcom,mdss_mdp"),
CLK_LOOKUP("iface_clk", dsi_ahb_clk.c, "fdd00000.qcom,mdss_dsi"),
- CLK_LOOKUP("core_clk", dsi_clk.c, "fdd00000.qcom,mdss_dsi"),
+ CLK_LOOKUP("dsi_clk", dsi_clk.c, "fdd00000.qcom,mdss_dsi"),
CLK_LOOKUP("byte_clk", dsi_byte_clk.c, "fdd00000.qcom,mdss_dsi"),
CLK_LOOKUP("esc_clk", dsi_esc_clk.c, "fdd00000.qcom,mdss_dsi"),
CLK_LOOKUP("pixel_clk", dsi_pclk_clk.c, "fdd00000.qcom,mdss_dsi"),
+
+ /* QSEECOM Clocks */
+ CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "qseecom"),
+ CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "qseecom"),
+ CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "qseecom"),
+ CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "qseecom"),
};
static struct clk_lookup msm_clocks_8610_rumi[] = {
@@ -2969,19 +3005,6 @@
if (IS_ERR(vdd_sr2_pll.regulator[1]))
panic("clock-8610: Unable to get the vdd_sr2_dig regulator!");
- vote_vdd_level(&vdd_sr2_pll, VDD_SR2_PLL_TUR);
- regulator_enable(vdd_sr2_pll.regulator[0]);
- regulator_enable(vdd_sr2_pll.regulator[1]);
-
- /*
- * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
- * until late_init. This may not be necessary with clock handoff;
- * Investigate this code on a real non-simulator target to determine
- * its necessity.
- */
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- regulator_enable(vdd_dig.regulator[0]);
-
enable_rpm_scaling();
/* Enable a clock to allow access to MMSS clock registers */
@@ -2998,17 +3021,9 @@
clk_prepare_enable(&mmss_s0_axi_clk.c);
}
-static int __init msm8610_clock_late_init(void)
-{
- unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- unvote_vdd_level(&vdd_sr2_pll, VDD_SR2_PLL_TUR);
- return 0;
-}
-
struct clock_init_data msm8610_clock_init_data __initdata = {
.table = msm_clocks_8610,
.size = ARRAY_SIZE(msm_clocks_8610),
.pre_init = msm8610_clock_pre_init,
.post_init = msm8610_clock_post_init,
- .late_init = msm8610_clock_late_init,
};
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index aefaa5c..509443d 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -6740,8 +6740,6 @@
if ((readl_relaxed(PRNG_CLK_NS_REG) & 0x7F) == 0x2B)
prng_clk.freq_tbl = clk_tbl_prng_64;
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-
clk_ops_local_pll.enable = sr_pll_clk_enable;
}
@@ -6852,7 +6850,7 @@
if (WARN(rc, "cfpb_a_clk not enabled (%d)\n", rc))
return rc;
- return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
+ return 0;
}
struct clock_init_data msm8960_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 9751ab2..1d7af9b 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -1405,7 +1405,14 @@
};
static struct clk_freq_tbl ftbl_gcc_gp_clk[] = {
- F(19200000, cxo, 1, 0, 0),
+ F( 4800000, cxo, 4, 0, 0),
+ F( 6000000, gpll0, 10, 1, 10),
+ F( 6750000, gpll0, 1, 1, 89),
+ F( 8000000, gpll0, 15, 1, 5),
+ F( 9600000, cxo, 2, 0, 0),
+ F(16000000, gpll0, 1, 2, 75),
+ F(19200000, cxo, 1, 0, 0),
+ F(24000000, gpll0, 5, 1, 5),
F_END
};
@@ -4750,7 +4757,8 @@
CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991f000.serial"),
CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f9924000.i2c"),
CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991e000.serial"),
- CLK_LOOKUP("core_clk", gcc_blsp1_qup1_i2c_apps_clk.c, ""),
+ CLK_LOOKUP("core_clk", gcc_blsp1_qup1_i2c_apps_clk.c, "f9923000.i2c"),
+ CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f9923000.i2c"),
CLK_LOOKUP("core_clk", gcc_blsp1_qup2_i2c_apps_clk.c, "f9924000.i2c"),
CLK_LOOKUP("core_clk", gcc_blsp1_qup2_spi_apps_clk.c, ""),
CLK_LOOKUP("core_clk", gcc_blsp1_qup1_spi_apps_clk.c, "f9923000.spi"),
@@ -4816,6 +4824,11 @@
CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "qseecom"),
CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "qseecom"),
+ CLK_LOOKUP("ce_drv_core_clk", gcc_ce2_clk.c, "qseecom"),
+ CLK_LOOKUP("ce_drv_iface_clk", gcc_ce2_ahb_clk.c, "qseecom"),
+ CLK_LOOKUP("ce_drv_bus_clk", gcc_ce2_axi_clk.c, "qseecom"),
+ CLK_LOOKUP("ce_drv_core_clk_src", ce2_clk_src.c, "qseecom"),
+
CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "scm"),
CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "scm"),
CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "scm"),
@@ -4998,6 +5011,8 @@
CLK_LOOKUP("csi3_rdi_clk", camss_csi3rdi_clk.c, "fda08c00.qcom,csid"),
/* ISPIF clocks */
+ CLK_LOOKUP("ispif_ahb_clk", camss_ispif_ahb_clk.c,
+ "fda0a000.qcom,ispif"),
CLK_LOOKUP("camss_vfe_vfe_clk", camss_vfe_vfe0_clk.c,
"fda0a000.qcom,ispif"),
CLK_LOOKUP("camss_csi_vfe_clk", camss_csi_vfe0_clk.c,
@@ -5488,15 +5503,6 @@
if (IS_ERR(vdd_dig.regulator[0]))
panic("clock-8974: Unable to get the vdd_dig regulator!");
- /*
- * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
- * until late_init. This may not be necessary with clock handoff;
- * Investigate this code on a real non-simulator target to determine
- * its necessity.
- */
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- regulator_enable(vdd_dig.regulator[0]);
-
enable_rpm_scaling();
reg_init();
@@ -5533,11 +5539,6 @@
mdss_clk_ctrl_pre_init(&mdss_ahb_clk.c);
}
-static int __init msm8974_clock_late_init(void)
-{
- return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-}
-
static void __init msm8974_rumi_clock_pre_init(void)
{
virt_bases[GCC_BASE] = ioremap(GCC_CC_PHYS, GCC_CC_SIZE);
@@ -5553,15 +5554,6 @@
vdd_dig.regulator[0] = regulator_get(NULL, "vdd_dig");
if (IS_ERR(vdd_dig.regulator[0]))
panic("clock-8974: Unable to get the vdd_dig regulator!");
-
- /*
- * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
- * until late_init. This may not be necessary with clock handoff;
- * Investigate this code on a real non-simulator target to determine
- * its necessity.
- */
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- regulator_enable(vdd_dig.regulator[0]);
}
struct clock_init_data msm8974_clock_init_data __initdata = {
@@ -5569,7 +5561,6 @@
.size = ARRAY_SIZE(msm_clocks_8974),
.pre_init = msm8974_clock_pre_init,
.post_init = msm8974_clock_post_init,
- .late_init = msm8974_clock_late_init,
};
struct clock_init_data msm8974_rumi_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index d0b4a32..5d55966 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -3777,8 +3777,6 @@
static void __init msm8660_clock_pre_init(void)
{
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-
/* Setup MM_PLL2 (PLL3), but turn it off. Rate set by set_rate_tv(). */
rmwreg(0, MM_PLL2_MODE_REG, BIT(0)); /* Disable output */
/* Set ref, bypass, assert reset, disable output, disable test mode */
@@ -3900,7 +3898,7 @@
if (WARN(rc, "mmfpb_a_clk not enabled (%d)\n", rc))
return rc;
- return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
+ return 0;
}
struct clock_init_data msm8x60_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index d6ae4335..6b218a1 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -1754,8 +1754,6 @@
{
u32 regval, is_pll_enabled, pll9_lval;
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-
clk_ops_local_pll.enable = sr_pll_clk_enable;
/* Enable PDM CXO source. */
@@ -1831,15 +1829,9 @@
clk_disable_unprepare(&pdm_clk.c);
}
-static int __init msm9615_clock_late_init(void)
-{
- return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-}
-
struct clock_init_data msm9615_clock_init_data __initdata = {
.table = msm_clocks_9615,
.size = ARRAY_SIZE(msm_clocks_9615),
.pre_init = msm9615_clock_pre_init,
.post_init = msm9615_clock_post_init,
- .late_init = msm9615_clock_late_init,
};
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index 6817c6c..4984255 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -411,6 +411,7 @@
},
.base = &virt_bases[APCS_PLL_BASE],
.c = {
+ .parent = &cxo_a_clk_src.c,
.dbg_name = "apcspll_clk_src",
.ops = &clk_ops_local_pll,
CLK_INIT(apcspll_clk_src.c),
@@ -2017,12 +2018,6 @@
*/
clk_prepare_enable(&cxo_a_clk_src.c);
- /*
- * TODO: This call is to prevent sending 0Hz to rpm to turn off pnoc.
- * Needs to remove this after vote of pnoc from sdcc driver is ready.
- */
- clk_prepare_enable(&pnoc_msmbus_a_clk.c);
-
/* Set rates for single-rate clocks. */
clk_set_rate(&usb_hs_system_clk_src.c,
usb_hs_system_clk_src.freq_tbl[0].freq_hz);
@@ -2089,9 +2084,6 @@
if (IS_ERR(vdd_dig.regulator[0]))
panic("clock-9625: Unable to get the vdd_dig regulator!");
- vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
- regulator_enable(vdd_dig.regulator[0]);
-
enable_rpm_scaling();
reg_init();
@@ -2107,15 +2099,9 @@
measure_mux_common, sizeof(measure_mux_common));
}
-static int __init msm9625_clock_late_init(void)
-{
- return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
-}
-
struct clock_init_data msm9625_clock_init_data __initdata = {
.table = msm_clocks_9625,
.size = ARRAY_SIZE(msm_clocks_9625),
.pre_init = msm9625_clock_pre_init,
.post_init = msm9625_clock_post_init,
- .late_init = msm9625_clock_late_init,
};
diff --git a/arch/arm/mach-msm/clock-mdss-8226.c b/arch/arm/mach-msm/clock-mdss-8226.c
index f2c8d58..edfaf90 100644
--- a/arch/arm/mach-msm/clock-mdss-8226.c
+++ b/arch/arm/mach-msm/clock-mdss-8226.c
@@ -71,7 +71,6 @@
{
u32 status;
- clk_prepare_enable(mdss_dsi_ahb_clk);
/* poll for PLL ready status */
if (readl_poll_timeout_noirq((mdss_dsi_base + 0x02c0),
status,
@@ -83,7 +82,6 @@
} else {
pll_initialized = 1;
}
- clk_disable_unprepare(mdss_dsi_ahb_clk);
return pll_initialized;
}
@@ -177,28 +175,141 @@
return ret;
}
-static void mdss_dsi_uniphy_pll_lock_detect_setting(void)
-{
- REG_W(0x04, mdss_dsi_base + 0x0264); /* LKDetect CFG2 */
- udelay(100);
- REG_W(0x05, mdss_dsi_base + 0x0264); /* LKDetect CFG2 */
- udelay(500);
-}
-
static void mdss_dsi_uniphy_pll_sw_reset(void)
{
+ /*
+ * Add hardware recommended delays after toggling the
+ * software reset bit off and back on.
+ */
REG_W(0x01, mdss_dsi_base + 0x0268); /* PLL TEST CFG */
- udelay(1);
+ udelay(300);
REG_W(0x00, mdss_dsi_base + 0x0268); /* PLL TEST CFG */
+ udelay(300);
+}
+
+static void mdss_dsi_pll_enable_casem(void)
+{
+ int i;
+
+ /*
+ * Add hardware recommended delays between register writes for
+ * the updates to take effect. These delays are necessary for the
+ * PLL to successfully lock.
+ */
+ REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1000);
+
+ for (i = 0; (i < 3) && !mdss_dsi_check_pll_lock(); i++) {
+ REG_W(0x07, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1000);
+ }
+
+ if (pll_initialized)
+ pr_debug("%s: PLL Locked after %d attempts\n", __func__, i);
+ else
+ pr_debug("%s: PLL failed to lock\n", __func__);
+}
+
+static void mdss_dsi_pll_enable_casef1(void)
+{
+ /*
+ * Add hardware recommended delays between register writes for
+ * the updates to take effect. These delays are necessary for the
+ * PLL to successfully lock.
+ */
+ REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0d, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1000);
+
+ if (mdss_dsi_check_pll_lock())
+ pr_debug("%s: PLL Locked\n", __func__);
+ else
+ pr_debug("%s: PLL failed to lock\n", __func__);
+}
+
+static void mdss_dsi_pll_enable_cased(void)
+{
+ /*
+ * Add hardware recommended delays between register writes for
+ * the updates to take effect. These delays are necessary for the
+ * PLL to successfully lock.
+ */
+ REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
udelay(1);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+ REG_W(0x07, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+ REG_W(0x07, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1);
+
+ if (mdss_dsi_check_pll_lock())
+ pr_debug("%s: PLL Locked\n", __func__);
+ else
+ pr_debug("%s: PLL failed to lock\n", __func__);
+}
+
+static void mdss_dsi_pll_enable_casec(void)
+{
+ /*
+ * Add hardware recommended delays between register writes for
+ * the updates to take effect. These delays are necessary for the
+ * PLL to successfully lock.
+ */
+ REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1000);
+
+ if (mdss_dsi_check_pll_lock())
+ pr_debug("%s: PLL Locked\n", __func__);
+ else
+ pr_debug("%s: PLL failed to lock\n", __func__);
+}
+
+static void mdss_dsi_pll_enable_casee(void)
+{
+ /*
+ * Add hardware recommended delays between register writes for
+ * the updates to take effect. These delays are necessary for the
+ * PLL to successfully lock.
+ */
+ REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(200);
+ REG_W(0x0d, mdss_dsi_base + 0x0220); /* GLB CFG */
+ REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
+ udelay(1000);
+
+ if (mdss_dsi_check_pll_lock())
+ pr_debug("%s: PLL Locked\n", __func__);
+ else
+ pr_debug("%s: PLL failed to lock\n", __func__);
}
static int __mdss_dsi_pll_enable(struct clk *c)
{
- u32 status;
- u32 max_reads, timeout_us;
- int i;
-
if (!pll_initialized) {
if (dsi_pll_rate)
__mdss_dsi_pll_byte_set_rate(c, dsi_pll_rate);
@@ -207,56 +318,45 @@
__func__);
}
+ /*
+ * Try all PLL power-up sequences one-by-one until
+ * PLL lock is detected
+ */
mdss_dsi_uniphy_pll_sw_reset();
- /* PLL power up */
- /* Add HW recommended delay between
- register writes for the update to propagate */
- REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(20);
- REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(100);
- REG_W(0x0d, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(20);
- REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(200);
+ mdss_dsi_pll_enable_casem();
+ if (pll_initialized)
+ goto pll_locked;
- for (i = 0; i < 3; i++) {
- mdss_dsi_uniphy_pll_lock_detect_setting();
- /* poll for PLL ready status */
- max_reads = 5;
- timeout_us = 100;
- if (readl_poll_timeout_noirq((mdss_dsi_base + 0x02c0),
- status,
- ((status & 0x01) == 1),
- max_reads, timeout_us)) {
- pr_debug("%s: DSI PLL status=%x failed to Lock\n",
- __func__, status);
- pr_debug("%s:Trying to power UP PLL again\n",
- __func__);
- } else
- break;
+ mdss_dsi_uniphy_pll_sw_reset();
+ mdss_dsi_pll_enable_cased();
+ if (pll_initialized)
+ goto pll_locked;
- mdss_dsi_uniphy_pll_sw_reset();
- udelay(1000);
- /* Add HW recommended delay between
- register writes for the update to propagate */
- REG_W(0x01, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(20);
- REG_W(0x05, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(100);
- REG_W(0x0d, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(20);
- REG_W(0x0f, mdss_dsi_base + 0x0220); /* GLB CFG */
- udelay(200);
- }
+ mdss_dsi_uniphy_pll_sw_reset();
+ mdss_dsi_pll_enable_cased();
+ if (pll_initialized)
+ goto pll_locked;
- if ((status & 0x01) != 1) {
- pr_err("%s: DSI PLL status=%x failed to Lock\n",
- __func__, status);
- return -EINVAL;
- }
+ mdss_dsi_uniphy_pll_sw_reset();
+ mdss_dsi_pll_enable_casef1();
+ if (pll_initialized)
+ goto pll_locked;
- pr_debug("%s: **** PLL Lock success\n", __func__);
+ mdss_dsi_uniphy_pll_sw_reset();
+ mdss_dsi_pll_enable_casec();
+ if (pll_initialized)
+ goto pll_locked;
+
+ mdss_dsi_uniphy_pll_sw_reset();
+ mdss_dsi_pll_enable_casee();
+ if (pll_initialized)
+ goto pll_locked;
+
+ pr_err("%s: DSI PLL failed to Lock\n", __func__);
+ return -EINVAL;
+
+pll_locked:
+ pr_debug("%s: PLL Lock success\n", __func__);
return 0;
}
@@ -264,7 +364,7 @@
static void __mdss_dsi_pll_disable(void)
{
writel_relaxed(0x00, mdss_dsi_base + 0x0220); /* GLB CFG */
- pr_debug("%s: **** disable pll Initialize\n", __func__);
+ pr_debug("%s: PLL disabled\n", __func__);
pll_initialized = 0;
}
@@ -305,13 +405,17 @@
/* todo: Adjust these values appropriately */
static enum handoff mdss_dsi_pll_byte_handoff(struct clk *c)
{
- if (mdss_gdsc_enabled() && mdss_dsi_check_pll_lock()) {
- c->rate = 59000000;
- dsi_pll_rate = 59000000;
- pll_byte_clk_rate = 59000000;
- pll_pclk_rate = 117000000;
- dsipll_refcount++;
- return HANDOFF_ENABLED_CLK;
+ if (mdss_gdsc_enabled()) {
+ clk_prepare_enable(mdss_dsi_ahb_clk);
+ if (mdss_dsi_check_pll_lock()) {
+ c->rate = 59000000;
+ dsi_pll_rate = 59000000;
+ pll_byte_clk_rate = 59000000;
+ pll_pclk_rate = 117000000;
+ dsipll_refcount++;
+ return HANDOFF_ENABLED_CLK;
+ }
+ clk_disable_unprepare(mdss_dsi_ahb_clk);
}
return HANDOFF_DISABLED_CLK;
@@ -320,10 +424,14 @@
/* todo: Adjust these values appropriately */
static enum handoff mdss_dsi_pll_pixel_handoff(struct clk *c)
{
- if (mdss_gdsc_enabled() && mdss_dsi_check_pll_lock()) {
- c->rate = 117000000;
- dsipll_refcount++;
- return HANDOFF_ENABLED_CLK;
+ if (mdss_gdsc_enabled()) {
+ clk_prepare_enable(mdss_dsi_ahb_clk);
+ if (mdss_dsi_check_pll_lock()) {
+ c->rate = 117000000;
+ dsipll_refcount++;
+ return HANDOFF_ENABLED_CLK;
+ }
+ clk_disable_unprepare(mdss_dsi_ahb_clk);
}
return HANDOFF_DISABLED_CLK;
diff --git a/arch/arm/mach-msm/clock-rpm.c b/arch/arm/mach-msm/clock-rpm.c
index ee91a34..3870e2b 100644
--- a/arch/arm/mach-msm/clock-rpm.c
+++ b/arch/arm/mach-msm/clock-rpm.c
@@ -53,12 +53,8 @@
if (rc < 0)
return rc;
- if (!r->branch) {
- r->last_set_khz = iv.value;
- if (!r->active_only)
- r->last_set_sleep_khz = iv.value;
+ if (!r->branch)
r->c.rate = iv.value * r->factor;
- }
return 0;
}
@@ -78,12 +74,8 @@
static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
{
- if (!r->branch) {
- r->last_set_khz = INT_MAX;
- if (!r->active_only)
- r->last_set_sleep_khz = INT_MAX;
- r->c.rate = 1 * r->factor;
- }
+ if (!r->branch)
+ r->c.rate = INT_MAX;
return 0;
}
@@ -113,6 +105,22 @@
static DEFINE_MUTEX(rpm_clock_lock);
+static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate,
+ unsigned long *active_khz, unsigned long *sleep_khz)
+{
+ /* Convert the rate (hz) to khz */
+ *active_khz = DIV_ROUND_UP(rate, r->factor);
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep_khz = 0;
+ else
+ *sleep_khz = *active_khz;
+}
+
static int rpm_clk_prepare(struct clk *clk)
{
struct rpm_clk *r = to_rpm_clk(clk);
@@ -124,18 +132,16 @@
mutex_lock(&rpm_clock_lock);
- this_khz = r->last_set_khz;
+ to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);
+
/* Don't send requests to the RPM if the rate has not been set. */
if (this_khz == 0)
goto out;
- this_sleep_khz = r->last_set_sleep_khz;
-
/* Take peer clock's rate into account only if it's enabled. */
- if (peer->enabled) {
- peer_khz = peer->last_set_khz;
- peer_sleep_khz = peer->last_set_sleep_khz;
- }
+ if (peer->enabled)
+ to_active_sleep_khz(peer, peer->c.rate,
+ &peer_khz, &peer_sleep_khz);
value = max(this_khz, peer_khz);
if (r->branch)
@@ -171,17 +177,16 @@
mutex_lock(&rpm_clock_lock);
- if (r->last_set_khz) {
+ if (r->c.rate) {
uint32_t value;
struct rpm_clk *peer = r->peer;
unsigned long peer_khz = 0, peer_sleep_khz = 0;
int rc;
/* Take peer clock's rate into account only if it's enabled. */
- if (peer->enabled) {
- peer_khz = peer->last_set_khz;
- peer_sleep_khz = peer->last_set_sleep_khz;
- }
+ if (peer->enabled)
+ to_active_sleep_khz(peer, peer->c.rate,
+ &peer_khz, &peer_sleep_khz);
value = r->branch ? !!peer_khz : peer_khz;
rc = clk_rpmrs_set_rate_active(r, value);
@@ -204,27 +209,19 @@
unsigned long this_khz, this_sleep_khz;
int rc = 0;
- this_khz = DIV_ROUND_UP(rate, r->factor);
-
mutex_lock(&rpm_clock_lock);
- /* Active-only clocks don't care what the rate is during sleep. So,
- * they vote for zero. */
- if (r->active_only)
- this_sleep_khz = 0;
- else
- this_sleep_khz = this_khz;
-
if (r->enabled) {
uint32_t value;
struct rpm_clk *peer = r->peer;
unsigned long peer_khz = 0, peer_sleep_khz = 0;
+ to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);
+
/* Take peer clock's rate into account only if it's enabled. */
- if (peer->enabled) {
- peer_khz = peer->last_set_khz;
- peer_sleep_khz = peer->last_set_sleep_khz;
- }
+ if (peer->enabled)
+ to_active_sleep_khz(peer, peer->c.rate,
+ &peer_khz, &peer_sleep_khz);
value = max(this_khz, peer_khz);
rc = clk_rpmrs_set_rate_active(r, value);
@@ -234,10 +231,6 @@
value = max(this_sleep_khz, peer_sleep_khz);
rc = clk_rpmrs_set_rate_sleep(r, value);
}
- if (!rc) {
- r->last_set_khz = this_khz;
- r->last_set_sleep_khz = this_sleep_khz;
- }
out:
mutex_unlock(&rpm_clock_lock);
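The hunks above drop the cached last_set_khz/last_set_sleep_khz pair and recompute both votes from r->c.rate through to_active_sleep_khz(). For reference, a minimal stand-alone sketch of the resulting vote aggregation, assuming only the fields this patch touches (the struct below models, but is not, the kernel's struct rpm_clk, and the rates are made up):

#include <stdio.h>
#include <stdbool.h>

struct model_rpm_clk {
	unsigned long rate;	/* Hz, mirrors r->c.rate */
	unsigned long factor;	/* 1000 for kHz-based resources */
	bool active_only;
	bool enabled;
};

static void to_active_sleep_khz(const struct model_rpm_clk *r, unsigned long rate,
				unsigned long *active_khz, unsigned long *sleep_khz)
{
	/* DIV_ROUND_UP(rate, r->factor) */
	*active_khz = (rate + r->factor - 1) / r->factor;
	/* Active-only clocks vote 0 for the sleep set. */
	*sleep_khz = r->active_only ? 0 : *active_khz;
}

int main(void)
{
	struct model_rpm_clk clk  = { 19200000, 1000, false, true };
	struct model_rpm_clk peer = { 37500000, 1000, true,  true };
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;

	to_active_sleep_khz(&clk, clk.rate, &this_khz, &this_sleep_khz);
	if (peer.enabled)
		to_active_sleep_khz(&peer, peer.rate, &peer_khz, &peer_sleep_khz);

	/* The RPM receives the max of the two votes for each set. */
	printf("active: %lu kHz, sleep: %lu kHz\n",
	       this_khz > peer_khz ? this_khz : peer_khz,
	       this_sleep_khz > peer_sleep_khz ? this_sleep_khz : peer_sleep_khz);
	return 0;
}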
diff --git a/arch/arm/mach-msm/clock-rpm.h b/arch/arm/mach-msm/clock-rpm.h
index 8d328e3..b20c3d6 100644
--- a/arch/arm/mach-msm/clock-rpm.h
+++ b/arch/arm/mach-msm/clock-rpm.h
@@ -37,9 +37,6 @@
const int rpm_clk_id;
const int rpm_status_id;
const bool active_only;
- unsigned last_set_khz;
- /* 0 if active_only. Otherwise, same as last_set_khz. */
- unsigned last_set_sleep_khz;
bool enabled;
bool branch; /* true: RPM only accepts 1 for ON and 0 for OFF */
unsigned factor;
@@ -107,8 +104,6 @@
.rpm_status_id = (stat_id), \
.rpm_key = (key), \
.peer = &active, \
- .last_set_khz = ((r) / 1000), \
- .last_set_sleep_khz = ((r) / 1000), \
.factor = 1000, \
.branch = true, \
.rpmrs_data = (rpmrsdata),\
@@ -125,7 +120,6 @@
.rpm_status_id = (stat_id), \
.rpm_key = (key), \
.peer = &name, \
- .last_set_khz = ((r) / 1000), \
.active_only = true, \
.factor = 1000, \
.branch = true, \
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index fecb720..044fc2c 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -33,6 +33,12 @@
};
static LIST_HEAD(handoff_list);
+struct handoff_vdd {
+ struct list_head list;
+ struct clk_vdd_class *vdd_class;
+};
+static LIST_HEAD(handoff_vdd_list);
+
/* Find the voltage level required for a given rate. */
int find_vdd_level(struct clk *clk, unsigned long rate)
{
@@ -588,6 +594,38 @@
}
EXPORT_SYMBOL(msm_clock_register);
+
+static void vdd_class_init(struct clk_vdd_class *vdd)
+{
+ struct handoff_vdd *v;
+ int i;
+
+ if (!vdd)
+ return;
+
+ list_for_each_entry(v, &handoff_vdd_list, list) {
+ if (v->vdd_class == vdd)
+ return;
+ }
+
+ pr_debug("voting for vdd_class %s\n", vdd->class_name);
+ if (vote_vdd_level(vdd, vdd->num_levels - 1))
+ pr_err("failed to vote for %s\n", vdd->class_name);
+
+ for (i = 0; i < vdd->num_regulators; i++)
+ regulator_enable(vdd->regulator[i]);
+
+ v = kmalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ pr_err("Unable to kmalloc. %s will be stuck at max.\n",
+ vdd->class_name);
+ return;
+ }
+
+ v->vdd_class = vdd;
+ list_add_tail(&v->list, &handoff_vdd_list);
+}
+
static int __init __handoff_clk(struct clk *clk)
{
enum handoff state = HANDOFF_DISABLED_CLK;
@@ -693,6 +731,16 @@
init_sibling_lists(clock_tbl, num_clocks);
/*
+ * Enable regulators and temporarily set them up at maximum voltage.
+	 * Once all the clocks have made their respective votes, this
+	 * temporary vote is removed at late_init, by which time all the
+	 * clocks are assumed to have been handed off.
+ */
+ for (n = 0; n < num_clocks; n++)
+ vdd_class_init(clock_tbl[n].clk->vdd_class);
+
+ /*
* Detect and preserve initial clock state until clock_late_init() or
* a driver explicitly changes it, whichever is first.
*/
@@ -713,8 +761,12 @@
static int __init clock_late_init(void)
{
struct handoff_clk *h, *h_temp;
+ struct handoff_vdd *v, *v_temp;
int ret = 0;
+ if (clk_init_data->late_init)
+ ret = clk_init_data->late_init();
+
pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
clk_disable_unprepare(h->clk);
@@ -722,8 +774,12 @@
kfree(h);
}
- if (clk_init_data->late_init)
- ret = clk_init_data->late_init();
+ list_for_each_entry_safe(v, v_temp, &handoff_vdd_list, list) {
+ unvote_vdd_level(v->vdd_class, v->vdd_class->num_levels - 1);
+ list_del(&v->list);
+ kfree(v);
+ }
+
return ret;
}
late_initcall(clock_late_init);
diff --git a/arch/arm/mach-msm/gdsc.c b/arch/arm/mach-msm/gdsc.c
index 6665d66..a07b13d 100644
--- a/arch/arm/mach-msm/gdsc.c
+++ b/arch/arm/mach-msm/gdsc.c
@@ -38,7 +38,7 @@
#define EN_FEW_WAIT_VAL (0x8 << 16)
#define CLK_DIS_WAIT_VAL (0x2 << 12)
-#define TIMEOUT_US 100
+#define TIMEOUT_US 1000
struct gdsc {
struct regulator_dev *rdev;
diff --git a/arch/arm/mach-msm/include/mach/iommu_perfmon.h b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
index dcae83b..dc4671c 100644
--- a/arch/arm/mach-msm/include/mach/iommu_perfmon.h
+++ b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
@@ -63,6 +63,7 @@
* @iommu_dev: pointer to iommu device
* @ops: iommu access operations pointer.
* @hw_ops: iommu pm hw access operations pointer.
+ * @always_on: 1 if iommu is always on, 0 otherwise.
*/
struct iommu_info {
const char *iommu_name;
@@ -71,6 +72,7 @@
struct device *iommu_dev;
struct iommu_access_ops *ops;
struct iommu_pm_hw_ops *hw_ops;
+ unsigned int always_on;
};
/**
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index 56c4afd..6119a3c 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -70,7 +70,7 @@
#ifndef __ASSEMBLY__
void *allocate_contiguous_ebi(unsigned long, unsigned long, int);
-unsigned long allocate_contiguous_ebi_nomap(unsigned long, unsigned long);
+phys_addr_t allocate_contiguous_ebi_nomap(unsigned long, unsigned long);
void clean_and_invalidate_caches(unsigned long, unsigned long, unsigned long);
void clean_caches(unsigned long, unsigned long, unsigned long);
void invalidate_caches(unsigned long, unsigned long, unsigned long);
diff --git a/arch/arm/mach-msm/include/mach/scm.h b/arch/arm/mach-msm/include/mach/scm.h
index 4258dbd..42e04dd 100644
--- a/arch/arm/mach-msm/include/mach/scm.h
+++ b/arch/arm/mach-msm/include/mach/scm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
#define SCM_SVC_MP 0xC
#define SCM_SVC_CRYPTO 0xA
#define SCM_SVC_DCVS 0xD
+#define SCM_SVC_ES 0x10
#define SCM_SVC_TZSCHEDULER 0xFC
#ifdef CONFIG_MSM_SCM
@@ -32,6 +33,7 @@
extern s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1);
extern s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2);
+extern s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3);
extern s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
u32 arg4, u32 *ret1, u32 *ret2);
@@ -59,6 +61,12 @@
return 0;
}
+static inline s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+ u32 arg3)
+{
+ return 0;
+}
+
static inline s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
{
diff --git a/arch/arm/mach-msm/ipc_router.c b/arch/arm/mach-msm/ipc_router.c
index d81dbb4..0d617a6 100644
--- a/arch/arm/mach-msm/ipc_router.c
+++ b/arch/arm/mach-msm/ipc_router.c
@@ -1587,9 +1587,11 @@
if (!rport_ptr)
pr_err("%s: Remote port create "
"failed\n", __func__);
- rport_ptr->sec_rule =
- msm_ipc_get_security_rule(
- msg->srv.service, msg->srv.instance);
+ else
+ rport_ptr->sec_rule =
+ msm_ipc_get_security_rule(
+ msg->srv.service,
+ msg->srv.instance);
}
wake_up(&newserver_wait);
}
@@ -1890,6 +1892,7 @@
head_skb = skb_peek(pkt->pkt_fragment_q);
if (!head_skb) {
pr_err("%s: pkt_fragment_q is empty\n", __func__);
+ release_pkt(pkt);
return -EINVAL;
}
hdr = (struct rr_header *)skb_push(head_skb, IPC_ROUTER_HDR_SIZE);
diff --git a/arch/arm/mach-msm/ipc_socket.c b/arch/arm/mach-msm/ipc_socket.c
index c0422a1..342663e 100644
--- a/arch/arm/mach-msm/ipc_socket.c
+++ b/arch/arm/mach-msm/ipc_socket.c
@@ -367,7 +367,8 @@
if (port_ptr->type == CLIENT_PORT)
wait_for_irsc_completion();
ipc_buf = skb_peek(msg);
- msm_ipc_router_ipc_log(IPC_SEND, ipc_buf, port_ptr);
+ if (ipc_buf)
+ msm_ipc_router_ipc_log(IPC_SEND, ipc_buf, port_ptr);
ret = msm_ipc_router_send_to(port_ptr, msg, &dest->address);
if (ret == (IPC_ROUTER_HDR_SIZE + total_len))
ret = total_len;
@@ -429,7 +430,8 @@
ret = msm_ipc_router_extract_msg(m, msg);
ipc_buf = skb_peek(msg);
- msm_ipc_router_ipc_log(IPC_RECV, ipc_buf, port_ptr);
+ if (ipc_buf)
+ msm_ipc_router_ipc_log(IPC_RECV, ipc_buf, port_ptr);
msm_ipc_router_release_msg(msg);
msg = NULL;
release_sock(sk);
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 5f85ee9..1680993 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -234,7 +234,7 @@
}
EXPORT_SYMBOL(allocate_contiguous_ebi);
-unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
+phys_addr_t allocate_contiguous_ebi_nomap(unsigned long size,
unsigned long align)
{
return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
diff --git a/arch/arm/mach-msm/mpm-of.c b/arch/arm/mach-msm/mpm-of.c
index 09f784d..5c654b0 100644
--- a/arch/arm/mach-msm/mpm-of.c
+++ b/arch/arm/mach-msm/mpm-of.c
@@ -70,9 +70,6 @@
#define MSM_MPM_IRQ_INDEX(irq) (irq / 32)
#define MSM_MPM_IRQ_MASK(irq) BIT(irq % 32)
-#define MSM_MPM_DETECT_CTL_INDEX(irq) (irq / 16)
-#define MSM_MPM_DETECT_CTL_SHIFT(irq) ((irq % 16) * 2)
-
#define hashfn(val) (val % MSM_MPM_NR_MPM_IRQS)
#define SCLK_HZ (32768)
#define ARCH_TIMER_HZ (19200000)
@@ -81,8 +78,8 @@
enum mpm_reg_offsets {
MSM_MPM_REG_WAKEUP,
MSM_MPM_REG_ENABLE,
- MSM_MPM_REG_DETECT_CTL,
- MSM_MPM_REG_DETECT_CTL1,
+ MSM_MPM_REG_FALLING_EDGE,
+ MSM_MPM_REG_RISING_EDGE,
MSM_MPM_REG_POLARITY,
MSM_MPM_REG_STATUS,
};
@@ -91,7 +88,8 @@
static uint32_t msm_mpm_enabled_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_wake_irq[MSM_MPM_REG_WIDTH];
-static uint32_t msm_mpm_detect_ctl[MSM_MPM_REG_WIDTH * 2];
+static uint32_t msm_mpm_falling_edge[MSM_MPM_REG_WIDTH];
+static uint32_t msm_mpm_rising_edge[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_polarity[MSM_MPM_REG_WIDTH];
enum {
@@ -174,11 +172,11 @@
reg = MSM_MPM_REG_ENABLE;
msm_mpm_write(reg, i, irqs[i]);
- reg = MSM_MPM_REG_DETECT_CTL;
- msm_mpm_write(reg, i, msm_mpm_detect_ctl[i]);
+ reg = MSM_MPM_REG_FALLING_EDGE;
+ msm_mpm_write(reg, i, msm_mpm_falling_edge[i]);
- reg = MSM_MPM_REG_DETECT_CTL1;
- msm_mpm_write(reg, i, msm_mpm_detect_ctl[2+i]);
+ reg = MSM_MPM_REG_RISING_EDGE;
+ msm_mpm_write(reg, i, msm_mpm_rising_edge[i]);
reg = MSM_MPM_REG_POLARITY;
msm_mpm_write(reg, i, msm_mpm_polarity[i]);
@@ -264,23 +262,24 @@
return 0;
}
-static void msm_mpm_set_detect_ctl(int pin, unsigned int flow_type)
+static void msm_mpm_set_edge_ctl(int pin, unsigned int flow_type)
{
uint32_t index;
- uint32_t val = 0;
- uint32_t shift;
+ uint32_t mask;
- index = MSM_MPM_DETECT_CTL_INDEX(pin);
- shift = MSM_MPM_DETECT_CTL_SHIFT(pin);
-
- if (flow_type & IRQ_TYPE_EDGE_RISING)
- val |= 0x02;
+ index = MSM_MPM_IRQ_INDEX(pin);
+ mask = MSM_MPM_IRQ_MASK(pin);
if (flow_type & IRQ_TYPE_EDGE_FALLING)
- val |= 0x01;
+ msm_mpm_falling_edge[index] |= mask;
+ else
+ msm_mpm_falling_edge[index] &= ~mask;
- msm_mpm_detect_ctl[index] &= ~(0x3 << shift);
- msm_mpm_detect_ctl[index] |= (val & 0x03) << shift;
+ if (flow_type & IRQ_TYPE_EDGE_RISING)
+ msm_mpm_rising_edge[index] |= mask;
+ else
+ msm_mpm_rising_edge[index] &= ~mask;
+
}
static int msm_mpm_set_irq_type_exclusive(
@@ -300,7 +299,7 @@
if (index >= MSM_MPM_REG_WIDTH)
return -EFAULT;
- msm_mpm_set_detect_ctl(mpm_irq, flow_type);
+ msm_mpm_set_edge_ctl(mpm_irq, flow_type);
if (flow_type & IRQ_TYPE_LEVEL_HIGH)
msm_mpm_polarity[index] |= mask;
@@ -378,7 +377,7 @@
if (!msm_mpm_is_initialized())
return -EINVAL;
- if (pin > MSM_MPM_NR_MPM_IRQS)
+ if (pin >= MSM_MPM_NR_MPM_IRQS)
return -EINVAL;
spin_lock_irqsave(&msm_mpm_lock, flags);
@@ -429,7 +428,7 @@
spin_lock_irqsave(&msm_mpm_lock, flags);
- msm_mpm_set_detect_ctl(pin, flow_type);
+ msm_mpm_set_edge_ctl(pin, flow_type);
if (flow_type & IRQ_TYPE_LEVEL_HIGH)
msm_mpm_polarity[index] |= mask;
@@ -768,7 +767,7 @@
return;
failed_malloc:
- for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++) {
+ for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
mpm_of_map[i].chip->irq_mask = NULL;
mpm_of_map[i].chip->irq_unmask = NULL;
mpm_of_map[i].chip->irq_disable = NULL;
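The detect-ctl rework above replaces the packed two-bits-per-pin encoding with one bit per pin in separate falling- and rising-edge bitmaps, indexed exactly like the enable and wake registers. A stand-alone sketch of that bookkeeping (register width, flag values and the pin number are illustrative, not the driver's real constants):

#include <stdio.h>
#include <stdint.h>

#define MODEL_REG_WIDTH		2
#define IRQ_TYPE_EDGE_RISING	0x1
#define IRQ_TYPE_EDGE_FALLING	0x2

static uint32_t falling_edge[MODEL_REG_WIDTH];
static uint32_t rising_edge[MODEL_REG_WIDTH];

static void set_edge_ctl(int pin, unsigned int flow_type)
{
	uint32_t index = pin / 32;		/* MSM_MPM_IRQ_INDEX(pin) */
	uint32_t mask  = 1u << (pin % 32);	/* MSM_MPM_IRQ_MASK(pin) */

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		falling_edge[index] |= mask;
	else
		falling_edge[index] &= ~mask;

	if (flow_type & IRQ_TYPE_EDGE_RISING)
		rising_edge[index] |= mask;
	else
		rising_edge[index] &= ~mask;
}

int main(void)
{
	/* Pin 37 configured for both edges lands in word 1, bit 5. */
	set_edge_ctl(37, IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING);
	printf("falling[1]=0x%08x rising[1]=0x%08x\n",
	       falling_edge[1], rising_edge[1]);
	return 0;
}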
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_of.c b/arch/arm/mach-msm/msm_bus/msm_bus_of.c
index b9a553a..af3537c 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_of.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_of.c
@@ -79,9 +79,10 @@
}
pdata->num_usecases = num_usecases;
- ret = of_property_read_u32(of_node, "qcom,msm-bus,active-only",
- &pdata->active_only);
- if (ret) {
+
+ if (of_property_read_bool(of_node, "qcom,msm-bus,active-only"))
+ pdata->active_only = 1;
+ else {
pr_debug("active_only flag absent.\n");
pr_debug("Using dual context by default\n");
}
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index 3c50bc6..5a6e66a 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/regulator/krait-regulator.h>
+#include <linux/cpu.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/system.h>
@@ -56,7 +57,6 @@
#include <mach/event_timer.h>
#define CREATE_TRACE_POINTS
#include "trace_msm_low_power.h"
-
#define SCM_L2_RETENTION (0x2)
#define SCM_CMD_TERMINATE_PC (0x2)
@@ -130,6 +130,7 @@
static uint32_t msm_pm_max_sleep_time;
static bool msm_no_ramp_down_pc;
static struct msm_pm_sleep_status_data *msm_pm_slp_sts;
+static bool msm_pm_pc_reset_timer;
static int msm_pm_get_pc_mode(struct device_node *node,
const char *key, uint32_t *pc_mode_val)
@@ -525,9 +526,14 @@
if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
pr_info("CPU%u: %s: program vector to %p\n",
cpu, __func__, entry);
+ if (from_idle && msm_pm_pc_reset_timer)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
collapsed = msm_pm_collapse();
+ if (from_idle && msm_pm_pc_reset_timer)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+
msm_pm_boot_config_after_pc(cpu);
if (collapsed) {
@@ -1229,18 +1235,11 @@
},
};
-static int __devinit msm_pm_init(void)
+static int __init msm_pm_setup_saved_state(void)
{
pgd_t *pc_pgd;
pmd_t *pmd;
unsigned long pmdval;
- enum msm_pm_time_stats_id enable_stats[] = {
- MSM_PM_STAT_IDLE_WFI,
- MSM_PM_STAT_RETENTION,
- MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
- MSM_PM_STAT_IDLE_POWER_COLLAPSE,
- MSM_PM_STAT_SUSPEND,
- };
unsigned long exit_phys;
/* Page table for cores to come back up safely. */
@@ -1280,12 +1279,63 @@
clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
virt_to_phys(&msm_pm_pc_pgd));
+ return 0;
+}
+core_initcall(msm_pm_setup_saved_state);
+
+static void setup_broadcast_timer(void *arg)
+{
+ unsigned long reason = (unsigned long)arg;
+ int cpu = smp_processor_id();
+
+ reason = reason ?
+ CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+ clockevents_notify(reason, &cpu);
+}
+
+static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
+ unsigned long action, void *hcpu)
+{
+ int hotcpu = (unsigned long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ smp_call_function_single(hotcpu, setup_broadcast_timer,
+ (void *)true, 1);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block setup_broadcast_notifier = {
+ .notifier_call = setup_broadcast_cpuhp_notify,
+};
+
+static int __init msm_pm_init(void)
+{
+ enum msm_pm_time_stats_id enable_stats[] = {
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_RETENTION,
+ MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ };
msm_pm_mode_sysfs_add();
msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
suspend_set_ops(&msm_pm_ops);
hrtimer_init(&pm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
msm_cpuidle_init();
+ if (msm_pm_pc_reset_timer) {
+ get_cpu();
+ smp_call_function_many(cpu_online_mask, setup_broadcast_timer,
+ (void *)true, 1);
+ put_cpu();
+ register_cpu_notifier(&setup_broadcast_notifier);
+ }
+
return 0;
}
@@ -1470,6 +1520,10 @@
key = "qcom,saw-turns-off-pll";
msm_no_ramp_down_pc = of_property_read_bool(pdev->dev.of_node,
key);
+
+ key = "qcom,pc-resets-timer";
+ msm_pm_pc_reset_timer = of_property_read_bool(
+ pdev->dev.of_node, key);
}
if (pdata_local.cp15_data.reg_data &&
diff --git a/arch/arm/mach-msm/pm-data.c b/arch/arm/mach-msm/pm-data.c
index ccc2519..249032f 100644
--- a/arch/arm/mach-msm/pm-data.c
+++ b/arch/arm/mach-msm/pm-data.c
@@ -46,7 +46,7 @@
.idle_supported = 0,
.suspend_supported = 1,
.idle_enabled = 0,
- .suspend_enabled = 0,
+ .suspend_enabled = 1,
},
[MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
@@ -74,7 +74,7 @@
.idle_supported = 0,
.suspend_supported = 1,
.idle_enabled = 0,
- .suspend_enabled = 0,
+ .suspend_enabled = 1,
},
[MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
@@ -102,7 +102,7 @@
.idle_supported = 0,
.suspend_supported = 1,
.idle_enabled = 0,
- .suspend_enabled = 0,
+ .suspend_enabled = 1,
},
[MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index b84ade9..6ed80f6 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -673,7 +673,7 @@
static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
{
struct list_head *ptr;
- struct msm_rpm_wait_data *elem;
+ struct msm_rpm_wait_data *elem = NULL;
unsigned long flags;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
@@ -739,7 +739,7 @@
static void msm_rpm_process_ack(uint32_t msg_id, int errno)
{
struct list_head *ptr;
- struct msm_rpm_wait_data *elem;
+ struct msm_rpm_wait_data *elem = NULL;
unsigned long flags;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
diff --git a/arch/arm/mach-msm/rpm.c b/arch/arm/mach-msm/rpm.c
index 5128b44..f9ac00f 100644
--- a/arch/arm/mach-msm/rpm.c
+++ b/arch/arm/mach-msm/rpm.c
@@ -310,7 +310,7 @@
unsigned long flags;
uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
uint32_t ctx_mask_ack = 0;
- uint32_t sel_masks_ack[SEL_MASK_SIZE];
+ uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};
int i;
msm_rpm_request_irq_mode.req = req;
@@ -369,7 +369,7 @@
unsigned long flags;
uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
uint32_t ctx_mask_ack = 0;
- uint32_t sel_masks_ack[SEL_MASK_SIZE];
+ uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};
struct irq_chip *irq_chip, *err_chip;
int i;
diff --git a/arch/arm/mach-msm/rpm_log.c b/arch/arm/mach-msm/rpm_log.c
index a2c74a5..53d5752 100644
--- a/arch/arm/mach-msm/rpm_log.c
+++ b/arch/arm/mach-msm/rpm_log.c
@@ -203,11 +203,14 @@
struct msm_rpm_log_buffer *buf;
buf = file->private_data;
- pdata = buf->pdata;
- if (!pdata)
- return -EINVAL;
+
if (!buf)
return -ENOMEM;
+
+ pdata = buf->pdata;
+
+ if (!pdata)
+ return -EINVAL;
if (!buf->data)
return -ENOMEM;
if (!bufu || count < 0)
diff --git a/arch/arm/mach-msm/rpm_stats.c b/arch/arm/mach-msm/rpm_stats.c
index 176c3de..cb8ed19 100644
--- a/arch/arm/mach-msm/rpm_stats.c
+++ b/arch/arm/mach-msm/rpm_stats.c
@@ -63,6 +63,8 @@
u32 count;
u64 last_entered_at;
u64 last_exited_at;
+ u64 accumulated;
+ u32 reserved[4];
};
static inline u64 get_time_in_sec(u64 counter)
@@ -84,6 +86,7 @@
char stat_type[5];
u64 time_in_last_mode;
u64 time_since_last_mode;
+ u64 actual_last_sleep;
stat_type[4] = 0;
memcpy(stat_type, &data->stat_type, sizeof(u32));
@@ -92,12 +95,13 @@
time_in_last_mode = get_time_in_msec(time_in_last_mode);
time_since_last_mode = arch_counter_get_cntpct() - data->last_exited_at;
time_since_last_mode = get_time_in_sec(time_since_last_mode);
+ actual_last_sleep = get_time_in_msec(data->accumulated);
return snprintf(buf , buflength,
"RPM Mode:%s\n\t count:%d\n time in last mode(msec):%llu\n"
- "time since last mode(sec):%llu\n",
+ "time since last mode(sec):%llu\n actual last sleep(msec):%llu\n",
stat_type, data->count, time_in_last_mode,
- time_since_last_mode);
+ time_since_last_mode, actual_last_sleep);
}
static inline u32 msm_rpmstats_read_long_register_v2(void __iomem *regbase,
@@ -140,6 +144,9 @@
i, offsetof(struct msm_rpm_stats_data_v2,
last_exited_at));
+ data.accumulated = msm_rpmstats_read_quad_register_v2(reg,
+ i, offsetof(struct msm_rpm_stats_data_v2,
+ accumulated));
length += msm_rpmstats_append_data_to_buf(prvdata->buf + length,
&data, sizeof(prvdata->buf) - length);
prvdata->read_idx++;
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index d070efa..6e05177 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -347,6 +347,43 @@
}
EXPORT_SYMBOL(scm_call_atomic2);
+/**
+ * scm_call_atomic3() - Send an atomic SCM command with three arguments
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3)
+{
+ int context_id;
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 3);
+ register u32 r1 asm("r1") = (u32)&context_id;
+ register u32 r2 asm("r2") = arg1;
+ register u32 r3 asm("r3") = arg2;
+ register u32 r4 asm("r4") = arg3;
+
+ asm volatile(
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r0")
+ __asmeq("%2", "r1")
+ __asmeq("%3", "r2")
+ __asmeq("%4", "r3")
+ __asmeq("%5", "r4")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0 @ switch to secure world\n"
+ : "=r" (r0)
+ : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4));
+ return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic3);
+
s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
{
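A hypothetical caller sketch for the new three-argument atomic wrapper. SCM_SVC_ES comes from the scm.h hunk earlier in this patch; the command ID and arguments below are purely illustrative and not a documented interface:

#include <mach/scm.h>

/* Illustrative only: 0x2 is not a real command ID for SCM_SVC_ES here. */
static int example_notify_secure_world(u32 device_id, u32 spare)
{
	return scm_call_atomic3(SCM_SVC_ES, 0x2, device_id, spare, 0);
}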
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index 8a9042e..07ac930 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -3838,6 +3838,12 @@
for_each_child_of_node(pdev->dev.of_node, node) {
compatible = of_get_property(node, "compatible", NULL);
+ if (!compatible) {
+ pr_err("%s: invalid child node: compatible null\n",
+ __func__);
+ ret = -ENODEV;
+ goto rollback_subnodes;
+ }
if (!strcmp(compatible, "qcom,smd")) {
ret = parse_smd_devicetree(node, irq_out_base);
if (ret)
diff --git a/arch/arm/mach-msm/smem_log.c b/arch/arm/mach-msm/smem_log.c
index 169df1e..361df33 100644
--- a/arch/arm/mach-msm/smem_log.c
+++ b/arch/arm/mach-msm/smem_log.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,8 @@
#include <mach/msm_iomap.h>
#include <mach/smem_log.h>
+#include <asm/arch_timer.h>
+
#include "smd_private.h"
#include "smd_rpc_sym.h"
#include "modem_notifier.h"
@@ -652,13 +654,7 @@
#else
static inline unsigned int read_timestamp(void)
{
- unsigned long long val;
-
- /* SMEM LOG uses a 32.768KHz timestamp */
- val = sched_clock() * 32768U;
- do_div(val, 1000000000U);
-
- return (unsigned int)val;
+ return (unsigned int)(arch_counter_get_cntpct());
}
#endif
diff --git a/arch/arm/mach-msm/spm_devices.c b/arch/arm/mach-msm/spm_devices.c
index 233c5a5..174d444 100644
--- a/arch/arm/mach-msm/spm_devices.c
+++ b/arch/arm/mach-msm/spm_devices.c
@@ -74,6 +74,7 @@
info.cpu = cpu;
info.vlevel = vlevel;
+ info.err = -ENODEV;
if (cpu_online(cpu)) {
/**
diff --git a/arch/arm/mach-msm/wdog_debug.c b/arch/arm/mach-msm/wdog_debug.c
index cccca26..95a85f26 100644
--- a/arch/arm/mach-msm/wdog_debug.c
+++ b/arch/arm/mach-msm/wdog_debug.c
@@ -24,7 +24,7 @@
ret = scm_call_atomic2(SCM_SVC_BOOT,
SCM_WDOG_DEBUG_BOOT_PART, 0, BOOT_PART_EN_VAL);
if (ret)
- pr_err("failed to enable wdog debug\n");
+ pr_err("failed to enable wdog debug: %d\n", ret);
}
EXPORT_SYMBOL(msm_enable_wdog_debug);
@@ -35,6 +35,6 @@
ret = scm_call_atomic2(SCM_SVC_BOOT,
SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
if (ret)
- pr_err("failed to disable wdog debug\n");
+ pr_err("failed to disable wdog debug: %d\n", ret);
}
EXPORT_SYMBOL(msm_disable_wdog_debug);
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 21b9e1b..d6f9ee8 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
@@ -135,3 +136,58 @@
return pte_page(get_top_pte(vaddr));
}
+
+#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
+static void kmap_remove_unused_cpu(int cpu)
+{
+ int start_idx, idx, type;
+
+ pagefault_disable();
+ type = kmap_atomic_idx();
+ start_idx = type + 1 + KM_TYPE_NR * cpu;
+
+ for (idx = start_idx; idx < KM_TYPE_NR + KM_TYPE_NR * cpu; idx++) {
+ unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ pte_t ptep;
+
+ ptep = get_top_pte(vaddr);
+ if (ptep)
+ set_top_pte(vaddr, __pte(0));
+ }
+ pagefault_enable();
+}
+
+static void kmap_remove_unused(void *unused)
+{
+ kmap_remove_unused_cpu(smp_processor_id());
+}
+
+void kmap_atomic_flush_unused(void)
+{
+ on_each_cpu(kmap_remove_unused, NULL, 1);
+}
+
+static int hotplug_kmap_atomic_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_DYING:
+ kmap_remove_unused_cpu((int)hcpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block hotplug_kmap_atomic_notifier = {
+ .notifier_call = hotplug_kmap_atomic_callback,
+};
+
+static int __init init_kmap_atomic(void)
+{
+ return register_hotcpu_notifier(&hotplug_kmap_atomic_notifier);
+}
+early_initcall(init_kmap_atomic);
+#endif
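A user-space model of the per-CPU fixmap slot range that kmap_remove_unused_cpu() walks: everything above the slot currently in use on that CPU, up to the end of its KM_TYPE_NR window. KM_TYPE_NR and the sample CPU/type values are illustrative, not the architecture's real constants:

#include <stdio.h>

#define KM_TYPE_NR 16	/* illustrative; the real value is arch-specific */

int main(void)
{
	int cpu = 1, type = 3;	/* pretend four slots are in use on CPU 1 */
	int start_idx = type + 1 + KM_TYPE_NR * cpu;
	int end_idx = KM_TYPE_NR + KM_TYPE_NR * cpu;
	int idx;

	for (idx = start_idx; idx < end_idx; idx++)
		printf("would clear fixmap slot %d on cpu %d\n", idx, cpu);
	return 0;
}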
diff --git a/block/row-iosched.c b/block/row-iosched.c
index c8ba344..e71f6af 100644
--- a/block/row-iosched.c
+++ b/block/row-iosched.c
@@ -200,9 +200,9 @@
struct request *pending_urgent_rq;
int last_served_ioprio_class;
-#define ROW_REG_STARVATION_TOLLERANCE 50
+#define ROW_REG_STARVATION_TOLLERANCE 5000
struct starvation_data reg_prio_starvation;
-#define ROW_LOW_STARVATION_TOLLERANCE 1000
+#define ROW_LOW_STARVATION_TOLLERANCE 10000
struct starvation_data low_prio_starvation;
unsigned int cycle_flags;
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index ed91480..45c9023 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -227,7 +227,7 @@
pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
(unsigned long)base, (unsigned long)size / SZ_1M);
- dma_contiguous_reserve_area(size, &base, 0, name);
+ dma_contiguous_reserve_area(size, &base, MEMBLOCK_ALLOC_ANYWHERE, name);
return 0;
}
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 2557983..0383d8f 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -39,6 +39,8 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/proc_fs.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -47,8 +49,10 @@
#include <mach/msm_serial_hs.h>
#endif
-unsigned int enableuartsleep = 1;
-module_param(enableuartsleep, uint, 0644);
+static int enableuartsleep = 1;
+module_param(enableuartsleep, int, 0644);
+MODULE_PARM_DESC(enableuartsleep, "Enable Atheros Sleep Protocol");
+
/*
* Global variables
*/
@@ -62,6 +66,9 @@
/** Global state flags */
static unsigned long flags;
+/** Flag indicating whether LPM is enabled */
+static bool is_lpm_enabled;
+
/** Workqueue to respond to change in hostwake line */
static void wakeup_host_work(struct work_struct *work);
@@ -72,6 +79,8 @@
/** Lock for state transitions */
static spinlock_t rw_lock;
+#define PROC_DIR "bluetooth/sleep"
+
#define POLARITY_LOW 0
#define POLARITY_HIGH 1
@@ -80,8 +89,11 @@
unsigned ext_wake; /* wake up device */
unsigned host_wake_irq;
int irq_polarity;
+ struct uart_port *uport;
};
+struct work_struct ws_sleep;
+
/* 1 second timeout */
#define TX_TIMER_INTERVAL 1
@@ -99,23 +111,24 @@
struct sk_buff_head txq;
struct work_struct ctxtsw;
- struct work_struct ws_sleep;
};
-static void hsuart_serial_clock_on(struct tty_struct *tty)
+static void hsuart_serial_clock_on(struct uart_port *port)
{
- struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
BT_DBG("");
- msm_hs_request_clock_on(port);
+ if (port)
+ msm_hs_request_clock_on(port);
+ else
+		BT_INFO("UART has not voted for clock ON");
}
-static void hsuart_serial_clock_off(struct tty_struct *tty)
+static void hsuart_serial_clock_off(struct uart_port *port)
{
- struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
BT_DBG("");
- msm_hs_request_clock_off(port);
+ if (port)
+ msm_hs_request_clock_off(port);
+ else
+		BT_INFO("UART has not voted for clock OFF");
}
static void modify_timer_task(void)
@@ -127,31 +140,31 @@
}
-static int ath_wakeup_ar3k(struct tty_struct *tty)
+static int ath_wakeup_ar3k(void)
{
int status = 0;
if (test_bit(BT_TXEXPIRED, &flags)) {
- hsuart_serial_clock_on(tty);
- BT_INFO("wakeup device\n");
+ hsuart_serial_clock_on(bsi->uport);
+ BT_DBG("wakeup device\n");
gpio_set_value(bsi->ext_wake, 0);
msleep(20);
gpio_set_value(bsi->ext_wake, 1);
}
- modify_timer_task();
+ if (!is_lpm_enabled)
+ modify_timer_task();
return status;
}
static void wakeup_host_work(struct work_struct *work)
{
- struct ath_struct *ath =
- container_of(work, struct ath_struct, ws_sleep);
- BT_INFO("wake up host");
+ BT_DBG("wake up host");
if (test_bit(BT_SLEEPENABLE, &flags)) {
if (test_bit(BT_TXEXPIRED, &flags))
- hsuart_serial_clock_on(ath->hu->tty);
+ hsuart_serial_clock_on(bsi->uport);
}
- modify_timer_task();
+ if (!is_lpm_enabled)
+ modify_timer_task();
}
static void ath_hci_uart_work(struct work_struct *work)
@@ -159,16 +172,14 @@
int status;
struct ath_struct *ath;
struct hci_uart *hu;
- struct tty_struct *tty;
ath = container_of(work, struct ath_struct, ctxtsw);
hu = ath->hu;
- tty = hu->tty;
/* verify and wake up controller */
if (test_bit(BT_SLEEPENABLE, &flags))
- status = ath_wakeup_ar3k(tty);
+ status = ath_wakeup_ar3k();
/* Ready to send Data */
clear_bit(HCI_UART_SENDING, &hu->tx_state);
hci_uart_tx_wakeup(hu);
@@ -176,15 +187,15 @@
static irqreturn_t bluesleep_hostwake_isr(int irq, void *dev_id)
{
- /* schedule a tasklet to handle the change in the host wake line */
- struct ath_struct *ath = (struct ath_struct *)dev_id;
-
- schedule_work(&ath->ws_sleep);
+	/*
+	 * Schedule work on the global shared workqueue to handle the
+	 * change in the host wake line.
+	 */
+ schedule_work(&ws_sleep);
return IRQ_HANDLED;
}
-static int ath_bluesleep_gpio_config(struct ath_struct *ath, int on)
+static int ath_bluesleep_gpio_config(int on)
{
int ret = 0;
@@ -232,16 +243,16 @@
/* Initialize timer */
init_timer(&tx_timer);
tx_timer.function = bluesleep_tx_timer_expire;
- tx_timer.data = (u_long)ath->hu;
+ tx_timer.data = 0;
if (bsi->irq_polarity == POLARITY_LOW) {
ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
IRQF_DISABLED | IRQF_TRIGGER_FALLING,
- "bluetooth hostwake", (void *)ath);
+ "bluetooth hostwake", NULL);
} else {
ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
IRQF_DISABLED | IRQF_TRIGGER_RISING,
- "bluetooth hostwake", (void *)ath);
+ "bluetooth hostwake", NULL);
}
if (ret < 0) {
BT_ERR("Couldn't acquire BT_HOST_WAKE IRQ");
@@ -257,7 +268,7 @@
return 0;
free_host_wake_irq:
- free_irq(bsi->host_wake_irq, (void *)ath);
+ free_irq(bsi->host_wake_irq, NULL);
delete_timer:
del_timer(&tx_timer);
gpio_ext_wake:
@@ -268,26 +279,76 @@
return ret;
}
+static int ath_lpm_start(void)
+{
+ BT_DBG("Start LPM mode");
+
+ if (!bsi) {
+ BT_ERR("HCIATH3K bluesleep info does not exist");
+ return -EIO;
+ }
+
+ bsi->uport = msm_hs_get_uart_port(0);
+ if (!bsi->uport) {
+ BT_ERR("UART Port is not available");
+ return -ENODEV;
+ }
+
+ INIT_WORK(&ws_sleep, wakeup_host_work);
+
+ if (ath_bluesleep_gpio_config(1) < 0) {
+ BT_ERR("HCIATH3K GPIO Config failed");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath_lpm_stop(void)
+{
+ BT_DBG("Stop LPM mode");
+ cancel_work_sync(&ws_sleep);
+
+ if (bsi) {
+ bsi->uport = NULL;
+ ath_bluesleep_gpio_config(0);
+ }
+
+ return 0;
+}
+
/* Initialize protocol */
static int ath_open(struct hci_uart *hu)
{
struct ath_struct *ath;
+ struct uart_state *state;
BT_DBG("hu %p, bsi %p", hu, bsi);
- if (!bsi)
+ if (!bsi) {
+ BT_ERR("HCIATH3K bluesleep info does not exist");
return -EIO;
+ }
ath = kzalloc(sizeof(*ath), GFP_ATOMIC);
- if (!ath)
+ if (!ath) {
+		BT_ERR("HCIATH3K: not enough memory to init the driver");
return -ENOMEM;
+ }
skb_queue_head_init(&ath->txq);
hu->priv = ath;
ath->hu = hu;
+ state = hu->tty->driver_data;
- if (ath_bluesleep_gpio_config(ath, 1) < 0) {
+ if (!state) {
+ BT_ERR("HCIATH3K tty driver data does not exist");
+		hu->priv = NULL;
+		kfree(ath);
+		return -ENXIO;
+ }
+ bsi->uport = state->uart_port;
+
+ if (ath_bluesleep_gpio_config(1) < 0) {
BT_ERR("HCIATH3K GPIO Config failed");
hu->priv = NULL;
kfree(ath);
@@ -300,7 +361,7 @@
modify_timer_task();
}
INIT_WORK(&ath->ctxtsw, ath_hci_uart_work);
- INIT_WORK(&ath->ws_sleep, wakeup_host_work);
+ INIT_WORK(&ws_sleep, wakeup_host_work);
return 0;
}
@@ -327,12 +388,13 @@
cancel_work_sync(&ath->ctxtsw);
- cancel_work_sync(&ath->ws_sleep);
+ cancel_work_sync(&ws_sleep);
if (bsi)
- ath_bluesleep_gpio_config(ath, 0);
+ ath_bluesleep_gpio_config(0);
hu->priv = NULL;
+ bsi->uport = NULL;
kfree(ath);
return 0;
@@ -423,14 +485,13 @@
static void bluesleep_tx_timer_expire(unsigned long data)
{
- struct hci_uart *hu = (struct hci_uart *) data;
if (!test_bit(BT_SLEEPENABLE, &flags))
return;
BT_INFO("Tx timer expired\n");
set_bit(BT_TXEXPIRED, &flags);
- hsuart_serial_clock_off(hu->tty);
+ hsuart_serial_clock_off(bsi->uport);
}
static struct hci_uart_proto athp = {
@@ -443,6 +504,88 @@
.flush = ath_flush,
};
+static int lpm_enabled;
+
+static int bluesleep_lpm_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+
+ if (ret) {
+ BT_ERR("HCIATH3K: lpm enable parameter set failed");
+ return ret;
+ }
+
+ BT_DBG("lpm : %d", lpm_enabled);
+
+ if ((lpm_enabled == 0) && is_lpm_enabled) {
+ ath_lpm_stop();
+ clear_bit(BT_SLEEPENABLE, &flags);
+ is_lpm_enabled = false;
+ } else if ((lpm_enabled == 1) && !is_lpm_enabled) {
+ if (ath_lpm_start() < 0) {
+ BT_ERR("HCIATH3K LPM mode failed");
+ return -EIO;
+ }
+ set_bit(BT_SLEEPENABLE, &flags);
+ is_lpm_enabled = true;
+ } else {
+ BT_ERR("HCIATH3K invalid lpm value");
+ return -EINVAL;
+ }
+	return 0;
+}
+
+static struct kernel_param_ops bluesleep_lpm_ops = {
+ .set = bluesleep_lpm_set,
+ .get = param_get_int,
+};
+
+module_param_cb(ath_lpm, &bluesleep_lpm_ops,
+ &lpm_enabled, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ath_lpm, "Enable Atheros LPM sleep Protocol");
+
+static int lpm_btwrite;
+
+static int bluesleep_lpm_btwrite(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+
+ if (ret) {
+ BT_ERR("HCIATH3K: lpm btwrite parameter set failed");
+ return ret;
+ }
+
+ BT_DBG("btwrite : %d", lpm_btwrite);
+ if (is_lpm_enabled) {
+ if (lpm_btwrite == 0) {
+			/*
+			 * Set the TXEXPIRED bit to stay compatible with the
+			 * current solution.
+			 */
+ set_bit(BT_TXEXPIRED, &flags);
+ hsuart_serial_clock_off(bsi->uport);
+ } else if (lpm_btwrite == 1) {
+ ath_wakeup_ar3k();
+ clear_bit(BT_TXEXPIRED, &flags);
+ } else {
+ BT_ERR("HCIATH3K invalid btwrite value");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static struct kernel_param_ops bluesleep_lpm_btwrite_ops = {
+ .set = bluesleep_lpm_btwrite,
+ .get = param_get_int,
+};
+
+module_param_cb(ath_btwrite, &bluesleep_lpm_btwrite_ops,
+ &lpm_btwrite, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ath_btwrite, "Assert/deassert the sleep");
static int bluesleep_populate_dt_pinfo(struct platform_device *pdev)
{
@@ -581,5 +724,6 @@
int __exit ath_deinit(void)
{
platform_driver_unregister(&bluesleep_driver);
+
return hci_uart_unregister_proto(&athp);
}
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 5b929d7..a4003ff 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -693,8 +693,7 @@
diag_update_sleeping_process(entry.process_id, PKT_TYPE);
} else {
if (len > 0) {
- if ((entry.client_id >= 0) &&
- (entry.client_id < NUM_SMD_DATA_CHANNELS)) {
+ if (entry.client_id < NUM_SMD_DATA_CHANNELS) {
int index = entry.client_id;
if (driver->smd_data[index].ch) {
if ((index == MODEM_DATA) &&
@@ -907,94 +906,186 @@
/* bld time masks */
switch (ssid_first) {
case MSG_SSID_0:
+ if (ssid_range > sizeof(msg_bld_masks_0)) {
+ pr_warning("diag: truncating ssid range for ssid 0");
+ ssid_range = sizeof(msg_bld_masks_0);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_0[i/4];
break;
case MSG_SSID_1:
+ if (ssid_range > sizeof(msg_bld_masks_1)) {
+ pr_warning("diag: truncating ssid range for ssid 1");
+ ssid_range = sizeof(msg_bld_masks_1);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_1[i/4];
break;
case MSG_SSID_2:
+ if (ssid_range > sizeof(msg_bld_masks_2)) {
+ pr_warning("diag: truncating ssid range for ssid 2");
+ ssid_range = sizeof(msg_bld_masks_2);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_2[i/4];
break;
case MSG_SSID_3:
+ if (ssid_range > sizeof(msg_bld_masks_3)) {
+ pr_warning("diag: truncating ssid range for ssid 3");
+ ssid_range = sizeof(msg_bld_masks_3);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_3[i/4];
break;
case MSG_SSID_4:
+ if (ssid_range > sizeof(msg_bld_masks_4)) {
+ pr_warning("diag: truncating ssid range for ssid 4");
+ ssid_range = sizeof(msg_bld_masks_4);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_4[i/4];
break;
case MSG_SSID_5:
+ if (ssid_range > sizeof(msg_bld_masks_5)) {
+ pr_warning("diag: truncating ssid range for ssid 5");
+ ssid_range = sizeof(msg_bld_masks_5);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_5[i/4];
break;
case MSG_SSID_6:
+ if (ssid_range > sizeof(msg_bld_masks_6)) {
+ pr_warning("diag: truncating ssid range for ssid 6");
+ ssid_range = sizeof(msg_bld_masks_6);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_6[i/4];
break;
case MSG_SSID_7:
+ if (ssid_range > sizeof(msg_bld_masks_7)) {
+ pr_warning("diag: truncating ssid range for ssid 7");
+ ssid_range = sizeof(msg_bld_masks_7);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_7[i/4];
break;
case MSG_SSID_8:
+ if (ssid_range > sizeof(msg_bld_masks_8)) {
+ pr_warning("diag: truncating ssid range for ssid 8");
+ ssid_range = sizeof(msg_bld_masks_8);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_8[i/4];
break;
case MSG_SSID_9:
+ if (ssid_range > sizeof(msg_bld_masks_9)) {
+ pr_warning("diag: truncating ssid range for ssid 9");
+ ssid_range = sizeof(msg_bld_masks_9);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_9[i/4];
break;
case MSG_SSID_10:
+ if (ssid_range > sizeof(msg_bld_masks_10)) {
+ pr_warning("diag: truncating ssid range for ssid 10");
+ ssid_range = sizeof(msg_bld_masks_10);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_10[i/4];
break;
case MSG_SSID_11:
+ if (ssid_range > sizeof(msg_bld_masks_11)) {
+ pr_warning("diag: truncating ssid range for ssid 11");
+ ssid_range = sizeof(msg_bld_masks_11);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_11[i/4];
break;
case MSG_SSID_12:
+ if (ssid_range > sizeof(msg_bld_masks_12)) {
+ pr_warning("diag: truncating ssid range for ssid 12");
+ ssid_range = sizeof(msg_bld_masks_12);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_12[i/4];
break;
case MSG_SSID_13:
+ if (ssid_range > sizeof(msg_bld_masks_13)) {
+ pr_warning("diag: truncating ssid range for ssid 13");
+ ssid_range = sizeof(msg_bld_masks_13);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_13[i/4];
break;
case MSG_SSID_14:
+ if (ssid_range > sizeof(msg_bld_masks_14)) {
+ pr_warning("diag: truncating ssid range for ssid 14");
+ ssid_range = sizeof(msg_bld_masks_14);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_14[i/4];
break;
case MSG_SSID_15:
+ if (ssid_range > sizeof(msg_bld_masks_15)) {
+ pr_warning("diag: truncating ssid range for ssid 15");
+ ssid_range = sizeof(msg_bld_masks_15);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_15[i/4];
break;
case MSG_SSID_16:
+ if (ssid_range > sizeof(msg_bld_masks_16)) {
+ pr_warning("diag: truncating ssid range for ssid 16");
+ ssid_range = sizeof(msg_bld_masks_16);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_16[i/4];
break;
case MSG_SSID_17:
+ if (ssid_range > sizeof(msg_bld_masks_17)) {
+ pr_warning("diag: truncating ssid range for ssid 17");
+ ssid_range = sizeof(msg_bld_masks_17);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_17[i/4];
break;
case MSG_SSID_18:
+ if (ssid_range > sizeof(msg_bld_masks_18)) {
+ pr_warning("diag: truncating ssid range for ssid 18");
+ ssid_range = sizeof(msg_bld_masks_18);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_18[i/4];
break;
case MSG_SSID_19:
+ if (ssid_range > sizeof(msg_bld_masks_19)) {
+ pr_warning("diag: truncating ssid range for ssid 19");
+ ssid_range = sizeof(msg_bld_masks_19);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_19[i/4];
break;
case MSG_SSID_20:
+ if (ssid_range > sizeof(msg_bld_masks_20)) {
+ pr_warning("diag: truncating ssid range for ssid 20");
+ ssid_range = sizeof(msg_bld_masks_20);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_20[i/4];
break;
case MSG_SSID_21:
+ if (ssid_range > sizeof(msg_bld_masks_21)) {
+ pr_warning("diag: truncating ssid range for ssid 21");
+ ssid_range = sizeof(msg_bld_masks_21);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_21[i/4];
break;
case MSG_SSID_22:
+ if (ssid_range > sizeof(msg_bld_masks_22)) {
+ pr_warning("diag: truncating ssid range for ssid 22");
+ ssid_range = sizeof(msg_bld_masks_22);
+ }
for (i = 0; i < ssid_range; i += 4)
*(int *)(ptr + i) = msg_bld_masks_22[i/4];
break;
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index e946b42..9a43ea4 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -181,8 +181,7 @@
pr_err("ion_import_dma_buf() failed\n");
return PTR_ERR(*pihdl);
}
- pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl,
- ion_share_dma_buf(msm_rotator_dev->client, *pihdl));
+ pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl, mem_id);
if (rot_iommu_split_domain) {
if (secure) {
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 24cf30a..7778477 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -2203,6 +2203,18 @@
}
EXPORT_SYMBOL(qce_process_sha_req);
+int qce_enable_clk(void *handle)
+{
+ return 0;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+ return 0;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
/*
* crypto engine open function.
*/
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index 3ff84cf..51a74b6 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -160,5 +160,7 @@
int qce_ablk_cipher_req(void *handle, struct qce_req *req);
int qce_hw_support(void *handle, struct ce_hw_support *support);
int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+int qce_enable_clk(void *handle);
+int qce_disable_clk(void *handle);
#endif /* __CRYPTO_MSM_QCE_H */
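A hypothetical front-end sketch showing how the newly exported clock hooks might bracket an engine request. The surrounding driver state and error handling are omitted, and "handle" is assumed to be whatever qce_open() returned to the caller:

#include "qce.h"

static int example_do_cipher(void *handle, struct qce_req *req)
{
	int rc;

	rc = qce_enable_clk(handle);
	if (rc)
		return rc;

	rc = qce_ablk_cipher_req(handle, req);

	qce_disable_clk(handle);
	return rc;
}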
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
index 7b0964d..5249917 100644
--- a/drivers/crypto/msm/qce40.c
+++ b/drivers/crypto/msm/qce40.c
@@ -2426,6 +2426,18 @@
}
EXPORT_SYMBOL(qce_process_sha_req);
+int qce_enable_clk(void *handle)
+{
+ return 0;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+ return 0;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 739a753..2b1ad80 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -39,7 +39,7 @@
#include "qcryptohw_50.h"
#define CRYPTO_CONFIG_RESET 0xE001F
-#define QCE_MAX_NUM_DSCR 0x400
+#define QCE_MAX_NUM_DSCR 0x500
#define QCE_SECTOR_SIZE 0x200
static DEFINE_MUTEX(bam_register_cnt);
@@ -919,17 +919,23 @@
iovec->flags |= flag;
}
-static void _qce_sps_add_data(uint32_t addr, uint32_t len,
+static int _qce_sps_add_data(uint32_t addr, uint32_t len,
struct sps_transfer *sps_bam_pipe)
{
struct sps_iovec *iovec = sps_bam_pipe->iovec +
sps_bam_pipe->iovec_count;
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+		pr_err("Num of descriptors %d exceeds max (%d)",
+ sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
if (len) {
iovec->size = len;
iovec->addr = addr;
iovec->flags = 0;
sps_bam_pipe->iovec_count++;
}
+ return 0;
}
static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
@@ -947,6 +953,12 @@
if (pce_dev->ce_sps.minor_version == 0)
len = ALIGN(len, pce_dev->ce_sps.ce_burst_size);
while (len > 0) {
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+			pr_err("Num of descriptors %d exceeds max (%d)",
+ sps_bam_pipe->iovec_count,
+ (uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
if (len > SPS_MAX_PKT_SIZE) {
data_cnt = SPS_MAX_PKT_SIZE;
iovec->size = data_cnt;
@@ -2375,15 +2387,17 @@
&pce_dev->ce_sps.in_transfer);
if (pce_dev->ce_sps.minor_version == 0) {
- _qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
- &pce_dev->ce_sps.in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
- _qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
areq->assoclen + hw_pad_out,
- &pce_dev->ce_sps.out_transfer);
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
if (totallen_in > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
@@ -2391,42 +2405,52 @@
SPS_O_DESC_DONE;
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
- _qce_sps_add_data(GET_PHYS_ADDR(
+ if (_qce_sps_add_data(GET_PHYS_ADDR(
pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
- &pce_dev->ce_sps.out_transfer);
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
}
} else {
- _qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
- &pce_dev->ce_sps.in_transfer);
- _qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize,
- &pce_dev->ce_sps.in_transfer);
- _qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
- &pce_dev->ce_sps.in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
+ if (_qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
/* Pass through to ignore associated (+iv, if applicable) data*/
- _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
(ivsize + areq->assoclen),
- &pce_dev->ce_sps.out_transfer);
- _qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
- &pce_dev->ce_sps.out_transfer);
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
/* Pass through to ignore hw_pad (padding of the MAC data) */
- _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
- hw_pad_out, &pce_dev->ce_sps.out_transfer);
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
+ hw_pad_out, &pce_dev->ce_sps.out_transfer))
+ goto bad;
if (totallen_in > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
- _qce_sps_add_data(
+ if (_qce_sps_add_data(
GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
- &pce_dev->ce_sps.out_transfer);
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
@@ -2530,22 +2554,26 @@
_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
&pce_dev->ce_sps.in_transfer);
- _qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
- &pce_dev->ce_sps.in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
- _qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
- &pce_dev->ce_sps.out_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
if (areq->nbytes > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
- _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
- CRYPTO_RESULT_DUMP_SIZE,
- &pce_dev->ce_sps.out_transfer);
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
}
@@ -2613,14 +2641,16 @@
_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
&pce_dev->ce_sps.in_transfer);
- _qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
- &pce_dev->ce_sps.in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+ &pce_dev->ce_sps.in_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
- _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+ if (_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
- &pce_dev->ce_sps.out_transfer);
+ &pce_dev->ce_sps.out_transfer))
+ goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT);
rc = _qce_sps_transfer(pce_dev);
if (rc)
@@ -2800,7 +2830,7 @@
}
}
-static int __qce_enable_clk(void *handle)
+int qce_enable_clk(void *handle)
{
struct qce_device *pce_dev = (struct qce_device *) handle;
int rc = 0;
@@ -2813,6 +2843,7 @@
return rc;
}
}
+
/* Enable CE clk */
if (pce_dev->ce_clk != NULL) {
rc = clk_prepare_enable(pce_dev->ce_clk);
@@ -2834,8 +2865,9 @@
}
return rc;
}
+EXPORT_SYMBOL(qce_enable_clk);
-static int __qce_disable_clk(void *handle)
+int qce_disable_clk(void *handle)
{
struct qce_device *pce_dev = (struct qce_device *) handle;
int rc = 0;
@@ -2849,6 +2881,7 @@
return rc;
}
+EXPORT_SYMBOL(qce_disable_clk);
/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
@@ -2886,19 +2919,20 @@
if (*rc)
goto err_mem;
- *rc = __qce_enable_clk(pce_dev);
+ *rc = qce_enable_clk(pce_dev);
if (*rc)
goto err;
if (_probe_ce_engine(pce_dev)) {
*rc = -ENXIO;
- __qce_disable_clk(pce_dev);
goto err;
}
*rc = 0;
qce_setup_ce_sps_data(pce_dev);
qce_sps_init(pce_dev);
+ qce_disable_clk(pce_dev);
+
return pce_dev;
err:
__qce_deinit_clk(pce_dev);
@@ -2932,7 +2966,7 @@
dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
pce_dev->coh_vmem, pce_dev->coh_pmem);
- __qce_disable_clk(pce_dev);
+ qce_disable_clk(pce_dev);
__qce_deinit_clk(pce_dev);
qce_sps_exit(pce_dev);
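
The qce50.c hunks above turn _qce_sps_add_data() and _qce_sps_add_sg_data() into functions that fail with -ENOMEM once the BAM descriptor table is full, and make every call site bail out ("goto bad") instead of silently overflowing the iovec array. Below is a minimal standalone sketch of that bounded-add pattern; the names (pipe_xfer, pipe_add_data, MAX_DESCRIPTORS) are illustrative stand-ins, not the driver's own identifiers.

	#include <errno.h>
	#include <stdio.h>

	#define MAX_DESCRIPTORS 4            /* stand-in for QCE_MAX_NUM_DSCR */

	struct iovec_slot {
		unsigned int addr;
		unsigned int size;
	};

	struct pipe_xfer {
		struct iovec_slot slots[MAX_DESCRIPTORS];
		int count;
	};

	/* Refuse to queue another descriptor once the table is full and
	 * report the failure instead of writing past the array. */
	static int pipe_add_data(struct pipe_xfer *xfer, unsigned int addr,
				 unsigned int len)
	{
		if (xfer->count == MAX_DESCRIPTORS) {
			fprintf(stderr, "descriptor count %d exceeds max (%d)\n",
				xfer->count, MAX_DESCRIPTORS);
			return -ENOMEM;
		}
		if (len) {
			xfer->slots[xfer->count].addr = addr;
			xfer->slots[xfer->count].size = len;
			xfer->count++;
		}
		return 0;
	}

	int main(void)
	{
		struct pipe_xfer xfer = { .count = 0 };
		unsigned int i;

		/* Callers check the return value and abort transfer setup on
		 * failure, as the patched call sites do with "goto bad". */
		for (i = 0; i < 6; i++) {
			if (pipe_add_data(&xfer, 0x1000 * i, 0x200)) {
				fprintf(stderr, "transfer setup aborted at slot %u\n", i);
				break;
			}
		}
		printf("%d descriptors queued\n", xfer.count);
		return 0;
	}

The key point the patch enforces is that the producer of descriptors can no longer outrun the fixed-size table, even with the larger QCE_MAX_NUM_DSCR.
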
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 8cc42df..e91dcaa 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -98,7 +98,7 @@
};
static DEFINE_MUTEX(send_cmd_lock);
-static DEFINE_MUTEX(sent_bw_req);
+static DEFINE_MUTEX(qcedev_sent_bw_req);
/**********************************************************************
* Register ourselves as a misc device to be able to access the dev driver
* from userspace. */
@@ -177,25 +177,51 @@
{
int ret = 0;
- mutex_lock(&sent_bw_req);
+ mutex_lock(&qcedev_sent_bw_req);
if (high_bw_req) {
- if (podev->high_bw_req_count == 0)
+ if (podev->high_bw_req_count == 0) {
+ ret = qce_enable_clk(podev->qce);
+ if (ret) {
+				pr_err("%s Unable to enable clk\n", __func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
ret = msm_bus_scale_client_update_request(
podev->bus_scale_handle, 1);
- if (ret)
- pr_err("%s Unable to set to high bandwidth\n",
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
__func__);
+ ret = qce_disable_clk(podev->qce);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ }
podev->high_bw_req_count++;
} else {
- if (podev->high_bw_req_count == 1)
+ if (podev->high_bw_req_count == 1) {
ret = msm_bus_scale_client_update_request(
podev->bus_scale_handle, 0);
- if (ret)
- pr_err("%s Unable to set to low bandwidth\n",
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
__func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ ret = qce_disable_clk(podev->qce);
+ if (ret) {
+				pr_err("%s Unable to disable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret)
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ }
podev->high_bw_req_count--;
}
- mutex_unlock(&sent_bw_req);
+ mutex_unlock(&qcedev_sent_bw_req);
}
@@ -1854,6 +1880,14 @@
podev->platform_support.hw_key_support = 0;
podev->platform_support.bus_scale_table = NULL;
podev->platform_support.sha_hmac = 1;
+
+ if (podev->ce_support.is_shared == false) {
+ podev->platform_support.bus_scale_table =
+ (struct msm_bus_scale_pdata *)
+ msm_bus_cl_get_pdata(pdev);
+ if (!podev->platform_support.bus_scale_table)
+ pr_err("bus_scale_table is NULL\n");
+ }
} else {
platform_support =
(struct msm_ce_hw_support *)pdev->dev.platform_data;
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 85c25c7..40fb29ac 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -121,7 +121,7 @@
#define NUM_RETRY 1000
#define CE_BUSY 55
-static DEFINE_MUTEX(sent_bw_req);
+static DEFINE_MUTEX(qcrypto_sent_bw_req);
static int qcrypto_scm_cmd(int resource, int cmd, int *response)
{
@@ -346,25 +346,51 @@
{
int ret = 0;
- mutex_lock(&sent_bw_req);
+ mutex_lock(&qcrypto_sent_bw_req);
if (high_bw_req) {
- if (cp->high_bw_req_count == 0)
+ if (cp->high_bw_req_count == 0) {
+ ret = qce_enable_clk(cp->qce);
+ if (ret) {
+				pr_err("%s Unable to enable clk\n", __func__);
+ mutex_unlock(&qcrypto_sent_bw_req);
+ return;
+ }
ret = msm_bus_scale_client_update_request(
- cp->bus_scale_handle, 1);
- if (ret)
- pr_err("%s Unable to set to high bandwidth\n",
+ cp->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
__func__);
+ qce_disable_clk(cp->qce);
+ mutex_unlock(&qcrypto_sent_bw_req);
+ return;
+ }
+ }
cp->high_bw_req_count++;
} else {
- if (cp->high_bw_req_count == 1)
+ if (cp->high_bw_req_count == 1) {
ret = msm_bus_scale_client_update_request(
- cp->bus_scale_handle, 0);
- if (ret)
- pr_err("%s Unable to set to low bandwidth\n",
+ cp->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
__func__);
+ mutex_unlock(&qcrypto_sent_bw_req);
+ return;
+ }
+ ret = qce_disable_clk(cp->qce);
+ if (ret) {
+				pr_err("%s Unable to disable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ cp->bus_scale_handle, 1);
+ if (ret)
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ mutex_unlock(&qcrypto_sent_bw_req);
+ return;
+ }
+ }
cp->high_bw_req_count--;
}
- mutex_unlock(&sent_bw_req);
+ mutex_unlock(&qcrypto_sent_bw_req);
}
static int _start_qcrypto_process(struct crypto_priv *cp);
@@ -3336,6 +3362,14 @@
cp->platform_support.hw_key_support = 0;
cp->platform_support.bus_scale_table = NULL;
cp->platform_support.sha_hmac = 1;
+
+ if (cp->ce_support.is_shared == false) {
+ cp->platform_support.bus_scale_table =
+ (struct msm_bus_scale_pdata *)
+ msm_bus_cl_get_pdata(pdev);
+ if (!cp->platform_support.bus_scale_table)
+ pr_warn("bus_scale_table is NULL\n");
+ }
} else {
platform_support =
(struct msm_ce_hw_support *)pdev->dev.platform_data;
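
The qcedev.c and qcrypto.c changes above make the bandwidth-request helpers enable the CE clocks before voting for high bus bandwidth, and unwind in reverse order whenever a later step fails, so the clock state and the bus vote cannot drift apart. A standalone sketch of that enable-in-order / roll-back-on-failure pattern follows; the function names are stand-ins for qce_enable_clk()/qce_disable_clk() and the bus-scale request, not the driver's API.

	#include <stdio.h>

	/* Stand-ins that only report what they would do. */
	static int enable_clocks(void)  { puts("clocks on");  return 0; }
	static int disable_clocks(void) { puts("clocks off"); return 0; }
	static int set_bus_bandwidth(int high)
	{
		printf("bus vote -> %s\n", high ? "high" : "low");
		return 0;
	}

	/* Raise the vote: clocks first, then the bus request; if the
	 * request fails, undo the clock enable. */
	static int bandwidth_up(void)
	{
		int ret = enable_clocks();

		if (ret)
			return ret;
		ret = set_bus_bandwidth(1);
		if (ret)
			disable_clocks();
		return ret;
	}

	/* Drop the vote: lower the bus request first, then gate the
	 * clocks; if gating fails, restore the high vote rather than
	 * leave the two resources out of step. */
	static int bandwidth_down(void)
	{
		int ret = set_bus_bandwidth(0);

		if (ret)
			return ret;
		ret = disable_clocks();
		if (ret)
			set_bus_bandwidth(1);
		return ret;
	}

	int main(void)
	{
		if (!bandwidth_up())
			bandwidth_down();
		return 0;
	}

In the drivers, the first caller (high_bw_req_count going 0 -> 1) performs the "up" sequence and the last caller (1 -> 0) performs the "down" sequence, all under the renamed per-driver mutex.
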
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index f4f9a92..d7ff73a 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,4 +1,6 @@
-obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o ion_cp_heap.o ion_removed_heap.o
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o \
+ ion_iommu_heap.o ion_cp_heap.o ion_removed_heap.o \
+ ion_page_pool.o ion_chunk_heap.o
obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o
obj-$(CONFIG_ION_TEGRA) += tegra/
obj-$(CONFIG_ION_MSM) += msm/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index d3434d8..4282f02 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -1,4 +1,5 @@
/*
+
* drivers/gpu/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
@@ -18,15 +19,18 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
+#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
+#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
+#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
@@ -43,16 +47,18 @@
/**
* struct ion_device - the metadata of the ion device node
* @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @lock: lock protecting the buffers & heaps trees
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
* @heaps: list of all the heaps in the system
* @user_clients: list of all the clients created from userspace
*/
struct ion_device {
struct miscdevice dev;
struct rb_root buffers;
- struct mutex lock;
- struct rb_root heaps;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
unsigned long arg);
struct rb_root clients;
@@ -65,7 +71,6 @@
* @dev: backpointer to ion device
* @handles: an rb tree of all the handles in this client
* @lock: lock protecting the tree of handles
- * @heap_mask: mask of all supported heaps
* @name: used for debugging
* @task: used for debugging
*
@@ -78,7 +83,7 @@
struct ion_device *dev;
struct rb_root handles;
struct mutex lock;
- unsigned int heap_mask;
+ unsigned int heap_type_mask;
char *name;
struct task_struct *task;
pid_t pid;
@@ -112,7 +117,10 @@
!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}
-static void ion_iommu_release(struct kref *kref);
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+ return !!(buffer->flags & ION_FLAG_CACHED);
+}
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
@@ -140,63 +148,9 @@
rb_insert_color(&buffer->node, &dev->buffers);
}
-static void ion_iommu_add(struct ion_buffer *buffer,
- struct ion_iommu_map *iommu)
-{
- struct rb_node **p = &buffer->iommu_maps.rb_node;
- struct rb_node *parent = NULL;
- struct ion_iommu_map *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_iommu_map, node);
-
- if (iommu->key < entry->key) {
- p = &(*p)->rb_left;
- } else if (iommu->key > entry->key) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: buffer %p already has mapping for domain %d"
- " and partition %d\n", __func__,
- buffer,
- iommu_map_domain(iommu),
- iommu_map_partition(iommu));
- BUG();
- }
- }
-
- rb_link_node(&iommu->node, parent, p);
- rb_insert_color(&iommu->node, &buffer->iommu_maps);
-
-}
-
-static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
- unsigned int domain_no,
- unsigned int partition_no)
-{
- struct rb_node **p = &buffer->iommu_maps.rb_node;
- struct rb_node *parent = NULL;
- struct ion_iommu_map *entry;
- uint64_t key = domain_no;
- key = key << 32 | partition_no;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_iommu_map, node);
-
- if (key < entry->key)
- p = &(*p)->rb_left;
- else if (key > entry->key)
- p = &(*p)->rb_right;
- else
- return entry;
- }
-
- return NULL;
-}
-
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
+static bool ion_heap_drain_freelist(struct ion_heap *heap);
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
@@ -218,9 +172,16 @@
kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
if (ret) {
- kfree(buffer);
- return ERR_PTR(ret);
+ if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+ goto err2;
+
+ ion_heap_drain_freelist(heap);
+ ret = heap->ops->allocate(heap, buffer, len, align,
+ flags);
+ if (ret)
+ goto err2;
}
buffer->dev = dev;
@@ -265,74 +226,58 @@
if (sg_dma_address(sg) == 0)
sg_dma_address(sg) = sg_phys(sg);
}
+ mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
+ mutex_unlock(&dev->buffer_lock);
return buffer;
err:
heap->ops->unmap_dma(heap, buffer);
heap->ops->free(buffer);
+err2:
kfree(buffer);
return ERR_PTR(ret);
}
-/**
- * Check for delayed IOMMU unmapping. Also unmap any outstanding
- * mappings which would otherwise have been leaked.
- */
-static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
-{
- struct ion_iommu_map *iommu_map;
- struct rb_node *node;
- const struct rb_root *rb = &(buffer->iommu_maps);
- unsigned long ref_count;
- unsigned int delayed_unmap;
-
- mutex_lock(&buffer->lock);
-
- while ((node = rb_first(rb)) != 0) {
- iommu_map = rb_entry(node, struct ion_iommu_map, node);
- ref_count = atomic_read(&iommu_map->ref.refcount);
- delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;
-
- if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
- pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
- __func__, iommu_map->domain_info[DI_DOMAIN_NUM],
- iommu_map->domain_info[DI_PARTITION_NUM]);
- }
- /* set ref count to 1 to force release */
- kref_init(&iommu_map->ref);
- kref_put(&iommu_map->ref, ion_iommu_release);
- }
-
- mutex_unlock(&buffer->lock);
-}
-
static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
if (buffer->heap->ops->unsecure_buffer)
buffer->heap->ops->unsecure_buffer(buffer, 1);
}
-static void ion_buffer_destroy(struct kref *kref)
+static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
- struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
- struct ion_device *dev = buffer->dev;
-
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
ion_delayed_unsecure(buffer);
- ion_iommu_delayed_unmap(buffer);
buffer->heap->ops->free(buffer);
- mutex_lock(&dev->lock);
- rb_erase(&buffer->node, &dev->buffers);
- mutex_unlock(&dev->lock);
if (buffer->flags & ION_FLAG_CACHED)
kfree(buffer->dirty);
kfree(buffer);
}
+static void ion_buffer_destroy(struct kref *kref)
+{
+ struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+ struct ion_heap *heap = buffer->heap;
+ struct ion_device *dev = buffer->dev;
+
+ mutex_lock(&dev->buffer_lock);
+ rb_erase(&buffer->node, &dev->buffers);
+ mutex_unlock(&dev->buffer_lock);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
+ rt_mutex_lock(&heap->lock);
+ list_add(&buffer->list, &heap->free_list);
+ rt_mutex_unlock(&heap->lock);
+ wake_up(&heap->waitqueue);
+ return;
+ }
+ _ion_buffer_destroy(buffer);
+}
+
static void ion_buffer_get(struct ion_buffer *buffer)
{
kref_get(&buffer->ref);
@@ -343,6 +288,37 @@
return kref_put(&buffer->ref, ion_buffer_destroy);
}
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+ mutex_lock(&buffer->lock);
+ buffer->handle_count++;
+ mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+ /*
+ * when a buffer is removed from a handle, if it is not in
+ * any other handles, copy the taskcomm and the pid of the
+ * process it's being removed from into the buffer. At this
+ * point there will be no way to track what processes this buffer is
+ * being used by, it only exists as a dma_buf file descriptor.
+ * The taskcomm and pid can provide a debug hint as to where this fd
+ * is in the system
+ */
+ mutex_lock(&buffer->lock);
+ buffer->handle_count--;
+ BUG_ON(buffer->handle_count < 0);
+ if (!buffer->handle_count) {
+ struct task_struct *task;
+
+ task = current->group_leader;
+ get_task_comm(buffer->task_comm, task);
+ buffer->pid = task_pid_nr(task);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
static struct ion_handle *ion_handle_create(struct ion_client *client,
struct ion_buffer *buffer)
{
@@ -355,6 +331,7 @@
rb_init_node(&handle->node);
handle->client = client;
ion_buffer_get(buffer);
+ ion_buffer_add_to_handle(buffer);
handle->buffer = buffer;
return handle;
@@ -376,7 +353,9 @@
if (!RB_EMPTY_NODE(&handle->node))
rb_erase(&handle->node, &client->handles);
+ ion_buffer_remove_from_handle(buffer);
ion_buffer_put(buffer);
+
kfree(handle);
}
@@ -449,13 +428,13 @@
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_mask,
+ size_t align, unsigned int heap_id_mask,
unsigned int flags)
{
- struct rb_node *n;
struct ion_handle *handle;
struct ion_device *dev = client->dev;
struct ion_buffer *buffer = NULL;
+ struct ion_heap *heap;
unsigned long secure_allocation = flags & ION_FLAG_SECURE;
const unsigned int MAX_DBG_STR_LEN = 64;
char dbg_str[MAX_DBG_STR_LEN];
@@ -471,8 +450,8 @@
*/
flags |= ION_FLAG_CACHED_NEEDS_SYNC;
- pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
- align, heap_mask, flags);
+ pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
+ len, align, heap_id_mask, flags);
/*
* traverse the list of heaps available in this system in priority
* order. If the heap type is supported by the client, and matches the
@@ -484,29 +463,26 @@
len = PAGE_ALIGN(len);
- mutex_lock(&dev->lock);
- for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
- struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
- /* if the client doesn't support this heap type */
- if (!((1 << heap->type) & client->heap_mask))
- continue;
- /* if the caller didn't specify this heap type */
- if (!((1 << heap->id) & heap_mask))
+ down_read(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ /* if the caller didn't specify this heap id */
+ if (!((1 << heap->id) & heap_id_mask))
continue;
/* Do not allow un-secure heap if secure is specified */
if (secure_allocation &&
!ion_heap_allow_secure_allocation(heap->type))
continue;
trace_ion_alloc_buffer_start(client->name, heap->name, len,
- heap_mask, flags);
+ heap_id_mask, flags);
buffer = ion_buffer_create(heap, dev, len, align, flags);
trace_ion_alloc_buffer_end(client->name, heap->name, len,
- heap_mask, flags);
+ heap_id_mask, flags);
if (!IS_ERR_OR_NULL(buffer))
break;
trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
- heap_mask, flags, PTR_ERR(buffer));
+ heap_id_mask, flags,
+ PTR_ERR(buffer));
if (dbg_str_idx < MAX_DBG_STR_LEN) {
unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
int ret_value = snprintf(&dbg_str[dbg_str_idx],
@@ -523,21 +499,21 @@
}
}
}
- mutex_unlock(&dev->lock);
+ up_read(&dev->lock);
if (buffer == NULL) {
trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
- heap_mask, flags, -ENODEV);
+ heap_id_mask, flags, -ENODEV);
return ERR_PTR(-ENODEV);
}
if (IS_ERR(buffer)) {
trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
- heap_mask, flags, PTR_ERR(buffer));
+ heap_id_mask, flags,
+ PTR_ERR(buffer));
pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
- "0x%x) from heap(s) %sfor client %s with heap "
- "mask 0x%x\n",
- len, align, dbg_str, client->name, client->heap_mask);
+ "0x%x) from heap(s) %sfor client %s\n",
+ len, align, dbg_str, client->name);
return ERR_PTR(PTR_ERR(buffer));
}
@@ -654,212 +630,6 @@
ion_buffer_kmap_put(buffer);
}
-static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
- int domain_num, int partition_num, unsigned long align,
- unsigned long iova_length, unsigned long flags,
- unsigned long *iova)
-{
- struct ion_iommu_map *data;
- int ret;
-
- data = kmalloc(sizeof(*data), GFP_ATOMIC);
-
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- data->buffer = buffer;
- iommu_map_domain(data) = domain_num;
- iommu_map_partition(data) = partition_num;
-
- ret = buffer->heap->ops->map_iommu(buffer, data,
- domain_num,
- partition_num,
- align,
- iova_length,
- flags);
-
- if (ret)
- goto out;
-
- kref_init(&data->ref);
- *iova = data->iova_addr;
-
- ion_iommu_add(buffer, data);
-
- return data;
-
-out:
- kfree(data);
- return ERR_PTR(ret);
-}
-
-int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
- int domain_num, int partition_num, unsigned long align,
- unsigned long iova_length, unsigned long *iova,
- unsigned long *buffer_size,
- unsigned long flags, unsigned long iommu_flags)
-{
- struct ion_buffer *buffer;
- struct ion_iommu_map *iommu_map;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: client pointer is invalid\n", __func__);
- return -EINVAL;
- }
- if (IS_ERR_OR_NULL(handle)) {
- pr_err("%s: handle pointer is invalid\n", __func__);
- return -EINVAL;
- }
- if (IS_ERR_OR_NULL(handle->buffer)) {
- pr_err("%s: buffer pointer is invalid\n", __func__);
- return -EINVAL;
- }
-
- if (ION_IS_CACHED(flags)) {
- pr_err("%s: Cannot map iommu as cached.\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
-
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
-
- if (!handle->buffer->heap->ops->map_iommu) {
- pr_err("%s: map_iommu is not implemented by this heap.\n",
- __func__);
- ret = -ENODEV;
- goto out;
- }
-
- /*
- * If clients don't want a custom iova length, just use whatever
- * the buffer size is
- */
- if (!iova_length)
- iova_length = buffer->size;
-
- if (buffer->size > iova_length) {
- pr_debug("%s: iova length %lx is not at least buffer size"
- " %x\n", __func__, iova_length, buffer->size);
- ret = -EINVAL;
- goto out;
- }
-
- if (buffer->size & ~PAGE_MASK) {
- pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
- buffer->size, PAGE_SIZE);
- ret = -EINVAL;
- goto out;
- }
-
- if (iova_length & ~PAGE_MASK) {
- pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
- iova_length, PAGE_SIZE);
- ret = -EINVAL;
- goto out;
- }
-
- iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
- if (!iommu_map) {
- iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
- align, iova_length, flags, iova);
- if (!IS_ERR_OR_NULL(iommu_map)) {
- iommu_map->flags = iommu_flags;
-
- if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
- kref_get(&iommu_map->ref);
- } else {
- ret = PTR_ERR(iommu_map);
- }
- } else {
- if (iommu_map->flags != iommu_flags) {
- pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
- __func__, handle,
- iommu_map->flags, iommu_flags);
- ret = -EINVAL;
- } else if (iommu_map->mapped_size != iova_length) {
- pr_err("%s: handle %p is already mapped with length"
- " %x, trying to map with length %lx\n",
- __func__, handle, iommu_map->mapped_size,
- iova_length);
- ret = -EINVAL;
- } else {
- kref_get(&iommu_map->ref);
- *iova = iommu_map->iova_addr;
- }
- }
- if (!ret)
- buffer->iommu_map_cnt++;
- *buffer_size = buffer->size;
-out:
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return ret;
-}
-EXPORT_SYMBOL(ion_map_iommu);
-
-static void ion_iommu_release(struct kref *kref)
-{
- struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
- ref);
- struct ion_buffer *buffer = map->buffer;
-
- rb_erase(&map->node, &buffer->iommu_maps);
- buffer->heap->ops->unmap_iommu(map);
- kfree(map);
-}
-
-void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
- int domain_num, int partition_num)
-{
- struct ion_iommu_map *iommu_map;
- struct ion_buffer *buffer;
-
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: client pointer is invalid\n", __func__);
- return;
- }
- if (IS_ERR_OR_NULL(handle)) {
- pr_err("%s: handle pointer is invalid\n", __func__);
- return;
- }
- if (IS_ERR_OR_NULL(handle->buffer)) {
- pr_err("%s: buffer pointer is invalid\n", __func__);
- return;
- }
-
- mutex_lock(&client->lock);
- buffer = handle->buffer;
-
- mutex_lock(&buffer->lock);
-
- iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
-
- if (!iommu_map) {
- WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
- domain_num, partition_num, buffer);
- goto out;
- }
-
- kref_put(&iommu_map->ref, ion_iommu_release);
-
- buffer->iommu_map_cnt--;
-out:
- mutex_unlock(&buffer->lock);
-
- mutex_unlock(&client->lock);
-
-}
-EXPORT_SYMBOL(ion_unmap_iommu);
-
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
struct ion_buffer *buffer;
@@ -903,52 +673,10 @@
}
EXPORT_SYMBOL(ion_unmap_kernel);
-int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
- void *uaddr, unsigned long offset, unsigned long len,
- unsigned int cmd)
-{
- struct ion_buffer *buffer;
- int ret = -EINVAL;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to do_cache_op.\n",
- __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
-
- if (!ION_IS_CACHED(buffer->flags)) {
- ret = 0;
- goto out;
- }
-
- if (!handle->buffer->heap->ops->cache_op) {
- pr_err("%s: cache_op is not implemented by this heap.\n",
- __func__);
- ret = -ENODEV;
- goto out;
- }
-
-
- ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
- offset, len, cmd);
-
-out:
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return ret;
-
-}
-EXPORT_SYMBOL(ion_do_cache_op);
-
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
struct ion_client *client = s->private;
struct rb_node *n;
- struct rb_node *n2;
seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
"heap_name", "size_in_bytes", "handle refcount",
@@ -958,6 +686,7 @@
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
+
enum ion_heap_type type = handle->buffer->heap->type;
seq_printf(s, "%16.16s: %16x : %16d : %12p",
@@ -973,19 +702,9 @@
else
seq_printf(s, " : %12s", "N/A");
- for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
- n2 = rb_next(n2)) {
- struct ion_iommu_map *imap =
- rb_entry(n2, struct ion_iommu_map, node);
- seq_printf(s, " : [%d,%d] - %8lx",
- imap->domain_info[DI_DOMAIN_NUM],
- imap->domain_info[DI_PARTITION_NUM],
- imap->iova_addr);
- }
seq_printf(s, "\n");
}
mutex_unlock(&client->lock);
-
return 0;
}
@@ -1002,7 +721,6 @@
};
struct ion_client *ion_client_create(struct ion_device *dev,
- unsigned int heap_mask,
const char *name)
{
struct ion_client *client;
@@ -1052,11 +770,10 @@
strlcpy(client->name, name, name_len+1);
}
- client->heap_mask = heap_mask;
client->task = task;
client->pid = pid;
- mutex_lock(&dev->lock);
+ down_write(&dev->lock);
p = &dev->clients.rb_node;
while (*p) {
parent = *p;
@@ -1074,96 +791,16 @@
client->debug_root = debugfs_create_file(name, 0664,
dev->debug_root, client,
&debug_client_fops);
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
return client;
}
-
-/**
- * ion_mark_dangling_buffers_locked() - Mark dangling buffers
- * @dev: the ion device whose buffers will be searched
- *
- * Sets marked=1 for all known buffers associated with `dev' that no
- * longer have a handle pointing to them. dev->lock should be held
- * across a call to this function (and should only be unlocked after
- * checking for marked buffers).
- */
-static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
-{
- struct rb_node *n, *n2;
- /* mark all buffers as 1 */
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
- node);
-
- buf->marked = 1;
- }
-
- /* now see which buffers we can access */
- for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
-
- mutex_lock(&client->lock);
- for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
- struct ion_handle *handle
- = rb_entry(n2, struct ion_handle, node);
-
- handle->buffer->marked = 0;
-
- }
- mutex_unlock(&client->lock);
-
- }
-}
-
-#ifdef CONFIG_ION_LEAK_CHECK
-static u32 ion_debug_check_leaks_on_destroy;
-
-static int ion_check_for_and_print_leaks(struct ion_device *dev)
-{
- struct rb_node *n;
- int num_leaks = 0;
-
- if (!ion_debug_check_leaks_on_destroy)
- return 0;
-
- /* check for leaked buffers (those that no longer have a
- * handle pointing to them) */
- ion_mark_dangling_buffers_locked(dev);
-
- /* Anyone still marked as a 1 means a leaked handle somewhere */
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
- node);
-
- if (buf->marked == 1) {
- pr_info("Leaked ion buffer at %p\n", buf);
- num_leaks++;
- }
- }
- return num_leaks;
-}
-static void setup_ion_leak_check(struct dentry *debug_root)
-{
- debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
- &ion_debug_check_leaks_on_destroy);
-}
-#else
-static int ion_check_for_and_print_leaks(struct ion_device *dev)
-{
- return 0;
-}
-static void setup_ion_leak_check(struct dentry *debug_root)
-{
-}
-#endif
+EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
struct ion_device *dev = client->dev;
struct rb_node *n;
- int num_leaks;
pr_debug("%s: %d\n", __func__, __LINE__);
while ((n = rb_first(&client->handles))) {
@@ -1171,25 +808,13 @@
node);
ion_handle_destroy(&handle->ref);
}
- mutex_lock(&dev->lock);
+ down_write(&dev->lock);
if (client->task)
put_task_struct(client->task);
rb_erase(&client->node, &dev->clients);
debugfs_remove_recursive(client->debug_root);
- num_leaks = ion_check_for_and_print_leaks(dev);
-
- mutex_unlock(&dev->lock);
-
- if (num_leaks) {
- struct task_struct *current_task = current;
- char current_task_name[TASK_COMM_LEN];
- get_task_comm(current_task_name, current_task);
- WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
- __func__, num_leaks, num_leaks == 1 ? "" : "s");
- pr_info("task name at time of leak: %s, pid: %d\n",
- current_task_name, current_task->pid);
- }
+ up_write(&dev->lock);
kfree(client->name);
kfree(client);
@@ -1462,7 +1087,7 @@
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
struct ion_buffer *buffer = dmabuf->priv;
- return buffer->vaddr + offset;
+ return buffer->vaddr + offset * PAGE_SIZE;
}
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
@@ -1518,19 +1143,19 @@
.kunmap = ion_dma_buf_kunmap,
};
-int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle)
{
struct ion_buffer *buffer;
struct dma_buf *dmabuf;
bool valid_handle;
- int fd;
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
mutex_unlock(&client->lock);
if (!valid_handle) {
WARN(1, "%s: invalid handle passed to share.\n", __func__);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
@@ -1538,15 +1163,29 @@
dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
if (IS_ERR(dmabuf)) {
ion_buffer_put(buffer);
- return PTR_ERR(dmabuf);
+ return dmabuf;
}
+
+ return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+ struct dma_buf *dmabuf;
+ int fd;
+
+ dmabuf = ion_share_dma_buf(client, handle);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
if (fd < 0)
dma_buf_put(dmabuf);
return fd;
}
-EXPORT_SYMBOL(ion_share_dma_buf);
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
@@ -1655,7 +1294,8 @@
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
- data.fd = ion_share_dma_buf(client, data.handle);
+ data.fd = ion_share_dma_buf_fd(client, data.handle);
+
if (copy_to_user((void __user *)arg, &data, sizeof(data)))
return -EFAULT;
if (data.fd < 0)
@@ -1735,7 +1375,7 @@
pr_debug("%s: %d\n", __func__, __LINE__);
snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
- client = ion_client_create(dev, -1, debug_name);
+ client = ion_client_create(dev, debug_name);
if (IS_ERR_OR_NULL(client))
return PTR_ERR(client);
file->private_data = client;
@@ -1917,9 +1557,12 @@
struct ion_heap *heap = s->private;
struct ion_device *dev = heap->dev;
struct rb_node *n;
+ size_t total_size = 0;
+ size_t total_orphaned_size = 0;
- mutex_lock(&dev->lock);
+ mutex_lock(&dev->buffer_lock);
seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+ seq_printf(s, "----------------------------------------------------\n");
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
@@ -1938,8 +1581,28 @@
client->pid, size);
}
}
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "orphaned allocations (info is from last known client):"
+ "\n");
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+ if (buffer->heap->type == heap->type)
+ total_size += buffer->size;
+ if (!buffer->handle_count) {
+ seq_printf(s, "%16.s %16u %16u\n", buffer->task_comm,
+ buffer->pid, buffer->size);
+ total_orphaned_size += buffer->size;
+ }
+ }
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "%16.s %16u\n", "total orphaned",
+ total_orphaned_size);
+ seq_printf(s, "%16.s %16u\n", "total ", total_size);
+ seq_printf(s, "----------------------------------------------------\n");
+
ion_heap_print_debug(s, heap);
- mutex_unlock(&dev->lock);
+ mutex_unlock(&dev->buffer_lock);
return 0;
}
@@ -1955,40 +1618,90 @@
.release = single_release,
};
+static size_t ion_heap_free_list_is_empty(struct ion_heap *heap)
+{
+ bool is_empty;
+
+ rt_mutex_lock(&heap->lock);
+ is_empty = list_empty(&heap->free_list);
+ rt_mutex_unlock(&heap->lock);
+
+ return is_empty;
+}
+
+static int ion_heap_deferred_free(void *data)
+{
+ struct ion_heap *heap = data;
+
+ while (true) {
+ struct ion_buffer *buffer;
+
+ wait_event_freezable(heap->waitqueue,
+ !ion_heap_free_list_is_empty(heap));
+
+ rt_mutex_lock(&heap->lock);
+ if (list_empty(&heap->free_list)) {
+ rt_mutex_unlock(&heap->lock);
+ continue;
+ }
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ rt_mutex_unlock(&heap->lock);
+ _ion_buffer_destroy(buffer);
+ }
+
+ return 0;
+}
+
+static bool ion_heap_drain_freelist(struct ion_heap *heap)
+{
+ struct ion_buffer *buffer, *tmp;
+
+ if (ion_heap_free_list_is_empty(heap))
+ return false;
+ rt_mutex_lock(&heap->lock);
+ list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
+ _ion_buffer_destroy(buffer);
+ list_del(&buffer->list);
+ }
+ BUG_ON(!list_empty(&heap->free_list));
+ rt_mutex_unlock(&heap->lock);
+
+
+ return true;
+}
+
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
- struct rb_node **p = &dev->heaps.rb_node;
- struct rb_node *parent = NULL;
- struct ion_heap *entry;
+ struct sched_param param = { .sched_priority = 0 };
if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
!heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
- heap->dev = dev;
- mutex_lock(&dev->lock);
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_heap, node);
-
- if (heap->id < entry->id) {
- p = &(*p)->rb_left;
- } else if (heap->id > entry->id ) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: can not insert multiple heaps with "
- "id %d\n", __func__, heap->id);
- goto end;
- }
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
+ INIT_LIST_HEAD(&heap->free_list);
+ rt_mutex_init(&heap->lock);
+ init_waitqueue_head(&heap->waitqueue);
+ heap->task = kthread_run(ion_heap_deferred_free, heap,
+ "%s", heap->name);
+		sched_setscheduler(heap->task, SCHED_IDLE, &param);
+ if (IS_ERR(heap->task))
+ pr_err("%s: creating thread for deferred free failed\n",
+ __func__);
}
- rb_link_node(&heap->node, parent, p);
- rb_insert_color(&heap->node, &dev->heaps);
+ heap->dev = dev;
+ down_write(&dev->lock);
+ /* use negative heap->id to reverse the priority -- when traversing
+ the list later attempt higher id numbers first */
+ plist_node_init(&heap->node, -heap->id);
+ plist_add(&heap->node, &dev->heaps);
debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
&debug_heap_fops);
-end:
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
}
int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
@@ -2061,16 +1774,15 @@
int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
void *data)
{
- struct rb_node *n;
int ret_val = 0;
+ struct ion_heap *heap;
/*
* traverse the list of heaps available in this system
* and find the heap that is specified.
*/
- mutex_lock(&dev->lock);
- for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
- struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+ down_write(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
if (!ion_heap_allow_heap_secure(heap->type))
continue;
if (ION_HEAP(heap->id) != heap_id)
@@ -2081,7 +1793,7 @@
ret_val = -EINVAL;
break;
}
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
return ret_val;
}
EXPORT_SYMBOL(ion_secure_heap);
@@ -2089,16 +1801,15 @@
int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
void *data)
{
- struct rb_node *n;
int ret_val = 0;
+ struct ion_heap *heap;
/*
* traverse the list of heaps available in this system
* and find the heap that is specified.
*/
- mutex_lock(&dev->lock);
- for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
- struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+ down_write(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
if (!ion_heap_allow_heap_secure(heap->type))
continue;
if (ION_HEAP(heap->id) != heap_id)
@@ -2109,50 +1820,11 @@
ret_val = -EINVAL;
break;
}
- mutex_unlock(&dev->lock);
+ up_write(&dev->lock);
return ret_val;
}
EXPORT_SYMBOL(ion_unsecure_heap);
-static int ion_debug_leak_show(struct seq_file *s, void *unused)
-{
- struct ion_device *dev = s->private;
- struct rb_node *n;
-
- seq_printf(s, "%16.s %16.s %16.s %16.s\n", "buffer", "heap", "size",
- "ref cnt");
-
- mutex_lock(&dev->lock);
- ion_mark_dangling_buffers_locked(dev);
-
- /* Anyone still marked as a 1 means a leaked handle somewhere */
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
- node);
-
- if (buf->marked == 1)
- seq_printf(s, "%16.x %16.s %16.x %16.d\n",
- (int)buf, buf->heap->name, buf->size,
- atomic_read(&buf->ref.refcount));
- }
- mutex_unlock(&dev->lock);
- return 0;
-}
-
-static int ion_debug_leak_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ion_debug_leak_show, inode->i_private);
-}
-
-static const struct file_operations debug_leak_fops = {
- .open = ion_debug_leak_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-
struct ion_device *ion_device_create(long (*custom_ioctl)
(struct ion_client *client,
unsigned int cmd,
@@ -2181,13 +1853,10 @@
idev->custom_ioctl = custom_ioctl;
idev->buffers = RB_ROOT;
- mutex_init(&idev->lock);
- idev->heaps = RB_ROOT;
+ mutex_init(&idev->buffer_lock);
+ init_rwsem(&idev->lock);
+ plist_head_init(&idev->heaps);
idev->clients = RB_ROOT;
- debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
- &debug_leak_fops);
-
- setup_ion_leak_check(idev->debug_root);
return idev;
}
@@ -2200,16 +1869,35 @@
void __init ion_reserve(struct ion_platform_data *data)
{
- int i, ret;
+ int i;
for (i = 0; i < data->nr; i++) {
if (data->heaps[i].size == 0)
continue;
- ret = memblock_reserve(data->heaps[i].base,
- data->heaps[i].size);
- if (ret)
- pr_err("memblock reserve of %x@%pa failed\n",
- data->heaps[i].size,
- &data->heaps[i].base);
+
+ if (data->heaps[i].base == 0) {
+ phys_addr_t paddr;
+ paddr = memblock_alloc_base(data->heaps[i].size,
+ data->heaps[i].align,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!paddr) {
+ pr_err("%s: error allocating memblock for "
+ "heap %d\n",
+ __func__, i);
+ continue;
+ }
+ data->heaps[i].base = paddr;
+ } else {
+ int ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %x@%pa failed\n",
+ data->heaps[i].size,
+ &data->heaps[i].base);
+ }
+ pr_info("%s: %s reserved base %pa size %d\n", __func__,
+ data->heaps[i].name,
+ &data->heaps[i].base,
+ data->heaps[i].size);
}
}
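
A large part of the ion.c rework above is the deferred-free machinery: buffers whose heap sets ION_HEAP_FLAG_DEFER_FREE are queued on a per-heap free list, a low-priority kernel thread (ion_heap_deferred_free) drains the list, and the allocation path drains it synchronously and retries when an allocation first fails. The userspace sketch below mirrors that flow with a pthread worker; every name and type here is illustrative, not ion's own.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	struct buffer {
		struct buffer *next;
		size_t size;
	};

	static struct buffer *free_list;
	static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t free_wait = PTHREAD_COND_INITIALIZER;

	/* Like ion_buffer_destroy() with DEFER_FREE set: queue the buffer
	 * and wake the worker instead of releasing it inline. */
	static void defer_free(struct buffer *buf)
	{
		pthread_mutex_lock(&free_lock);
		buf->next = free_list;
		free_list = buf;
		pthread_cond_signal(&free_wait);
		pthread_mutex_unlock(&free_lock);
	}

	/* Like ion_heap_drain_freelist(): release everything queued so far. */
	static void drain_free_list(void)
	{
		pthread_mutex_lock(&free_lock);
		while (free_list) {
			struct buffer *buf = free_list;

			free_list = buf->next;
			printf("releasing %zu bytes\n", buf->size);
			free(buf);
		}
		pthread_mutex_unlock(&free_lock);
	}

	/* Like ion_heap_deferred_free(): sleep until work arrives, then drain. */
	static void *deferred_free_thread(void *arg)
	{
		(void)arg;
		for (;;) {
			pthread_mutex_lock(&free_lock);
			while (!free_list)
				pthread_cond_wait(&free_wait, &free_lock);
			pthread_mutex_unlock(&free_lock);
			drain_free_list();
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t worker;
		struct buffer *buf = malloc(sizeof(*buf));

		if (!buf)
			return 1;
		buf->size = 4096;
		if (pthread_create(&worker, NULL, deferred_free_thread, NULL))
			return 1;
		pthread_detach(worker);

		defer_free(buf);
		drain_free_list();	/* what the allocation-retry path does */
		sleep(1);		/* let the worker run; it never exits here */
		return 0;
	}

Note also that the driver registers heaps on a plist keyed by -heap->id, so higher heap ids are tried first during allocation; the sketch above does not model that ordering.
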
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index aeffb52..0dd3054 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -24,11 +24,9 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/iommu.h>
#include <linux/seq_file.h>
#include "ion_priv.h"
-#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>
@@ -164,91 +162,6 @@
return ret_value;
}
-int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
- unsigned int size_to_vmap, total_size;
- int i, j;
- void *ptr = NULL;
- ion_phys_addr_t buff_phys = buffer->priv_phys;
-
- if (!vaddr) {
- /*
- * Split the vmalloc space into smaller regions in
- * order to clean and/or invalidate the cache.
- */
- size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
- total_size = buffer->size;
-
- for (i = 0; i < total_size; i += size_to_vmap) {
- size_to_vmap = min(size_to_vmap, total_size - i);
- for (j = 0; j < 10 && size_to_vmap; ++j) {
- ptr = ioremap(buff_phys, size_to_vmap);
- if (ptr) {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- buff_phys += size_to_vmap;
- break;
- } else {
- size_to_vmap >>= 1;
- }
- }
- if (!ptr) {
- pr_err("Couldn't io-remap the memory\n");
- return -EINVAL;
- }
- iounmap(ptr);
- }
- } else {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- }
-
- if (carveout_heap->has_outer_cache) {
- unsigned long pstart = buffer->priv_phys + offset;
- outer_cache_op(pstart, pstart + length);
- }
- return 0;
-}
-
static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map)
{
@@ -303,110 +216,6 @@
return 0;
}
-int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- struct iommu_domain *domain;
- int ret = 0;
- unsigned long extra;
- struct scatterlist *sglist = 0;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = buffer->priv_phys;
- return 0;
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- sglist = vmalloc(sizeof(*sglist));
- if (!sglist)
- goto out1;
-
- sg_init_table(sglist, 1);
- sglist->length = buffer->size;
- sglist->offset = 0;
- sglist->dma_address = buffer->priv_phys;
-
- ret = iommu_map_range(domain, data->iova_addr, sglist,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(sglist);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- vfree(sglist);
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- vfree(sglist);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
-out:
-
- return ret;
-}
-
-void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
@@ -416,10 +225,7 @@
.unmap_kernel = ion_carveout_heap_unmap_kernel,
.map_dma = ion_carveout_heap_map_dma,
.unmap_dma = ion_carveout_heap_unmap_dma,
- .cache_op = ion_carveout_cache_ops,
.print_debug = ion_carveout_print_debug,
- .map_iommu = ion_carveout_heap_map_iommu,
- .unmap_iommu = ion_carveout_heap_unmap_iommu,
};
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
diff --git a/drivers/gpu/ion/ion_chunk_heap.c b/drivers/gpu/ion/ion_chunk_heap.c
new file mode 100644
index 0000000..b76f898
--- /dev/null
+++ b/drivers/gpu/ion/ion_chunk_heap.c
@@ -0,0 +1,180 @@
+/*
+ * drivers/gpu/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+//#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+
+#include <asm/mach/map.h>
+
+struct ion_chunk_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+ unsigned long chunk_size;
+ unsigned long size;
+ unsigned long allocated;
+};
+
+static int ion_chunk_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret, i;
+ unsigned long num_chunks;
+
+ if (ion_buffer_fault_user_mappings(buffer))
+ return -ENOMEM;
+
+ num_chunks = ALIGN(size, chunk_heap->chunk_size) /
+ chunk_heap->chunk_size;
+ buffer->size = num_chunks * chunk_heap->chunk_size;
+
+ if (buffer->size > chunk_heap->size - chunk_heap->allocated)
+ return -ENOMEM;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ret;
+ }
+
+ sg = table->sgl;
+ for (i = 0; i < num_chunks; i++) {
+ unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
+ chunk_heap->chunk_size);
+ if (!paddr)
+ goto err;
+ sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
+ sg = sg_next(sg);
+ }
+
+ buffer->priv_virt = table;
+ chunk_heap->allocated += buffer->size;
+ return 0;
+err:
+ sg = table->sgl;
+ for (i -= 1; i >= 0; i--) {
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg_dma_len(sg));
+ sg = sg_next(sg);
+ }
+ sg_free_table(table);
+ kfree(table);
+ return -ENOMEM;
+}
+
+static void ion_chunk_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ int i;
+
+ ion_heap_buffer_zero(buffer);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, sg, 1, DMA_BIDIRECTIONAL);
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg_dma_len(sg));
+ }
+ chunk_heap->allocated -= buffer->size;
+ sg_free_table(table);
+ kfree(table);
+}
+
+struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static struct ion_heap_ops chunk_heap_ops = {
+ .allocate = ion_chunk_heap_allocate,
+ .free = ion_chunk_heap_free,
+ .map_dma = ion_chunk_heap_map_dma,
+ .unmap_dma = ion_chunk_heap_unmap_dma,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_chunk_heap *chunk_heap;
+ struct scatterlist sg;
+
+ chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+ if (!chunk_heap)
+ return ERR_PTR(-ENOMEM);
+
+ chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+ chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+ PAGE_SHIFT, -1);
+ if (!chunk_heap->pool) {
+ kfree(chunk_heap);
+ return ERR_PTR(-ENOMEM);
+ }
+ chunk_heap->base = heap_data->base;
+ chunk_heap->size = heap_data->size;
+ chunk_heap->allocated = 0;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, phys_to_page(heap_data->base), heap_data->size, 0);
+ dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
+ gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+ chunk_heap->heap.ops = &chunk_heap_ops;
+ chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+ chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ pr_info("%s: base %pa size %zd align %pa\n", __func__,
+ &chunk_heap->base, heap_data->size, &heap_data->align);
+
+ return &chunk_heap->heap;
+}
+
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+
+ gen_pool_destroy(chunk_heap->pool);
+ kfree(chunk_heap);
+ chunk_heap = NULL;
+}
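
The new chunk heap charges every allocation a whole number of fixed-size chunks: the request is rounded up with ALIGN() and rejected when the rounded size would exceed what remains of the carved-out region. That rounding and accounting is easy to sanity-check in isolation; the sketch below uses made-up sizes and a local ALIGN_UP macro rather than the kernel's ALIGN().

	#include <stdio.h>

	/* Round x up to the next multiple of the power-of-two a. */
	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long chunk_size = 64 * 1024;	/* 64 KiB chunks */
		unsigned long heap_size  = 1024 * 1024;	/* 1 MiB region  */
		unsigned long allocated  = 0;
		unsigned long requests[] = { 4096, 100000, 700000 };
		unsigned int i;

		for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
			unsigned long chunks =
				ALIGN_UP(requests[i], chunk_size) / chunk_size;
			unsigned long rounded = chunks * chunk_size;

			if (rounded > heap_size - allocated) {
				printf("request %lu: rejected (would exceed heap)\n",
				       requests[i]);
				continue;
			}
			allocated += rounded;
			printf("request %lu: %lu chunk(s), %lu bytes charged\n",
			       requests[i], chunks, rounded);
		}
		printf("total allocated: %lu of %lu bytes\n", allocated, heap_size);
		return 0;
	}

This is why ion_chunk_heap_allocate() overwrites buffer->size with the rounded value before checking it against size - allocated: the accounting always reflects whole chunks, never the caller's raw length.
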
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
index 4f12e38..193f4d4 100644
--- a/drivers/gpu/ion/ion_cma_heap.c
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -47,7 +47,7 @@
int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size)
{
- struct page *page = virt_to_page(cpu_addr);
+ struct page *page = phys_to_page(handle);
int ret;
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -178,148 +178,6 @@
return;
}
-int ion_cma_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- unsigned long extra_iova_addr;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
- struct sg_table *table = info->table;
- int prot = IOMMU_WRITE | IOMMU_READ;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = info->handle;
- return 0;
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -EINVAL;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, table->sgl,
- buffer->size, prot);
-
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- extra_iova_addr = data->iova_addr + buffer->size;
- if (extra) {
- unsigned long phys_addr = sg_phys(table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-
-void ion_cma_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
-int ion_cma_cache_ops(struct ion_heap *heap,
- struct ion_buffer *buffer, void *vaddr,
- unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- if (!vaddr)
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- else
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- if (!vaddr)
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- else
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- if (!vaddr) {
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- } else {
- dmac_flush_range(vaddr, vaddr + length);
- }
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- if (cma_heap_has_outer_cache) {
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- outer_cache_op(info->handle, info->handle + length);
- }
-
- return 0;
-}
-
static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map)
{
@@ -358,9 +216,6 @@
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
- .map_iommu = ion_cma_map_iommu,
- .unmap_iommu = ion_cma_unmap_iommu,
- .cache_op = ion_cma_cache_ops,
.print_debug = ion_cma_print_debug,
};
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
index 0fbcfbf..e1b3eea 100644
--- a/drivers/gpu/ion/ion_cma_secure_heap.c
+++ b/drivers/gpu/ion/ion_cma_secure_heap.c
@@ -44,7 +44,6 @@
bool is_cached;
};
-static int cma_heap_has_outer_cache;
/*
* Create scatter-list for the already allocated DMA buffer.
* This function could be replace by dma_common_get_sgtable
@@ -53,7 +52,7 @@
int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size)
{
- struct page *page = virt_to_page(cpu_addr);
+ struct page *page = phys_to_page(handle);
int ret;
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -212,110 +211,6 @@
return;
}
-int ion_secure_cma_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- unsigned long extra_iova_addr;
- struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
- struct sg_table *table = info->table;
- int prot = IOMMU_WRITE | IOMMU_READ;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = info->handle;
- return 0;
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -EINVAL;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, table->sgl,
- buffer->size, prot);
-
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- extra_iova_addr = data->iova_addr + buffer->size;
- if (extra) {
- unsigned long phys_addr = sg_phys(table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-
-void ion_secure_cma_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
-int ion_secure_cma_cache_ops(struct ion_heap *heap,
- struct ion_buffer *buffer, void *vaddr,
- unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- pr_info("%s: cache operations disallowed from secure heap %s\n",
- __func__, heap->name);
- return -EINVAL;
-}
-
static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map)
{
@@ -354,9 +249,6 @@
.map_user = ion_secure_cma_mmap,
.map_kernel = ion_secure_cma_map_kernel,
.unmap_kernel = ion_secure_cma_unmap_kernel,
- .map_iommu = ion_secure_cma_map_iommu,
- .unmap_iommu = ion_secure_cma_unmap_iommu,
- .cache_op = ion_secure_cma_cache_ops,
.print_debug = ion_secure_cma_print_debug,
.secure_buffer = ion_cp_secure_buffer,
.unsecure_buffer = ion_cp_unsecure_buffer,
@@ -376,7 +268,6 @@
* used to make the link with reserved CMA memory */
heap->priv = data->priv;
heap->type = ION_HEAP_TYPE_SECURE_DMA;
- cma_heap_has_outer_cache = data->has_outer_cache;
return heap;
}
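
The functional change kept in this file, phys_to_page(handle) in ion_secure_cma_get_sgtable(), derives the backing page from the DMA handle instead of from a kernel virtual address. A minimal sketch of building a one-entry sg_table the same way (the function name is illustrative):

/* Sketch: single-entry sg_table described purely by a physical
 * handle and size, mirroring ion_secure_cma_get_sgtable() above. */
static int example_sgtable_from_handle(struct sg_table *sgt,
                                       dma_addr_t handle, size_t size)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (ret)
                return ret;
        sg_set_page(sgt->sgl, phys_to_page(handle), size, 0);
        return 0;
}
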
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 88addab..f1868a8 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -67,10 +67,6 @@
* kernel space (un-cached).
* @umap_count: the total number of times this heap has been mapped in
* user space.
- * @iommu_iova: saved iova when mapping full heap at once.
- * @iommu_partition: partition used to map full heap.
- * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
- * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
* @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
*/
struct ion_cp_heap {
@@ -90,11 +86,6 @@
unsigned long kmap_cached_count;
unsigned long kmap_uncached_count;
unsigned long umap_count;
- unsigned long iommu_iova[MAX_DOMAINS];
- unsigned long iommu_partition[MAX_DOMAINS];
- void *reserved_vrange;
- int iommu_map_all;
- int iommu_2x_map_domain;
unsigned int has_outer_cache;
atomic_t protect_cnt;
void *cpu_addr;
@@ -361,29 +352,6 @@
return offset;
}
-static void iommu_unmap_all(unsigned long domain_num,
- struct ion_cp_heap *cp_heap)
-{
- unsigned long left_to_unmap = cp_heap->total_size;
- unsigned long page_size = SZ_64K;
-
- struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
- if (domain) {
- unsigned long temp_iova = cp_heap->iommu_iova[domain_num];
-
- while (left_to_unmap) {
- iommu_unmap(domain, temp_iova, page_size);
- temp_iova += page_size;
- left_to_unmap -= page_size;
- }
- if (domain_num == cp_heap->iommu_2x_map_domain)
- msm_iommu_unmap_extra(domain, temp_iova,
- cp_heap->total_size, SZ_64K);
- } else {
- pr_err("Unable to get IOMMU domain %lu\n", domain_num);
- }
-}
-
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size)
{
@@ -401,25 +369,6 @@
cp_heap->heap_protected == HEAP_NOT_PROTECTED)
ion_on_last_free(heap);
- /* Unmap everything if we previously mapped the whole heap at once. */
- if (!cp_heap->allocated_bytes) {
- unsigned int i;
- for (i = 0; i < MAX_DOMAINS; ++i) {
- if (cp_heap->iommu_iova[i]) {
- unsigned long vaddr_len = cp_heap->total_size;
-
- if (i == cp_heap->iommu_2x_map_domain)
- vaddr_len <<= 1;
- iommu_unmap_all(i, cp_heap);
-
- msm_free_iova_address(cp_heap->iommu_iova[i], i,
- cp_heap->iommu_partition[i],
- vaddr_len);
- }
- cp_heap->iommu_iova[i] = 0;
- cp_heap->iommu_partition[i] = 0;
- }
- }
mutex_unlock(&cp_heap->lock);
}
@@ -674,91 +623,6 @@
mutex_unlock(&cp_heap->lock);
}
-int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- unsigned int size_to_vmap, total_size;
- struct ion_cp_buffer *buf = buffer->priv_virt;
- int i, j;
- void *ptr = NULL;
- ion_phys_addr_t buff_phys = buffer->priv_phys;
-
- if (!vaddr) {
- /*
- * Split the vmalloc space into smaller regions in
- * order to clean and/or invalidate the cache.
- */
- size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
- total_size = buffer->size;
- for (i = 0; i < total_size; i += size_to_vmap) {
- size_to_vmap = min(size_to_vmap, total_size - i);
- for (j = 0; j < 10 && size_to_vmap; ++j) {
- ptr = ioremap(buff_phys, size_to_vmap);
- if (ptr) {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- buff_phys += size_to_vmap;
- break;
- } else {
- size_to_vmap >>= 1;
- }
- }
- if (!ptr) {
- pr_err("Couldn't io-remap the memory\n");
- return -EINVAL;
- }
- iounmap(ptr);
- }
- } else {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- }
-
- if (cp_heap->has_outer_cache) {
- unsigned long pstart = buf->buffer + offset;
- outer_cache_op(pstart, pstart + length);
- }
- return 0;
-}
-
static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map)
{
@@ -859,205 +723,6 @@
return ret_value;
}
-static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
- int partition, unsigned long prot)
-{
- unsigned long left_to_map = cp_heap->total_size;
- unsigned long page_size = SZ_64K;
- int ret_value = 0;
- unsigned long virt_addr_len = cp_heap->total_size;
- struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
-
- /* If we are mapping into the video domain we need to map twice the
- * size of the heap to account for prefetch issue in video core.
- */
- if (domain_num == cp_heap->iommu_2x_map_domain)
- virt_addr_len <<= 1;
-
- if (cp_heap->total_size & (SZ_64K-1)) {
- pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
- ret_value = -EINVAL;
- }
- if (cp_heap->base & (SZ_64K-1)) {
- pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
- ret_value = -EINVAL;
- }
- if (!ret_value && domain) {
- unsigned long temp_phys = cp_heap->base;
- unsigned long temp_iova;
-
- ret_value = msm_allocate_iova_address(domain_num, partition,
- virt_addr_len, SZ_64K,
- &temp_iova);
-
- if (ret_value) {
- pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
- __func__, domain_num, partition);
- goto out;
- }
- cp_heap->iommu_iova[domain_num] = temp_iova;
-
- while (left_to_map) {
- int ret = iommu_map(domain, temp_iova, temp_phys,
- page_size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p, error: %d\n",
- __func__, temp_iova, domain, ret);
- ret_value = -EAGAIN;
- goto free_iova;
- }
- temp_iova += page_size;
- temp_phys += page_size;
- left_to_map -= page_size;
- }
- if (domain_num == cp_heap->iommu_2x_map_domain)
- ret_value = msm_iommu_map_extra(domain, temp_iova,
- cp_heap->base,
- cp_heap->total_size,
- SZ_64K, prot);
- if (ret_value)
- goto free_iova;
- } else {
- pr_err("Unable to get IOMMU domain %lu\n", domain_num);
- ret_value = -ENOMEM;
- }
- goto out;
-
-free_iova:
- msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
- partition, virt_addr_len);
-out:
- return ret_value;
-}
-
-static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- struct iommu_domain *domain;
- int ret = 0;
- unsigned long extra;
- struct ion_cp_heap *cp_heap =
- container_of(buffer->heap, struct ion_cp_heap, heap);
- int prot = IOMMU_WRITE | IOMMU_READ;
- struct ion_cp_buffer *buf = buffer->priv_virt;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = buf->buffer;
- return 0;
- }
-
- if (cp_heap->iommu_iova[domain_num]) {
- /* Already mapped. */
- unsigned long offset = buf->buffer - cp_heap->base;
- data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
- return 0;
- } else if (cp_heap->iommu_map_all) {
- ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
- if (!ret) {
- unsigned long offset =
- buf->buffer - cp_heap->base;
- data->iova_addr =
- cp_heap->iommu_iova[domain_num] + offset;
- cp_heap->iommu_partition[domain_num] = partition_num;
- /*
- clear delayed map flag so that we don't interfere
- with this feature (we are already delaying).
- */
- data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
- return 0;
- } else {
- cp_heap->iommu_iova[domain_num] = 0;
- cp_heap->iommu_partition[domain_num] = 0;
- return ret;
- }
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
- struct ion_cp_heap *cp_heap =
- container_of(data->buffer->heap, struct ion_cp_heap, heap);
-
- if (!msm_use_iommu())
- return;
-
-
- domain_num = iommu_map_domain(data);
-
- /* If we are mapping everything we'll wait to unmap until everything
- is freed. */
- if (cp_heap->iommu_iova[domain_num])
- return;
-
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
static struct ion_heap_ops cp_heap_ops = {
.allocate = ion_cp_heap_allocate,
.free = ion_cp_heap_free,
@@ -1068,12 +733,9 @@
.unmap_kernel = ion_cp_heap_unmap_kernel,
.map_dma = ion_cp_heap_map_dma,
.unmap_dma = ion_cp_heap_unmap_dma,
- .cache_op = ion_cp_cache_ops,
.print_debug = ion_cp_print_debug,
.secure_heap = ion_cp_secure_heap,
.unsecure_heap = ion_cp_unsecure_heap,
- .map_iommu = ion_cp_heap_map_iommu,
- .unmap_iommu = ion_cp_heap_unmap_iommu,
.secure_buffer = ion_cp_secure_buffer,
.unsecure_buffer = ion_cp_unsecure_buffer,
};
@@ -1120,10 +782,6 @@
if (extra_data->release_region)
cp_heap->heap_release_region =
extra_data->release_region;
- cp_heap->iommu_map_all =
- extra_data->iommu_map_all;
- cp_heap->iommu_2x_map_domain =
- extra_data->iommu_2x_map_domain;
cp_heap->cma = extra_data->is_cma;
cp_heap->allow_non_secure_allocation =
extra_data->allow_nonsecure_alloc;
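
The removed iommu_map_all() path refused to map the heap unless both its base and total size were 64K aligned, since it mapped with SZ_64K pages. A minimal sketch of that precondition, kept here only for reference (the helper is hypothetical):

/* Hypothetical helper: the deleted map-all path required 64K
 * alignment of both the heap base and total size before mapping. */
static bool example_cp_heap_mappable(ion_phys_addr_t base, size_t size)
{
        return !(base & (SZ_64K - 1)) && !(size & (SZ_64K - 1));
}
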
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 510b9ce..3d37541 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -17,8 +17,120 @@
#include <linux/err.h>
#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
#include "ion_priv.h"
+void *ion_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct scatterlist *sg;
+ int i, j;
+ void *vaddr;
+ pgprot_t pgprot;
+ struct sg_table *table = buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+
+ if (!pages)
+ return 0;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
+ struct page *page = sg_page(sg);
+ BUG_ON(i >= npages);
+ for (j = 0; j < npages_this_entry; j++) {
+ *(tmp++) = page++;
+ }
+ }
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ vfree(pages);
+
+ return vaddr;
+}
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ vunmap(buffer->vaddr);
+}
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table = buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg_dma_len(sg);
+
+ if (offset >= sg_dma_len(sg)) {
+ offset -= sg_dma_len(sg);
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg_dma_len(sg) - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->sg_table;
+ pgprot_t pgprot;
+ struct scatterlist *sg;
+ struct vm_struct *vm_struct;
+ int i, j, ret = 0;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
+ if (!vm_struct)
+ return -ENOMEM;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long len = sg_dma_len(sg);
+
+ for (j = 0; j < len / PAGE_SIZE; j++) {
+ struct page *sub_page = page + j;
+ struct page **pages = &sub_page;
+ ret = map_vm_area(vm_struct, pgprot, &pages);
+ if (ret)
+ goto end;
+ memset(vm_struct->addr, 0, PAGE_SIZE);
+ unmap_kernel_range((unsigned long)vm_struct->addr,
+ PAGE_SIZE);
+ }
+ }
+end:
+ free_vm_area(vm_struct);
+ return ret;
+}
+
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_heap *heap = NULL;
@@ -33,6 +145,9 @@
case ION_HEAP_TYPE_CARVEOUT:
heap = ion_carveout_heap_create(heap_data);
break;
+ case ION_HEAP_TYPE_CHUNK:
+ heap = ion_chunk_heap_create(heap_data);
+ break;
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap_data->type);
@@ -67,6 +182,9 @@
case ION_HEAP_TYPE_CARVEOUT:
ion_carveout_heap_destroy(heap);
break;
+ case ION_HEAP_TYPE_CHUNK:
+ ion_chunk_heap_destroy(heap);
+ break;
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap->type);
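
The new ion_heap_map_kernel(), ion_heap_unmap_kernel() and ion_heap_map_user() helpers work for any heap that keeps a valid sg_table in buffer->sg_table. A minimal sketch of wiring them into a heap's ops table, mirroring what ion_system_heap.c does later in this patch (the ops name is illustrative and the table is deliberately incomplete):

/* Sketch: reuse the shared helpers instead of per-heap copies.
 * Only the mapping hooks are shown; allocate/free etc. are omitted. */
static struct ion_heap_ops example_heap_ops = {
        .map_kernel     = ion_heap_map_kernel,
        .unmap_kernel   = ion_heap_unmap_kernel,
        .map_user       = ion_heap_map_user,
};
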
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 512ebf3..53d853d 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -30,7 +30,6 @@
struct ion_iommu_heap {
struct ion_heap heap;
- unsigned int has_outer_cache;
};
/*
@@ -112,7 +111,7 @@
int j;
void *ptr = NULL;
unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
- long size_remaining = PAGE_ALIGN(size);
+ unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
data = kmalloc(sizeof(*data), GFP_KERNEL);
@@ -315,157 +314,6 @@
return 0;
}
-int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- struct iommu_domain *domain;
- int ret = 0;
- unsigned long extra;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- BUG_ON(!msm_use_iommu());
-
- data->mapped_size = iova_length;
- extra = iova_length - buffer->size;
-
- /* Use the biggest alignment to allow bigger IOMMU mappings.
- * Use the first entry since the first entry will always be the
- * biggest entry. To take advantage of bigger mapping sizes both the
- * VA and PA addresses have to be aligned to the biggest size.
- */
- if (buffer->sg_table->sgl->length > align)
- align = buffer->sg_table->sgl->length;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr,
- buffer->sg_table->sgl,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- buffer->size);
-
-out:
-
- return ret;
-}
-
-void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- BUG_ON(!msm_use_iommu());
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
-static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
- struct ion_iommu_heap *iommu_heap =
- container_of(heap, struct ion_iommu_heap, heap);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- if (!vaddr)
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- else
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- if (!vaddr)
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- else
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- if (!vaddr) {
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- } else {
- dmac_flush_range(vaddr, vaddr + length);
- }
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- if (iommu_heap->has_outer_cache) {
- unsigned long pstart;
- unsigned int i;
- struct ion_iommu_priv_data *data = buffer->priv_virt;
- if (!data)
- return -ENOMEM;
-
- for (i = 0; i < data->nrpages; ++i) {
- pstart = page_to_phys(data->pages[i]);
- outer_cache_op(pstart, pstart + PAGE_SIZE);
- }
- }
- return 0;
-}
-
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
@@ -483,9 +331,6 @@
.map_user = ion_iommu_heap_map_user,
.map_kernel = ion_iommu_heap_map_kernel,
.unmap_kernel = ion_iommu_heap_unmap_kernel,
- .map_iommu = ion_iommu_heap_map_iommu,
- .unmap_iommu = ion_iommu_heap_unmap_iommu,
- .cache_op = ion_iommu_cache_ops,
.map_dma = ion_iommu_heap_map_dma,
.unmap_dma = ion_iommu_heap_unmap_dma,
};
@@ -500,7 +345,6 @@
iommu_heap->heap.ops = &iommu_heap_ops;
iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
- iommu_heap->has_outer_cache = heap_data->has_outer_cache;
return &iommu_heap->heap;
}
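
With .cache_op removed from the IOMMU heap as well, the equivalent maintenance before the CPU reads device-written data is an invalidate over the buffer's sg_table. A minimal sketch, assuming the sg_table is populated (illustrative helper, not part of the patch):

/* Illustrative only: invalidate CPU caches before reading data the
 * device has written, using the streaming DMA call the removed
 * ion_iommu_cache_ops() already used for the !vaddr case. */
static void example_iommu_heap_inv_for_cpu(struct ion_buffer *buffer)
{
        dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
                            buffer->sg_table->nents, DMA_FROM_DEVICE);
}
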
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/gpu/ion/ion_page_pool.c
new file mode 100644
index 0000000..e8b5489
--- /dev/null
+++ b/drivers/gpu/ion/ion_page_pool.c
@@ -0,0 +1,282 @@
+/*
+ * drivers/gpu/ion/ion_page_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/shrinker.h>
+#include "ion_priv.h"
+
+/* #define DEBUG_PAGE_POOL_SHRINKER */
+
+static struct plist_head pools = PLIST_HEAD_INIT(pools);
+static struct shrinker shrinker;
+
+struct ion_page_pool_item {
+ struct page *page;
+ struct list_head list;
+};
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+ struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+ struct scatterlist sg;
+
+ if (!page)
+ return NULL;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
+ dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
+
+ return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+ struct page *page)
+{
+ __free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+ struct ion_page_pool_item *item;
+
+ item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ mutex_lock(&pool->mutex);
+ item->page = page;
+ if (PageHighMem(page)) {
+ list_add_tail(&item->list, &pool->high_items);
+ pool->high_count++;
+ } else {
+ list_add_tail(&item->list, &pool->low_items);
+ pool->low_count++;
+ }
+ mutex_unlock(&pool->mutex);
+ return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+ struct ion_page_pool_item *item;
+ struct page *page;
+
+ if (high) {
+ BUG_ON(!pool->high_count);
+ item = list_first_entry(&pool->high_items,
+ struct ion_page_pool_item, list);
+ pool->high_count--;
+ } else {
+ BUG_ON(!pool->low_count);
+ item = list_first_entry(&pool->low_items,
+ struct ion_page_pool_item, list);
+ pool->low_count--;
+ }
+
+ list_del(&item->list);
+ page = item->page;
+ kfree(item);
+ return page;
+}
+
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+ struct page *page = NULL;
+
+ BUG_ON(!pool);
+
+ mutex_lock(&pool->mutex);
+ if (pool->high_count)
+ page = ion_page_pool_remove(pool, true);
+ else if (pool->low_count)
+ page = ion_page_pool_remove(pool, false);
+ mutex_unlock(&pool->mutex);
+
+ if (!page)
+ page = ion_page_pool_alloc_pages(pool);
+
+ return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
+{
+ int ret;
+
+ ret = ion_page_pool_add(pool, page);
+ if (ret)
+ ion_page_pool_free_pages(pool, page);
+}
+
+#ifdef DEBUG_PAGE_POOL_SHRINKER
+static int debug_drop_pools_set(void *data, u64 val)
+{
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ if (!val)
+ return 0;
+
+ objs = shrinker.shrink(&shrinker, &sc);
+ sc.nr_to_scan = objs;
+
+ shrinker.shrink(&shrinker, &sc);
+ return 0;
+}
+
+static int debug_drop_pools_get(void *data, u64 *val)
+{
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ objs = shrinker.shrink(&shrinker, &sc);
+ *val = objs;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_drop_pools_fops, debug_drop_pools_get,
+ debug_drop_pools_set, "%llu\n");
+
+static int debug_grow_pools_set(void *data, u64 val)
+{
+ struct ion_page_pool *pool;
+ struct page *page;
+
+ plist_for_each_entry(pool, &pools, list) {
+ if (val != pool->list.prio)
+ continue;
+ page = ion_page_pool_alloc_pages(pool);
+ if (page)
+ ion_page_pool_add(pool, page);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_grow_pools_fops, debug_drop_pools_get,
+ debug_grow_pools_set, "%llu\n");
+#endif
+
+static int ion_page_pool_total(bool high)
+{
+ struct ion_page_pool *pool;
+ int total = 0;
+
+ plist_for_each_entry(pool, &pools, list) {
+ total += high ? (pool->high_count + pool->low_count) *
+ (1 << pool->order) :
+ pool->low_count * (1 << pool->order);
+ }
+ return total;
+}
+
+static int ion_page_pool_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_page_pool *pool;
+ int nr_freed = 0;
+ int i;
+ bool high;
+ int nr_to_scan = sc->nr_to_scan;
+
+ if (sc->gfp_mask & __GFP_HIGHMEM)
+ high = true;
+
+ if (nr_to_scan == 0)
+ return ion_page_pool_total(high);
+
+ plist_for_each_entry(pool, &pools, list) {
+ for (i = 0; i < nr_to_scan; i++) {
+ struct page *page;
+
+ mutex_lock(&pool->mutex);
+ if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
+ } else if (pool->low_count) {
+ page = ion_page_pool_remove(pool, false);
+ } else {
+ mutex_unlock(&pool->mutex);
+ break;
+ }
+ mutex_unlock(&pool->mutex);
+ ion_page_pool_free_pages(pool, page);
+ nr_freed += (1 << pool->order);
+ }
+ nr_to_scan -= i;
+ }
+
+ return ion_page_pool_total(high);
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+ struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+ GFP_KERNEL);
+ if (!pool)
+ return NULL;
+ pool->high_count = 0;
+ pool->low_count = 0;
+ INIT_LIST_HEAD(&pool->low_items);
+ INIT_LIST_HEAD(&pool->high_items);
+ pool->gfp_mask = gfp_mask;
+ pool->order = order;
+ mutex_init(&pool->mutex);
+ plist_node_init(&pool->list, order);
+ plist_add(&pool->list, &pools);
+
+ return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+ plist_del(&pool->list, &pools);
+ kfree(pool);
+}
+
+static int __init ion_page_pool_init(void)
+{
+ shrinker.shrink = ion_page_pool_shrink;
+ shrinker.seeks = DEFAULT_SEEKS;
+ shrinker.batch = 0;
+ register_shrinker(&shrinker);
+#ifdef DEBUG_PAGE_POOL_SHRINKER
+ debugfs_create_file("ion_pools_shrink", 0644, NULL, NULL,
+ &debug_drop_pools_fops);
+ debugfs_create_file("ion_pools_grow", 0644, NULL, NULL,
+ &debug_grow_pools_fops);
+#endif
+ return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+ unregister_shrinker(&shrinker);
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
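
The pool API added above is small: create a pool per page order, allocate from it (falling back to the buddy allocator when the pool is empty), return pages on free, and let the registered shrinker drain the lists under memory pressure. A minimal usage sketch under those assumptions (flags and order chosen purely for illustration):

/* Sketch: one order-0 pool; ion_page_pool_alloc() refills from
 * alloc_pages() when the pool is empty, ion_page_pool_free() puts the
 * page back on the pool's high/low list for reuse. */
static int example_pool_roundtrip(void)
{
        struct ion_page_pool *pool;
        struct page *page;

        pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
        if (!pool)
                return -ENOMEM;

        page = ion_page_pool_alloc(pool);
        if (page)
                ion_page_pool_free(pool, page);

        ion_page_pool_destroy(pool);
        return 0;
}
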
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 8d45f9d..4b724df 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -18,14 +18,17 @@
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
+#include <linux/ion.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
-#include <linux/ion.h>
#include <linux/seq_file.h>
#include "msm_ion_priv.h"
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
@@ -46,10 +49,22 @@
* @vaddr: the kernel mapping if kmap_cnt is not zero
* @dmap_cnt: number of times the buffer is mapped for dma
* @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @dirty: bitmask representing which pages of this buffer have
+ * been dirtied by the cpu and need cache maintenance
+ * before dma
+ * @vmas: list of vma's mapping this buffer
+ * @handle_count: count of handles referencing this buffer
+ * @task_comm: taskcomm of last client to reference this buffer in a
+ * handle, used for debugging
+ * @pid: pid of last client to reference this buffer in a
+ * handle, used for debugging
*/
struct ion_buffer {
struct kref ref;
- struct rb_node node;
+ union {
+ struct rb_node node;
+ struct list_head list;
+ };
struct ion_device *dev;
struct ion_heap *heap;
unsigned long flags;
@@ -65,9 +80,10 @@
struct sg_table *sg_table;
unsigned long *dirty;
struct list_head vmas;
- unsigned int iommu_map_cnt;
- struct rb_root iommu_maps;
- int marked;
+ /* used to track orphaned buffers */
+ int handle_count;
+ char task_comm[TASK_COMM_LEN];
+ pid_t pid;
};
/**
@@ -98,17 +114,6 @@
int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma);
void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
- int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset,
- unsigned int length, unsigned int cmd);
- int (*map_iommu)(struct ion_buffer *buffer,
- struct ion_iommu_map *map_data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags);
- void (*unmap_iommu)(struct ion_iommu_map *data);
int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map);
int (*secure_heap)(struct ion_heap *heap, int version, void *data);
@@ -119,16 +124,28 @@
};
/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
* struct ion_heap - represents a heap in the system
* @node: rb node to put the heap on the device's tree of heaps
* @dev: back pointer to the ion_device
* @type: type of heap
* @ops: ops struct as above
+ * @flags: flags
* @id: id of heap, also indicates priority of this heap when
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
* @priv: private heap data
+ * @free_list: free list head if deferred free is used
+ * @lock: protects the free list
+ * @waitqueue: queue to wait on from deferred free thread
+ * @task: task struct of deferred free thread
+ * @debug_show: called when heap debug file is read to add any
+ * heap specific debug info to output
*
* Represents a pool of memory from which buffers can be made. In some
* systems the only heap is regular system memory allocated via vmalloc.
@@ -136,16 +153,30 @@
* that are allocated from a specially reserved heap.
*/
struct ion_heap {
- struct rb_node node;
+ struct plist_node node;
struct ion_device *dev;
enum ion_heap_type type;
struct ion_heap_ops *ops;
- int id;
+ unsigned long flags;
+ unsigned int id;
const char *name;
void *priv;
+ struct list_head free_list;
+ struct rt_mutex lock;
+ wait_queue_head_t waitqueue;
+ struct task_struct *task;
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};
/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer: buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
* ion_buffer_fault_user_mappings - fault in user mappings of this buffer
* @buffer: buffer
*
@@ -179,6 +210,17 @@
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+ struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+
+
+/**
* functions for creating and destroying the built in ion heaps.
* architectures can add their own custom architecture specific
* heaps as appropriate.
@@ -186,7 +228,6 @@
struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);
-
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);
@@ -196,6 +237,8 @@
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
/**
* kernel api to allocate/free from carveout -- used when carveout is
* used to back an architecture specific custom heap
@@ -211,4 +254,52 @@
*/
#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre allocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for dma, ie any cached mapping have been
+ * invalidated from the cache, provides a significant peformance benefit on
+ * many systems */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count: number of highmem items in the pool
+ * @low_count: number of lowmem items in the pool
+ * @high_items: list of highmem items
+ * @low_items: list of lowmem items
+ * @shrinker: a shrinker for the items
+ * @mutex: lock protecting this struct and especially the count
+ * and item lists
+ * @alloc: function to be used to allocate pages when the pool
+ * is empty
+ * @free: function to be used to free pages back to the system
+ * when the shrinker fires
+ * @gfp_mask: gfp_mask to use from alloc
+ * @order: order of pages in the pool
+ * @list: plist node for list of pools
+ *
+ * Allows you to keep a pool of pre-allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance benefit
+ * on many systems
+ */
+struct ion_page_pool {
+ int high_count;
+ int low_count;
+ struct list_head high_items;
+ struct list_head low_items;
+ struct mutex mutex;
+ void *(*alloc)(struct ion_page_pool *pool);
+ void (*free)(struct ion_page_pool *pool, struct page *page);
+ gfp_t gfp_mask;
+ unsigned int order;
+ struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+
#endif /* _ION_PRIV_H */
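
Two of the additions above interact: a heap that sets ION_HEAP_FLAG_DEFER_FREE frees buffers from a background thread, and pooled (uncached) pages should be scrubbed with ion_heap_buffer_zero() before being recycled so stale data never reaches the next client. A minimal sketch of a heap-side free preparation step under those assumptions (the helper name is made up):

/* Sketch: zero uncached buffers before their pages go back to a pool;
 * cached buffers are assumed to have been zeroed at allocation time,
 * matching the system heap changes later in this patch. */
static int example_free_prepare(struct ion_buffer *buffer)
{
        if (!ion_buffer_cached(buffer))
                return ion_heap_buffer_zero(buffer);
        return 0;
}
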
diff --git a/drivers/gpu/ion/ion_removed_heap.c b/drivers/gpu/ion/ion_removed_heap.c
index 4759e40..84d8d37 100644
--- a/drivers/gpu/ion/ion_removed_heap.c
+++ b/drivers/gpu/ion/ion_removed_heap.c
@@ -41,7 +41,6 @@
int (*release_region)(void *);
atomic_t map_count;
void *bus_id;
- unsigned int has_outer_cache;
};
ion_phys_addr_t ion_removed_allocate(struct ion_heap *heap,
@@ -233,91 +232,6 @@
ion_removed_release_region(removed_heap);
}
-int ion_removed_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
- struct ion_removed_heap *removed_heap =
- container_of(heap, struct ion_removed_heap, heap);
- unsigned int size_to_vmap, total_size;
- int i, j;
- void *ptr = NULL;
- ion_phys_addr_t buff_phys = buffer->priv_phys;
-
- if (!vaddr) {
- /*
- * Split the vmalloc space into smaller regions in
- * order to clean and/or invalidate the cache.
- */
- size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
- total_size = buffer->size;
-
- for (i = 0; i < total_size; i += size_to_vmap) {
- size_to_vmap = min(size_to_vmap, total_size - i);
- for (j = 0; j < 10 && size_to_vmap; ++j) {
- ptr = ioremap(buff_phys, size_to_vmap);
- if (ptr) {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- buff_phys += size_to_vmap;
- break;
- } else {
- size_to_vmap >>= 1;
- }
- }
- if (!ptr) {
- pr_err("Couldn't io-remap the memory\n");
- return -EINVAL;
- }
- iounmap(ptr);
- }
- } else {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- }
-
- if (removed_heap->has_outer_cache) {
- unsigned long pstart = buffer->priv_phys + offset;
- outer_cache_op(pstart, pstart + length);
- }
- return 0;
-}
-
static int ion_removed_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map)
{
@@ -382,7 +296,6 @@
.unmap_kernel = ion_removed_heap_unmap_kernel,
.map_dma = ion_removed_heap_map_dma,
.unmap_dma = ion_removed_heap_unmap_dma,
- .cache_op = ion_removed_cache_ops,
.print_debug = ion_removed_print_debug,
};
@@ -412,7 +325,6 @@
removed_heap->heap.type = ION_HEAP_TYPE_REMOVED;
removed_heap->allocated_bytes = 0;
removed_heap->total_size = heap_data->size;
- removed_heap->has_outer_cache = heap_data->has_outer_cache;
if (heap_data->extra_data) {
struct ion_co_heap_pdata *extra_data =
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index ceb30a4..02f6d93 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -22,11 +22,10 @@
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/iommu.h>
#include <linux/seq_file.h>
-#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
@@ -35,33 +34,112 @@
static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;
-static unsigned int system_heap_has_outer_cache;
-static unsigned int system_heap_contig_has_outer_cache;
+
+static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+ __GFP_NOWARN | __GFP_NORETRY |
+ __GFP_NO_KSWAPD) & ~__GFP_WAIT;
+static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+ __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+ int i;
+ for (i = 0; i < num_orders; i++)
+ if (order == orders[i])
+ return i;
+ BUG();
+ return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+ return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+ struct ion_heap heap;
+ struct ion_page_pool **pools;
+};
struct page_info {
struct page *page;
- unsigned long order;
+ unsigned int order;
struct list_head list;
};
-static struct page_info *alloc_largest_available(unsigned long size,
- bool split_pages)
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long order)
{
- static unsigned int orders[] = {8, 4, 0};
+ bool cached = ion_buffer_cached(buffer);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct page *page;
+
+ if (!cached) {
+ page = ion_page_pool_alloc(pool);
+ } else {
+ struct scatterlist sg;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (order > 4)
+ gfp_flags = high_order_gfp_flags;
+ page = alloc_pages(gfp_flags, order);
+ if (!page)
+ return 0;
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, PAGE_SIZE << order, 0);
+ dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
+ }
+ if (!page)
+ return 0;
+
+ if (split_pages)
+ split_page(page, order);
+ return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+ int i;
+
+ if (!cached) {
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ ion_page_pool_free(pool, page);
+ } else if (split_pages) {
+ for (i = 0; i < (1 << order); i++)
+ __free_page(page + i);
+ } else {
+ __free_pages(page, order);
+ }
+}
+
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size,
+ unsigned int max_order)
+{
struct page *page;
struct page_info *info;
int i;
- for (i = 0; i < ARRAY_SIZE(orders); i++) {
- if (size < (1 << orders[i]) * PAGE_SIZE)
+ for (i = 0; i < num_orders; i++) {
+ if (size < order_to_size(orders[i]))
continue;
- page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN | __GFP_NORETRY, orders[i]);
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_buffer_page(heap, buffer, orders[i]);
if (!page)
continue;
- if (split_pages)
- split_page(page, orders[i]);
- info = kmap(page);
+
+ info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
info->page = page;
info->order = orders[i];
return info;
@@ -74,23 +152,27 @@
unsigned long size, unsigned long align,
unsigned long flags)
{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
struct sg_table *table;
struct scatterlist *sg;
int ret;
struct list_head pages;
struct page_info *info, *tmp_info;
int i = 0;
- long size_remaining = PAGE_ALIGN(size);
+ unsigned long size_remaining = PAGE_ALIGN(size);
+ unsigned int max_order = orders[0];
bool split_pages = ion_buffer_fault_user_mappings(buffer);
-
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
- info = alloc_largest_available(size_remaining, split_pages);
+ info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
if (!info)
goto err;
list_add_tail(&info->list, &pages);
size_remaining -= (1 << info->order) * PAGE_SIZE;
+ max_order = info->order;
i++;
}
@@ -110,7 +192,6 @@
sg = table->sgl;
list_for_each_entry_safe(info, tmp_info, &pages, list) {
struct page *page = info->page;
-
if (split_pages) {
for (i = 0; i < (1 << info->order); i++) {
sg_set_page(sg, page + i, PAGE_SIZE, 0);
@@ -122,12 +203,9 @@
sg = sg_next(sg);
}
list_del(&info->list);
- kunmap(page);
+ kfree(info);
}
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
-
buffer->priv_virt = table;
atomic_add(size, &system_heap_allocated);
return 0;
@@ -135,28 +213,34 @@
kfree(table);
err:
list_for_each_entry(info, &pages, list) {
- if (split_pages)
- for (i = 0; i < (1 << info->order); i++)
- __free_page(info->page + i);
- else
- __free_pages(info->page, info->order);
-
- kunmap(info->page);
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
}
return -ENOMEM;
}
void ion_system_heap_free(struct ion_buffer *buffer)
{
- int i;
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table = buffer->sg_table;
+ bool cached = ion_buffer_cached(buffer);
struct scatterlist *sg;
- struct sg_table *table = buffer->priv_virt;
+ LIST_HEAD(pages);
+ int i;
+
+ /* uncached pages come from the page pools, zero them before returning
+ for security purposes (other allocations are zeroed at alloc time) */
+ if (!cached)
+ ion_heap_buffer_zero(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
- __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
- if (buffer->sg_table)
- sg_free_table(buffer->sg_table);
- kfree(buffer->sg_table);
+ free_buffer_page(sys_heap, buffer, sg_page(sg),
+ get_order(sg_dma_len(sg)));
+ sg_free_table(table);
+ kfree(table);
atomic_sub(buffer->size, &system_heap_allocated);
}
@@ -172,156 +256,6 @@
return;
}
-void *ion_system_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct scatterlist *sg;
- int i, j;
- void *vaddr;
- pgprot_t pgprot;
- struct sg_table *table = buffer->priv_virt;
- int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- struct page **pages = kzalloc(sizeof(struct page *) * npages,
- GFP_KERNEL);
- struct page **tmp = pages;
-
- if (buffer->flags & ION_FLAG_CACHED)
- pgprot = PAGE_KERNEL;
- else
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
- struct page *page = sg_page(sg);
- BUG_ON(i >= npages);
- for (j = 0; j < npages_this_entry; j++) {
- *(tmp++) = page++;
- }
- }
- vaddr = vmap(pages, npages, VM_MAP, pgprot);
- kfree(pages);
-
- return vaddr;
-}
-
-void ion_system_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- vunmap(buffer->vaddr);
-}
-
-void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
-int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct sg_table *table = buffer->priv_virt;
- unsigned long addr = vma->vm_start;
- unsigned long offset = vma->vm_pgoff;
- struct scatterlist *sg;
- int i;
-
- if (!ION_IS_CACHED(buffer->flags)) {
- pr_err("%s: cannot map system heap uncached\n", __func__);
- return -EINVAL;
- }
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- if (offset) {
- offset--;
- continue;
- }
- remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
- sg_dma_len(sg), vma->vm_page_prot);
- addr += sg_dma_len(sg);
- }
- return 0;
-}
-
-int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
- void *vaddr, unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- if (!vaddr)
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- else
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- if (!vaddr)
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- else
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- if (!vaddr) {
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- } else {
- dmac_flush_range(vaddr, vaddr + length);
- }
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- if (system_heap_has_outer_cache) {
- unsigned long pstart;
- struct sg_table *table = buffer->priv_virt;
- struct scatterlist *sg;
- int i;
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- pstart = page_to_phys(page);
- /*
- * If page -> phys is returning NULL, something
- * has really gone wrong...
- */
- if (!pstart) {
- WARN(1, "Could not translate virtual address to physical address\n");
- return -EINVAL;
- }
- outer_cache_op(pstart, pstart + PAGE_SIZE);
- }
- }
- return 0;
-}
-
static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *unused)
{
@@ -331,111 +265,65 @@
return 0;
}
-int ion_system_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- unsigned long extra_iova_addr;
- struct sg_table *table = buffer->priv_virt;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- if (!ION_IS_CACHED(flags))
- return -EINVAL;
-
- if (!msm_use_iommu())
- return -EINVAL;
-
- data->mapped_size = iova_length;
- extra = iova_length - buffer->size;
-
- /* Use the biggest alignment to allow bigger IOMMU mappings.
- * Use the first entry since the first entry will always be the
- * biggest entry. To take advantage of bigger mapping sizes both the
- * VA and PA addresses have to be aligned to the biggest size.
- */
- if (table->sgl->length > align)
- align = table->sgl->length;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, table->sgl,
- buffer->size, prot);
-
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- extra_iova_addr = data->iova_addr + buffer->size;
- if (extra) {
- unsigned long phys_addr = sg_phys(table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-static struct ion_heap_ops vmalloc_ops = {
+static struct ion_heap_ops system_heap_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
.map_dma = ion_system_heap_map_dma,
.unmap_dma = ion_system_heap_unmap_dma,
- .map_kernel = ion_system_heap_map_kernel,
- .unmap_kernel = ion_system_heap_unmap_kernel,
- .map_user = ion_system_heap_map_user,
- .cache_op = ion_system_heap_cache_ops,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
.print_debug = ion_system_print_debug,
- .map_iommu = ion_system_heap_map_iommu,
- .unmap_iommu = ion_system_heap_unmap_iommu,
};
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
- struct ion_heap *heap;
+ struct ion_system_heap *heap;
+ int i;
- heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
- heap->ops = &vmalloc_ops;
- heap->type = ION_HEAP_TYPE_SYSTEM;
- system_heap_has_outer_cache = pheap->has_outer_cache;
- return heap;
+ heap->heap.ops = &system_heap_ops;
+ heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+ heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+ if (!heap->pools)
+ goto err_alloc_pools;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_pool;
+ heap->pools[i] = pool;
+ }
+ return &heap->heap;
+err_create_pool:
+ for (i = 0; i < num_orders; i++)
+ if (heap->pools[i])
+ ion_page_pool_destroy(heap->pools[i]);
+ kfree(heap->pools);
+err_alloc_pools:
+ kfree(heap);
+ return ERR_PTR(-ENOMEM);
}
void ion_system_heap_destroy(struct ion_heap *heap)
{
- kfree(heap);
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ ion_page_pool_destroy(sys_heap->pools[i]);
+ kfree(sys_heap->pools);
+ kfree(sys_heap);
}
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
@@ -508,46 +396,6 @@
}
}
-int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
- struct ion_buffer *buffer, void *vaddr,
- unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- if (system_heap_contig_has_outer_cache) {
- unsigned long pstart;
-
- pstart = virt_to_phys(buffer->priv_virt) + offset;
- if (!pstart) {
- WARN(1, "Could not do virt to phys translation on %p\n",
- buffer->priv_virt);
- return -EINVAL;
- }
-
- outer_cache_op(pstart, pstart + PAGE_SIZE);
- }
-
- return 0;
-}
-
static int ion_system_contig_print_debug(struct ion_heap *heap,
struct seq_file *s,
const struct rb_root *unused)
@@ -558,84 +406,6 @@
return 0;
}
-int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- struct scatterlist *sglist = 0;
- struct page *page = 0;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- if (!ION_IS_CACHED(flags))
- return -EINVAL;
-
- if (!msm_use_iommu()) {
- data->iova_addr = virt_to_phys(buffer->vaddr);
- return 0;
- }
-
- data->mapped_size = iova_length;
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
- page = virt_to_page(buffer->vaddr);
-
- sglist = vmalloc(sizeof(*sglist));
- if (!sglist)
- goto out1;
-
- sg_init_table(sglist, 1);
- sg_set_page(sglist, page, buffer->size, 0);
-
- ret = iommu_map_range(domain, data->iova_addr, sglist,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(sglist);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- vfree(sglist);
- return ret;
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-
-out1:
- vfree(sglist);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
@@ -654,13 +424,10 @@
.phys = ion_system_contig_heap_phys,
.map_dma = ion_system_contig_heap_map_dma,
.unmap_dma = ion_system_contig_heap_unmap_dma,
- .map_kernel = ion_system_contig_heap_map_kernel,
- .unmap_kernel = ion_system_contig_heap_unmap_kernel,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_system_contig_heap_map_user,
- .cache_op = ion_system_contig_heap_cache_ops,
.print_debug = ion_system_contig_print_debug,
- .map_iommu = ion_system_contig_heap_map_iommu,
- .unmap_iommu = ion_system_heap_unmap_iommu,
};
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
@@ -672,7 +439,6 @@
return ERR_PTR(-ENOMEM);
heap->ops = &kmalloc_ops;
heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
- system_heap_contig_has_outer_cache = pheap->has_outer_cache;
return heap;
}
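
As a side note on the pool setup above: ion_system_heap_create() builds one ion_page_pool per allocation order and selects a different set of GFP flags for orders above 4. A minimal standalone sketch of that selection policy follows (hypothetical order table and 4 KiB pages assumed; not the driver's actual values):

#include <stdio.h>

#define NUM_ORDERS 3

/* Hypothetical order table for illustration; the driver's real table is
 * defined elsewhere in ion_system_heap.c and is not shown in this hunk. */
static const unsigned int orders[NUM_ORDERS] = { 8, 4, 0 };

/* Orders above 4 get a separate set of GFP flags in the heap above. */
static const char *gfp_class(unsigned int order)
{
	return (order > 4) ? "high_order_gfp_flags" : "low_order_gfp_flags";
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		printf("pool[%d]: order %u -> %s (%lu KiB per block)\n",
		       i, orders[i], gfp_class(orders[i]),
		       4UL << orders[i]); /* assumes 4 KiB pages */
	return 0;
}
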
diff --git a/drivers/gpu/ion/msm/Makefile b/drivers/gpu/ion/msm/Makefile
index 1893405..becdb02 100644
--- a/drivers/gpu/ion/msm/Makefile
+++ b/drivers/gpu/ion/msm/Makefile
@@ -1 +1 @@
-obj-y += msm_ion.o ion_cp_common.o
+obj-y += msm_ion.o ion_cp_common.o ion_iommu_map.o
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
index 7d54cfa..58eca24 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.c
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -15,6 +15,7 @@
#include <linux/memory_alloc.h>
#include <linux/types.h>
#include <mach/scm.h>
+#include <linux/highmem.h>
#include "../ion_priv.h"
#include "ion_cp_common.h"
@@ -157,6 +158,8 @@
request.chunks.chunk_list_size = nchunks;
request.chunks.chunk_size = chunk_size;
+ kmap_flush_unused();
+ kmap_atomic_flush_unused();
return scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
&request, sizeof(request), &resp, sizeof(resp));
diff --git a/drivers/gpu/ion/msm/ion_iommu_map.c b/drivers/gpu/ion/msm/ion_iommu_map.c
new file mode 100644
index 0000000..5ce03db
--- /dev/null
+++ b/drivers/gpu/ion/msm/ion_iommu_map.c
@@ -0,0 +1,538 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/iommu.h>
+#include <linux/ion.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <mach/iommu_domains.h>
+
+enum {
+ DI_PARTITION_NUM = 0,
+ DI_DOMAIN_NUM = 1,
+ DI_MAX,
+};
+
+#define iommu_map_domain(__m) ((__m)->domain_info[1])
+#define iommu_map_partition(__m) ((__m)->domain_info[0])
+
+/**
+ * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
+ * @iova_addr - iommu virtual address
+ * @node - rb node to exist in the buffer's tree of iommu mappings
+ * @domain_info - contains the partition number and domain number
+ * domain_info[1] = domain number
+ * domain_info[0] = partition number
+ * @ref - for reference counting this mapping
+ * @mapped_size - size of the iova space mapped
+ * (may not be the same as the buffer size)
+ * @flags - iommu domain/partition specific flags.
+ *
+ * Represents a mapping of one ion buffer to a particular iommu domain
+ * and address range. There may exist other mappings of this buffer in
+ * different domains or address ranges. All mappings will have the same
+ * cacheability and security.
+ */
+struct ion_iommu_map {
+ unsigned long iova_addr;
+ struct rb_node node;
+ union {
+ int domain_info[DI_MAX];
+ uint64_t key;
+ };
+ struct ion_iommu_meta *meta;
+ struct kref ref;
+ int mapped_size;
+ unsigned long flags;
+};
+
+
+struct ion_iommu_meta {
+ struct rb_node node;
+ struct ion_handle *handle;
+ struct rb_root iommu_maps;
+ struct kref ref;
+ struct sg_table *table;
+ unsigned long size;
+ struct mutex lock;
+};
+
+static struct rb_root iommu_root;
+DEFINE_MUTEX(msm_iommu_map_mutex);
+
+static void ion_iommu_meta_add(struct ion_iommu_meta *meta)
+{
+ struct rb_root *root = &iommu_root;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_meta *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_meta, node);
+
+ if (meta->handle < entry->handle) {
+ p = &(*p)->rb_left;
+ } else if (meta->handle > entry->handle) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: handle %p already exists\n", __func__,
+ entry->handle);
+ BUG();
+ }
+ }
+
+ rb_link_node(&meta->node, parent, p);
+ rb_insert_color(&meta->node, root);
+}
+
+
+static struct ion_iommu_meta *ion_iommu_meta_lookup(struct ion_handle *handle)
+{
+ struct rb_root *root = &iommu_root;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_meta *entry = NULL;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_meta, node);
+
+ if (handle < entry->handle)
+ p = &(*p)->rb_left;
+ else if (handle > entry->handle)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ return NULL;
+}
+
+
+
+static void ion_iommu_add(struct ion_iommu_meta *meta,
+ struct ion_iommu_map *iommu)
+{
+ struct rb_node **p = &meta->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (iommu->key < entry->key) {
+ p = &(*p)->rb_left;
+ } else if (iommu->key > entry->key) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: handle %p already has mapping for domain %d and partition %d\n",
+ __func__,
+ meta->handle,
+ iommu_map_domain(iommu),
+ iommu_map_partition(iommu));
+ BUG();
+ }
+ }
+
+ rb_link_node(&iommu->node, parent, p);
+ rb_insert_color(&iommu->node, &meta->iommu_maps);
+}
+
+
+static struct ion_iommu_map *ion_iommu_lookup(
+ struct ion_iommu_meta *meta,
+ unsigned int domain_no,
+ unsigned int partition_no)
+{
+ struct rb_node **p = &meta->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+ uint64_t key = domain_no;
+ key = key << 32 | partition_no;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (key < entry->key)
+ p = &(*p)->rb_left;
+ else if (key > entry->key)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ return NULL;
+}
+
+static int ion_iommu_map_iommu(struct ion_iommu_meta *meta,
+ struct ion_iommu_map *data,
+ unsigned int domain_num,
+ unsigned int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ struct iommu_domain *domain;
+ int ret = 0;
+ unsigned long extra, size;
+ struct sg_table *table;
+ int prot = IOMMU_WRITE | IOMMU_READ;
+
+
+ size = meta->size;
+ data->mapped_size = iova_length;
+ extra = iova_length - size;
+ table = meta->table;
+
+ /* Use the biggest alignment to allow bigger IOMMU mappings.
+ * Use the first entry since the first entry will always be the
+ * biggest entry. To take advantage of bigger mapping sizes both the
+ * VA and PA addresses have to be aligned to the biggest size.
+ */
+ if (sg_dma_len(table->sgl) > align)
+ align = sg_dma_len(table->sgl);
+
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
+
+ if (ret)
+ goto out;
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ ret = iommu_map_range(domain, data->iova_addr,
+ table->sgl,
+ size, prot);
+ if (ret) {
+ pr_err("%s: could not map %lx in domain %p\n",
+ __func__, data->iova_addr, domain);
+ goto out1;
+ }
+
+ if (extra) {
+ unsigned long extra_iova_addr = data->iova_addr + size;
+ unsigned long phys_addr = sg_phys(table->sgl);
+ ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+ extra, SZ_4K, prot);
+ if (ret)
+ goto out2;
+ }
+ return ret;
+
+out2:
+ iommu_unmap_range(domain, data->iova_addr, size);
+out1:
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ size);
+
+out:
+
+ return ret;
+}
+
+static void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+ unsigned int domain_num;
+ unsigned int partition_num;
+ struct iommu_domain *domain;
+
+ BUG_ON(!msm_use_iommu());
+
+ domain_num = iommu_map_domain(data);
+ partition_num = iommu_map_partition(data);
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+ return;
+ }
+
+ iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+
+ return;
+}
+
+
+
+static struct ion_iommu_map *__ion_iommu_map(struct ion_iommu_meta *meta,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long flags,
+ unsigned long *iova)
+{
+ struct ion_iommu_map *data;
+ int ret;
+
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ iommu_map_domain(data) = domain_num;
+ iommu_map_partition(data) = partition_num;
+
+ ret = ion_iommu_map_iommu(meta, data,
+ domain_num,
+ partition_num,
+ align,
+ iova_length,
+ flags);
+
+ if (ret)
+ goto out;
+
+ kref_init(&data->ref);
+ *iova = data->iova_addr;
+ data->meta = meta;
+
+ ion_iommu_add(meta, data);
+
+ return data;
+
+out:
+ kfree(data);
+ return ERR_PTR(ret);
+}
+
+static struct ion_iommu_meta *ion_iommu_meta_create(struct ion_handle *handle,
+ struct sg_table *table,
+ unsigned long size)
+{
+ struct ion_iommu_meta *meta;
+
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+
+ if (!meta)
+ return ERR_PTR(-ENOMEM);
+
+ meta->handle = handle;
+ meta->table = table;
+ meta->size = size;
+ kref_init(&meta->ref);
+ mutex_init(&meta->lock);
+ ion_iommu_meta_add(meta);
+
+ return meta;
+}
+
+static void ion_iommu_meta_destroy(struct kref *kref)
+{
+ struct ion_iommu_meta *meta = container_of(kref, struct ion_iommu_meta,
+ ref);
+
+
+ rb_erase(&meta->node, &iommu_root);
+ kfree(meta);
+}
+
+static void ion_iommu_meta_put(struct ion_iommu_meta *meta)
+{
+ /*
+ * Need to lock here to prevent race against map/unmap
+ */
+ mutex_lock(&msm_iommu_map_mutex);
+ kref_put(&meta->ref, ion_iommu_meta_destroy);
+ mutex_unlock(&msm_iommu_map_mutex);
+}
+
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags, unsigned long iommu_flags)
+{
+ struct ion_iommu_map *iommu_map;
+ struct ion_iommu_meta *iommu_meta = NULL;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret = 0;
+ int i;
+ unsigned long size = 0;
+
+ if (IS_ERR_OR_NULL(client)) {
+ pr_err("%s: client pointer is invalid\n", __func__);
+ return -EINVAL;
+ }
+ if (IS_ERR_OR_NULL(handle)) {
+ pr_err("%s: handle pointer is invalid\n", __func__);
+ return -EINVAL;
+ }
+
+ table = ion_sg_table(client, handle);
+
+ if (IS_ERR_OR_NULL(table))
+ return PTR_ERR(table);
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ size += sg_dma_len(sg);
+
+ if (!msm_use_iommu()) {
+ unsigned long pa = sg_dma_address(table->sgl);
+ if (pa == 0)
+ pa = sg_phys(table->sgl);
+ *iova = pa;
+ *buffer_size = size;
+ }
+ /*
+ * If clients don't want a custom iova length, just use whatever
+ * the buffer size is
+ */
+ if (!iova_length)
+ iova_length = size;
+
+ if (size > iova_length) {
+ pr_debug("%s: iova length %lx is not at least buffer size %lx\n",
+ __func__, iova_length, size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (size & ~PAGE_MASK) {
+ pr_debug("%s: buffer size %lx is not aligned to %lx", __func__,
+ size, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (iova_length & ~PAGE_MASK) {
+ pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
+ iova_length, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&msm_iommu_map_mutex);
+ iommu_meta = ion_iommu_meta_lookup(handle);
+
+ if (!iommu_meta)
+ iommu_meta = ion_iommu_meta_create(handle, table, size);
+ else
+ kref_get(&iommu_meta->ref);
+
+ mutex_unlock(&msm_iommu_map_mutex);
+
+ iommu_map = ion_iommu_lookup(iommu_meta, domain_num, partition_num);
+ if (!iommu_map) {
+ iommu_map = __ion_iommu_map(iommu_meta, domain_num,
+ partition_num, align, iova_length,
+ flags, iova);
+ if (!IS_ERR_OR_NULL(iommu_map)) {
+ iommu_map->flags = iommu_flags;
+ ret = 0;
+ } else {
+ ret = PTR_ERR(iommu_map);
+ goto out;
+ }
+ } else {
+ if (iommu_map->flags != iommu_flags) {
+ pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
+ __func__, handle,
+ iommu_map->flags, iommu_flags);
+ ret = -EINVAL;
+ goto out;
+ } else if (iommu_map->mapped_size != iova_length) {
+ pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
+ __func__, handle, iommu_map->mapped_size,
+ iova_length);
+ ret = -EINVAL;
+ goto out;
+ } else {
+ kref_get(&iommu_map->ref);
+ *iova = iommu_map->iova_addr;
+ }
+ }
+ *buffer_size = size;
+ return ret;
+
+out:
+
+ ion_iommu_meta_put(iommu_meta);
+ return ret;
+}
+EXPORT_SYMBOL(ion_map_iommu);
+
+
+static void ion_iommu_map_release(struct kref *kref)
+{
+ struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
+ ref);
+ struct ion_iommu_meta *meta = map->meta;
+
+ rb_erase(&map->node, &meta->iommu_maps);
+ ion_iommu_heap_unmap_iommu(map);
+ kfree(map);
+}
+
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num)
+{
+ struct ion_iommu_map *iommu_map;
+ struct ion_iommu_meta *meta;
+
+ if (IS_ERR_OR_NULL(client)) {
+ pr_err("%s: client pointer is invalid\n", __func__);
+ return;
+ }
+ if (IS_ERR_OR_NULL(handle)) {
+ pr_err("%s: handle pointer is invalid\n", __func__);
+ return;
+ }
+
+
+ mutex_lock(&msm_iommu_map_mutex);
+ meta = ion_iommu_meta_lookup(handle);
+ if (!meta) {
+ WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
+ domain_num, partition_num, handle);
+		mutex_unlock(&msm_iommu_map_mutex);
+ goto out;
+
+ }
+ mutex_unlock(&msm_iommu_map_mutex);
+
+ mutex_lock(&meta->lock);
+ iommu_map = ion_iommu_lookup(meta, domain_num, partition_num);
+
+ if (!iommu_map) {
+ WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
+ domain_num, partition_num, handle);
+ mutex_unlock(&meta->lock);
+ goto out;
+ }
+
+ kref_put(&iommu_map->ref, ion_iommu_map_release);
+ mutex_unlock(&meta->lock);
+
+ ion_iommu_meta_put(meta);
+
+out:
+ return;
+}
+EXPORT_SYMBOL(ion_unmap_iommu);
+
+
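
To summarize the new exported API in ion_iommu_map.c: callers hand in a client/handle pair plus a (domain, partition) pair; the helper packs that pair into a 64-bit rb-tree key, refcounts the mapping, and returns the IOVA. A hedged usage sketch (the domain and partition identifiers below are placeholders, the header list depends on the tree, and an iova_length of 0 means "use the buffer size"):

#include <linux/ion.h>
#include <mach/iommu_domains.h>

/* Illustrative only: map an existing ION handle into an SMMU domain,
 * use the returned IOVA, then drop the mapping. EXAMPLE_DOMAIN and
 * EXAMPLE_PARTITION are placeholders for real domain/partition numbers. */
static int example_map_buffer(struct ion_client *client,
			      struct ion_handle *handle)
{
	unsigned long iova, buffer_size;
	int ret;

	ret = ion_map_iommu(client, handle,
			    EXAMPLE_DOMAIN, EXAMPLE_PARTITION,
			    SZ_4K,	/* align */
			    0,		/* iova_length: 0 => buffer size */
			    &iova, &buffer_size,
			    0, 0);	/* flags, iommu_flags */
	if (ret)
		return ret;

	/* ... program iova into the device ... */

	ion_unmap_iommu(client, handle, EXAMPLE_DOMAIN, EXAMPLE_PARTITION);
	return 0;
}
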
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index a7dcd19..f43d276 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -26,8 +26,10 @@
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
+#include <linux/dma-mapping.h>
#include <mach/ion.h>
#include <mach/msm_memtypes.h>
+#include <asm/cacheflush.h>
#include "../ion_priv.h"
#include "ion_cp_common.h"
@@ -126,7 +128,7 @@
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
const char *name)
{
- return ion_client_create(idev, heap_mask, name);
+ return ion_client_create(idev, name);
}
EXPORT_SYMBOL(msm_ion_client_create);
@@ -177,6 +179,210 @@
}
EXPORT_SYMBOL(msm_ion_do_cache_op);
+static int ion_no_pages_cache_ops(struct ion_client *client,
+ struct ion_handle *handle,
+ void *vaddr,
+ unsigned int offset, unsigned int length,
+ unsigned int cmd)
+{
+ void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
+ unsigned int size_to_vmap, total_size;
+ int i, j, ret;
+ void *ptr = NULL;
+ ion_phys_addr_t buff_phys = 0;
+ ion_phys_addr_t buff_phys_start = 0;
+ size_t buf_length = 0;
+
+ ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
+ if (ret)
+ return -EINVAL;
+
+ buff_phys = buff_phys_start;
+
+ if (!vaddr) {
+ /*
+ * Split the vmalloc space into smaller regions in
+ * order to clean and/or invalidate the cache.
+ */
+ size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
+ total_size = buf_length;
+
+ for (i = 0; i < total_size; i += size_to_vmap) {
+ size_to_vmap = min(size_to_vmap, total_size - i);
+ for (j = 0; j < 10 && size_to_vmap; ++j) {
+ ptr = ioremap(buff_phys, size_to_vmap);
+ if (ptr) {
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(ptr,
+ ptr + size_to_vmap);
+ outer_cache_op =
+ outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+ buff_phys += size_to_vmap;
+ break;
+ } else {
+ size_to_vmap >>= 1;
+ }
+ }
+ if (!ptr) {
+ pr_err("Couldn't io-remap the memory\n");
+ return -EINVAL;
+ }
+ iounmap(ptr);
+ }
+ } else {
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(vaddr, vaddr + length);
+ outer_cache_op = outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(vaddr, vaddr + length);
+ outer_cache_op = outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(vaddr, vaddr + length);
+ outer_cache_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ outer_cache_op(buff_phys_start + offset,
+ buff_phys_start + offset + length);
+
+ return 0;
+}
+
+#ifdef CONFIG_OUTER_CACHE
+static void ion_pages_outer_cache_op(void (*op)(phys_addr_t, phys_addr_t),
+ struct sg_table *table)
+{
+ unsigned long pstart;
+ struct scatterlist *sg;
+ int i;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ pstart = page_to_phys(page);
+ /*
+ * If page -> phys is returning NULL, something
+ * has really gone wrong...
+ */
+ if (!pstart) {
+ WARN(1, "Could not translate virtual address to physical address\n");
+ return;
+ }
+ op(pstart, pstart + PAGE_SIZE);
+ }
+}
+#else
+static void ion_pages_outer_cache_op(void (*op)(phys_addr_t, phys_addr_t),
+ struct sg_table *table)
+{
+
+}
+#endif
+
+static int ion_pages_cache_ops(struct ion_client *client,
+ struct ion_handle *handle,
+ void *vaddr, unsigned int offset, unsigned int length,
+ unsigned int cmd)
+{
+ void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+ struct sg_table *table = NULL;
+
+ table = ion_sg_table(client, handle);
+ if (IS_ERR_OR_NULL(table))
+ return PTR_ERR(table);
+
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ if (!vaddr)
+ dma_sync_sg_for_device(NULL, table->sgl,
+ table->nents, DMA_TO_DEVICE);
+ else
+ dmac_clean_range(vaddr, vaddr + length);
+ outer_cache_op = outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ if (!vaddr)
+ dma_sync_sg_for_cpu(NULL, table->sgl,
+ table->nents, DMA_FROM_DEVICE);
+ else
+ dmac_inv_range(vaddr, vaddr + length);
+ outer_cache_op = outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ if (!vaddr) {
+ dma_sync_sg_for_device(NULL, table->sgl,
+ table->nents, DMA_TO_DEVICE);
+ dma_sync_sg_for_cpu(NULL, table->sgl,
+ table->nents, DMA_FROM_DEVICE);
+ } else {
+ dmac_flush_range(vaddr, vaddr + length);
+ }
+ outer_cache_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ion_pages_outer_cache_op(outer_cache_op, table);
+
+ return 0;
+}
+
+int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+ void *uaddr, unsigned long offset, unsigned long len,
+ unsigned int cmd)
+{
+ int ret = -EINVAL;
+ unsigned long flags;
+ struct sg_table *table;
+ struct page *page;
+
+ ret = ion_handle_get_flags(client, handle, &flags);
+ if (ret)
+ return -EINVAL;
+
+ if (!ION_IS_CACHED(flags))
+ return 0;
+
+ table = ion_sg_table(client, handle);
+
+ if (IS_ERR_OR_NULL(table))
+ return PTR_ERR(table);
+
+ page = sg_page(table->sgl);
+
+ if (page)
+ ret = ion_pages_cache_ops(client, handle, uaddr,
+ offset, len, cmd);
+ else
+ ret = ion_no_pages_cache_ops(client, handle, uaddr,
+ offset, len, cmd);
+
+ return ret;
+
+}
+
static ion_phys_addr_t msm_ion_get_base(unsigned long size, int memory_type,
unsigned int align)
{
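
For context on the new cache path in msm_ion.c: ion_do_cache_op() first checks whether the buffer is cached, then dispatches to ion_pages_cache_ops() when the sg_table carries struct pages, and to ion_no_pages_cache_ops() (physical address / ioremap based) otherwise. A hedged sketch of a caller, with handle setup elided; passing a NULL vaddr takes the sg- or ioremap-based paths shown above:

#include <linux/ion.h>
#include <mach/ion.h>

/* Illustrative only: invalidate a cached ION buffer before the CPU reads
 * data a device has just written into it. */
static int example_invalidate(struct ion_client *client,
			      struct ion_handle *handle, unsigned long len)
{
	return ion_do_cache_op(client, handle,
			       NULL /* no kernel mapping */,
			       0 /* offset */, len,
			       ION_IOC_INV_CACHES);
}
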
diff --git a/drivers/gpu/ion/msm_ion_priv.h b/drivers/gpu/ion/msm_ion_priv.h
index 2729ce2..2de4e8a 100644
--- a/drivers/gpu/ion/msm_ion_priv.h
+++ b/drivers/gpu/ion/msm_ion_priv.h
@@ -26,42 +26,6 @@
#include <linux/iommu.h>
#include <linux/seq_file.h>
-enum {
- DI_PARTITION_NUM = 0,
- DI_DOMAIN_NUM = 1,
- DI_MAX,
-};
-
-/**
- * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
- * @iova_addr - iommu virtual address
- * @node - rb node to exist in the buffer's tree of iommu mappings
- * @domain_info - contains the partition number and domain number
- * domain_info[1] = domain number
- * domain_info[0] = partition number
- * @ref - for reference counting this mapping
- * @mapped_size - size of the iova space mapped
- * (may not be the same as the buffer size)
- * @flags - iommu domain/partition specific flags.
- *
- * Represents a mapping of one ion buffer to a particular iommu domain
- * and address range. There may exist other mappings of this buffer in
- * different domains or address ranges. All mappings will have the same
- * cacheability and security.
- */
-struct ion_iommu_map {
- unsigned long iova_addr;
- struct rb_node node;
- union {
- int domain_info[DI_MAX];
- uint64_t key;
- };
- struct ion_buffer *buffer;
- struct kref ref;
- int mapped_size;
- unsigned long flags;
-};
-
/**
* struct mem_map_data - represents information about the memory map for a heap
* @node: rb node used to store in the tree of mem_map_data
@@ -79,9 +43,6 @@
const char *client_name;
};
-#define iommu_map_domain(__m) ((__m)->domain_info[1])
-#define iommu_map_partition(__m) ((__m)->domain_info[0])
-
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
void ion_iommu_heap_destroy(struct ion_heap *);
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 5589ff0..439ecdc 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2782,7 +2782,7 @@
if (device->state == KGSL_STATE_ACTIVE) {
 		/* Is the ring buffer empty? */
GSL_RB_GET_READPTR(rb, &rb->rptr);
- if (!device->active_cnt && (rb->rptr == rb->wptr)) {
+ if (rb->rptr == rb->wptr) {
/*
* Are there interrupts pending? If so then pretend we
 			 * are not idle - this avoids the possibility that we go
@@ -3040,7 +3040,7 @@
if (context && device->state != KGSL_STATE_SLUMBER) {
adreno_ringbuffer_issuecmds(device, context->devctxt,
- KGSL_CMD_FLAGS_NONE, NULL, 0);
+ KGSL_CMD_FLAGS_GET_INT, NULL, 0);
}
}
@@ -3596,15 +3596,20 @@
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int cycles;
+ unsigned int cycles = 0;
- /* Get the busy cycles counted since the counter was last reset */
- /* Calling this function also resets and restarts the counter */
+ /*
+ * Get the busy cycles counted since the counter was last reset.
+ * If we're not currently active, there shouldn't have been
+ * any cycles since the last time this function was called.
+ */
+ if (device->state == KGSL_STATE_ACTIVE)
+ cycles = adreno_dev->gpudev->busy_cycles(adreno_dev);
- cycles = adreno_dev->gpudev->busy_cycles(adreno_dev);
-
- /* In order to calculate idle you have to have run the algorithm *
- * at least once to get a start time. */
+ /*
+ * In order to calculate idle you have to have run the algorithm
+ * at least once to get a start time.
+ */
if (pwr->time != 0) {
s64 tmp = ktime_to_us(ktime_get());
stats->total_time = tmp - pwr->time;
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 90d6027..3935cd8 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -34,6 +34,7 @@
#define KGSL_CMD_FLAGS_NONE 0x00000000
#define KGSL_CMD_FLAGS_PMODE 0x00000001
#define KGSL_CMD_FLAGS_INTERNAL_ISSUE 0x00000002
+#define KGSL_CMD_FLAGS_GET_INT 0x00000004
#define KGSL_CMD_FLAGS_EOF 0x00000100
/* Command identifiers */
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index ef599e9..980ff13 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -93,4 +93,7 @@
adreno_dev->ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY;
debugfs_create_u32("ft_pagefault_policy", 0644, device->d_debugfs,
&adreno_dev->ft_pf_policy);
+
+ debugfs_create_u32("active_cnt", 0444, device->d_debugfs,
+ &device->active_cnt);
}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index a4bb4fa..628d38c 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -544,13 +544,15 @@
/*
* if the context was not created with per context timestamp
* support, we must use the global timestamp since issueibcmds
- * will be returning that one.
+	 * will be returning that one. Internal submissions also use
+	 * the global timestamp.
*/
- if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+ if ((context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) &&
+ !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
context_id = context->id;
- if ((context && context->flags & CTXT_FLAGS_USER_GENERATED_TS) &&
- (!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))) {
+ if ((context && (context->flags & CTXT_FLAGS_USER_GENERATED_TS)) &&
+ !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
if (timestamp_cmp(rb->timestamp[context_id],
timestamp) >= 0) {
KGSL_DRV_ERR(rb->device,
@@ -574,6 +576,11 @@
/* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
total_sizedwords += context ? 13 : 0;
+ if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
+ (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
+ KGSL_CMD_FLAGS_GET_INT)))
+ total_sizedwords += 2;
+
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
@@ -584,11 +591,9 @@
total_sizedwords += 3; /* sop timestamp */
total_sizedwords += 4; /* eop timestamp */
- if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS &&
- !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+ if (KGSL_MEMSTORE_GLOBAL != context_id)
total_sizedwords += 3; /* global timestamp without cache
* flush for non-zero context */
- }
if (adreno_is_a20x(adreno_dev))
total_sizedwords += 2; /* CACHE_FLUSH */
@@ -619,12 +624,12 @@
/* always increment the global timestamp. once. */
rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;
- /* Do not update context's timestamp for internal submissions */
- if (context && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
- if (context_id == KGSL_MEMSTORE_GLOBAL)
- rb->timestamp[context->id] =
- rb->timestamp[KGSL_MEMSTORE_GLOBAL];
- else if (context->flags & CTXT_FLAGS_USER_GENERATED_TS)
+ /*
+	 * If the global timestamp is selected then this submission is not
+	 * using a per context timestamp
+ */
+ if (context_id != KGSL_MEMSTORE_GLOBAL) {
+ if (context->flags & CTXT_FLAGS_USER_GENERATED_TS)
rb->timestamp[context_id] = timestamp;
else
rb->timestamp[context_id]++;
@@ -695,9 +700,7 @@
KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
- if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS
- && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
-
+ if (KGSL_MEMSTORE_GLOBAL != context_id) {
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
@@ -749,6 +752,19 @@
GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
}
+ /*
+	 * If per context timestamps are enabled and any of the kgsl
+	 * internal commands want an interrupt to be generated, trigger it
+	 */
+ if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
+ (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
+ KGSL_CMD_FLAGS_GET_INT))) {
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_INTERRUPT, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ CP_INT_CNTL__RB_INT_MASK);
+ }
+
if (adreno_is_a3xx(adreno_dev)) {
/* Dummy set-constant to trigger context rollover */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
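
The ringbuffer hunks above boil down to one rule: a submission updates the per-context timestamp slot only when the context was created with per-context timestamp support and the command is not a KGSL-internal issue; everything else lands in the global slot. A simplified sketch of that selection (field and flag names follow the hunk; illustrative only):

static unsigned int example_select_context_id(struct adreno_context *context,
					      unsigned int flags)
{
	/* Per-context slot only for user submissions on contexts created
	 * with per-context timestamp support. */
	if (context &&
	    (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
	    !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
		return context->id;

	return KGSL_MEMSTORE_GLOBAL;
}
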
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 53ef392..08dd886 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -413,27 +413,6 @@
kfree(context);
}
-static void kgsl_check_idle_locked(struct kgsl_device *device)
-{
- if (device->pwrctrl.nap_allowed == true &&
- device->state == KGSL_STATE_ACTIVE &&
- device->requested_state == KGSL_STATE_NONE) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
- kgsl_pwrscale_idle(device);
- if (kgsl_pwrctrl_sleep(device) != 0)
- mod_timer(&device->idle_timer,
- jiffies +
- device->pwrctrl.interval_timeout);
- }
-}
-
-static void kgsl_check_idle(struct kgsl_device *device)
-{
- mutex_lock(&device->mutex);
- kgsl_check_idle_locked(device);
- mutex_unlock(&device->mutex);
-}
-
struct kgsl_device *kgsl_get_device(int dev_idx)
{
int i;
@@ -496,13 +475,12 @@
policy_saved = device->pwrscale.policy;
device->pwrscale.policy = NULL;
kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
- /* Make sure no user process is waiting for a timestamp *
- * before supending */
- if (device->active_cnt != 0) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->suspend_gate);
- mutex_lock(&device->mutex);
- }
+ /*
+ * Make sure no user process is waiting for a timestamp
+	 * before suspending.
+ */
+ kgsl_active_count_wait(device);
+
/* Don't let the timer wake us during suspended sleep. */
del_timer_sync(&device->idle_timer);
switch (device->state) {
@@ -513,6 +491,8 @@
device->ftbl->idle(device);
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
+ /* make sure power is on to stop the device */
+ kgsl_pwrctrl_enable(device);
/* Get the completion ready to be waited upon. */
INIT_COMPLETION(device->hwaccess_gate);
device->ftbl->suspend_context(device);
@@ -632,9 +612,16 @@
device->pwrctrl.restore_slumber = false;
if (device->pwrscale.policy == NULL)
kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
- kgsl_pwrctrl_wake(device);
+ if (kgsl_pwrctrl_wake(device) != 0)
+ return;
+ /*
+ * We don't have a way to go directly from
+ * a deeper sleep state to NAP, which is
+ * the desired state here.
+ */
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ kgsl_pwrctrl_sleep(device);
mutex_unlock(&device->mutex);
- kgsl_check_idle(device);
KGSL_PWR_WARN(device, "late resume end\n");
}
EXPORT_SYMBOL(kgsl_late_resume_driver);
@@ -745,7 +732,7 @@
filep->private_data = NULL;
mutex_lock(&device->mutex);
- kgsl_check_suspended(device);
+ kgsl_active_count_get(device);
while (1) {
context = idr_get_next(&device->context_idr, &next);
@@ -767,10 +754,17 @@
device->open_count--;
if (device->open_count == 0) {
+ BUG_ON(device->active_cnt > 1);
result = device->ftbl->stop(device);
kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ /*
+ * active_cnt special case: we just stopped the device,
+ * so no need to use kgsl_active_count_put()
+ */
+ device->active_cnt--;
+ } else {
+ kgsl_active_count_put(device);
}
-
mutex_unlock(&device->mutex);
kfree(dev_priv);
@@ -816,9 +810,14 @@
filep->private_data = dev_priv;
mutex_lock(&device->mutex);
- kgsl_check_suspended(device);
if (device->open_count == 0) {
+ /*
+ * active_cnt special case: we are starting up for the first
+ * time, so use this sequence instead of the kgsl_pwrctrl_wake()
+ * which will be called by kgsl_active_count_get().
+ */
+ device->active_cnt++;
kgsl_sharedmem_set(&device->memstore, 0, 0,
device->memstore.size);
@@ -831,6 +830,7 @@
goto err_freedevpriv;
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ kgsl_active_count_put(device);
}
device->open_count++;
mutex_unlock(&device->mutex);
@@ -856,10 +856,15 @@
mutex_lock(&device->mutex);
device->open_count--;
if (device->open_count == 0) {
+ /* make sure power is on to stop the device */
+ kgsl_pwrctrl_enable(device);
result = device->ftbl->stop(device);
kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
}
err_freedevpriv:
+ /* only the first open takes an active count */
+ if (device->open_count == 0)
+ device->active_cnt--;
mutex_unlock(&device->mutex);
filep->private_data = NULL;
kfree(dev_priv);
@@ -1073,10 +1078,6 @@
struct kgsl_device *device = dev_priv->device;
unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
- /* Set the active count so that suspend doesn't do the wrong thing */
-
- device->active_cnt++;
-
trace_kgsl_waittimestamp_entry(device, context_id,
kgsl_readtimestamp(device, context,
KGSL_TIMESTAMP_RETIRED),
@@ -1090,9 +1091,6 @@
KGSL_TIMESTAMP_RETIRED),
result);
- /* Fire off any pending suspend operations that are in flight */
- kgsl_active_count_put(dev_priv->device);
-
return result;
}
@@ -1887,7 +1885,6 @@
trace_kgsl_mem_map(entry, param->fd);
- kgsl_check_idle(dev_priv->device);
return result;
error_unmap:
@@ -1907,7 +1904,6 @@
}
error:
kfree(entry);
- kgsl_check_idle(dev_priv->device);
return result;
}
@@ -2035,7 +2031,6 @@
entry->memtype = KGSL_MEM_ENTRY_KERNEL;
- kgsl_check_idle(dev_priv->device);
*ret_entry = entry;
return result;
err:
@@ -2370,7 +2365,7 @@
kgsl_ioctl_cff_user_event, 0),
KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
kgsl_ioctl_timestamp_event,
- KGSL_IOCTL_LOCK),
+ KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
kgsl_ioctl_device_setproperty,
KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
@@ -2462,14 +2457,19 @@
if (lock) {
mutex_lock(&dev_priv->device->mutex);
- if (use_hw)
- kgsl_check_suspended(dev_priv->device);
+ if (use_hw) {
+ ret = kgsl_active_count_get(dev_priv->device);
+ if (ret < 0)
+ goto unlock;
+ }
}
ret = func(dev_priv, cmd, uptr);
+unlock:
if (lock) {
- kgsl_check_idle_locked(dev_priv->device);
+ if (use_hw)
+ kgsl_active_count_put(dev_priv->device);
mutex_unlock(&dev_priv->device->mutex);
}
@@ -3032,11 +3032,7 @@
/* For a manual dump, make sure that the system is idle */
if (manual) {
- if (device->active_cnt != 0) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->suspend_gate);
- mutex_lock(&device->mutex);
- }
+ kgsl_active_count_wait(device);
if (device->state == KGSL_STATE_ACTIVE)
kgsl_idle(device);
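
With the wrapper change above, a handler registered with both KGSL_IOCTL_LOCK and KGSL_IOCTL_WAKE is entered with device->mutex held and an active-count reference already taken, so it can touch the hardware directly. A hedged sketch of what such a handler may then assume (hypothetical handler, not part of the driver):

static long example_kgsl_ioctl_handler(struct kgsl_device_private *dev_priv,
				       unsigned int cmd, void *data)
{
	struct kgsl_device *device = dev_priv->device;

	/* The ioctl wrapper took the mutex and an active count for us. */
	kgsl_pre_hwaccess(device);	/* asserts mutex held + clocks on */

	/* ... register reads/writes ... */

	return 0;
}
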
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 0d11660..ac82820 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -454,23 +454,4 @@
kref_put(&context->refcount, kgsl_context_destroy);
}
-/**
- * kgsl_active_count_put - Decrease the device active count
- * @device: Pointer to a KGSL device
- *
- * Decrease the active count for the KGSL device and trigger the suspend_gate
- * completion if it hits zero
- */
-static inline void
-kgsl_active_count_put(struct kgsl_device *device)
-{
- if (device->active_cnt == 1)
- INIT_COMPLETION(device->suspend_gate);
-
- device->active_cnt--;
-
- if (device->active_cnt == 0)
- complete(&device->suspend_gate);
-}
-
#endif /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index 9e9c0da..d872783 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -51,6 +51,7 @@
void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
void *owner)
{
+ int ret;
struct kgsl_event *event;
unsigned int cur_ts;
struct kgsl_context *context = NULL;
@@ -82,6 +83,16 @@
if (event == NULL)
return -ENOMEM;
+ /*
+ * Increase the active count on the device to avoid going into power
+ * saving modes while events are pending
+ */
+ ret = kgsl_active_count_get_light(device);
+ if (ret < 0) {
+ kfree(event);
+ return ret;
+ }
+
event->context = context;
event->timestamp = ts;
event->priv = priv;
@@ -112,13 +123,6 @@
} else
_add_event_to_list(&device->events, event);
- /*
- * Increase the active count on the device to avoid going into power
- * saving modes while events are pending
- */
-
- device->active_cnt++;
-
queue_work(device->work_queue, &device->ts_expired_ws);
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 2f8d93e..7d05aea 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1055,6 +1055,14 @@
pwr->power_flags = 0;
}
+/**
+ * kgsl_idle_check() - Work function for GPU interrupts and idle timeouts.
+ * @device: The device
+ *
+ * This function is called for work that is queued by the interrupt
+ * handler or the idle timer. It attempts to transition to a clocks
+ * off state if the active_cnt is 0 and the hardware is idle.
+ */
void kgsl_idle_check(struct work_struct *work)
{
struct kgsl_device *device = container_of(work, struct kgsl_device,
@@ -1064,15 +1072,22 @@
return;
mutex_lock(&device->mutex);
- if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
- kgsl_pwrscale_idle(device);
- if (kgsl_pwrctrl_sleep(device) != 0) {
+ kgsl_pwrscale_idle(device);
+
+ if (device->state == KGSL_STATE_ACTIVE
+ || device->state == KGSL_STATE_NAP) {
+ if (device->active_cnt > 0 || kgsl_pwrctrl_sleep(device) != 0) {
+
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+
mod_timer(&device->idle_timer,
jiffies +
device->pwrctrl.interval_timeout);
- /* If the GPU has been too busy to sleep, make sure *
- * that is acurately reflected in the % busy numbers. */
+ /*
+ * If the GPU has been too busy to sleep, make sure
+			 * that is accurately reflected in the % busy numbers.
+ */
device->pwrctrl.clk_stats.no_nap_cnt++;
if (device->pwrctrl.clk_stats.no_nap_cnt >
UPDATE_BUSY) {
@@ -1104,54 +1119,26 @@
}
}
+
+/**
+ * kgsl_pre_hwaccess - Enforce preconditions for touching registers
+ * @device: The device
+ *
+ * This function ensures that the correct lock is held and that the GPU
+ * clock is on immediately before a register is read or written. Note
+ * that this function does not check active_cnt because the registers
+ * must be accessed during device start and stop, when the active_cnt
+ * may legitimately be 0.
+ */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
+ /* In order to touch a register you must hold the device mutex...*/
BUG_ON(!mutex_is_locked(&device->mutex));
- switch (device->state) {
- case KGSL_STATE_ACTIVE:
- return;
- case KGSL_STATE_NAP:
- case KGSL_STATE_SLEEP:
- case KGSL_STATE_SLUMBER:
- kgsl_pwrctrl_wake(device);
- break;
- case KGSL_STATE_SUSPEND:
- kgsl_check_suspended(device);
- break;
- case KGSL_STATE_INIT:
- case KGSL_STATE_HUNG:
- case KGSL_STATE_DUMP_AND_FT:
- if (test_bit(KGSL_PWRFLAGS_CLK_ON,
- &device->pwrctrl.power_flags))
- break;
- else
- KGSL_PWR_ERR(device,
- "hw access while clocks off from state %d\n",
- device->state);
- break;
- default:
- KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
- device->state);
- break;
- }
+ /* and have the clock on! */
+ BUG_ON(!test_bit(KGSL_PWRFLAGS_CLK_ON, &device->pwrctrl.power_flags));
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
-void kgsl_check_suspended(struct kgsl_device *device)
-{
- if (device->requested_state == KGSL_STATE_SUSPEND ||
- device->state == KGSL_STATE_SUSPEND) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->hwaccess_gate);
- mutex_lock(&device->mutex);
- } else if (device->state == KGSL_STATE_DUMP_AND_FT) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->ft_gate);
- mutex_lock(&device->mutex);
- } else if (device->state == KGSL_STATE_SLUMBER)
- kgsl_pwrctrl_wake(device);
-}
-
static int
_nap(struct kgsl_device *device)
{
@@ -1230,6 +1217,8 @@
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
del_timer_sync(&device->idle_timer);
+		/* make sure power is on to stop the device */
+ kgsl_pwrctrl_enable(device);
device->ftbl->suspend_context(device);
device->ftbl->stop(device);
_sleep_accounting(device);
@@ -1278,9 +1267,9 @@
/******************************************************************/
/* Caller must hold the device mutex. */
-void kgsl_pwrctrl_wake(struct kgsl_device *device)
+int kgsl_pwrctrl_wake(struct kgsl_device *device)
{
- int status;
+ int status = 0;
unsigned int context_id;
unsigned int state = device->state;
unsigned int ts_processed = 0xdeaddead;
@@ -1329,8 +1318,10 @@
KGSL_PWR_WARN(device, "unhandled state %s\n",
kgsl_pwrstate_to_str(device->state));
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ status = -EINVAL;
break;
}
+ return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
@@ -1396,3 +1387,124 @@
}
EXPORT_SYMBOL(kgsl_pwrstate_to_str);
+
+/**
+ * kgsl_active_count_get() - Increase the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Increase the active count for the KGSL device and turn on
+ * clocks if this is the first reference. Code paths that need
+ * to touch the hardware or wait for the hardware to complete
+ * an operation must hold an active count reference until they
+ * are finished. An error code will be returned if waking the
+ * device fails. The device mutex must be held while *calling
+ * device fails. The device mutex must be held while calling
+ */
+int kgsl_active_count_get(struct kgsl_device *device)
+{
+ int ret = 0;
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (device->active_cnt == 0) {
+ if (device->requested_state == KGSL_STATE_SUSPEND ||
+ device->state == KGSL_STATE_SUSPEND) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->hwaccess_gate);
+ mutex_lock(&device->mutex);
+ } else if (device->state == KGSL_STATE_DUMP_AND_FT) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->ft_gate);
+ mutex_lock(&device->mutex);
+ }
+ ret = kgsl_pwrctrl_wake(device);
+ }
+ if (ret == 0)
+ device->active_cnt++;
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_active_count_get);
+
+/**
+ * kgsl_active_count_get_light() - Increase the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Increase the active count for the KGSL device WITHOUT
+ * turning on the clocks. Currently this is only used for creating
+ * kgsl_events. The device mutex must be held while calling this function.
+ */
+int kgsl_active_count_get_light(struct kgsl_device *device)
+{
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (device->state != KGSL_STATE_ACTIVE) {
+ dev_WARN_ONCE(device->dev, 1, "device in unexpected state %s\n",
+ kgsl_pwrstate_to_str(device->state));
+ return -EINVAL;
+ }
+
+ if (device->active_cnt == 0) {
+ dev_WARN_ONCE(device->dev, 1, "active count is 0!\n");
+ return -EINVAL;
+ }
+
+ device->active_cnt++;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_active_count_get_light);
+
+/**
+ * kgsl_active_count_put() - Decrease the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Decrease the active count for the KGSL device and turn off
+ * clocks if there are no remaining references. This function will
+ * transition the device to NAP if there are no other pending state
+ * changes. It also completes the suspend gate. The device mutex must
+ * be held while calling this function.
+ */
+void kgsl_active_count_put(struct kgsl_device *device)
+{
+ BUG_ON(!mutex_is_locked(&device->mutex));
+ BUG_ON(device->active_cnt == 0);
+
+ kgsl_pwrscale_idle(device);
+ if (device->active_cnt > 1) {
+ device->active_cnt--;
+ return;
+ }
+
+ INIT_COMPLETION(device->suspend_gate);
+
+ if (device->pwrctrl.nap_allowed == true &&
+ (device->state == KGSL_STATE_ACTIVE &&
+ device->requested_state == KGSL_STATE_NONE)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ if (kgsl_pwrctrl_sleep(device) != 0)
+ mod_timer(&device->idle_timer,
+ jiffies
+ + device->pwrctrl.interval_timeout);
+ }
+ device->active_cnt--;
+
+ if (device->active_cnt == 0)
+ complete(&device->suspend_gate);
+}
+EXPORT_SYMBOL(kgsl_active_count_put);
+
+/**
+ * kgsl_active_count_wait() - Wait for activity to finish.
+ * @device: Pointer to a KGSL device
+ *
+ * Block until all active_cnt users put() their reference.
+ */
+void kgsl_active_count_wait(struct kgsl_device *device)
+{
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (device->active_cnt != 0) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->suspend_gate);
+ mutex_lock(&device->mutex);
+ }
+}
+EXPORT_SYMBOL(kgsl_active_count_wait);
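
The kerneldoc above describes the intended usage pattern for the new active-count API: take a reference (which wakes the GPU if needed) before touching hardware or waiting on it, and drop it when done (which may let the GPU nap). A minimal sketch of a caller, assuming it is not one of the open/close special cases handled in kgsl.c:

static int example_touch_hardware(struct kgsl_device *device)
{
	int ret;

	mutex_lock(&device->mutex);
	ret = kgsl_active_count_get(device);	/* wakes the GPU if needed */
	if (ret) {
		mutex_unlock(&device->mutex);
		return ret;
	}

	kgsl_pre_hwaccess(device);		/* mutex held, clocks on */
	/* ... register access or waits on the hardware ... */

	kgsl_active_count_put(device);		/* may NAP / sleep the GPU */
	mutex_unlock(&device->mutex);
	return 0;
}
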
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index ced52e1..0fd64c3 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -99,9 +99,8 @@
void kgsl_timer(unsigned long data);
void kgsl_idle_check(struct work_struct *work);
void kgsl_pre_hwaccess(struct kgsl_device *device);
-void kgsl_check_suspended(struct kgsl_device *device);
int kgsl_pwrctrl_sleep(struct kgsl_device *device);
-void kgsl_pwrctrl_wake(struct kgsl_device *device);
+int kgsl_pwrctrl_wake(struct kgsl_device *device);
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
unsigned int level);
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device);
@@ -115,4 +114,10 @@
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state);
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state);
+
+int kgsl_active_count_get(struct kgsl_device *device);
+int kgsl_active_count_get_light(struct kgsl_device *device);
+void kgsl_active_count_put(struct kgsl_device *device);
+void kgsl_active_count_wait(struct kgsl_device *device);
+
#endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index 0b02a34..66811bf 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -129,6 +129,7 @@
struct qpnp_iadc_drv {
struct qpnp_adc_drv *adc;
int32_t rsense;
+ bool external_rsense;
struct device *iadc_hwmon;
bool iadc_initialized;
int64_t die_temp_calib_offset;
@@ -543,6 +544,9 @@
if (!iadc || !iadc->iadc_initialized)
return -EPROBE_DEFER;
+	if (iadc->external_rsense) {
+		*rsense = iadc->rsense;
+		return 0;
+	}
+
rc = qpnp_iadc_read_reg(QPNP_IADC_NOMINAL_RSENSE, &rslt_rsense);
if (rc < 0) {
pr_err("qpnp adc rsense read failed with %d\n", rc);
@@ -571,15 +575,21 @@
{
struct qpnp_iadc_drv *iadc = qpnp_iadc;
struct qpnp_vadc_result result_pmic_therm;
+ int64_t die_temp_offset;
int rc = 0;
rc = qpnp_vadc_read(DIE_TEMP, &result_pmic_therm);
if (rc < 0)
return rc;
- if (((uint64_t) (result_pmic_therm.physical -
- iadc->die_temp_calib_offset))
- > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
+ die_temp_offset = result_pmic_therm.physical -
+ iadc->die_temp_calib_offset;
+ if (die_temp_offset < 0)
+ die_temp_offset = -die_temp_offset;
+
+ if (die_temp_offset > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
+ iadc->die_temp_calib_offset =
+ result_pmic_therm.physical;
rc = qpnp_iadc_calibrate_for_trim();
if (rc)
pr_err("periodic IADC calibration failed\n");
@@ -822,9 +832,11 @@
rc = of_property_read_u32(node, "qcom,rsense",
&iadc->rsense);
- if (rc) {
- pr_err("Invalid rsens reference property\n");
- goto fail;
+ if (rc)
+ pr_debug("Defaulting to internal rsense\n");
+ else {
+ pr_debug("Use external rsense\n");
+ iadc->external_rsense = true;
}
rc = devm_request_irq(&spmi->dev, iadc->adc->adc_irq_eoc,
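
The die-temperature hunk above replaces the unsigned comparison with a signed drift check: recalibrate whenever the absolute difference between the current die temperature and the temperature recorded at the last calibration exceeds QPNP_IADC_DIE_TEMP_CALIB_OFFSET, and remember the new reference point. A standalone sketch of that rule (the threshold value here is made up for illustration):

#include <stdint.h>

#define QPNP_IADC_DIE_TEMP_CALIB_OFFSET 5000	/* placeholder threshold */

static int needs_recalibration(int64_t die_temp_now, int64_t *calib_temp)
{
	int64_t drift = die_temp_now - *calib_temp;

	if (drift < 0)
		drift = -drift;

	if (drift > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
		*calib_temp = die_temp_now;	/* new calibration reference */
		return 1;
	}
	return 0;
}
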
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index b128bee..29b269a 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -128,6 +128,7 @@
#define MXT_SPT_DIGITIZER_T43 43
#define MXT_SPT_MESSAGECOUNT_T44 44
#define MXT_SPT_CTECONFIG_T46 46
+#define MXT_SPT_EXTRANOISESUPCTRLS_T58 58
#define MXT_SPT_TIMER_T61 61
/* MXT_GEN_COMMAND_T6 field */
@@ -433,6 +434,7 @@
case MXT_SPT_USERDATA_T38:
case MXT_SPT_DIGITIZER_T43:
case MXT_SPT_CTECONFIG_T46:
+ case MXT_SPT_EXTRANOISESUPCTRLS_T58:
case MXT_SPT_TIMER_T61:
case MXT_PROCI_ADAPTIVETHRESHOLD_T55:
return true;
@@ -470,6 +472,7 @@
case MXT_SPT_USERDATA_T38:
case MXT_SPT_DIGITIZER_T43:
case MXT_SPT_CTECONFIG_T46:
+ case MXT_SPT_EXTRANOISESUPCTRLS_T58:
case MXT_SPT_TIMER_T61:
case MXT_PROCI_ADAPTIVETHRESHOLD_T55:
return true;
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.c b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
index 417ef83..4e2b1a4 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.c
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
@@ -34,6 +34,9 @@
#define DRIVER_NAME "synaptics_rmi4_i2c"
#define INPUT_PHYS_NAME "synaptics_rmi4_i2c/input0"
+
+#define RESET_DELAY 100
+
#define TYPE_B_PROTOCOL
#define NO_0D_WHILE_2D
@@ -68,6 +71,16 @@
#define NO_SLEEP_OFF (0 << 2)
#define NO_SLEEP_ON (1 << 2)
+enum device_status {
+ STATUS_NO_ERROR = 0x00,
+ STATUS_RESET_OCCURED = 0x01,
+ STATUS_INVALID_CONFIG = 0x02,
+ STATUS_DEVICE_FAILURE = 0x03,
+ STATUS_CONFIG_CRC_FAILURE = 0x04,
+ STATUS_FIRMWARE_CRC_FAILURE = 0x05,
+ STATUS_CRC_IN_PROGRESS = 0x06
+};
+
#define RMI4_VTG_MIN_UV 2700000
#define RMI4_VTG_MAX_UV 3300000
#define RMI4_ACTIVE_LOAD_UA 15000
@@ -79,7 +92,6 @@
#define RMI4_I2C_LPM_LOAD_UA 10
#define RMI4_GPIO_SLEEP_LOW_US 10000
-#define RMI4_GPIO_WAIT_HIGH_MS 25
static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
unsigned short addr, unsigned char *data,
@@ -1482,6 +1494,16 @@
if (retval < 0)
return retval;
+ while (status.status_code == STATUS_CRC_IN_PROGRESS) {
+ msleep(1);
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_data_base_addr,
+ status.data,
+ sizeof(status.data));
+ if (retval < 0)
+ return retval;
+ }
+
if (status.flash_prog == 1) {
pr_notice("%s: In flash prog mode, status = 0x%02x\n",
__func__,
@@ -1645,7 +1667,7 @@
return retval;
}
- msleep(100);
+ msleep(RESET_DELAY);
return retval;
};
@@ -2110,7 +2132,7 @@
gpio_set_value(platform_data->reset_gpio, 0);
usleep(RMI4_GPIO_SLEEP_LOW_US);
gpio_set_value(platform_data->reset_gpio, 1);
- msleep(RMI4_GPIO_WAIT_HIGH_MS);
+ msleep(RESET_DELAY);
} else
synaptics_rmi4_reset_command(rmi4_data);
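
The new loop above polls the F01 status register until the controller finishes its startup CRC check, and as written it has no upper bound. If a bound is wanted, one hedged variant (made-up retry limit; the status structure name is assumed from the surrounding driver) could look like:

#define CRC_POLL_MAX_RETRIES 100	/* placeholder bound, roughly 100 ms */

static int example_wait_for_crc(struct synaptics_rmi4_data *rmi4_data,
				struct synaptics_rmi4_f01_device_status *status)
{
	int retry = 0;
	int retval;

	while (status->status_code == STATUS_CRC_IN_PROGRESS) {
		if (retry++ >= CRC_POLL_MAX_RETRIES)
			return -ETIMEDOUT;
		msleep(1);
		retval = synaptics_rmi4_i2c_read(rmi4_data,
				rmi4_data->f01_data_base_addr,
				status->data,
				sizeof(status->data));
		if (retval < 0)
			return retval;
	}
	return 0;
}
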
diff --git a/drivers/iommu/msm_iommu_dev-v0.c b/drivers/iommu/msm_iommu_dev-v0.c
index 549800f..7ae0b21 100644
--- a/drivers/iommu/msm_iommu_dev-v0.c
+++ b/drivers/iommu/msm_iommu_dev-v0.c
@@ -414,6 +414,7 @@
pmon_info->iommu.ops = &iommu_access_ops_v0;
pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v0();
pmon_info->iommu.iommu_name = drvdata->name;
+ pmon_info->iommu.always_on = 1;
ret = msm_iommu_pm_iommu_register(pmon_info);
if (ret) {
pr_err("%s iommu register fail\n",
diff --git a/drivers/iommu/msm_iommu_perfmon.c b/drivers/iommu/msm_iommu_perfmon.c
index fee8a4a..a11d794 100644
--- a/drivers/iommu/msm_iommu_perfmon.c
+++ b/drivers/iommu/msm_iommu_perfmon.c
@@ -90,6 +90,19 @@
return pos;
}
+static int iommu_pm_event_class_supported(struct iommu_pmon *pmon,
+ int event_class)
+{
+ unsigned int nevent_cls = pmon->nevent_cls_supported;
+ unsigned int i;
+
+ for (i = 0; i < nevent_cls; ++i) {
+ if (event_class == pmon->event_cls_supported[i])
+ return event_class;
+ }
+ return MSM_IOMMU_PMU_NO_EVENT_CLASS;
+}
+
static const char *iommu_pm_find_event_class_name(int event_class)
{
size_t array_len;
@@ -113,7 +126,8 @@
return event_class_name;
}
-static int iommu_pm_find_event_class(const char *event_class_name)
+static int iommu_pm_find_event_class(struct iommu_pmon *pmon,
+ const char *event_class_name)
{
size_t array_len;
struct event_class *ptr;
@@ -134,6 +148,7 @@
}
out:
+ event_class = iommu_pm_event_class_supported(pmon, event_class);
return event_class;
}
@@ -389,11 +404,11 @@
rv = kstrtol(buf, 10, &value);
if (!rv) {
counter->current_event_class =
- iommu_pm_find_event_class(
+ iommu_pm_find_event_class(pmon,
iommu_pm_find_event_class_name(value));
} else {
counter->current_event_class =
- iommu_pm_find_event_class(buf);
+ iommu_pm_find_event_class(pmon, buf);
 		}
 	}
if (current_event_class != counter->current_event_class)
@@ -488,14 +503,17 @@
rv = kstrtoul(buf, 10, &cmd);
if (!rv && (cmd < 2)) {
if (pmon->enabled == 1 && cmd == 0) {
- if (pmon->iommu_attach_count > 0)
+ if (pmon->iommu.always_on ||
+ pmon->iommu_attach_count > 0)
iommu_pm_off(pmon);
} else if (pmon->enabled == 0 && cmd == 1) {
/* We can only turn on perf. monitoring if
- * iommu is attached. Delay turning on perf.
- * monitoring until we are attached.
+ * iommu is attached (if not always on).
+ * Delay turning on perf. monitoring until
+ * we are attached.
*/
- if (pmon->iommu_attach_count > 0)
+ if (pmon->iommu.always_on ||
+ pmon->iommu_attach_count > 0)
iommu_pm_on(pmon);
else
pmon->enabled = 1;
@@ -788,9 +806,9 @@
++pmon->iommu_attach_count;
if (pmon->iommu_attach_count == 1) {
/* If perf. mon was enabled before we attached we do
- * the actual after we attach.
+ * the actual enabling after we attach.
*/
- if (pmon->enabled)
+ if (pmon->enabled && !pmon->iommu.always_on)
iommu_pm_on(pmon);
}
mutex_unlock(&pmon->lock);
@@ -805,9 +823,9 @@
mutex_lock(&pmon->lock);
if (pmon->iommu_attach_count == 1) {
/* If perf. mon is still enabled we have to disable
- * before we do the detach.
+ * before we do the detach if iommu is not always on.
*/
- if (pmon->enabled)
+ if (pmon->enabled && !pmon->iommu.always_on)
iommu_pm_off(pmon);
}
BUG_ON(pmon->iommu_attach_count == 0);
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index e88e574..1d34b06 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -1433,7 +1433,7 @@
rc = of_property_read_u32(node, "qcom,pwm-channel", &val);
if (!rc)
- led->rgb_cfg->pwm_channel = (u8) val;
+ led->rgb_cfg->pwm_channel = val;
else
return rc;
@@ -1495,22 +1495,22 @@
rc = of_property_read_u32(node, "qcom,start-idx", &val);
if (!rc) {
- led->rgb_cfg->lut_params.start_idx = (u8) val;
- led->rgb_cfg->duty_cycles->start_idx = (u8) val;
+ led->rgb_cfg->lut_params.start_idx = val;
+ led->rgb_cfg->duty_cycles->start_idx = val;
} else
return rc;
led->rgb_cfg->lut_params.lut_pause_hi = 0;
rc = of_property_read_u32(node, "qcom,pause-hi", &val);
if (!rc)
- led->rgb_cfg->lut_params.lut_pause_hi = (u8) val;
+ led->rgb_cfg->lut_params.lut_pause_hi = val;
else if (rc != -EINVAL)
return rc;
led->rgb_cfg->lut_params.lut_pause_lo = 0;
rc = of_property_read_u32(node, "qcom,pause-lo", &val);
if (!rc)
- led->rgb_cfg->lut_params.lut_pause_lo = (u8) val;
+ led->rgb_cfg->lut_params.lut_pause_lo = val;
else if (rc != -EINVAL)
return rc;
@@ -1518,14 +1518,14 @@
QPNP_LUT_RAMP_STEP_DEFAULT;
rc = of_property_read_u32(node, "qcom,ramp-step-ms", &val);
if (!rc)
- led->rgb_cfg->lut_params.ramp_step_ms = (u8) val;
+ led->rgb_cfg->lut_params.ramp_step_ms = val;
else if (rc != -EINVAL)
return rc;
led->rgb_cfg->lut_params.flags = QPNP_LED_PWM_FLAGS;
rc = of_property_read_u32(node, "qcom,lut-flags", &val);
if (!rc)
- led->rgb_cfg->lut_params.flags = (u8) val;
+ led->rgb_cfg->lut_params.flags = val;
else if (rc != -EINVAL)
return rc;
diff --git a/drivers/media/platform/msm/camera_v2/camera/camera.c b/drivers/media/platform/msm/camera_v2/camera/camera.c
index 802349a..71087d9 100644
--- a/drivers/media/platform/msm/camera_v2/camera/camera.c
+++ b/drivers/media/platform/msm/camera_v2/camera/camera.c
@@ -195,9 +195,18 @@
static int camera_v4l2_reqbufs(struct file *filep, void *fh,
struct v4l2_requestbuffers *req)
{
+ int ret;
+ struct msm_session *session;
struct camera_v4l2_private *sp = fh_to_private(fh);
-
- return vb2_reqbufs(&sp->vb2_q, req);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&session->lock);
+ ret = vb2_reqbufs(&sp->vb2_q, req);
+ mutex_unlock(&session->lock);
+ return ret;
}
static int camera_v4l2_querybuf(struct file *filep, void *fh,
@@ -209,17 +218,35 @@
static int camera_v4l2_qbuf(struct file *filep, void *fh,
struct v4l2_buffer *pb)
{
+ int ret;
+ struct msm_session *session;
struct camera_v4l2_private *sp = fh_to_private(fh);
-
- return vb2_qbuf(&sp->vb2_q, pb);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&session->lock);
+ ret = vb2_qbuf(&sp->vb2_q, pb);
+ mutex_unlock(&session->lock);
+ return ret;
}
static int camera_v4l2_dqbuf(struct file *filep, void *fh,
struct v4l2_buffer *pb)
{
+ int ret;
+ struct msm_session *session;
struct camera_v4l2_private *sp = fh_to_private(fh);
-
- return vb2_dqbuf(&sp->vb2_q, pb, filep->f_flags & O_NONBLOCK);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&session->lock);
+ ret = vb2_dqbuf(&sp->vb2_q, pb, filep->f_flags & O_NONBLOCK);
+ mutex_unlock(&session->lock);
+ return ret;
}
static int camera_v4l2_streamon(struct file *filep, void *fh,
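
The three buffer ioctls above repeat the same session lookup and locking around their vb2 calls. If that pattern grows, a hypothetical helper (not part of the driver) could centralize it:

/* Sketch only: shared lookup-and-lock wrapper for the vb2 buffer ops. */
static int example_with_session_lock(struct file *filep, void *fh,
				     int (*op)(struct camera_v4l2_private *sp,
					       void *arg),
				     void *arg)
{
	struct camera_v4l2_private *sp = fh_to_private(fh);
	struct msm_video_device *pvdev = video_drvdata(filep);
	struct msm_session *session = msm_session_find(pvdev->vdev->num);
	int ret;

	if (WARN_ON(!session))
		return -EIO;

	mutex_lock(&session->lock);
	ret = op(sp, arg);
	mutex_unlock(&session->lock);
	return ret;
}
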
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index f209330..9dcd64c 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -167,6 +167,30 @@
return rc;
}
+static struct msm_cam_clk_info ispif_8974_ahb_clk_info[] = {
+ {"ispif_ahb_clk", -1},
+};
+
+static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable)
+{
+ int rc = 0;
+
+ if (ispif->csid_version < CSID_VERSION_V3) {
+ /* Older ISPIF versions don't need the AHB clock */
+ return 0;
+ }
+
+ rc = msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_8974_ahb_clk_info, &ispif->ahb_clk,
+ ARRAY_SIZE(ispif_8974_ahb_clk_info), enable);
+ if (rc < 0) {
+ pr_err("%s: cannot enable clock, error = %d",
+ __func__, rc);
+ }
+
+ return rc;
+}
+
static int msm_ispif_intf_reset(struct ispif_device *ispif,
struct msm_ispif_param_data *params)
{
@@ -979,12 +1003,20 @@
goto error_irq;
}
+ rc = msm_ispif_clk_ahb_enable(ispif, 1);
+ if (rc) {
+ pr_err("%s: ahb_clk enable failed", __func__);
+ goto error_ahb;
+ }
+
rc = msm_ispif_reset(ispif);
if (rc == 0) {
ispif->ispif_state = ISPIF_POWER_UP;
CDBG("%s: power up done\n", __func__);
goto end;
}
+
+error_ahb:
free_irq(ispif->irq->start, ispif);
error_irq:
iounmap(ispif->base);
@@ -1018,6 +1050,8 @@
/* make sure no streaming going on */
msm_ispif_reset(ispif);
+ msm_ispif_clk_ahb_enable(ispif, 0);
+
free_irq(ispif->irq->start, ispif);
iounmap(ispif->base);
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index 2c77292..945b5b8 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -58,5 +58,6 @@
enum msm_ispif_state_t ispif_state;
struct clk *ispif_clk[VFE_MAX][INTF_MAX];
struct msm_ispif_vfe_info vfe_info;
+ struct clk *ahb_clk;
};
#endif
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index b9ca017..9f1c81a 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -30,69 +30,6 @@
#include "msm_vb2.h"
#include "msm_sd.h"
-struct msm_queue_head {
- struct list_head list;
- spinlock_t lock;
- int len;
- int max;
-};
-
-/** msm_event:
- *
- * event sent by imaging server
- **/
-struct msm_event {
- struct video_device *vdev;
- atomic_t on_heap;
-};
-
-struct msm_command {
- struct list_head list;
- struct v4l2_event event;
- atomic_t on_heap;
-};
-
-/** struct msm_command_ack
- *
- * Object of command_ack_q, which is
- * created per open operation
- *
- * contains struct msm_command
- **/
-struct msm_command_ack {
- struct list_head list;
- struct msm_queue_head command_q;
- wait_queue_head_t wait;
- int stream_id;
-};
-
-struct msm_v4l2_subdev {
- /* FIXME: for session close and error handling such
- * as daemon shutdown */
- int close_sequence;
-};
-
-struct msm_session {
- struct list_head list;
-
- /* session index */
- unsigned int session_id;
-
- /* event queue sent by imaging server */
- struct msm_event event_q;
-
- /* ACK by imaging server. Object type of
- * struct msm_command_ack per open,
- * assumption is application can send
- * command on every opened video node */
- struct msm_queue_head command_ack_q;
-
- /* real streams(either data or metadate) owned by one
- * session struct msm_stream */
- struct msm_queue_head stream_q;
- struct mutex lock;
-};
-
static struct v4l2_device *msm_v4l2_dev;
static struct msm_queue_head *msm_session_q;
@@ -248,6 +185,17 @@
return (ack->stream_id == *(unsigned int *)d2) ? 1 : 0;
}
+
+struct msm_session *msm_session_find(unsigned int session_id)
+{
+ struct msm_session *session;
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (WARN_ON(!session))
+ return NULL;
+ return session;
+}
+
int msm_create_stream(unsigned int session_id,
unsigned int stream_id, struct vb2_queue *q)
{
@@ -492,7 +440,7 @@
list_for_each_entry(sd, &msm_v4l2_dev->subdevs, list)
__msm_sd_close_session_streams(sd, sd_close);
spin_unlock_irqrestore(&msm_v4l2_dev->lock, flags);
-
+ INIT_LIST_HEAD(&stream->queued_list);
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/msm.h b/drivers/media/platform/msm/camera_v2/msm.h
index 39901ad..d57cf8d 100644
--- a/drivers/media/platform/msm/camera_v2/msm.h
+++ b/drivers/media/platform/msm/camera_v2/msm.h
@@ -38,6 +38,69 @@
atomic_t opened;
};
+struct msm_queue_head {
+ struct list_head list;
+ spinlock_t lock;
+ int len;
+ int max;
+};
+
+/** msm_event:
+ *
+ * event sent by imaging server
+ **/
+struct msm_event {
+ struct video_device *vdev;
+ atomic_t on_heap;
+};
+
+struct msm_command {
+ struct list_head list;
+ struct v4l2_event event;
+ atomic_t on_heap;
+};
+
+/** struct msm_command_ack
+ *
+ * Object of command_ack_q, which is
+ * created per open operation
+ *
+ * contains struct msm_command
+ **/
+struct msm_command_ack {
+ struct list_head list;
+ struct msm_queue_head command_q;
+ wait_queue_head_t wait;
+ int stream_id;
+};
+
+struct msm_v4l2_subdev {
+ /* FIXME: for session close and error handling such
+ * as daemon shutdown */
+ int close_sequence;
+};
+
+struct msm_session {
+ struct list_head list;
+
+ /* session index */
+ unsigned int session_id;
+
+ /* event queue sent by imaging server */
+ struct msm_event event_q;
+
+ /* ACK by imaging server. Object type of
+ * struct msm_command_ack per open,
+ * assumption is application can send
+ * command on every opened video node */
+ struct msm_queue_head command_ack_q;
+
+ /* real streams (either data or metadata) owned by one
+ * session; entries are of type struct msm_stream */
+ struct msm_queue_head stream_q;
+ struct mutex lock;
+};
+
int msm_post_event(struct v4l2_event *event, int timeout);
int msm_create_session(unsigned int session, struct video_device *vdev);
int msm_destroy_session(unsigned int session_id);
@@ -52,5 +115,5 @@
struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id,
unsigned int stream_id);
struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q);
-
+struct msm_session *msm_session_find(unsigned int session_id);
#endif /*_MSM_H */
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
index 29262af..8fa8f8d 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
@@ -48,7 +48,6 @@
}
msm_vb2_buf = container_of(vb, struct msm_vb2_buffer, vb2_buf);
msm_vb2_buf->in_freeq = 0;
- msm_vb2_buf->stream = stream;
return 0;
}
@@ -66,7 +65,7 @@
return;
}
- stream = msm_vb2->stream;
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
return;
@@ -91,7 +90,7 @@
return -EINVAL;
}
- stream = msm_vb2->stream;
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
return -EINVAL;
@@ -122,7 +121,7 @@
return;
}
- stream = msm_vb2->stream;
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
return;
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h
index 027d344..7082f85 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h
@@ -42,7 +42,6 @@
struct vb2_buffer vb2_buf;
struct list_head list;
int in_freeq;
- struct msm_stream *stream;
};
struct msm_vb2_private_data {
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 431d35d..14d1197 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -1085,7 +1085,7 @@
feed_data->buffer_desc.desc[0].read_ptr = 0;
feed_data->buffer_desc.desc[0].write_ptr = 0;
feed_data->buffer_desc.desc[0].handle =
- ion_share_dma_buf(client, temp_handle);
+ ion_share_dma_buf_fd(client, temp_handle);
if (IS_ERR_VALUE(feed_data->buffer_desc.desc[0].handle)) {
MPQ_DVB_ERR_PRINT(
"%s: FAILED to share payload buffer %d\n",
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index ef3e698..0d5dc8c 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1203,6 +1203,16 @@
}
case HAL_CONFIG_VPE_DEINTERLACE:
break;
+ case HAL_PARAM_VENC_H264_GENERATE_AUDNAL:
+ {
+ struct hfi_enable *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL;
+ hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+ hfi->enable = ((struct hal_enable *) pdata)->enable;
+ pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ break;
+ }
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
case HAL_CONFIG_BUFFER_REQUIREMENTS:
case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 3c181fe..f458a0a 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -241,7 +241,7 @@
static u32 get_frame_size_compressed(int plane,
u32 height, u32 width)
{
- return (width * height * 3/2)/2;
+ return (width * height * 3/2)/4;
}
struct msm_vidc_format vdec_formats[] = {
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index da97c7a..8ce7414 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -641,6 +641,15 @@
V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_DISABLED,
.cluster = MSM_VENC_CTRL_CLUSTER_TIMING,
},
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER,
+ .name = "H264 AU Delimiter",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED,
+ .maximum = V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_ENABLED,
+ .default_value =
+ V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -1680,6 +1689,23 @@
pdata = &vui_timing_info;
break;
}
+ case V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER:
+ property_id = HAL_PARAM_VENC_H264_GENERATE_AUDNAL;
+
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED:
+ enable.enable = 0;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_ENABLED:
+ enable.enable = 1;
+ break;
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ pdata = &enable;
+ break;
default:
rc = -ENOTSUPP;
break;
@@ -1713,10 +1739,13 @@
for (c = 0; c < ctrl->ncontrols; ++c) {
if (ctrl->cluster[c]->is_new) {
- rc = try_set_ctrl(inst, ctrl->cluster[c]);
+ struct v4l2_ctrl *temp = ctrl->cluster[c];
+
+ rc = try_set_ctrl(inst, temp);
if (rc) {
- dprintk(VIDC_ERR, "Failed setting %x",
- ctrl->cluster[c]->id);
+ dprintk(VIDC_ERR, "Failed setting %s (%x)",
+ v4l2_ctrl_get_name(temp->id),
+ temp->id);
break;
}
}
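
The new V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER boolean control maps to HAL_PARAM_VENC_H264_GENERATE_AUDNAL, letting a client ask the encoder to emit access-unit delimiter NALs. A hedged userspace sketch using the standard VIDIOC_S_CTRL ioctl; the device node path and the vendor UAPI header location are assumptions, while the control ID and value macros are the ones this change references.

/*
 * Sketch: enable H.264 AU delimiters from userspace via VIDIOC_S_CTRL.
 * "/dev/video33" and <media/msm_vidc.h> are assumptions, not from the patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/msm_vidc.h>	/* assumed location of the VIDC control IDs */

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER,
		.value = V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_ENABLED,
	};
	int fd = open("/dev/video33", O_RDWR);	/* hypothetical encoder node */

	if (fd < 0 || ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
		perror("H264 AU delimiter control");
		if (fd >= 0)
			close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
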
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 8031c74..ddb3063 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -547,7 +547,7 @@
q_info = &device->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
if (!q_info) {
dprintk(VIDC_ERR, "cannot write to shared Q's");
- goto err_q_write;
+ goto err_q_null;
}
mutex_lock(&device->clock_lock);
result = venus_hfi_clk_gating_off(device);
@@ -572,8 +572,9 @@
dprintk(VIDC_ERR, "venus_hfi_iface_cmdq_write:queue_full");
}
err_q_write:
- mutex_unlock(&device->write_lock);
mutex_unlock(&device->clock_lock);
+err_q_null:
+ mutex_unlock(&device->write_lock);
return result;
}
@@ -592,7 +593,7 @@
q_array.align_virtual_addr == 0) {
dprintk(VIDC_ERR, "cannot read from shared MSG Q's");
rc = -ENODATA;
- goto read_error;
+ goto read_error_null;
}
q_info = &device->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
mutex_lock(&device->clock_lock);
@@ -614,8 +615,9 @@
rc = -ENODATA;
}
read_error:
- mutex_unlock(&device->read_lock);
mutex_unlock(&device->clock_lock);
+read_error_null:
+ mutex_unlock(&device->read_lock);
return rc;
}
@@ -634,7 +636,7 @@
q_array.align_virtual_addr == 0) {
dprintk(VIDC_ERR, "cannot read from shared DBG Q's");
rc = -ENODATA;
- goto dbg_error;
+ goto dbg_error_null;
}
mutex_lock(&device->clock_lock);
rc = venus_hfi_clk_gating_off(device);
@@ -656,8 +658,9 @@
rc = -ENODATA;
}
dbg_error:
- mutex_unlock(&device->read_lock);
mutex_unlock(&device->clock_lock);
+dbg_error_null:
+ mutex_unlock(&device->read_lock);
return rc;
}
@@ -1070,8 +1073,8 @@
disable_irq_nosync(dev->hal_data->irq);
dev->intr_status = 0;
venus_hfi_interface_queues_release(dev);
+ mutex_unlock(&dev->clock_lock);
}
- mutex_unlock(&dev->clock_lock);
dprintk(VIDC_INFO, "HAL exited\n");
return 0;
}
@@ -1867,8 +1870,8 @@
if (((ctrl_status & VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK)
!= 0) && !rc)
venus_hfi_clk_gating_on(device);
- mutex_unlock(&device->write_lock);
mutex_unlock(&device->clock_lock);
+ mutex_unlock(&device->write_lock);
return rc;
}
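
The venus_hfi.c hunks above fix a lock-ordering bug: write_lock/read_lock are taken before clock_lock, so they must be released after it, and the early "queue not ready" exits get their own labels so they drop only the lock actually held. A generic sketch of that error-label convention, with hypothetical names rather than the driver's:

/*
 * Sketch of the unlock-ordering / error-label convention above.
 * struct hfi_ctx and send_packet() are illustrative, not driver code.
 */
#include <linux/errno.h>
#include <linux/mutex.h>

struct hfi_ctx {
	struct mutex write_lock;	/* taken first */
	struct mutex clock_lock;	/* taken second, released first */
	void *cmd_queue;
};

int send_packet(struct hfi_ctx *ctx);	/* hypothetical helper */

static int hfi_cmdq_write(struct hfi_ctx *ctx)
{
	int rc;

	mutex_lock(&ctx->write_lock);
	if (!ctx->cmd_queue) {
		rc = -ENODATA;
		goto err_q_null;	/* only write_lock is held here */
	}

	mutex_lock(&ctx->clock_lock);
	rc = send_packet(ctx);
	mutex_unlock(&ctx->clock_lock);	/* reverse of acquisition order */
err_q_null:
	mutex_unlock(&ctx->write_lock);
	return rc;
}
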
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 3729c3a..c91d1d2 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -168,6 +168,7 @@
HAL_PARAM_VENC_H264_ENTROPY_CABAC_MODEL,
HAL_CONFIG_VENC_MAX_BITRATE,
HAL_PARAM_VENC_H264_VUI_TIMING_INFO,
+ HAL_PARAM_VENC_H264_GENERATE_AUDNAL,
};
enum hal_domain {
diff --git a/drivers/media/platform/msm/wfd/mdp-4-subdev.c b/drivers/media/platform/msm/wfd/mdp-4-subdev.c
index d2ecd22..465ec21 100644
--- a/drivers/media/platform/msm/wfd/mdp-4-subdev.c
+++ b/drivers/media/platform/msm/wfd/mdp-4-subdev.c
@@ -11,7 +11,6 @@
*
*/
#include <linux/msm_mdp.h>
-#include <linux/switch.h>
#include <mach/iommu_domains.h>
#include <media/videobuf2-core.h>
#include "enc-subdev.h"
@@ -24,7 +23,6 @@
u32 width;
bool secure;
bool uses_iommu_split_domain;
- struct switch_dev sdev;
};
int mdp_init(struct v4l2_subdev *sd, u32 val)
@@ -56,13 +54,7 @@
rc = -ENODEV;
goto mdp_open_fail;
}
- inst->sdev.name = "wfd";
- /* Register wfd node to switch driver */
- rc = switch_dev_register(&inst->sdev);
- if (rc) {
- WFD_MSG_ERR("WFD switch registration failed\n");
- goto mdp_open_fail;
- }
+
msm_fb_writeback_init(fbi);
inst->mdp = fbi;
inst->secure = mops->secure;
@@ -92,8 +84,6 @@
rc = -ENODEV;
goto exit;
}
- switch_set_state(&inst->sdev, true);
- WFD_MSG_DBG("wfd state switched to %d\n", inst->sdev.state);
}
exit:
return rc;
@@ -110,8 +100,6 @@
return rc;
}
fbi = (struct fb_info *)inst->mdp;
- switch_set_state(&inst->sdev, false);
- WFD_MSG_DBG("wfd state switched to %d\n", inst->sdev.state);
}
return 0;
}
@@ -123,8 +111,6 @@
fbi = (struct fb_info *)inst->mdp;
msm_fb_writeback_terminate(fbi);
kfree(inst);
- /* Unregister wfd node from switch driver */
- switch_dev_unregister(&inst->sdev);
}
return 0;
}
diff --git a/drivers/media/platform/msm/wfd/mdp-5-subdev.c b/drivers/media/platform/msm/wfd/mdp-5-subdev.c
index 55386b9..3c546d0 100644
--- a/drivers/media/platform/msm/wfd/mdp-5-subdev.c
+++ b/drivers/media/platform/msm/wfd/mdp-5-subdev.c
@@ -11,7 +11,6 @@
*
*/
#include <linux/msm_mdp.h>
-#include <linux/switch.h>
#include <mach/iommu_domains.h>
#include <media/videobuf2-core.h>
#include "enc-subdev.h"
@@ -23,7 +22,6 @@
u32 height;
u32 width;
bool secure;
- struct switch_dev sdev;
};
static int mdp_secure(struct v4l2_subdev *sd, void *arg);
@@ -57,13 +55,6 @@
rc = -ENODEV;
goto mdp_open_fail;
}
- inst->sdev.name = "wfd";
- /* Register wfd node to switch driver */
- rc = switch_dev_register(&inst->sdev);
- if (rc) {
- WFD_MSG_ERR("WFD switch registration failed\n");
- goto mdp_open_fail;
- }
msm_fb_writeback_init(fbi);
@@ -81,7 +72,6 @@
mops->cookie = inst;
return 0;
mdp_secure_fail:
- switch_dev_unregister(&inst->sdev);
msm_fb_writeback_terminate(inst->mdp);
mdp_open_fail:
kfree(inst);
@@ -105,8 +95,6 @@
rc = -ENODEV;
goto exit;
}
- switch_set_state(&inst->sdev, true);
- WFD_MSG_DBG("wfd state switched to %d\n", inst->sdev.state);
}
exit:
return rc;
@@ -123,9 +111,8 @@
WFD_MSG_ERR("Failed to stop writeback mode\n");
return rc;
}
+
fbi = (struct fb_info *)inst->mdp;
- switch_set_state(&inst->sdev, false);
- WFD_MSG_DBG("wfd state switched to %d\n", inst->sdev.state);
}
return 0;
}
@@ -139,8 +126,6 @@
if (inst->secure)
msm_fb_writeback_set_secure(inst->mdp, false);
msm_fb_writeback_terminate(fbi);
- /* Unregister wfd node from switch driver */
- switch_dev_unregister(&inst->sdev);
kfree(inst);
}
return 0;
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index e011d8f..046faac 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -34,8 +34,6 @@
#define SLIMBUS_PRESENT_TIMEOUT 100
#define MAX_WCD9XXX_DEVICE 4
-#define TABLA_I2C_MODE 0x03
-#define SITAR_I2C_MODE 0x01
#define CODEC_DT_MAX_PROP_SIZE 40
#define WCD9XXX_I2C_GSBI_SLAVE_ID "3-000d"
#define WCD9XXX_I2C_TOP_SLAVE_ADDR 0x0d
@@ -59,18 +57,9 @@
int mod_id;
};
-static char *taiko_supplies[] = {
- WCD9XXX_SUPPLY_BUCK_NAME, "cdc-vdd-tx-h", "cdc-vdd-rx-h", "cdc-vddpx-1",
- "cdc-vdd-a-1p2v", "cdc-vddcx-1", "cdc-vddcx-2",
-};
-
-static char *tapan_supplies[] = {
- WCD9XXX_SUPPLY_BUCK_NAME, "cdc-vdd-h", "cdc-vdd-px",
- "cdc-vdd-a-1p2v", "cdc-vdd-cx"
-};
-
static int wcd9xxx_dt_parse_vreg_info(struct device *dev,
- struct wcd9xxx_regulator *vreg, const char *vreg_name);
+ struct wcd9xxx_regulator *vreg,
+ const char *vreg_name, bool ondemand);
static int wcd9xxx_dt_parse_micbias_info(struct device *dev,
struct wcd9xxx_micbias_setting *micbias);
static struct wcd9xxx_pdata *wcd9xxx_populate_dt_pdata(struct device *dev);
@@ -292,49 +281,60 @@
},
};
-static struct wcd9xx_codec_type {
- u8 byte[4];
- struct mfd_cell *dev;
- int size;
- int num_irqs;
- int version; /* -1 to retrive version from chip version register */
- enum wcd9xxx_slim_slave_addr_type slim_slave_type;
-} wcd9xxx_codecs[] = {
+
+enum wcd9xxx_chipid_major {
+ TABLA_MAJOR = cpu_to_le16(0x100),
+ SITAR_MAJOR = cpu_to_le16(0x101),
+ TAIKO_MAJOR = cpu_to_le16(0x102),
+ TAPAN_MAJOR = cpu_to_le16(0x103),
+};
+
+static const struct wcd9xxx_codec_type wcd9xxx_codecs[] = {
{
- {0x2, 0x0, 0x0, 0x1}, tabla_devs, ARRAY_SIZE(tabla_devs),
- TABLA_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA
+ TABLA_MAJOR, cpu_to_le16(0x1), tabla1x_devs,
+ ARRAY_SIZE(tabla1x_devs), TABLA_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x03,
},
{
- {0x1, 0x0, 0x0, 0x1}, tabla1x_devs, ARRAY_SIZE(tabla1x_devs),
- TABLA_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA
- },
- { /* wcd9320 version 1 */
- {0x0, 0x0, 0x2, 0x1}, taiko_devs, ARRAY_SIZE(taiko_devs),
- TAIKO_NUM_IRQS, 1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO
- },
- { /* wcd9320 version 2 */
- {0x1, 0x0, 0x2, 0x1}, taiko_devs, ARRAY_SIZE(taiko_devs),
- TAIKO_NUM_IRQS, 2, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO
+ TABLA_MAJOR, cpu_to_le16(0x2), tabla_devs,
+ ARRAY_SIZE(tabla_devs), TABLA_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x03
},
{
- {0x0, 0x0, 0x3, 0x1}, tapan_devs, ARRAY_SIZE(tapan_devs),
- TAPAN_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO
+ /* Sitar version 1 has the same major chip id as Tabla */
+ TABLA_MAJOR, cpu_to_le16(0x0), sitar_devs,
+ ARRAY_SIZE(sitar_devs), SITAR_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x01
},
{
- {0x1, 0x0, 0x3, 0x1}, tapan_devs, ARRAY_SIZE(tapan_devs),
- TAPAN_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO
+ SITAR_MAJOR, cpu_to_le16(0x1), sitar_devs,
+ ARRAY_SIZE(sitar_devs), SITAR_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x01
},
{
- {0x0, 0x0, 0x0, 0x1}, sitar_devs, ARRAY_SIZE(sitar_devs),
- SITAR_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA
+ SITAR_MAJOR, cpu_to_le16(0x2), sitar_devs,
+ ARRAY_SIZE(sitar_devs), SITAR_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x01
},
{
- {0x1, 0x0, 0x1, 0x1}, sitar_devs, ARRAY_SIZE(sitar_devs),
- SITAR_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA
+ TAIKO_MAJOR, cpu_to_le16(0x0), taiko_devs,
+ ARRAY_SIZE(taiko_devs), TAIKO_NUM_IRQS, 1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x01
},
{
- {0x2, 0x0, 0x1, 0x1}, sitar_devs, ARRAY_SIZE(sitar_devs),
- SITAR_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA
+ TAIKO_MAJOR, cpu_to_le16(0x1), taiko_devs,
+ ARRAY_SIZE(taiko_devs), TAIKO_NUM_IRQS, 2,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x01
+ },
+ {
+ TAPAN_MAJOR, cpu_to_le16(0x0), tapan_devs,
+ ARRAY_SIZE(tapan_devs), TAPAN_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x03
+ },
+ {
+ TAPAN_MAJOR, cpu_to_le16(0x1), tapan_devs,
+ ARRAY_SIZE(tapan_devs), TAPAN_NUM_IRQS, -1,
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x03
},
};
@@ -384,65 +384,80 @@
wcd9xxx->reset_gpio = 0;
}
}
-static int wcd9xxx_check_codec_type(struct wcd9xxx *wcd9xxx,
- struct mfd_cell **wcd9xxx_dev,
- int *wcd9xxx_dev_size,
- int *wcd9xxx_dev_num_irqs)
+
+static const struct wcd9xxx_codec_type
+*wcd9xxx_check_codec_type(struct wcd9xxx *wcd9xxx, u8 *version)
{
- int i;
- int ret;
- i = WCD9XXX_A_CHIP_ID_BYTE_0;
- while (i <= WCD9XXX_A_CHIP_ID_BYTE_3) {
- ret = wcd9xxx_reg_read(wcd9xxx, i);
- if (ret < 0)
- goto exit;
- wcd9xxx->idbyte[i-WCD9XXX_A_CHIP_ID_BYTE_0] = (u8)ret;
- pr_debug("%s: wcd9xx read = %x, byte = %x\n", __func__, ret,
- i);
- i++;
+ int i, rc;
+ const struct wcd9xxx_codec_type *c, *d = NULL;
+
+ rc = wcd9xxx_bulk_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_0,
+ sizeof(wcd9xxx->id_minor),
+ (u8 *)&wcd9xxx->id_minor);
+ if (rc < 0)
+ goto exit;
+
+ rc = wcd9xxx_bulk_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_2,
+ sizeof(wcd9xxx->id_major),
+ (u8 *)&wcd9xxx->id_major);
+ if (rc < 0)
+ goto exit;
+ dev_dbg(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
+ __func__, wcd9xxx->id_major, wcd9xxx->id_minor);
+
+ for (i = 0, c = &wcd9xxx_codecs[0]; i < ARRAY_SIZE(wcd9xxx_codecs);
+ i++, c++) {
+ if (c->id_major == wcd9xxx->id_major) {
+ if (c->id_minor == wcd9xxx->id_minor) {
+ d = c;
+ dev_dbg(wcd9xxx->dev,
+ "%s: exact match %s\n", __func__,
+ d->dev->name);
+ break;
+ } else if (!d) {
+ d = c;
+ } else {
+ if ((d->id_minor < c->id_minor) ||
+ (d->id_minor == c->id_minor &&
+ d->version < c->version))
+ d = c;
+ }
+ dev_dbg(wcd9xxx->dev,
+ "%s: best match %s, major 0x%x, minor 0x%x\n",
+ __func__, d->dev->name, d->id_major,
+ d->id_minor);
+ }
}
- /* Read codec version */
- ret = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_VERSION);
- if (ret < 0)
- goto exit;
- wcd9xxx->version = (u8)ret & 0x1F;
- i = 0;
- while (i < ARRAY_SIZE(wcd9xxx_codecs)) {
- if ((wcd9xxx_codecs[i].byte[0] == wcd9xxx->idbyte[0]) &&
- (wcd9xxx_codecs[i].byte[1] == wcd9xxx->idbyte[1]) &&
- (wcd9xxx_codecs[i].byte[2] == wcd9xxx->idbyte[2]) &&
- (wcd9xxx_codecs[i].byte[3] == wcd9xxx->idbyte[3])) {
- pr_info("%s: codec is %s", __func__,
- wcd9xxx_codecs[i].dev->name);
- *wcd9xxx_dev = wcd9xxx_codecs[i].dev;
- *wcd9xxx_dev_size = wcd9xxx_codecs[i].size;
- *wcd9xxx_dev_num_irqs = wcd9xxx_codecs[i].num_irqs;
- wcd9xxx->slim_slave_type =
- wcd9xxx_codecs[i].slim_slave_type;
- if (wcd9xxx_codecs[i].version > -1)
- wcd9xxx->version = wcd9xxx_codecs[i].version;
- break;
+ if (!d) {
+ dev_warn(wcd9xxx->dev,
+ "%s: driver for id major 0x%x, minor 0x%x not found\n",
+ __func__, wcd9xxx->id_major, wcd9xxx->id_minor);
+ } else {
+ if (d->version > -1) {
+ *version = d->version;
+ } else {
+ rc = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_VERSION);
+ if (rc < 0) {
+ d = NULL;
+ goto exit;
+ }
+ *version = (u8)rc & 0x1F;
}
- i++;
+ dev_info(wcd9xxx->dev,
+ "%s: detected %s, major 0x%x, minor 0x%x, ver 0x%x\n",
+ __func__, d->dev->name, d->id_major, d->id_minor,
+ *version);
}
- if (*wcd9xxx_dev == NULL || *wcd9xxx_dev_size == 0)
- ret = -ENODEV;
- pr_info("%s: Read codec idbytes & version\n"
- "byte_0[%08x] byte_1[%08x] byte_2[%08x]\n"
- " byte_3[%08x] version = %x\n", __func__,
- wcd9xxx->idbyte[0], wcd9xxx->idbyte[1],
- wcd9xxx->idbyte[2], wcd9xxx->idbyte[3],
- wcd9xxx->version);
exit:
- return ret;
+ return d;
}
static int wcd9xxx_device_init(struct wcd9xxx *wcd9xxx)
{
int ret;
- struct mfd_cell *wcd9xxx_dev = NULL;
- int wcd9xxx_dev_size = 0;
+ u8 version;
+ const struct wcd9xxx_codec_type *found;
mutex_init(&wcd9xxx->io_lock);
mutex_init(&wcd9xxx->xfer_lock);
@@ -458,10 +473,14 @@
wcd9xxx_bring_up(wcd9xxx);
- ret = wcd9xxx_check_codec_type(wcd9xxx, &wcd9xxx_dev, &wcd9xxx_dev_size,
- &wcd9xxx->num_irqs);
- if (ret < 0)
+ found = wcd9xxx_check_codec_type(wcd9xxx, &version);
+ if (!found) {
+ ret = -ENODEV;
goto err_irq;
+ } else {
+ wcd9xxx->codec_type = found;
+ wcd9xxx->version = version;
+ }
if (wcd9xxx->irq != -1) {
ret = wcd9xxx_irq_init(wcd9xxx);
@@ -471,7 +490,7 @@
}
}
- ret = mfd_add_devices(wcd9xxx->dev, -1, wcd9xxx_dev, wcd9xxx_dev_size,
+ ret = mfd_add_devices(wcd9xxx->dev, -1, found->dev, found->size,
NULL, 0);
if (ret != 0) {
dev_err(wcd9xxx->dev, "Failed to add children: %d\n", ret);
@@ -605,8 +624,8 @@
};
#endif
-static int wcd9xxx_enable_supplies(struct wcd9xxx *wcd9xxx,
- struct wcd9xxx_pdata *pdata)
+static int wcd9xxx_init_supplies(struct wcd9xxx *wcd9xxx,
+ struct wcd9xxx_pdata *pdata)
{
int ret;
int i;
@@ -620,7 +639,7 @@
wcd9xxx->num_of_supplies = 0;
- if (ARRAY_SIZE(pdata->regulator) > MAX_REGULATOR) {
+ if (ARRAY_SIZE(pdata->regulator) > WCD9XXX_MAX_REGULATOR) {
pr_err("%s: Array Size out of bound\n", __func__);
ret = -EINVAL;
goto err;
@@ -642,8 +661,12 @@
}
for (i = 0; i < wcd9xxx->num_of_supplies; i++) {
+ if (regulator_count_voltages(wcd9xxx->supplies[i].consumer) <=
+ 0)
+ continue;
ret = regulator_set_voltage(wcd9xxx->supplies[i].consumer,
- pdata->regulator[i].min_uV, pdata->regulator[i].max_uV);
+ pdata->regulator[i].min_uV,
+ pdata->regulator[i].max_uV);
if (ret) {
pr_err("%s: Setting regulator voltage failed for "
"regulator %s err = %d\n", __func__,
@@ -652,30 +675,19 @@
}
ret = regulator_set_optimum_mode(wcd9xxx->supplies[i].consumer,
- pdata->regulator[i].optimum_uA);
+ pdata->regulator[i].optimum_uA);
if (ret < 0) {
pr_err("%s: Setting regulator optimum mode failed for "
"regulator %s err = %d\n", __func__,
wcd9xxx->supplies[i].supply, ret);
goto err_get;
+ } else {
+ ret = 0;
}
}
- ret = regulator_bulk_enable(wcd9xxx->num_of_supplies,
- wcd9xxx->supplies);
- if (ret != 0) {
- dev_err(wcd9xxx->dev, "Failed to enable supplies: err = %d\n",
- ret);
- goto err_configure;
- }
return ret;
-err_configure:
- for (i = 0; i < wcd9xxx->num_of_supplies; i++) {
- regulator_set_voltage(wcd9xxx->supplies[i].consumer, 0,
- pdata->regulator[i].max_uV);
- regulator_set_optimum_mode(wcd9xxx->supplies[i].consumer, 0);
- }
err_get:
regulator_bulk_free(wcd9xxx->num_of_supplies, wcd9xxx->supplies);
err_supplies:
@@ -684,6 +696,33 @@
return ret;
}
+static int wcd9xxx_enable_static_supplies(struct wcd9xxx *wcd9xxx,
+ struct wcd9xxx_pdata *pdata)
+{
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < wcd9xxx->num_of_supplies; i++) {
+ if (pdata->regulator[i].ondemand)
+ continue;
+ ret = regulator_enable(wcd9xxx->supplies[i].consumer);
+ if (ret) {
+ pr_err("%s: Failed to enable %s\n", __func__,
+ wcd9xxx->supplies[i].supply);
+ break;
+ } else {
+ pr_debug("%s: Enabled regulator %s\n", __func__,
+ wcd9xxx->supplies[i].supply);
+ }
+ }
+
+ while (ret && --i)
+ if (!pdata->regulator[i].ondemand)
+ regulator_disable(wcd9xxx->supplies[i].consumer);
+
+ return ret;
+}
+
static void wcd9xxx_disable_supplies(struct wcd9xxx *wcd9xxx,
struct wcd9xxx_pdata *pdata)
{
@@ -692,8 +731,11 @@
regulator_bulk_disable(wcd9xxx->num_of_supplies,
wcd9xxx->supplies);
for (i = 0; i < wcd9xxx->num_of_supplies; i++) {
+ if (regulator_count_voltages(wcd9xxx->supplies[i].consumer) <=
+ 0)
+ continue;
regulator_set_voltage(wcd9xxx->supplies[i].consumer, 0,
- pdata->regulator[i].max_uV);
+ pdata->regulator[i].max_uV);
regulator_set_optimum_mode(wcd9xxx->supplies[i].consumer, 0);
}
regulator_bulk_free(wcd9xxx->num_of_supplies, wcd9xxx->supplies);
@@ -856,7 +898,6 @@
struct wcd9xxx_pdata *pdata = NULL;
int val = 0;
int ret = 0;
- int i2c_mode = 0;
int wcd9xx_index = 0;
struct device *dev;
@@ -913,18 +954,26 @@
wcd9xxx->slim_device_bootup = true;
if (client->dev.of_node)
wcd9xxx->mclk_rate = pdata->mclk_rate;
- ret = wcd9xxx_enable_supplies(wcd9xxx, pdata);
+
+ ret = wcd9xxx_init_supplies(wcd9xxx, pdata);
if (ret) {
pr_err("%s: Fail to enable Codec supplies\n",
__func__);
goto err_codec;
}
+ ret = wcd9xxx_enable_static_supplies(wcd9xxx, pdata);
+ if (ret) {
+ pr_err("%s: Fail to enable Codec pre-reset supplies\n",
+ __func__);
+ goto err_codec;
+ }
usleep_range(5, 5);
+
ret = wcd9xxx_reset(wcd9xxx);
if (ret) {
pr_err("%s: Resetting Codec failed\n", __func__);
- goto err_supplies;
+ goto err_supplies;
}
ret = wcd9xxx_i2c_get_client_index(client, &wcd9xx_index);
@@ -948,18 +997,14 @@
goto err_device_init;
}
- if ((wcd9xxx->idbyte[0] == 0x2) || (wcd9xxx->idbyte[0] == 0x1))
- i2c_mode = TABLA_I2C_MODE;
- else if (wcd9xxx->idbyte[0] == 0x0)
- i2c_mode = SITAR_I2C_MODE;
-
ret = wcd9xxx_read(wcd9xxx, WCD9XXX_A_CHIP_STATUS, 1, &val, 0);
+ if (ret < 0)
+ pr_err("%s: failed to read the wcd9xxx status (%d)\n",
+ __func__, ret);
+ if (val != wcd9xxx->codec_type->i2c_chip_status)
+ pr_err("%s: unknown chip status 0x%x\n", __func__, val);
- if ((ret < 0) || (val != i2c_mode))
- pr_err("failed to read the wcd9xxx status ret = %d\n",
- ret);
-
- wcd9xxx_intf = WCD9XXX_INTERFACE_TYPE_I2C;
+ wcd9xxx_intf = WCD9XXX_INTERFACE_TYPE_I2C;
return ret;
} else
@@ -988,7 +1033,9 @@
}
static int wcd9xxx_dt_parse_vreg_info(struct device *dev,
- struct wcd9xxx_regulator *vreg, const char *vreg_name)
+ struct wcd9xxx_regulator *vreg,
+ const char *vreg_name,
+ bool ondemand)
{
int len, ret = 0;
const __be32 *prop;
@@ -1006,6 +1053,7 @@
return -ENODEV;
}
vreg->name = vreg_name;
+ vreg->ondemand = ondemand;
snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
"qcom,%s-voltage", vreg_name);
@@ -1014,7 +1062,7 @@
if (!prop || (len != (2 * sizeof(__be32)))) {
dev_err(dev, "%s %s property\n",
prop ? "invalid format" : "no", prop_name);
- return -ENODEV;
+ return -EINVAL;
} else {
vreg->min_uV = be32_to_cpup(&prop[0]);
vreg->max_uV = be32_to_cpup(&prop[1]);
@@ -1027,12 +1075,12 @@
if (ret) {
dev_err(dev, "Looking up %s property in node %s failed",
prop_name, dev->of_node->full_name);
- return -ENODEV;
+ return -EFAULT;
}
vreg->optimum_uA = prop_val;
- dev_info(dev, "%s: vol=[%d %d]uV, curr=[%d]uA\n", vreg->name,
- vreg->min_uV, vreg->max_uV, vreg->optimum_uA);
+ dev_info(dev, "%s: vol=[%d %d]uV, curr=[%d]uA, ond %d\n", vreg->name,
+ vreg->min_uV, vreg->max_uV, vreg->optimum_uA, vreg->ondemand);
return 0;
}
@@ -1150,40 +1198,66 @@
static struct wcd9xxx_pdata *wcd9xxx_populate_dt_pdata(struct device *dev)
{
struct wcd9xxx_pdata *pdata;
- int ret, i;
- char **codec_supplies;
- u32 num_of_supplies = 0;
+ int ret, static_cnt, ond_cnt, idx, i;
+ const char *name = NULL;
u32 mclk_rate = 0;
u32 dmic_sample_rate = 0;
+ const char *static_prop_name = "qcom,cdc-static-supplies";
+ const char *ond_prop_name = "qcom,cdc-on-demand-supplies";
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(dev, "could not allocate memory for platform data\n");
return NULL;
}
- if (!strcmp(dev_name(dev), "taiko-slim-pgd") ||
- (!strcmp(dev_name(dev), WCD9XXX_I2C_GSBI_SLAVE_ID))) {
- codec_supplies = taiko_supplies;
- num_of_supplies = ARRAY_SIZE(taiko_supplies);
- } else if (!strcmp(dev_name(dev), "tapan-slim-pgd")) {
- codec_supplies = tapan_supplies;
- num_of_supplies = ARRAY_SIZE(tapan_supplies);
- } else {
- dev_err(dev, "%s unsupported device %s\n",
- __func__, dev_name(dev));
+
+ static_cnt = of_property_count_strings(dev->of_node, static_prop_name);
+ if (IS_ERR_VALUE(static_cnt)) {
+ dev_err(dev, "%s: Failed to get static supplies %d\n", __func__,
+ static_cnt);
goto err;
}
- if (num_of_supplies > ARRAY_SIZE(pdata->regulator)) {
+ /* On-demand supply list is an optional property */
+ ond_cnt = of_property_count_strings(dev->of_node, ond_prop_name);
+ if (IS_ERR_VALUE(ond_cnt))
+ ond_cnt = 0;
+
+ BUG_ON(static_cnt <= 0 || ond_cnt < 0);
+ if ((static_cnt + ond_cnt) > ARRAY_SIZE(pdata->regulator)) {
dev_err(dev, "%s: Num of supplies %u > max supported %u\n",
- __func__, num_of_supplies, ARRAY_SIZE(pdata->regulator));
-
+ __func__, static_cnt, ARRAY_SIZE(pdata->regulator));
goto err;
}
- for (i = 0; i < num_of_supplies; i++) {
- ret = wcd9xxx_dt_parse_vreg_info(dev, &pdata->regulator[i],
- codec_supplies[i]);
+ for (idx = 0; idx < static_cnt; idx++) {
+ ret = of_property_read_string_index(dev->of_node,
+ static_prop_name, idx,
+ &name);
+ if (ret) {
+ dev_err(dev, "%s: of read string %s idx %d error %d\n",
+ __func__, static_prop_name, idx, ret);
+ goto err;
+ }
+
+ dev_dbg(dev, "%s: Found static cdc supply %s\n", __func__,
+ name);
+ ret = wcd9xxx_dt_parse_vreg_info(dev, &pdata->regulator[idx],
+ name, false);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < ond_cnt; i++, idx++) {
+ ret = of_property_read_string_index(dev->of_node, ond_prop_name,
+ i, &name);
+ if (ret)
+ goto err;
+
+ dev_dbg(dev, "%s: Found on-demand cdc supply %s\n", __func__,
+ name);
+ ret = wcd9xxx_dt_parse_vreg_info(dev, &pdata->regulator[idx],
+ name, true);
if (ret)
goto err;
}
@@ -1325,9 +1399,17 @@
wcd9xxx->mclk_rate = pdata->mclk_rate;
wcd9xxx->slim_device_bootup = true;
- ret = wcd9xxx_enable_supplies(wcd9xxx, pdata);
- if (ret)
+ ret = wcd9xxx_init_supplies(wcd9xxx, pdata);
+ if (ret) {
+ pr_err("%s: Fail to init Codec supplies %d\n", __func__, ret);
goto err_codec;
+ }
+ ret = wcd9xxx_enable_static_supplies(wcd9xxx, pdata);
+ if (ret) {
+ pr_err("%s: Fail to enable Codec pre-reset supplies\n",
+ __func__);
+ goto err_codec;
+ }
usleep_range(5, 5);
ret = wcd9xxx_reset(wcd9xxx);
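
The wcd9xxx changes above split the codec supplies into two DT string lists, qcom,cdc-static-supplies and the optional qcom,cdc-on-demand-supplies; wcd9xxx_enable_static_supplies() turns on only the static ones before reset and backs out anything already enabled if a later regulator fails. A compact sketch of that enable-with-rollback loop, with an illustrative data layout (the driver keeps the on-demand flag inside struct wcd9xxx_regulator instead):

/*
 * Sketch: enable static supplies with rollback on failure.
 * The parallel supplies[]/ondemand[] arrays are illustrative only.
 */
#include <linux/regulator/consumer.h>
#include <linux/types.h>

static int enable_static_supplies(struct regulator_bulk_data *supplies,
				  const bool *ondemand, int count)
{
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		if (ondemand[i])
			continue;	/* left off until a runtime user votes */
		ret = regulator_enable(supplies[i].consumer);
		if (ret)
			break;
	}

	while (ret && i-- > 0)	/* undo everything enabled before the failure */
		if (!ondemand[i])
			regulator_disable(supplies[i].consumer);

	return ret;
}
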
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index f2c3959..356aecb 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -218,7 +218,8 @@
static int wcd9xxx_num_irq_regs(const struct wcd9xxx *wcd9xxx)
{
- return (wcd9xxx->num_irqs / 8) + ((wcd9xxx->num_irqs % 8) ? 1 : 0);
+ return (wcd9xxx->codec_type->num_irqs / 8) +
+ ((wcd9xxx->codec_type->num_irqs % 8) ? 1 : 0);
}
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
@@ -266,7 +267,8 @@
if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
wcd9xxx_irq_dispatch(wcd9xxx, i);
}
- for (i = WCD9XXX_IRQ_BG_PRECHARGE; i < wcd9xxx->num_irqs; i++) {
+ for (i = WCD9XXX_IRQ_BG_PRECHARGE; i < wcd9xxx->codec_type->num_irqs;
+ i++) {
if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
wcd9xxx_irq_dispatch(wcd9xxx, i);
}
@@ -301,7 +303,7 @@
pr_debug("%s: enter\n", __func__);
- for (irq = 0; irq < wcd9xxx->num_irqs; irq++) {
+ for (irq = 0; irq < wcd9xxx->codec_type->num_irqs; irq++) {
/* Map OF irq */
virq = wcd9xxx_map_irq(wcd9xxx, irq);
pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
@@ -365,7 +367,7 @@
/* mask all the interrupts */
memset(irq_level, 0, wcd9xxx_num_irq_regs(wcd9xxx));
- for (i = 0; i < wcd9xxx->num_irqs; i++) {
+ for (i = 0; i < wcd9xxx->codec_type->num_irqs; i++) {
wcd9xxx->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
wcd9xxx->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
irq_level[BIT_BYTE(i)] |=
diff --git a/drivers/mfd/wcd9xxx-slimslave.c b/drivers/mfd/wcd9xxx-slimslave.c
index f2d71b6..81262b58 100644
--- a/drivers/mfd/wcd9xxx-slimslave.c
+++ b/drivers/mfd/wcd9xxx-slimslave.c
@@ -31,7 +31,8 @@
static int wcd9xxx_configure_ports(struct wcd9xxx *wcd9xxx)
{
- if (wcd9xxx->slim_slave_type == WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA) {
+ if (wcd9xxx->codec_type->slim_slave_type ==
+ WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA) {
sh_ch.rx_port_ch_reg_base = 0x180;
sh_ch.port_rx_cfg_reg_base = 0x040;
sh_ch.port_tx_cfg_reg_base = 0x040;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 188ac32..55e3e4e 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1,5 +1,3 @@
-
-
/*Qualcomm Secure Execution Environment Communicator (QSEECOM) driver
*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
@@ -53,6 +51,7 @@
#define QSEE_VERSION_01 0x401000
#define QSEE_VERSION_02 0x402000
#define QSEE_VERSION_03 0x403000
+#define QSEE_VERSION_04 0x404000
#define QSEOS_CHECK_VERSION_CMD 0x00001803
@@ -62,6 +61,12 @@
#define QSEECOM_MAX_SG_ENTRY 512
#define QSEECOM_DISK_ENCRYTPION_KEY_ID 0
+/* Save partition image hash for authentication check */
+#define SCM_SAVE_PARTITION_HASH_ID 0x01
+
+/* Check if enterprise security is activated */
+#define SCM_IS_ACTIVATED_ID 0x02
+
enum qseecom_clk_definitions {
CLK_DFAB = 0,
CLK_SFPB,
@@ -74,6 +79,11 @@
QSEECOM_GENERIC,
};
+enum qseecom_ce_hw_instance {
+ CLK_QSEE = 0,
+ CLK_CE_DRV,
+};
+
static struct class *driver_class;
static dev_t qseecom_device_no;
static struct cdev qseecom_cdev;
@@ -85,6 +95,7 @@
static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
+static DEFINE_MUTEX(clk_access_lock);
struct qseecom_registered_listener_list {
struct list_head list;
@@ -117,10 +128,12 @@
};
struct qseecom_clk {
+ enum qseecom_ce_hw_instance instance;
struct clk *ce_core_clk;
struct clk *ce_clk;
struct clk *ce_core_src_clk;
struct clk *ce_bus_clk;
+ uint32_t clk_access_cnt;
};
struct qseecom_control {
@@ -148,6 +161,7 @@
uint32_t qsee_perf_client;
struct qseecom_clk qsee;
+ struct qseecom_clk ce_drv;
};
struct qseecom_client_handle {
@@ -157,8 +171,6 @@
uint32_t user_virt_sb_base;
size_t sb_length;
struct ion_handle *ihandle; /* Retrieve phy addr */
- bool perf_enabled;
- bool fast_load_enabled;
};
struct qseecom_listener_handle {
@@ -177,6 +189,8 @@
int abort;
wait_queue_head_t abort_wq;
atomic_t ioctl_count;
+ bool perf_enabled;
+ bool fast_load_enabled;
};
enum qseecom_set_clear_key_flag {
@@ -551,13 +565,14 @@
if (wait_event_freezable(qseecom.send_resp_wq,
__qseecom_listener_has_sent_rsp(data))) {
pr_warning("Interrupted: exiting send_cmd loop\n");
- return -ERESTARTSYS;
+ ret = -ERESTARTSYS;
}
- if (data->abort) {
- pr_err("Aborting listener service %d\n",
- data->listener.id);
- rc = -ENODEV;
+ if ((data->abort) || (ret == -ERESTARTSYS)) {
+ pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
+ data->client.app_id, lstnr, ret);
+ if (data->abort)
+ rc = -ENODEV;
send_data_rsp.status = QSEOS_RESULT_FAILURE;
} else {
send_data_rsp.status = QSEOS_RESULT_SUCCESS;
@@ -658,7 +673,7 @@
app_id = ret;
if (app_id) {
- pr_warn("App id %d (%s) already exists\n", app_id,
+ pr_debug("App id %d (%s) already exists\n", app_id,
(char *)(req.app_name));
spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
list_for_each_entry(entry,
@@ -816,7 +831,7 @@
break;
} else {
ptr_app->ref_cnt--;
- pr_warn("Can't unload app(%d) inuse\n",
+ pr_debug("Can't unload app(%d) inuse\n",
ptr_app->app_id);
break;
}
@@ -1789,9 +1804,9 @@
pr_err("Unable to find the handle, exiting\n");
else
ret = qseecom_unload_app(data);
- if (data->client.fast_load_enabled == true)
+ if (data->fast_load_enabled == true)
qsee_disable_clock_vote(data, CLK_SFPB);
- if (data->client.perf_enabled == true)
+ if (data->perf_enabled == true)
qsee_disable_clock_vote(data, CLK_DFAB);
if (ret == 0) {
kzfree(data);
@@ -1891,12 +1906,23 @@
return 0;
}
-static int __qseecom_enable_clk(void)
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
int rc = 0;
struct qseecom_clk *qclk;
+ if (ce == CLK_QSEE)
qclk = &qseecom.qsee;
+ else
+ qclk = &qseecom.ce_drv;
+
+ mutex_lock(&clk_access_lock);
+ if (qclk->clk_access_cnt > 0) {
+ qclk->clk_access_cnt++;
+ mutex_unlock(&clk_access_lock);
+ return rc;
+ }
+
/* Enable CE core clk */
rc = clk_prepare_enable(qclk->ce_core_clk);
if (rc) {
@@ -1915,6 +1941,8 @@
pr_err("Unable to enable/prepare CE bus clk\n");
goto ce_bus_clk_err;
}
+ qclk->clk_access_cnt++;
+ mutex_unlock(&clk_access_lock);
return 0;
ce_bus_clk_err:
@@ -1922,20 +1950,30 @@
ce_clk_err:
clk_disable_unprepare(qclk->ce_core_clk);
err:
+ mutex_unlock(&clk_access_lock);
return -EIO;
}
-static void __qseecom_disable_clk(void)
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
{
struct qseecom_clk *qclk;
- qclk = &qseecom.qsee;
- if (qclk->ce_clk != NULL)
- clk_disable_unprepare(qclk->ce_clk);
- if (qclk->ce_core_clk != NULL)
- clk_disable_unprepare(qclk->ce_core_clk);
- if (qclk->ce_bus_clk != NULL)
- clk_disable_unprepare(qclk->ce_bus_clk);
+ if (ce == CLK_QSEE)
+ qclk = &qseecom.qsee;
+ else
+ qclk = &qseecom.ce_drv;
+
+ mutex_lock(&clk_access_lock);
+ if (qclk->clk_access_cnt == 1) {
+ if (qclk->ce_clk != NULL)
+ clk_disable_unprepare(qclk->ce_clk);
+ if (qclk->ce_core_clk != NULL)
+ clk_disable_unprepare(qclk->ce_core_clk);
+ if (qclk->ce_bus_clk != NULL)
+ clk_disable_unprepare(qclk->ce_bus_clk);
+ }
+ qclk->clk_access_cnt--;
+ mutex_unlock(&clk_access_lock);
}
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
@@ -1957,14 +1995,14 @@
qseecom.qsee_perf_client, 3);
else {
if (qclk->ce_core_src_clk != NULL)
- ret = __qseecom_enable_clk();
+ ret = __qseecom_enable_clk(CLK_QSEE);
if (!ret) {
ret =
msm_bus_scale_client_update_request(
qseecom.qsee_perf_client, 1);
if ((ret) &&
(qclk->ce_core_src_clk != NULL))
- __qseecom_disable_clk();
+ __qseecom_disable_clk(CLK_QSEE);
}
}
if (ret)
@@ -1972,11 +2010,11 @@
ret);
else {
qseecom.qsee_bw_count++;
- data->client.perf_enabled = true;
+ data->perf_enabled = true;
}
} else {
qseecom.qsee_bw_count++;
- data->client.perf_enabled = true;
+ data->perf_enabled = true;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -1988,14 +2026,14 @@
qseecom.qsee_perf_client, 3);
else {
if (qclk->ce_core_src_clk != NULL)
- ret = __qseecom_enable_clk();
+ ret = __qseecom_enable_clk(CLK_QSEE);
if (!ret) {
ret =
msm_bus_scale_client_update_request(
qseecom.qsee_perf_client, 2);
if ((ret) &&
(qclk->ce_core_src_clk != NULL))
- __qseecom_disable_clk();
+ __qseecom_disable_clk(CLK_QSEE);
}
}
@@ -2004,11 +2042,11 @@
ret);
else {
qseecom.qsee_sfpb_bw_count++;
- data->client.fast_load_enabled = true;
+ data->fast_load_enabled = true;
}
} else {
qseecom.qsee_sfpb_bw_count++;
- data->client.fast_load_enabled = true;
+ data->fast_load_enabled = true;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -2046,18 +2084,18 @@
ret = msm_bus_scale_client_update_request(
qseecom.qsee_perf_client, 0);
if ((!ret) && (qclk->ce_core_src_clk != NULL))
- __qseecom_disable_clk();
+ __qseecom_disable_clk(CLK_QSEE);
}
if (ret)
pr_err("SFPB Bandwidth req fail (%d)\n",
ret);
else {
qseecom.qsee_bw_count--;
- data->client.perf_enabled = false;
+ data->perf_enabled = false;
}
} else {
qseecom.qsee_bw_count--;
- data->client.perf_enabled = false;
+ data->perf_enabled = false;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -2076,18 +2114,18 @@
ret = msm_bus_scale_client_update_request(
qseecom.qsee_perf_client, 0);
if ((!ret) && (qclk->ce_core_src_clk != NULL))
- __qseecom_disable_clk();
+ __qseecom_disable_clk(CLK_QSEE);
}
if (ret)
pr_err("SFPB Bandwidth req fail (%d)\n",
ret);
else {
qseecom.qsee_sfpb_bw_count--;
- data->client.fast_load_enabled = false;
+ data->fast_load_enabled = false;
}
} else {
qseecom.qsee_sfpb_bw_count--;
- data->client.fast_load_enabled = false;
+ data->fast_load_enabled = false;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -2278,7 +2316,7 @@
pr_err(" scm call to check if app is loaded failed");
return ret; /* scm call failed */
} else if (ret > 0) {
- pr_warn("App id %d (%s) already exists\n", ret,
+ pr_debug("App id %d (%s) already exists\n", ret,
(char *)(req.app_name));
spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
list_for_each_entry(entry,
@@ -2345,11 +2383,13 @@
memcpy(ireq.key_id, key_id, QSEECOM_KEY_ID_SIZE);
ireq.flags = flags;
+ __qseecom_enable_clk(CLK_QSEE);
ret = scm_call(SCM_SVC_CRYPTO, QSEOS_GENERATE_KEY,
&ireq, sizeof(struct qseecom_key_generate_ireq),
&resp, sizeof(resp));
if (ret) {
pr_err("scm call to generate key failed : %d\n", ret);
+ __qseecom_disable_clk(CLK_QSEE);
return ret;
}
@@ -2367,6 +2407,7 @@
ret = -EINVAL;
break;
}
+ __qseecom_disable_clk(CLK_QSEE);
return ret;
}
@@ -2386,11 +2427,13 @@
memcpy(ireq.key_id, key_id, QSEECOM_KEY_ID_SIZE);
ireq.flags = flags;
+ __qseecom_enable_clk(CLK_QSEE);
ret = scm_call(SCM_SVC_CRYPTO, QSEOS_DELETE_KEY,
&ireq, sizeof(struct qseecom_key_delete_ireq),
&resp, sizeof(struct qseecom_command_scm_resp));
if (ret) {
pr_err("scm call to delete key failed : %d\n", ret);
+ __qseecom_disable_clk(CLK_QSEE);
return ret;
}
@@ -2409,6 +2452,7 @@
ret = -EINVAL;
break;
}
+ __qseecom_disable_clk(CLK_QSEE);
return ret;
}
@@ -2424,6 +2468,12 @@
pr_err("Error:: unsupported usage %d\n", usage);
return -EFAULT;
}
+
+ if (qseecom.qsee.instance == qseecom.ce_drv.instance)
+ __qseecom_enable_clk(CLK_QSEE);
+ else
+ __qseecom_enable_clk(CLK_CE_DRV);
+
memcpy(ireq.key_id, set_key_para->key_id, QSEECOM_KEY_ID_SIZE);
ireq.ce = set_key_para->ce_hw;
ireq.pipe = set_key_para->pipe;
@@ -2459,6 +2509,11 @@
break;
}
+ if (qseecom.qsee.instance == qseecom.ce_drv.instance)
+ __qseecom_disable_clk(CLK_QSEE);
+ else
+ __qseecom_disable_clk(CLK_CE_DRV);
+
return ret;
}
@@ -2570,6 +2625,70 @@
return ret;
}
+static int qseecom_is_es_activated(void __user *argp)
+{
+ struct qseecom_is_es_activated_req req;
+ int ret;
+ int resp_buf;
+
+ if (qseecom.qsee_version < QSEE_VERSION_04) {
+ pr_err("invalid qsee version");
+ return -ENODEV;
+ }
+
+ if (argp == NULL) {
+ pr_err("arg is null");
+ return -EINVAL;
+ }
+
+ ret = scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID, NULL, 0,
+ (void *) &resp_buf, sizeof(resp_buf));
+ if (ret) {
+ pr_err("scm_call failed");
+ return ret;
+ }
+
+ req.is_activated = resp_buf;
+ ret = copy_to_user(argp, &req, sizeof(req));
+ if (ret) {
+ pr_err("copy_to_user failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qseecom_save_partition_hash(void __user *argp)
+{
+ struct qseecom_save_partition_hash_req req;
+ int ret;
+
+ if (qseecom.qsee_version < QSEE_VERSION_04) {
+ pr_err("invalid qsee version ");
+ return -ENODEV;
+ }
+
+ if (argp == NULL) {
+ pr_err("arg is null");
+ return -EINVAL;
+ }
+
+ ret = copy_from_user(&req, argp, sizeof(req));
+ if (ret) {
+ pr_err("copy_from_user failed");
+ return ret;
+ }
+
+ ret = scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
+ (void *) &req, sizeof(req), NULL, 0);
+ if (ret) {
+ pr_err("scm_call failed");
+ return ret;
+ }
+
+ return 0;
+}
+
static long qseecom_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
@@ -2782,6 +2901,24 @@
mutex_unlock(&app_access_lock);
break;
}
+ case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_save_partition_hash(argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_is_es_activated(argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
default:
return -EINVAL;
}
@@ -2853,9 +2990,9 @@
}
}
- if (data->client.fast_load_enabled == true)
+ if (data->fast_load_enabled == true)
qsee_disable_clock_vote(data, CLK_SFPB);
- if (data->client.perf_enabled == true)
+ if (data->perf_enabled == true)
qsee_disable_clock_vote(data, CLK_DFAB);
if (qseecom.qseos_version == QSEOS_VERSION_13) {
@@ -2877,18 +3014,43 @@
.release = qseecom_release
};
-static int __qseecom_init_clk(void)
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
{
int rc = 0;
struct device *pdev;
struct qseecom_clk *qclk;
+ char *core_clk_src = NULL;
+ char *core_clk = NULL;
+ char *iface_clk = NULL;
+ char *bus_clk = NULL;
- qclk = &qseecom.qsee;
-
+ switch (ce) {
+ case CLK_QSEE: {
+ core_clk_src = "core_clk_src";
+ core_clk = "core_clk";
+ iface_clk = "iface_clk";
+ bus_clk = "bus_clk";
+ qclk = &qseecom.qsee;
+ qclk->instance = CLK_QSEE;
+ break;
+ };
+ case CLK_CE_DRV: {
+ core_clk_src = "ce_drv_core_clk_src";
+ core_clk = "ce_drv_core_clk";
+ iface_clk = "ce_drv_iface_clk";
+ bus_clk = "ce_drv_bus_clk";
+ qclk = &qseecom.ce_drv;
+ qclk->instance = CLK_CE_DRV;
+ break;
+ };
+ default:
+ pr_err("Invalid ce hw instance: %d!\n", ce);
+ return -EIO;
+ }
pdev = qseecom.pdev;
- /* Get CE3 src core clk. */
- qclk->ce_core_src_clk = clk_get(pdev, "core_clk_src");
+ /* Get CE3 src core clk. */
+ qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
if (!IS_ERR(qclk->ce_core_src_clk)) {
/* Set the core src clk @100Mhz */
rc = clk_set_rate(qclk->ce_core_src_clk, QSEE_CE_CLK_100MHZ);
@@ -2903,7 +3065,7 @@
}
/* Get CE core clk */
- qclk->ce_core_clk = clk_get(pdev, "core_clk");
+ qclk->ce_core_clk = clk_get(pdev, core_clk);
if (IS_ERR(qclk->ce_core_clk)) {
rc = PTR_ERR(qclk->ce_core_clk);
pr_err("Unable to get CE core clk\n");
@@ -2913,7 +3075,7 @@
}
/* Get CE Interface clk */
- qclk->ce_clk = clk_get(pdev, "iface_clk");
+ qclk->ce_clk = clk_get(pdev, iface_clk);
if (IS_ERR(qclk->ce_clk)) {
rc = PTR_ERR(qclk->ce_clk);
pr_err("Unable to get CE interface clk\n");
@@ -2924,7 +3086,7 @@
}
/* Get CE AXI clk */
- qclk->ce_bus_clk = clk_get(pdev, "bus_clk");
+ qclk->ce_bus_clk = clk_get(pdev, bus_clk);
if (IS_ERR(qclk->ce_bus_clk)) {
rc = PTR_ERR(qclk->ce_bus_clk);
pr_err("Unable to get CE BUS interface clk\n");
@@ -2937,11 +3099,14 @@
return rc;
}
-static void __qseecom_deinit_clk(void)
+static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
{
struct qseecom_clk *qclk;
- qclk = &qseecom.qsee;
+ if (ce == CLK_QSEE)
+ qclk = &qseecom.qsee;
+ else
+ qclk = &qseecom.ce_drv;
if (qclk->ce_clk != NULL) {
clk_put(qclk->ce_clk);
@@ -2979,6 +3144,11 @@
qseecom.qsee.ce_core_src_clk = NULL;
qseecom.qsee.ce_bus_clk = NULL;
+ qseecom.ce_drv.ce_core_clk = NULL;
+ qseecom.ce_drv.ce_clk = NULL;
+ qseecom.ce_drv.ce_core_src_clk = NULL;
+ qseecom.ce_drv.ce_bus_clk = NULL;
+
rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
if (rc < 0) {
pr_err("alloc_chrdev_region failed %d\n", rc);
@@ -3053,6 +3223,7 @@
/* register client for bus scaling */
if (pdev->dev.of_node) {
+
if (of_property_read_u32((&pdev->dev)->of_node,
"qcom,disk-encrypt-pipe-pair",
&qseecom.ce_info.disk_encrypt_pipe)) {
@@ -3089,10 +3260,29 @@
qseecom.ce_info.hlos_ce_hw_instance);
}
- ret = __qseecom_init_clk();
+ qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
+ qseecom.ce_drv.instance = qseecom.ce_info.hlos_ce_hw_instance;
+
+ ret = __qseecom_init_clk(CLK_QSEE);
if (ret)
goto err;
+ if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
+ ret = __qseecom_init_clk(CLK_CE_DRV);
+ if (ret) {
+ __qseecom_deinit_clk(CLK_QSEE);
+ goto err;
+ }
+ } else {
+ struct qseecom_clk *qclk;
+
+ qclk = &qseecom.qsee;
+ qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
+ qseecom.ce_drv.ce_clk = qclk->ce_clk;
+ qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
+ qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
+ }
+
qseecom_platform_support = (struct msm_bus_scale_pdata *)
msm_bus_cl_get_pdata(pdev);
if (qseecom.qsee_version >= (QSEE_VERSION_02)) {
@@ -3203,9 +3393,11 @@
msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
0);
/* register client for bus scaling */
- if (pdev->dev.of_node)
- __qseecom_deinit_clk();
-
+ if (pdev->dev.of_node) {
+ __qseecom_deinit_clk(CLK_QSEE);
+ if (qseecom.qsee.instance != qseecom.ce_drv.instance)
+ __qseecom_deinit_clk(CLK_CE_DRV);
+ }
return ret;
};
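
With the qseecom changes above, __qseecom_enable_clk()/__qseecom_disable_clk() keep a clk_access_cnt under clk_access_lock, so overlapping users (key operations, bandwidth voting) share one prepared clock and only the last caller actually gates it. The same idea in a minimal, self-contained form with illustrative names:

/*
 * Sketch of a mutex-protected clock refcount like the one above.
 * my_clk_ctx, my_clk_get() and my_clk_put() are hypothetical names.
 */
#include <linux/clk.h>
#include <linux/mutex.h>

struct my_clk_ctx {
	struct mutex lock;
	unsigned int users;
	struct clk *clk;
};

static int my_clk_get(struct my_clk_ctx *c)
{
	int rc = 0;

	mutex_lock(&c->lock);
	if (c->users++ == 0)		/* first user prepares and enables */
		rc = clk_prepare_enable(c->clk);
	if (rc)
		c->users--;		/* roll back the count on failure */
	mutex_unlock(&c->lock);
	return rc;
}

static void my_clk_put(struct my_clk_ctx *c)
{
	mutex_lock(&c->lock);
	if (c->users && --c->users == 0)	/* last user turns it off */
		clk_disable_unprepare(c->clk);
	mutex_unlock(&c->lock);
}
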
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 6b392b9..49222b9 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2188,6 +2188,9 @@
host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
host->quirks2 |= SDHCI_QUIRK2_IGNORE_CMDCRC_FOR_TUNING;
host->quirks2 |= SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE;
+ host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
+ host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
host_version = readl_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
@@ -2259,6 +2262,7 @@
msm_host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
msm_host->mmc->caps2 |= MMC_CAP2_POWEROFF_NOTIFY;
msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
+ msm_host->mmc->caps2 |= MMC_CAP2_STOP_REQUEST;
if (msm_host->pdata->nonremovable)
msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bd9a960..0549b4a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -741,10 +741,12 @@
break;
}
- if (count >= 0xF) {
- DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
- mmc_hostname(host->mmc), count, cmd->opcode);
- count = 0xE;
+ if (!(host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT)) {
+ if (count >= 0xF) {
+ DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
+ count = 0xE;
+ }
}
return count;
@@ -2089,6 +2091,9 @@
if (host->version < SDHCI_SPEC_300)
return;
+ if (host->quirks2 & SDHCI_QUIRK2_BROKEN_PRESET_VALUE)
+ return;
+
spin_lock_irqsave(&host->lock, flags);
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -2119,6 +2124,11 @@
sdhci_runtime_pm_put(host);
}
+static int sdhci_stop_request(struct mmc_host *mmc)
+{
+ return -ENOSYS;
+}
+
static const struct mmc_host_ops sdhci_ops = {
.pre_req = sdhci_pre_req,
.post_req = sdhci_post_req,
@@ -2132,6 +2142,7 @@
.enable_preset_value = sdhci_enable_preset_value,
.enable = sdhci_enable,
.disable = sdhci_disable,
+ .stop_request = sdhci_stop_request,
};
/*****************************************************************************\
@@ -2363,7 +2374,6 @@
sdhci_finish_command(host);
}
-#ifdef CONFIG_MMC_DEBUG
static void sdhci_show_adma_error(struct sdhci_host *host)
{
const char *name = mmc_hostname(host->mmc);
@@ -2379,7 +2389,7 @@
len = (__le16 *)(desc + 2);
attr = *desc;
- DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+ pr_info("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
desc += 8;
@@ -2388,9 +2398,6 @@
break;
}
}
-#else
-static void sdhci_show_adma_error(struct sdhci_host *host) { }
-#endif
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
@@ -2420,6 +2427,9 @@
sdhci_finish_command(host);
return;
}
+ if (host->quirks2 &
+ SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD)
+ return;
}
pr_err("%s: Got data interrupt 0x%08x even "
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 44465dc..1a175c9 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -197,6 +197,7 @@
void __iomem *riva_ccu_base;
void __iomem *pronto_a2xb_base;
void __iomem *pronto_ccpu_base;
+ void __iomem *fiq_reg;
} *penv = NULL;
static ssize_t wcnss_serial_number_show(struct device *dev,
@@ -401,7 +402,7 @@
if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
wcnss_pronto_log_debug_regs();
wmb();
- __raw_writel(1 << 16, MSM_APCS_GCC_BASE + 0x8);
+ __raw_writel(1 << 16, penv->fiq_reg);
} else {
wcnss_riva_log_debug_regs();
wmb();
@@ -1068,6 +1069,7 @@
struct qcom_wcnss_opts *pdata;
unsigned long wcnss_phys_addr;
int size = 0;
+ struct resource *res;
int has_pronto_hw = of_property_read_bool(pdev->dev.of_node,
"qcom,has_pronto_hw");
@@ -1183,11 +1185,28 @@
pr_err("%s: ioremap wcnss physical failed\n", __func__);
goto fail_ioremap2;
}
+ /* for reset FIQ */
+ res = platform_get_resource_byname(penv->pdev,
+ IORESOURCE_MEM, "wcnss_fiq");
+ if (!res) {
+ dev_err(&pdev->dev, "insufficient irq mem resources\n");
+ ret = -ENOENT;
+ goto fail_ioremap3;
+ }
+ penv->fiq_reg = ioremap_nocache(res->start, resource_size(res));
+ if (!penv->fiq_reg) {
+ pr_err("wcnss: %s: ioremap_nocache() failed fiq_reg addr:%pr\n",
+ __func__, &res->start);
+ ret = -ENOMEM;
+ goto fail_ioremap3;
+ }
}
penv->cold_boot_done = 1;
return 0;
+fail_ioremap3:
+ iounmap(penv->pronto_ccpu_base);
fail_ioremap2:
iounmap(penv->pronto_a2xb_base);
fail_ioremap:
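
Instead of writing through MSM_APCS_GCC_BASE directly, the WCNSS driver above now looks up a named "wcnss_fiq" MEM resource and ioremaps it, unwinding the earlier mappings if that fails. A small sketch of the lookup-and-map step (function and resource names are illustrative):

/* Sketch: map a named platform MEM resource, as done for "wcnss_fiq". */
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *map_named_reg(struct platform_device *pdev,
				   const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res)
		return NULL;	/* caller reports -ENOENT, as the patch does */

	return ioremap_nocache(res->start, resource_size(res));
}
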
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index db7f7f0..16f722c 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -41,6 +41,7 @@
#define IPA_DMA_POOL_SIZE (512)
#define IPA_DMA_POOL_ALIGNMENT (4)
#define IPA_DMA_POOL_BOUNDARY (1024)
+#define IPA_NUM_DESC_PER_SW_TX (2)
#define IPA_ROUTING_RULE_BYTE_SIZE (4)
#define IPA_BAM_CNFG_BITS_VAL (0x7FFFE004)
@@ -1459,6 +1460,41 @@
WARN_ON(1);
}
+/**
+* ipa_inc_client_enable_clks() - Increase active clients counter, and
+* enable ipa clocks if necessary
+*
+* Return codes:
+* None
+*/
+void ipa_inc_client_enable_clks(void)
+{
+ mutex_lock(&ipa_ctx->ipa_active_clients_lock);
+ ipa_ctx->ipa_active_clients++;
+ if (ipa_ctx->ipa_active_clients == 1)
+ if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
+ ipa_enable_clks();
+ mutex_unlock(&ipa_ctx->ipa_active_clients_lock);
+}
+
+/**
+* ipa_dec_client_disable_clks() - Decrease active clients counter, and
+* disable ipa clocks if necessary
+*
+* Return codes:
+* None
+*/
+void ipa_dec_client_disable_clks(void)
+{
+ mutex_lock(&ipa_ctx->ipa_active_clients_lock);
+ ipa_ctx->ipa_active_clients--;
+ if (ipa_ctx->ipa_active_clients == 0)
+ if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
+ ipa_disable_clks();
+ mutex_unlock(&ipa_ctx->ipa_active_clients_lock);
+}
+
+
static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
{
void *bam_cnfg_bits;
@@ -1761,15 +1797,20 @@
* This is an issue with IPA HW v1.0 only.
*/
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
- ipa_ctx->one_kb_no_straddle_pool = dma_pool_create("ipa_1k",
+ ipa_ctx->dma_pool = dma_pool_create("ipa_1k",
NULL,
IPA_DMA_POOL_SIZE, IPA_DMA_POOL_ALIGNMENT,
IPA_DMA_POOL_BOUNDARY);
- if (!ipa_ctx->one_kb_no_straddle_pool) {
- IPAERR("cannot setup 1kb alloc DMA pool.\n");
- result = -ENOMEM;
- goto fail_dma_pool;
- }
+ } else {
+ ipa_ctx->dma_pool = dma_pool_create("ipa_tx", NULL,
+ IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
+ 0, 0);
+ }
+
+ if (!ipa_ctx->dma_pool) {
+ IPAERR("cannot alloc DMA pool.\n");
+ result = -ENOMEM;
+ goto fail_dma_pool;
}
ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
@@ -1839,7 +1880,8 @@
ipa_ctx->flt_rule_hdl_tree = RB_ROOT;
ipa_ctx->tag_tree = RB_ROOT;
- atomic_set(&ipa_ctx->ipa_active_clients, 0);
+ mutex_init(&ipa_ctx->ipa_active_clients_lock);
+ ipa_ctx->ipa_active_clients = 0;
result = ipa_bridge_init();
if (result) {
@@ -1976,7 +2018,7 @@
* DMA pool need to be released only for IPA HW v1.0 only.
*/
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
- dma_pool_destroy(ipa_ctx->one_kb_no_straddle_pool);
+ dma_pool_destroy(ipa_ctx->dma_pool);
fail_dma_pool:
kmem_cache_destroy(ipa_ctx->tree_node_cache);
fail_tree_node_cache:
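
The two clock-reference helpers added above replace the open-coded atomic checks that each call site used to carry. Below is a minimal caller sketch (not part of this patch; do_ipa_work() is a hypothetical placeholder for any register or pipe access that needs the IPA clocks running) showing the intended balanced usage:

    /*
     * Illustrative only: bracket IPA work with the new helpers so the
     * clocks are enabled on the 0 -> 1 transition and disabled again
     * on the 1 -> 0 transition of ipa_active_clients.
     */
    static int example_ipa_operation(void)
    {
            int ret;

            ipa_inc_client_enable_clks();
            ret = do_ipa_work();    /* hypothetical IPA access */
            ipa_dec_client_disable_clks();

            return ret;
    }
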
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
index dd00081..83b7175 100644
--- a/drivers/platform/msm/ipa/ipa_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -726,10 +726,7 @@
return -EINVAL;
}
- if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1) {
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_enable_clks();
- }
+ ipa_inc_client_enable_clks();
if (setup_bridge_to_ipa(dir, type, props, clnt_hdl)) {
IPAERR("fail to setup SYS pipe to IPA dir=%d type=%d\n",
@@ -751,10 +748,7 @@
bail_a2:
ipa_bridge_teardown(dir, type, *clnt_hdl);
bail_ipa:
- if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0) {
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
- }
+ ipa_dec_client_disable_clks();
return ret;
}
EXPORT_SYMBOL(ipa_bridge_setup);
@@ -826,10 +820,7 @@
memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
- if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0) {
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
- }
+ ipa_dec_client_disable_clks();
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
index f2f80bf..6033510 100644
--- a/drivers/platform/msm/ipa/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -204,9 +204,7 @@
int result = -EFAULT;
struct ipa_ep_context *ep;
- if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_enable_clks();
+ ipa_inc_client_enable_clks();
if (in == NULL || sps == NULL || clnt_hdl == NULL ||
in->client >= IPA_CLIENT_MAX ||
@@ -342,11 +340,7 @@
ipa_cfg_ep_fail:
memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
fail:
- if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0) {
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
- }
-
+ ipa_dec_client_disable_clks();
return result;
}
EXPORT_SYMBOL(ipa_connect);
@@ -421,10 +415,7 @@
ipa_enable_data_path(clnt_hdl);
memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
- if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0) {
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
- }
+ ipa_dec_client_disable_clks();
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
index 51a950d..bc5aa6f 100644
--- a/drivers/platform/msm/ipa/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -563,7 +563,7 @@
ipa_ctx->stats.rx_repl_repost,
ipa_ctx->stats.x_intr_repost,
ipa_ctx->stats.rx_q_len,
- atomic_read(&ipa_ctx->ipa_active_clients),
+ ipa_ctx->ipa_active_clients,
connect);
cnt += nbytes;
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
index 5f7f3d9..228c77fe 100644
--- a/drivers/platform/msm/ipa/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -42,7 +42,7 @@
* the order for sent packet is the same as expected
* - delete all the tx packet descriptors from the system
* pipe context (not needed anymore)
- * - return the tx buffer back to one_kb_no_straddle_pool
+ * - return the tx buffer back to dma_pool
*/
void ipa_wq_write_done(struct work_struct *work)
{
@@ -80,7 +80,7 @@
list_del(&tx_pkt->link);
spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
- dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
+ dma_pool_free(ipa_ctx->dma_pool,
tx_pkt->bounce,
tx_pkt->mem.phys_base);
} else {
@@ -97,7 +97,7 @@
}
if (mult.phys_base)
- dma_free_coherent(NULL, mult.size, mult.base, mult.phys_base);
+ dma_pool_free(ipa_ctx->dma_pool, mult.base, mult.phys_base);
}
/**
@@ -144,7 +144,7 @@
* does not cross a 1KB boundary
*/
tx_pkt->bounce = dma_pool_alloc(
- ipa_ctx->one_kb_no_straddle_pool,
+ ipa_ctx->dma_pool,
mem_flag, &dma_address);
if (!tx_pkt->bounce) {
dma_address = 0;
@@ -208,7 +208,7 @@
list_del(&tx_pkt->link);
spin_unlock_irqrestore(&sys->spinlock, irq_flags);
if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
- dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ dma_pool_free(ipa_ctx->dma_pool, tx_pkt->bounce,
dma_address);
else
dma_unmap_single(NULL, dma_address, desc->len, DMA_TO_DEVICE);
@@ -259,7 +259,7 @@
if (unlikely(!in_atomic))
mem_flag = GFP_KERNEL;
- transfer.iovec = dma_alloc_coherent(NULL, size, &dma_addr, mem_flag);
+ transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag, &dma_addr);
transfer.iovec_phys = dma_addr;
transfer.iovec_count = num_desc;
spin_lock_irqsave(&sys->spinlock, irq_flags);
@@ -306,7 +306,7 @@
* packet does not cross a 1KB boundary
*/
tx_pkt->bounce =
- dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool,
+ dma_pool_alloc(ipa_ctx->dma_pool,
mem_flag,
&tx_pkt->mem.phys_base);
if (!tx_pkt->bounce) {
@@ -377,7 +377,7 @@
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
- dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
+ dma_pool_free(ipa_ctx->dma_pool,
tx_pkt->bounce,
tx_pkt->mem.phys_base);
else
@@ -392,7 +392,7 @@
if (fail_dma_wrap)
kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
if (transfer.iovec_phys)
- dma_free_coherent(NULL, size, transfer.iovec,
+ dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
transfer.iovec_phys);
failure_coherent:
spin_unlock_irqrestore(&sys->spinlock, irq_flags);
@@ -433,9 +433,7 @@
struct ipa_desc *desc;
int result = 0;
- if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_enable_clks();
+ ipa_inc_client_enable_clks();
if (num_desc == 1) {
init_completion(&descr->xfer_done);
@@ -471,9 +469,7 @@
IPA_STATS_INC_IC_CNT(num_desc, descr, ipa_ctx->stats.imm_cmds);
bail:
- if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
+ ipa_dec_client_disable_clks();
return result;
}
@@ -1087,6 +1083,7 @@
int inactive_cycles = 0;
int cnt;
+ ipa_inc_client_enable_clks();
do {
cnt = ipa_handle_rx_core(true, true);
if (cnt == 0) {
@@ -1098,6 +1095,7 @@
} while (inactive_cycles <= POLLING_INACTIVITY);
ipa_rx_switch_to_intr_mode();
+ ipa_dec_client_disable_clks();
}
/**
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index ca5740d..a03ba16 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -585,7 +585,7 @@
* @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
* @empty_rt_tbl_mem: empty routing tables memory
* @pipe_mem_pool: pipe memory pool
- * @one_kb_no_straddle_pool: one kb no straddle pool
+ * @dma_pool: special purpose DMA pool
* @ipa_hw_type: type of IPA HW type (e.g. IPA 1.0, IPA 1.1 etc')
* @ipa_hw_mode: mode of IPA HW mode (e.g. Normal, Virtual or over PCIe)
*
@@ -643,8 +643,9 @@
bool ip6_flt_tbl_lcl;
struct ipa_mem_buffer empty_rt_tbl_mem;
struct gen_pool *pipe_mem_pool;
- struct dma_pool *one_kb_no_straddle_pool;
- atomic_t ipa_active_clients;
+ struct dma_pool *dma_pool;
+ struct mutex ipa_active_clients_lock;
+ int ipa_active_clients;
u32 clnt_hdl_cmd;
u32 clnt_hdl_data_in;
u32 clnt_hdl_data_out;
@@ -795,6 +796,8 @@
struct ipa_context *ipa_get_ctx(void);
void ipa_enable_clks(void);
void ipa_disable_clks(void);
+void ipa_inc_client_enable_clks(void);
+void ipa_dec_client_disable_clks(void);
int __ipa_del_rt_rule(u32 rule_hdl);
int __ipa_del_hdr(u32 hdr_hdl);
int __ipa_release_hdr(u32 hdr_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_intf.c b/drivers/platform/msm/ipa/ipa_intf.c
index 0f41d2c..5ee1929 100644
--- a/drivers/platform/msm/ipa/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_intf.c
@@ -432,6 +432,7 @@
}
IPA_STATS_INC_CNT(
ipa_ctx->stats.msg_r[msg->meta.msg_type]);
+ kfree(msg);
}
ret = -EAGAIN;
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
index 0a6771c..3615952 100644
--- a/drivers/platform/msm/ipa/ipa_rm_resource.c
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -80,7 +80,8 @@
int result = 0;
int driver_result;
unsigned long flags;
- IPADBG("IPA RM ::ipa_rm_resource_consumer_request ENTER\n");
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_request %d ENTER\n",
+ consumer->resource.name);
spin_lock_irqsave(&consumer->resource.state_lock, flags);
switch (consumer->resource.state) {
case IPA_RM_RELEASED:
@@ -114,7 +115,8 @@
consumer->usage_count++;
bail:
spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
- IPADBG("IPA RM ::ipa_rm_resource_consumer_request EXIT [%d]\n", result);
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_request %d EXIT %d\n",
+ consumer->resource.name, result);
return result;
}
@@ -125,7 +127,8 @@
int driver_result;
unsigned long flags;
enum ipa_rm_resource_state save_state;
- IPADBG("IPA RM ::ipa_rm_resource_consumer_release ENTER\n");
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_release %d ENTER\n",
+ consumer->resource.name);
spin_lock_irqsave(&consumer->resource.state_lock, flags);
switch (consumer->resource.state) {
case IPA_RM_RELEASED:
@@ -160,7 +163,8 @@
}
bail:
spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
- IPADBG("IPA RM ::ipa_rm_resource_consumer_release EXIT [%d]\n", result);
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_release %d EXIT %d\n",
+ consumer->resource.name, result);
return result;
}
@@ -564,7 +568,7 @@
unsigned long flags;
struct ipa_rm_resource *consumer;
int consumer_result;
- IPADBG("IPA RM ::ipa_rm_resource_producer_request [%d] ENTER\n",
+ IPADBG("IPA RM ::ipa_rm_resource_producer_request %d ENTER\n",
producer->resource.name);
if (ipa_rm_peers_list_is_empty(producer->resource.peers_list)) {
spin_lock_irqsave(&producer->resource.state_lock, flags);
@@ -628,7 +632,8 @@
unlock_and_bail:
spin_unlock_irqrestore(&producer->resource.state_lock, flags);
bail:
- IPADBG("IPA RM ::ipa_rm_resource_producer_request EXIT[%d]\n", result);
+ IPADBG("IPA RM ::ipa_rm_resource_producer_request %d EXIT %d\n",
+ producer->resource.name, result);
return result;
}
@@ -646,7 +651,8 @@
unsigned long flags;
struct ipa_rm_resource *consumer;
int consumer_result;
- IPADBG("IPA RM ::ipa_rm_resource_producer_release ENTER\n");
+ IPADBG("IPA RM ::ipa_rm_resource_producer_release %d ENTER\n",
+ producer->resource.name);
if (ipa_rm_peers_list_is_empty(producer->resource.peers_list)) {
spin_lock_irqsave(&producer->resource.state_lock, flags);
producer->resource.state = IPA_RM_RELEASED;
@@ -702,7 +708,8 @@
return result;
bail:
spin_unlock_irqrestore(&producer->resource.state_lock, flags);
- IPADBG("IPA RM ::ipa_rm_resource_producer_release EXIT[%d]\n", result);
+ IPADBG("IPA RM ::ipa_rm_resource_producer_release %d EXIT %d\n",
+ producer->resource.name, result);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
index 1d88280..fc5f668 100644
--- a/drivers/platform/msm/ipa/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -505,6 +505,8 @@
IPAERR("failed to add to tree\n");
WARN_ON(1);
}
+ } else {
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
}
return entry;
diff --git a/drivers/platform/msm/ipa/teth_bridge.c b/drivers/platform/msm/ipa/teth_bridge.c
index 5b26e41..774c0e6 100644
--- a/drivers/platform/msm/ipa/teth_bridge.c
+++ b/drivers/platform/msm/ipa/teth_bridge.c
@@ -58,6 +58,8 @@
#define TETH_AGGR_MAX_DATAGRAMS_DEFAULT 16
#define TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT (8*1024)
+#define TETH_MTU_BYTE 1500
+
struct mac_addresses_type {
u8 host_pc_mac_addr[ETH_ALEN];
bool host_pc_mac_addr_known;
@@ -283,14 +285,7 @@
TETH_ERR("Configuration of header removal/insertion failed\n");
goto bail;
}
-
- res = ipa_commit_hdr();
- if (res) {
- TETH_ERR("Failed committing headers\n");
- goto bail;
- }
TETH_DBG_FUNC_EXIT();
-
bail:
return res;
}
@@ -425,20 +420,7 @@
TETH_ERR("A2 to USB routing block configuration failed\n");
goto bail;
}
-
- /* Commit all the changes to HW in one shot */
- res = ipa_commit_rt(IPA_IP_v4);
- if (res) {
- TETH_ERR("Failed commiting IPv4 routing tables\n");
- goto bail;
- }
- res = ipa_commit_rt(IPA_IP_v6);
- if (res) {
- TETH_ERR("Failed commiting IPv6 routing tables\n");
- goto bail;
- }
TETH_DBG_FUNC_EXIT();
-
bail:
return res;
}
@@ -533,20 +515,7 @@
TETH_ERR("A2_PROD filtering configuration failed\n");
goto bail;
}
-
- /* Commit all the changes to HW in one shot */
- res = ipa_commit_flt(IPA_IP_v4);
- if (res) {
- TETH_ERR("Failed commiting IPv4 filtering tables\n");
- goto bail;
- }
- res = ipa_commit_flt(IPA_IP_v6);
- if (res) {
- TETH_ERR("Failed commiting IPv6 filtering tables\n");
- goto bail;
- }
TETH_DBG_FUNC_EXIT();
-
bail:
return res;
}
@@ -578,8 +547,15 @@
return -EFAULT;
}
+ /*
+ * Due to a HW 'feature', the maximal aggregated packet size may be the
+ * requested aggr_byte_limit plus the MTU. Therefore, the MTU is
+ * subtracted from the requested aggr_byte_limit so that the requested
+ * byte limit is honored.
+ */
ipa_aggr_params->aggr_byte_limit =
- teth_aggr_params->max_transfer_size_byte / 1024;
+ (teth_aggr_params->max_transfer_size_byte - TETH_MTU_BYTE) /
+ 1024;
ipa_aggr_params->aggr_time_limit = TETH_DEFAULT_AGGR_TIME_LIMIT;
TETH_DBG_FUNC_EXIT();
@@ -699,9 +675,6 @@
static void complete_hw_bridge(struct work_struct *work)
{
int res;
- static DEFINE_MUTEX(f_lock);
-
- mutex_lock(&f_lock);
TETH_DBG_FUNC_ENTRY();
TETH_DBG("Completing HW bridge in %s mode\n",
@@ -715,17 +688,6 @@
goto bail;
}
- /*
- * Reset the Header, Routing and Filtering blocks.
- * Resetting the Header block will also reset the other blocks.
- * This reset is not comitted to HW.
- */
- res = ipa_reset_hdr();
- if (res) {
- TETH_ERR("Failed resetting IPA\n");
- goto bail;
- }
-
res = configure_ipa_header_block();
if (res) {
TETH_ERR("Configuration of IPA header block Failed\n");
@@ -744,10 +706,19 @@
goto bail;
}
+ /*
+ * Commit all the data to HW, including header, routing and filtering
+ * blocks, IPv4 and IPv6
+ */
+ res = ipa_commit_hdr();
+ if (res) {
+ TETH_ERR("Failed committing headers / routing / filtering.\n");
+ goto bail;
+ }
+
teth_ctx->is_hw_bridge_complete = true;
- teth_ctx->comp_hw_bridge_in_progress = false;
bail:
- mutex_unlock(&f_lock);
+ teth_ctx->comp_hw_bridge_in_progress = false;
TETH_DBG_FUNC_EXIT();
return;
@@ -1102,6 +1073,19 @@
return -EINVAL;
}
+ /*
+ * In case the requested max transfer size is larger than 8K, set it
+ * to the default 8K
+ */
+ if (aggr_params->dl.max_transfer_size_byte >
+ TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT)
+ aggr_params->dl.max_transfer_size_byte =
+ TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT;
+ if (aggr_params->ul.max_transfer_size_byte >
+ TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT)
+ aggr_params->ul.max_transfer_size_byte =
+ TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT;
+
memcpy(&teth_ctx->aggr_params,
aggr_params,
sizeof(struct teth_aggr_params));
@@ -1110,10 +1094,8 @@
teth_ctx->aggr_params_known = true;
res = teth_set_aggregation();
- if (res) {
+ if (res)
TETH_ERR("Failed setting aggregation params\n");
- res = -EFAULT;
- }
return res;
}
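
For a concrete feel of the aggr_byte_limit conversion above, take illustrative numbers only: with the downlink request clamped to the 8K default (TETH_AGGR_MAX_AGGR_PACKET_SIZE_DEFAULT = 8 * 1024) and TETH_MTU_BYTE = 1500, integer division gives

    aggr_byte_limit = (8192 - 1500) / 1024 = 6

so even if the HW appends one extra MTU to an aggregate, the worst case is 6 * 1024 + 1500 = 7644 bytes, which stays under the requested 8K ceiling.
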
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
index 1729b49..52c523e 100644
--- a/drivers/platform/msm/qpnp-pwm.c
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -463,7 +463,7 @@
int i, pwm_size, rc = 0;
int burst_size = SPMI_MAX_BUF_LEN;
int list_len = lut->list_len << 1;
- int offset = lut->lo_index << 1;
+ int offset = (lut->lo_index << 1) - 2;
pwm_size = QPNP_GET_PWM_SIZE(
chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) &
@@ -1024,8 +1024,8 @@
raw_lut = 1;
lut_config->list_len = len;
- lut_config->lo_index = start_idx;
- lut_config->hi_index = start_idx + len - 1;
+ lut_config->lo_index = start_idx + 1;
+ lut_config->hi_index = start_idx + len;
rc = qpnp_lpg_change_table(pwm, duty_pct, raw_lut);
if (rc) {
@@ -1041,13 +1041,13 @@
QPNP_SET_PAUSE_CNT(lut_config->lut_pause_lo_cnt,
lut_params.lut_pause_lo, ramp_step_ms);
- if (lut_config->lut_pause_lo_cnt > PM_PWM_LUT_PAUSE_MAX)
- lut_config->lut_pause_lo_cnt = PM_PWM_LUT_PAUSE_MAX;
+ if (lut_config->lut_pause_lo_cnt > PM_PWM_MAX_PAUSE_CNT)
+ lut_config->lut_pause_lo_cnt = PM_PWM_MAX_PAUSE_CNT;
QPNP_SET_PAUSE_CNT(lut_config->lut_pause_hi_cnt,
lut_params.lut_pause_hi, ramp_step_ms);
- if (lut_config->lut_pause_hi_cnt > PM_PWM_LUT_PAUSE_MAX)
- lut_config->lut_pause_hi_cnt = PM_PWM_LUT_PAUSE_MAX;
+ if (lut_config->lut_pause_hi_cnt > PM_PWM_MAX_PAUSE_CNT)
+ lut_config->lut_pause_hi_cnt = PM_PWM_MAX_PAUSE_CNT;
lut_config->ramp_step_ms = ramp_step_ms;
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index e9cf973..e00e1c3 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -213,6 +213,8 @@
* @alarm_high_mv: the battery alarm voltage high
* @cool_temp_dc: the cool temp threshold in deciCelcius
* @warm_temp_dc: the warm temp threshold in deciCelcius
+ * @hysteresis_temp_dc: the hysteresis between temp thresholds in
+ * deciCelcius
* @resume_voltage_delta: the voltage delta from vdd max at which the
* battery should resume charging
* @term_current: The charging based term current
@@ -235,6 +237,7 @@
unsigned int alarm_high_mv;
int cool_temp_dc;
int warm_temp_dc;
+ int hysteresis_temp_dc;
unsigned int temp_check_period;
unsigned int cool_bat_chg_current;
unsigned int warm_bat_chg_current;
@@ -308,6 +311,7 @@
static int thermal_mitigation;
static struct pm8921_chg_chip *the_chip;
+static void check_temp_thresholds(struct pm8921_chg_chip *chip);
#define LPM_ENABLE_BIT BIT(2)
static int pm8921_chg_set_lpm(struct pm8921_chg_chip *chip, int enable)
@@ -3162,6 +3166,22 @@
struct delayed_work *dwork = to_delayed_work(work);
struct pm8921_chg_chip *chip = container_of(dwork,
struct pm8921_chg_chip, update_heartbeat_work);
+ bool chg_present = chip->usb_present || chip->dc_present;
+
+ /* for battery health when charger is not connected */
+ if (chip->btc_override && !chg_present)
+ schedule_delayed_work(&chip->btc_override_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (chip->btc_delay_ms)));
+
+ /*
+ * check temp thresholds when the charger is present
+ * and the battery is FULL. The temperature here can impact
+ * the charging restart conditions.
+ */
+ if (chip->btc_override && chg_present &&
+ !wake_lock_active(&chip->eoc_wake_lock))
+ check_temp_thresholds(chip);
power_supply_changed(&chip->batt_psy);
if (chip->recent_reported_soc <= 20)
@@ -3317,7 +3337,7 @@
if (chip->warm_temp_dc != INT_MIN) {
if (chip->is_bat_warm
- && temp < chip->warm_temp_dc - TEMP_HYSTERISIS_DECIDEGC)
+ && temp < chip->warm_temp_dc - chip->hysteresis_temp_dc)
battery_warm(false);
else if (!chip->is_bat_warm && temp >= chip->warm_temp_dc)
battery_warm(true);
@@ -3325,7 +3345,7 @@
if (chip->cool_temp_dc != INT_MIN) {
if (chip->is_bat_cool
- && temp > chip->cool_temp_dc + TEMP_HYSTERISIS_DECIDEGC)
+ && temp > chip->cool_temp_dc + chip->hysteresis_temp_dc)
battery_cool(false);
else if (!chip->is_bat_cool && temp <= chip->cool_temp_dc)
battery_cool(true);
@@ -3543,7 +3563,8 @@
temp = pm_chg_get_rt_status(chip, BATTTEMP_HOT_IRQ);
if (temp) {
- if (decidegc < chip->btc_override_hot_decidegc)
+ if (decidegc < chip->btc_override_hot_decidegc -
+ chip->hysteresis_temp_dc)
/* stop forcing batt hot */
rc = pm_chg_override_hot(chip, 0);
if (rc)
@@ -3558,7 +3579,8 @@
temp = pm_chg_get_rt_status(chip, BATTTEMP_COLD_IRQ);
if (temp) {
- if (decidegc > chip->btc_override_cold_decidegc)
+ if (decidegc > chip->btc_override_cold_decidegc +
+ chip->hysteresis_temp_dc)
/* stop forcing batt cold */
rc = pm_chg_override_cold(chip, 0);
if (rc)
@@ -3622,7 +3644,8 @@
end = is_charging_finished(chip, vbat_batt_terminal_uv, ichg_meas_ma);
- if (end == CHG_NOT_IN_PROGRESS) {
+ if (end == CHG_NOT_IN_PROGRESS && (!chip->btc_override ||
+ !(chip->usb_present || chip->dc_present))) {
count = 0;
goto eoc_worker_stop;
}
@@ -3650,7 +3673,8 @@
chgdone_irq_handler(chip->pmic_chg_irq[CHGDONE_IRQ], chip);
} else {
check_temp_thresholds(chip);
- adjust_vdd_max_for_fastchg(chip, vbat_batt_terminal_uv);
+ if (end != CHG_NOT_IN_PROGRESS)
+ adjust_vdd_max_for_fastchg(chip, vbat_batt_terminal_uv);
pr_debug("EOC count = %d\n", count);
schedule_delayed_work(&chip->eoc_work,
round_jiffies_relative(msecs_to_jiffies
@@ -3659,9 +3683,9 @@
}
eoc_worker_stop:
- wake_unlock(&chip->eoc_wake_lock);
/* set the vbatdet back, in case it was changed to trigger charging */
set_appropriate_vbatdet(chip);
+ wake_unlock(&chip->eoc_wake_lock);
}
/**
@@ -3795,6 +3819,11 @@
schedule_delayed_work(&chip->unplug_check_work,
msecs_to_jiffies(UNPLUG_CHECK_WAIT_PERIOD_MS));
pm8921_chg_enable_irq(chip, CHG_GONE_IRQ);
+
+ if (chip->btc_override)
+ schedule_delayed_work(&chip->btc_override_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (chip->btc_delay_ms)));
}
pm8921_chg_enable_irq(chip, DCIN_VALID_IRQ);
@@ -4574,12 +4603,12 @@
int rc;
struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
- pm8921_chg_force_19p2mhz_clk(chip);
-
rc = pm8921_chg_set_lpm(chip, 0);
if (rc)
pr_err("Failed to set lpm rc=%d\n", rc);
+ pm8921_chg_force_19p2mhz_clk(chip);
+
rc = pm_chg_masked_write(chip, CHG_CNTRL, VREF_BATT_THERM_FORCE_ON,
VREF_BATT_THERM_FORCE_ON);
if (rc)
@@ -4600,6 +4629,8 @@
is_usb_chg_plugged_in(the_chip)))
schedule_delayed_work(&chip->btc_override_work, 0);
+ schedule_delayed_work(&chip->update_heartbeat_work, 0);
+
return 0;
}
@@ -4607,6 +4638,8 @@
{
struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
+ cancel_delayed_work_sync(&chip->update_heartbeat_work);
+
if (chip->btc_override)
cancel_delayed_work_sync(&chip->btc_override_work);
@@ -4663,6 +4696,11 @@
else
chip->warm_temp_dc = INT_MIN;
+ if (pdata->hysteresis_temp)
+ chip->hysteresis_temp_dc = pdata->hysteresis_temp * 10;
+ else
+ chip->hysteresis_temp_dc = TEMP_HYSTERISIS_DECIDEGC;
+
chip->temp_check_period = pdata->temp_check_period;
chip->max_bat_chg_current = pdata->max_bat_chg_current;
/* Assign to corresponding module parameter */
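
To illustrate how the configurable hysteresis above behaves (values chosen purely for illustration, not taken from any board file): assume warm_temp_dc = 450 (45.0 degC) and hysteresis_temp_dc = 20 (2.0 degC). The battery is flagged warm once temp >= 450, and the warm flag is only cleared again when temp drops below 450 - 20 = 430, so the state cannot chatter when the reading hovers around 45 degC; the cool threshold works the same way in the opposite direction.
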
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 331c7f1..da0a5b6 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -105,6 +105,13 @@
#define SMBBP_BOOST_SUBTYPE 0x36
#define SMBBP_MISC_SUBTYPE 0x37
+/* SMBCL peripheral subtype values */
+#define SMBCL_CHGR_SUBTYPE 0x41
+#define SMBCL_BUCK_SUBTYPE 0x42
+#define SMBCL_BAT_IF_SUBTYPE 0x43
+#define SMBCL_USB_CHGPTH_SUBTYPE 0x44
+#define SMBCL_MISC_SUBTYPE 0x47
+
#define QPNP_CHARGER_DEV_NAME "qcom,qpnp-charger"
/* Status bits and masks */
@@ -225,7 +232,6 @@
u16 freq_base;
unsigned int usbin_valid_irq;
unsigned int dcin_valid_irq;
- unsigned int chg_done_irq;
unsigned int chg_fastchg_irq;
unsigned int chg_trklchg_irq;
unsigned int chg_failed_irq;
@@ -639,26 +645,6 @@
return IRQ_HANDLED;
}
-static irqreturn_t
-qpnp_chg_chgr_chg_done_irq_handler(int irq, void *_chip)
-{
- struct qpnp_chg_chip *chip = _chip;
- u8 chgr_sts;
- int rc;
-
- pr_debug("CHG_DONE IRQ triggered\n");
-
- rc = qpnp_chg_read(chip, &chgr_sts,
- INT_RT_STS(chip->chgr_base), 1);
- if (rc)
- pr_err("failed to read interrupt sts %d\n", rc);
-
- chip->chg_done = true;
- power_supply_changed(&chip->batt_psy);
-
- return IRQ_HANDLED;
-}
-
static int
qpnp_batt_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
@@ -1223,7 +1209,7 @@
QPNP_CHG_ITERM_MASK, temp, 1);
}
-#define QPNP_CHG_IBATMAX_MIN 100
+#define QPNP_CHG_IBATMAX_MIN 50
#define QPNP_CHG_IBATMAX_MAX 3250
static int
qpnp_chg_ibatmax_set(struct qpnp_chg_chip *chip, int chg_current)
@@ -1235,11 +1221,28 @@
pr_err("bad mA=%d asked to set\n", chg_current);
return -EINVAL;
}
- temp = (chg_current - QPNP_CHG_I_MIN_MA) / QPNP_CHG_I_STEP_MA;
+ temp = chg_current / QPNP_CHG_I_STEP_MA;
return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_IBAT_MAX,
QPNP_CHG_I_MASK, temp, 1);
}
+#define QPNP_CHG_TCHG_MASK 0x7F
+#define QPNP_CHG_TCHG_MIN 4
+#define QPNP_CHG_TCHG_MAX 512
+#define QPNP_CHG_TCHG_STEP 4
+static int qpnp_chg_tchg_max_set(struct qpnp_chg_chip *chip, int minutes)
+{
+ u8 temp;
+
+ if (minutes < QPNP_CHG_TCHG_MIN || minutes > QPNP_CHG_TCHG_MAX) {
+ pr_err("bad max minutes =%d asked to set\n", minutes);
+ return -EINVAL;
+ }
+
+ temp = (minutes - 1)/QPNP_CHG_TCHG_STEP;
+ return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_TCHG_MAX,
+ QPNP_CHG_TCHG_MASK, temp, 1);
+}
#define QPNP_CHG_VBATDET_MIN_MV 3240
#define QPNP_CHG_VBATDET_MAX_MV 5780
#define QPNP_CHG_VBATDET_STEP_MV 20
@@ -1462,6 +1465,160 @@
chip->flags |= CHG_FLAGS_VCP_WA;
}
+static int
+qpnp_chg_request_irqs(struct qpnp_chg_chip *chip)
+{
+ int rc = 0;
+ struct resource *resource;
+ struct spmi_resource *spmi_resource;
+ u8 subtype;
+ struct spmi_device *spmi = chip->spmi;
+
+ spmi_for_each_container_dev(spmi_resource, chip->spmi) {
+ if (!spmi_resource) {
+ pr_err("qpnp_chg: spmi resource absent\n");
+ return rc;
+ }
+
+ resource = spmi_get_resource(spmi, spmi_resource,
+ IORESOURCE_MEM, 0);
+ if (!(resource && resource->start)) {
+ pr_err("node %s IO resource absent!\n",
+ spmi->dev.of_node->full_name);
+ return rc;
+ }
+
+ rc = qpnp_chg_read(chip, &subtype,
+ resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ pr_err("Peripheral subtype read failed rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case SMBB_CHGR_SUBTYPE:
+ case SMBBP_CHGR_SUBTYPE:
+ case SMBCL_CHGR_SUBTYPE:
+ chip->chg_fastchg_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "fast-chg-on");
+ if (chip->chg_fastchg_irq < 0) {
+ pr_err("Unable to get fast-chg-on irq\n");
+ return rc;
+ }
+
+ chip->chg_trklchg_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "trkl-chg-on");
+ if (chip->chg_trklchg_irq < 0) {
+ pr_err("Unable to get trkl-chg-on irq\n");
+ return rc;
+ }
+
+ chip->chg_failed_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "chg-failed");
+ if (chip->chg_failed_irq < 0) {
+ pr_err("Unable to get chg_failed irq\n");
+ return rc;
+ }
+
+ rc |= devm_request_irq(chip->dev, chip->chg_failed_irq,
+ qpnp_chg_chgr_chg_failed_irq_handler,
+ IRQF_TRIGGER_RISING, "chg_failed", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d chg_failed chg: %d\n",
+ chip->chg_failed_irq, rc);
+ return rc;
+ }
+
+ rc |= devm_request_irq(chip->dev, chip->chg_fastchg_irq,
+ qpnp_chg_chgr_chg_fastchg_irq_handler,
+ IRQF_TRIGGER_RISING,
+ "fast-chg-on", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d fast-chg-on: %d\n",
+ chip->chg_fastchg_irq, rc);
+ return rc;
+ }
+
+ rc |= devm_request_irq(chip->dev, chip->chg_trklchg_irq,
+ qpnp_chg_chgr_chg_trklchg_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "fast-chg-on", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d trkl-chg-on: %d\n",
+ chip->chg_trklchg_irq, rc);
+ return rc;
+ }
+ enable_irq_wake(chip->chg_fastchg_irq);
+ enable_irq_wake(chip->chg_trklchg_irq);
+ enable_irq_wake(chip->chg_failed_irq);
+
+ break;
+ case SMBB_BAT_IF_SUBTYPE:
+ case SMBBP_BAT_IF_SUBTYPE:
+ case SMBCL_BAT_IF_SUBTYPE:
+ chip->batt_pres_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "batt-pres");
+ if (chip->batt_pres_irq < 0) {
+ pr_err("Unable to get batt-pres irq\n");
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev, chip->batt_pres_irq,
+ qpnp_chg_bat_if_batt_pres_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "bat_if_batt_pres", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d batt-pres irq: %d\n",
+ chip->batt_pres_irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->batt_pres_irq);
+ break;
+ case SMBB_USB_CHGPTH_SUBTYPE:
+ case SMBBP_USB_CHGPTH_SUBTYPE:
+ case SMBCL_USB_CHGPTH_SUBTYPE:
+ chip->usbin_valid_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "usbin-valid");
+ if (chip->usbin_valid_irq < 0) {
+ pr_err("Unable to get usbin irq\n");
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev, chip->usbin_valid_irq,
+ qpnp_chg_usb_usbin_valid_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "chg_usbin_valid", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d usbinvalid: %d\n",
+ chip->usbin_valid_irq, rc);
+ return rc;
+ }
+ enable_irq_wake(chip->usbin_valid_irq);
+ break;
+ case SMBB_DC_CHGPTH_SUBTYPE:
+ chip->dcin_valid_irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "dcin-valid");
+ if (chip->dcin_valid_irq < 0) {
+ pr_err("Unable to get dcin irq\n");
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev, chip->dcin_valid_irq,
+ qpnp_chg_dc_dcin_valid_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "chg_dcin_valid", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d dcinvalid: %d\n",
+ chip->dcin_valid_irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->dcin_valid_irq);
+ break;
+ }
+ }
+
+ return rc;
+}
+
#define WDOG_EN_BIT BIT(7)
static int
qpnp_chg_hwinit(struct qpnp_chg_chip *chip, u8 subtype,
@@ -1473,73 +1630,7 @@
switch (subtype) {
case SMBB_CHGR_SUBTYPE:
case SMBBP_CHGR_SUBTYPE:
- chip->chg_done_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "chg-done");
- if (chip->chg_done_irq < 0) {
- pr_err("Unable to get chg_done irq\n");
- return -ENXIO;
- }
-
- chip->chg_fastchg_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "fast-chg-on");
- if (chip->chg_fastchg_irq < 0) {
- pr_err("Unable to get fast-chg-on irq\n");
- return -ENXIO;
- }
-
- chip->chg_trklchg_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "trkl-chg-on");
- if (chip->chg_trklchg_irq < 0) {
- pr_err("Unable to get trkl-chg-on irq\n");
- return -ENXIO;
- }
-
- chip->chg_failed_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "chg-failed");
- if (chip->chg_failed_irq < 0) {
- pr_err("Unable to get chg_failed irq\n");
- return -ENXIO;
- }
-
- rc |= devm_request_irq(chip->dev, chip->chg_done_irq,
- qpnp_chg_chgr_chg_done_irq_handler,
- IRQF_TRIGGER_RISING,
- "chg_done", chip);
- if (rc < 0) {
- pr_err("Can't request %d chg_done for chg: %d\n",
- chip->chg_done_irq, rc);
- return -ENXIO;
- }
-
- rc |= devm_request_irq(chip->dev, chip->chg_failed_irq,
- qpnp_chg_chgr_chg_failed_irq_handler,
- IRQF_TRIGGER_RISING, "chg_failed", chip);
- if (rc < 0) {
- pr_err("Can't request %d chg_failed chg: %d\n",
- chip->chg_failed_irq, rc);
- return -ENXIO;
- }
-
- rc |= devm_request_irq(chip->dev, chip->chg_fastchg_irq,
- qpnp_chg_chgr_chg_fastchg_irq_handler,
- IRQF_TRIGGER_RISING,
- "fast-chg-on", chip);
- if (rc < 0) {
- pr_err("Can't request %d fast-chg-on for chg: %d\n",
- chip->chg_fastchg_irq, rc);
- return -ENXIO;
- }
-
- rc |= devm_request_irq(chip->dev, chip->chg_trklchg_irq,
- qpnp_chg_chgr_chg_trklchg_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "fast-chg-on", chip);
- if (rc < 0) {
- pr_err("Can't request %d trkl-chg-on for chg: %d\n",
- chip->chg_trklchg_irq, rc);
- return -ENXIO;
- }
-
+ case SMBCL_CHGR_SUBTYPE:
rc = qpnp_chg_vinmin_set(chip, chip->min_voltage_mv);
if (rc) {
pr_debug("failed setting min_voltage rc=%d\n", rc);
@@ -1578,6 +1669,12 @@
pr_debug("failed setting ibat_Safe rc=%d\n", rc);
return rc;
}
+ rc = qpnp_chg_tchg_max_set(chip, chip->tchg_mins);
+ if (rc) {
+ pr_debug("failed setting tchg_mins rc=%d\n", rc);
+ return rc;
+ }
+
/* HACK: Disable wdog */
rc = qpnp_chg_masked_write(chip, chip->chgr_base + 0x62,
0xFF, 0xA0, 1);
@@ -1587,13 +1684,10 @@
CHGR_IBAT_TERM_CHGR,
0x88, 0x80, 1);
- enable_irq_wake(chip->chg_fastchg_irq);
- enable_irq_wake(chip->chg_trklchg_irq);
- enable_irq_wake(chip->chg_failed_irq);
- enable_irq_wake(chip->chg_done_irq);
break;
case SMBB_BUCK_SUBTYPE:
case SMBBP_BUCK_SUBTYPE:
+ case SMBCL_BUCK_SUBTYPE:
rc = qpnp_chg_masked_write(chip,
chip->chgr_base + CHGR_BUCK_BCK_VBAT_REG_MODE,
BUCK_VBAT_REG_NODE_SEL_BIT,
@@ -1605,43 +1699,11 @@
break;
case SMBB_BAT_IF_SUBTYPE:
case SMBBP_BAT_IF_SUBTYPE:
- chip->batt_pres_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "batt-pres");
- if (chip->batt_pres_irq < 0) {
- pr_err("Unable to get batt-pres irq\n");
- return -ENXIO;
- }
- rc = devm_request_irq(chip->dev, chip->batt_pres_irq,
- qpnp_chg_bat_if_batt_pres_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "bat_if_batt_pres", chip);
- if (rc < 0) {
- pr_err("Can't request %d batt-pres irq for chg: %d\n",
- chip->batt_pres_irq, rc);
- return -ENXIO;
- }
-
- enable_irq_wake(chip->batt_pres_irq);
+ case SMBCL_BAT_IF_SUBTYPE:
break;
case SMBB_USB_CHGPTH_SUBTYPE:
case SMBBP_USB_CHGPTH_SUBTYPE:
- chip->usbin_valid_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "usbin-valid");
- if (chip->usbin_valid_irq < 0) {
- pr_err("Unable to get usbin irq\n");
- return -ENXIO;
- }
- rc = devm_request_irq(chip->dev, chip->usbin_valid_irq,
- qpnp_chg_usb_usbin_valid_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "chg_usbin_valid", chip);
- if (rc < 0) {
- pr_err("Can't request %d usbinvalid for chg: %d\n",
- chip->usbin_valid_irq, rc);
- return -ENXIO;
- }
-
- enable_irq_wake(chip->usbin_valid_irq);
+ case SMBCL_USB_CHGPTH_SUBTYPE:
chip->usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
if (chip->usb_present) {
rc = qpnp_chg_masked_write(chip,
@@ -1666,23 +1728,6 @@
break;
case SMBB_DC_CHGPTH_SUBTYPE:
- chip->dcin_valid_irq = spmi_get_irq_byname(chip->spmi,
- spmi_resource, "dcin-valid");
- if (chip->dcin_valid_irq < 0) {
- pr_err("Unable to get dcin irq\n");
- return -ENXIO;
- }
- rc = devm_request_irq(chip->dev, chip->dcin_valid_irq,
- qpnp_chg_dc_dcin_valid_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "chg_dcin_valid", chip);
- if (rc < 0) {
- pr_err("Can't request %d dcinvalid for chg: %d\n",
- chip->dcin_valid_irq, rc);
- return -ENXIO;
- }
-
- enable_irq_wake(chip->dcin_valid_irq);
break;
case SMBB_BOOST_SUBTYPE:
case SMBBP_BOOST_SUBTYPE:
@@ -1691,6 +1736,8 @@
chip->type = SMBB;
case SMBBP_MISC_SUBTYPE:
chip->type = SMBBP;
+ case SMBCL_MISC_SUBTYPE:
+ chip->type = SMBCL;
pr_debug("Setting BOOT_DONE\n");
rc = qpnp_chg_masked_write(chip,
chip->misc_base + CHGR_MISC_BOOT_DONE,
@@ -1710,6 +1757,100 @@
return rc;
}
+#define OF_PROP_READ(chip, prop, qpnp_dt_property, retval, optional) \
+do { \
+ if (retval) \
+ break; \
+ \
+ retval = of_property_read_u32(chip->spmi->dev.of_node, \
+ "qcom," qpnp_dt_property, \
+ &chip->prop); \
+ \
+ if ((retval == -EINVAL) && optional) \
+ retval = 0; \
+ else if (retval) \
+ pr_err("Error reading " #qpnp_dt_property \
+ " property rc = %d\n", rc); \
+} while (0)
+
+static int
+qpnp_charger_read_dt_props(struct qpnp_chg_chip *chip)
+{
+ int rc = 0;
+
+ OF_PROP_READ(chip, max_voltage_mv, "vddmax-mv", rc, 0);
+ OF_PROP_READ(chip, min_voltage_mv, "vinmin-mv", rc, 0);
+ OF_PROP_READ(chip, safe_voltage_mv, "vddsafe-mv", rc, 0);
+ OF_PROP_READ(chip, resume_delta_mv, "vbatdet-delta-mv", rc, 0);
+ OF_PROP_READ(chip, safe_current, "ibatsafe-ma", rc, 0);
+ OF_PROP_READ(chip, max_bat_chg_current, "ibatmax-ma", rc, 0);
+ if (rc)
+ pr_err("failed to read required dt parameters %d\n", rc);
+
+ OF_PROP_READ(chip, term_current, "ibatterm-ma", rc, 1);
+ OF_PROP_READ(chip, maxinput_dc_ma, "maxinput-dc-ma", rc, 1);
+ OF_PROP_READ(chip, maxinput_usb_ma, "maxinput-usb-ma", rc, 1);
+ OF_PROP_READ(chip, warm_bat_decidegc, "warm-bat-decidegc", rc, 1);
+ OF_PROP_READ(chip, cool_bat_decidegc, "cool-bat-decidegc", rc, 1);
+ OF_PROP_READ(chip, tchg_mins, "tchg-mins", rc, 1);
+ if (rc)
+ return rc;
+
+ /* Look up JEITA compliance parameters if cool and warm temp provided */
+ if (chip->cool_bat_decidegc && chip->warm_bat_decidegc) {
+ rc = qpnp_adc_tm_is_ready();
+ if (rc) {
+ pr_err("tm not ready %d\n", rc);
+ return rc;
+ }
+
+ OF_PROP_READ(chip, warm_bat_chg_ma, "ibatmax-warm-ma", rc, 1);
+ OF_PROP_READ(chip, cool_bat_chg_ma, "ibatmax-cool-ma", rc, 1);
+ OF_PROP_READ(chip, warm_bat_mv, "warm-bat-mv", rc, 1);
+ OF_PROP_READ(chip, cool_bat_mv, "cool-bat-mv", rc, 1);
+ if (rc)
+ return rc;
+ }
+
+ /* Get the charging-disabled property */
+ chip->charging_disabled = of_property_read_bool(chip->spmi->dev.of_node,
+ "qcom,charging-disabled");
+
+ /* Get the fake-batt-values property */
+ chip->use_default_batt_values =
+ of_property_read_bool(chip->spmi->dev.of_node,
+ "qcom,use-default-batt-values");
+
+ /* Disable charging when faking battery values */
+ if (chip->use_default_batt_values)
+ chip->charging_disabled = true;
+
+ of_get_property(chip->spmi->dev.of_node, "qcom,thermal-mitigation",
+ &(chip->thermal_levels));
+
+ if (chip->thermal_levels > sizeof(int)) {
+ chip->thermal_mitigation = kzalloc(
+ chip->thermal_levels,
+ GFP_KERNEL);
+
+ if (chip->thermal_mitigation == NULL) {
+ pr_err("thermal mitigation kzalloc() failed.\n");
+ return -ENOMEM;
+ }
+
+ chip->thermal_levels /= sizeof(int);
+ rc = of_property_read_u32_array(chip->spmi->dev.of_node,
+ "qcom,thermal-mitigation",
+ chip->thermal_mitigation, chip->thermal_levels);
+ if (rc) {
+ pr_err("qcom,thermal-mitigation missing in dt\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
static int __devinit
qpnp_charger_probe(struct spmi_device *spmi)
{
@@ -1736,178 +1877,10 @@
goto fail_chg_enable;
}
- /* Get the vddmax property */
- rc = of_property_read_u32(spmi->dev.of_node, "qcom,chg-vddmax-mv",
- &chip->max_voltage_mv);
- if (rc) {
- pr_err("Error reading vddmax property %d\n", rc);
+ /* Get all device tree properties */
+ rc = qpnp_charger_read_dt_props(chip);
+ if (rc)
goto fail_chg_enable;
- }
-
- /* Get the vinmin property */
- rc = of_property_read_u32(spmi->dev.of_node, "qcom,chg-vinmin-mv",
- &chip->min_voltage_mv);
- if (rc) {
- pr_err("Error reading vddmax property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the vddmax property */
- rc = of_property_read_u32(spmi->dev.of_node, "qcom,chg-vddsafe-mv",
- &chip->safe_voltage_mv);
- if (rc) {
- pr_err("Error reading vddsave property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the vbatdet-delta property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-vbatdet-delta-mv",
- &chip->resume_delta_mv);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading vbatdet-delta property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the ibatsafe property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-ibatsafe-ma",
- &chip->safe_current);
- if (rc) {
- pr_err("Error reading ibatsafe property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the ibatterm property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-ibatterm-ma",
- &chip->term_current);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading ibatterm property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the ibatmax property */
- rc = of_property_read_u32(spmi->dev.of_node, "qcom,chg-ibatmax-ma",
- &chip->max_bat_chg_current);
- if (rc) {
- pr_err("Error reading ibatmax property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the maxinput-dc-ma property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-maxinput-dc-ma",
- &chip->maxinput_dc_ma);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading maxinput-dc-ma property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the maxinput-usb-ma property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-maxinput-usb-ma",
- &chip->maxinput_usb_ma);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading maxinput-usb-ma property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the charging-disabled property */
- chip->charging_disabled = of_property_read_bool(spmi->dev.of_node,
- "qcom,chg-charging-disabled");
-
- /* Get the warm-bat-degc property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-warm-bat-decidegc",
- &chip->warm_bat_decidegc);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading warm-bat-degc property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the cool-bat-degc property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-cool-bat-decidegc",
- &chip->cool_bat_decidegc);
- if (rc && rc != -EINVAL) {
- pr_err("Error reading cool-bat-degc property %d\n", rc);
- goto fail_chg_enable;
- }
-
- if (chip->cool_bat_decidegc && chip->warm_bat_decidegc) {
- rc = qpnp_adc_tm_is_ready();
- if (rc) {
- pr_err("tm not ready %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the ibatmax-warm property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-ibatmax-warm-ma",
- &chip->warm_bat_chg_ma);
- if (rc) {
- pr_err("Error reading ibatmax-warm-ma %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the ibatmax-cool property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-ibatmax-cool-ma",
- &chip->cool_bat_chg_ma);
- if (rc) {
- pr_err("Error reading ibatmax-cool-ma %d\n", rc);
- goto fail_chg_enable;
- }
- /* Get the cool-bat-mv property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-cool-bat-mv",
- &chip->cool_bat_mv);
- if (rc) {
- pr_err("Error reading cool-bat-mv property %d\n", rc);
- goto fail_chg_enable;
- }
-
- /* Get the warm-bat-mv property */
- rc = of_property_read_u32(spmi->dev.of_node,
- "qcom,chg-warm-bat-mv",
- &chip->warm_bat_mv);
- if (rc) {
- pr_err("Error reading warm-bat-mv property %d\n", rc);
- goto fail_chg_enable;
- }
- }
-
- /* Get the fake-batt-values property */
- chip->use_default_batt_values = of_property_read_bool(spmi->dev.of_node,
- "qcom,chg-use-default-batt-values");
-
- of_get_property(spmi->dev.of_node, "qcom,chg-thermal-mitigation",
- &(chip->thermal_levels));
-
- if (chip->thermal_levels > sizeof(int)) {
- chip->thermal_mitigation = kzalloc(
- chip->thermal_levels,
- GFP_KERNEL);
-
- if (chip->thermal_mitigation == NULL) {
- pr_err("thermal mitigation kzalloc() failed.\n");
- goto fail_chg_enable;
- }
-
- chip->thermal_levels /= sizeof(int);
- rc = of_property_read_u32_array(spmi->dev.of_node,
- "qcom,chg-thermal-mitigation",
- chip->thermal_mitigation, chip->thermal_levels);
- if (rc) {
- pr_err("qcom,chg-thermal-mitigation missing in dt\n");
- goto fail_chg_enable;
- }
- }
-
- /* Disable charging when faking battery values */
- if (chip->use_default_batt_values)
- chip->charging_disabled = true;
spmi_for_each_container_dev(spmi_resource, spmi) {
if (!spmi_resource) {
@@ -1935,6 +1908,7 @@
switch (subtype) {
case SMBB_CHGR_SUBTYPE:
case SMBBP_CHGR_SUBTYPE:
+ case SMBCL_CHGR_SUBTYPE:
chip->chgr_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1945,6 +1919,7 @@
break;
case SMBB_BUCK_SUBTYPE:
case SMBBP_BUCK_SUBTYPE:
+ case SMBCL_BUCK_SUBTYPE:
chip->buck_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1955,6 +1930,7 @@
break;
case SMBB_BAT_IF_SUBTYPE:
case SMBBP_BAT_IF_SUBTYPE:
+ case SMBCL_BAT_IF_SUBTYPE:
chip->bat_if_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1965,6 +1941,7 @@
break;
case SMBB_USB_CHGPTH_SUBTYPE:
case SMBBP_USB_CHGPTH_SUBTYPE:
+ case SMBCL_USB_CHGPTH_SUBTYPE:
chip->usb_chgpth_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1994,6 +1971,7 @@
break;
case SMBB_MISC_SUBTYPE:
case SMBBP_MISC_SUBTYPE:
+ case SMBCL_MISC_SUBTYPE:
chip->misc_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -2097,6 +2075,12 @@
qpnp_chg_charge_en(chip, !chip->charging_disabled);
qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
+ rc = qpnp_chg_request_irqs(chip);
+ if (rc) {
+ pr_err("failed to request interrupts %d\n", rc);
+ goto unregister_batt;
+ }
+
pr_info("success chg_dis = %d, usb = %d, dc = %d b_health = %d batt_present = %d\n",
chip->charging_disabled,
qpnp_chg_is_usb_chg_plugged_in(chip),
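
A quick sanity check of the charge-time encoding introduced in qpnp_chg_tchg_max_set() above (illustrative arithmetic only): with QPNP_CHG_TCHG_STEP = 4 and the allowed range of 4..512 minutes,

    512 minutes -> (512 - 1) / 4 = 127 = 0x7F
      4 minutes -> (4 - 1) / 4   = 0

so the largest permitted timeout exactly fills the 7-bit field covered by QPNP_CHG_TCHG_MASK (0x7F).
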
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 706fba7..9a4ea63 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -58,6 +58,17 @@
If unsure, say N.
+config SCSI_UFSHCD_PLATFORM
+ tristate "Platform bus based UFS Controller support"
+ depends on SCSI_UFSHCD
+ ---help---
+ This selects the UFS host controller support. Select this if
+ you have a UFS controller on the platform bus.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config SCSI_UFS_TEST
tristate "Universal Flash Storage host controller driver unit-tests"
depends on SCSI_UFSHCD && IOSCHED_TEST
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index bbcc202..8d6665b 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,4 +1,5 @@
# UFSHCD makefile
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
+obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
new file mode 100644
index 0000000..03319ac
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -0,0 +1,217 @@
+/*
+ * Universal Flash Storage Host controller Platform bus based glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/platform_device.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pltfrm_suspend - suspend power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ /*
+ * TODO:
+ * 1. Call ufshcd_suspend
+ * 2. Do bus specific power management
+ */
+
+ disable_irq(hba->irq);
+
+ return 0;
+}
+
+/**
+ * ufshcd_pltfrm_resume - resume power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ /*
+ * TODO:
+ * 1. Call ufshcd_resume.
+ * 2. Do bus specific wake up
+ */
+
+ enable_irq(hba->irq);
+
+ return 0;
+}
+#else
+#define ufshcd_pltfrm_suspend NULL
+#define ufshcd_pltfrm_resume NULL
+#endif
+
+/**
+ * ufshcd_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_probe(struct platform_device *pdev)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ struct resource *mem_res;
+ struct resource *irq_res;
+ resource_size_t mem_size;
+ int err;
+ struct device *dev = &pdev->dev;
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res) {
+ dev_err(&pdev->dev,
+ "Memory resource not available\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ mem_size = resource_size(mem_res);
+ if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) {
+ dev_err(&pdev->dev,
+ "Cannot reserve the memory resource\n");
+ err = -EBUSY;
+ goto out_error;
+ }
+
+ mmio_base = ioremap_nocache(mem_res->start, mem_size);
+ if (!mmio_base) {
+ dev_err(&pdev->dev, "memory map failed\n");
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ dev_err(&pdev->dev, "IRQ resource not available\n");
+ err = -ENODEV;
+ goto out_iounmap;
+ }
+
+ err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
+ if (err) {
+ dev_err(&pdev->dev, "set dma mask failed\n");
+ goto out_iounmap;
+ }
+
+ err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
+ if (err) {
+ dev_err(&pdev->dev, "Intialization failed\n");
+ goto out_iounmap;
+ }
+
+ platform_set_drvdata(pdev, hba);
+
+ return 0;
+
+out_iounmap:
+ iounmap(mmio_base);
+out_release_regions:
+ release_mem_region(mem_res->start, mem_size);
+out_error:
+ return err;
+}
+
+/**
+ * ufshcd_pltfrm_remove - remove platform driver routine
+ * @pdev: pointer to platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_remove(struct platform_device *pdev)
+{
+ struct resource *mem_res;
+ resource_size_t mem_size;
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ disable_irq(hba->irq);
+
+ /* Some buggy controllers raise an interrupt after
+ * the resources are removed. So first we unregister the
+ * irq handler and then release the resources used by the driver.
+ */
+
+ free_irq(hba->irq, hba);
+ ufshcd_remove(hba);
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res)
+ dev_err(&pdev->dev, "ufshcd: Memory resource not available\n");
+ else {
+ mem_size = resource_size(mem_res);
+ release_mem_region(mem_res->start, mem_size);
+ }
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static const struct of_device_id ufs_of_match[] = {
+ { .compatible = "jedec,ufs-1.1"},
+ {},
+};
+
+static const struct dev_pm_ops ufshcd_dev_pm_ops = {
+ .suspend = ufshcd_pltfrm_suspend,
+ .resume = ufshcd_pltfrm_resume,
+};
+
+static struct platform_driver ufshcd_pltfrm_driver = {
+ .probe = ufshcd_pltfrm_probe,
+ .remove = ufshcd_pltfrm_remove,
+ .driver = {
+ .name = "ufshcd",
+ .owner = THIS_MODULE,
+ .pm = &ufshcd_dev_pm_ops,
+ .of_match_table = ufs_of_match,
+ },
+};
+
+module_platform_driver(ufshcd_pltfrm_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 60fd40c..c32a478 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -478,7 +478,7 @@
ucd_cmd_ptr->header.dword_2 = 0;
ucd_cmd_ptr->exp_data_transfer_len =
- cpu_to_be32(lrbp->cmd->transfersize);
+ cpu_to_be32(lrbp->cmd->sdb.length);
memcpy(ucd_cmd_ptr->cdb,
lrbp->cmd->cmnd,
diff --git a/drivers/thermal/msm8974-tsens.c b/drivers/thermal/msm8974-tsens.c
index 9ba954a..95378c5 100644
--- a/drivers/thermal/msm8974-tsens.c
+++ b/drivers/thermal/msm8974-tsens.c
@@ -244,7 +244,11 @@
struct tsens_tm_device_sensor {
struct thermal_zone_device *tz_dev;
enum thermal_device_mode mode;
- unsigned int sensor_num;
+ /* Physical HW sensor number */
+ unsigned int sensor_hw_num;
+ /* Software index. This is keep track of the HW/SW
+ * sensor_ID mapping */
+ unsigned int sensor_sw_id;
struct work_struct work;
int offset;
int calib_data_point1;
@@ -273,13 +277,54 @@
struct tsens_tm_device *tmdev;
-static int tsens_tz_code_to_degc(int adc_code, int sensor_num)
+int tsens_get_sw_id_mapping(int sensor_hw_num, int *sensor_sw_idx)
{
- int degc, num, den;
+ int i = 0;
+ bool id_found = false;
+ while (i < tmdev->tsens_num_sensor && !id_found) {
+ if (sensor_hw_num == tmdev->sensor[i].sensor_hw_num) {
+ *sensor_sw_idx = tmdev->sensor[i].sensor_sw_id;
+ id_found = true;
+ }
+ i++;
+ }
+
+ if (!id_found)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(tsens_get_sw_id_mapping);
+
+int tsens_get_hw_id_mapping(int sensor_sw_id, int *sensor_hw_num)
+{
+ int i = 0;
+ bool id_found = false;
+
+ while (i < tmdev->tsens_num_sensor && !id_found) {
+ if (sensor_sw_id == tmdev->sensor[i].sensor_sw_id) {
+ *sensor_hw_num = tmdev->sensor[i].sensor_hw_num;
+ id_found = true;
+ }
+ i++;
+ }
+
+ if (!id_found)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(tsens_get_hw_id_mapping);
+
+static int tsens_tz_code_to_degc(int adc_code, int sensor_sw_id)
+{
+ int degc, num, den, idx;
+
+ idx = sensor_sw_id;
num = ((adc_code * tmdev->tsens_factor) -
- tmdev->sensor[sensor_num].offset);
- den = (int) tmdev->sensor[sensor_num].slope_mul_tsens_factor;
+ tmdev->sensor[idx].offset);
+ den = (int) tmdev->sensor[idx].slope_mul_tsens_factor;
if (num > 0)
degc = ((num + (den/2))/den);
@@ -291,10 +336,10 @@
return degc;
}
-static int tsens_tz_degc_to_code(int degc, int sensor_num)
+static int tsens_tz_degc_to_code(int degc, int idx)
{
- int code = ((degc * tmdev->sensor[sensor_num].slope_mul_tsens_factor)
- + tmdev->sensor[sensor_num].offset)/tmdev->tsens_factor;
+ int code = ((degc * tmdev->sensor[idx].slope_mul_tsens_factor)
+ + tmdev->sensor[idx].offset)/tmdev->tsens_factor;
if (code > TSENS_THRESHOLD_MAX_CODE)
code = TSENS_THRESHOLD_MAX_CODE;
@@ -303,9 +348,10 @@
return code;
}
-static void msm_tsens_get_temp(int sensor_num, unsigned long *temp)
+static void msm_tsens_get_temp(int sensor_hw_num, unsigned long *temp)
{
unsigned int code, sensor_addr;
+ int sensor_sw_id = -EINVAL, rc = 0;
if (!tmdev->prev_reading_avail) {
while (!(readl_relaxed(TSENS_TRDY_ADDR(tmdev->tsens_addr))
@@ -318,9 +364,17 @@
sensor_addr =
(unsigned int)TSENS_S0_STATUS_ADDR(tmdev->tsens_addr);
code = readl_relaxed(sensor_addr +
- (sensor_num << TSENS_STATUS_ADDR_OFFSET));
+ (sensor_hw_num << TSENS_STATUS_ADDR_OFFSET));
+ /* Obtain SW index to map the corresponding thermal zone's
+ * offset and slope for code to degc conversion. */
+ rc = tsens_get_sw_id_mapping(sensor_hw_num, &sensor_sw_id);
+ if (rc < 0) {
+ pr_err("tsens mapping index not found\n");
+ return;
+ }
+
*temp = tsens_tz_code_to_degc((code & TSENS_SN_STATUS_TEMP_MASK),
- sensor_num);
+ sensor_sw_id);
}
static int tsens_tz_get_temp(struct thermal_zone_device *thermal,
@@ -331,7 +385,7 @@
if (!tm_sensor || tm_sensor->mode != THERMAL_DEVICE_ENABLED || !temp)
return -EINVAL;
- msm_tsens_get_temp(tm_sensor->sensor_num, temp);
+ msm_tsens_get_temp(tm_sensor->sensor_hw_num, temp);
return 0;
}
@@ -406,8 +460,9 @@
hi_code = TSENS_THRESHOLD_MAX_CODE;
reg_cntl = readl_relaxed((TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
- (tmdev->tsens_addr) +
- (tm_sensor->sensor_num * 4)));
+ (tmdev->tsens_addr) +
+ (tm_sensor->sensor_hw_num *
+ TSENS_SN_ADDR_OFFSET)));
switch (trip) {
case TSENS_TRIP_WARM:
code = (reg_cntl & TSENS_UPPER_THRESHOLD_MASK)
@@ -432,8 +487,8 @@
if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
writel_relaxed(reg_cntl | mask,
(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
- (tmdev->tsens_addr) +
- (tm_sensor->sensor_num * 4)));
+ (tmdev->tsens_addr) +
+ (tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET)));
else {
if (code < lo_code || code > hi_code) {
pr_err("%s with invalid code %x\n", __func__, code);
@@ -441,7 +496,7 @@
}
writel_relaxed(reg_cntl & ~mask,
(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(tmdev->tsens_addr) +
- (tm_sensor->sensor_num * 4)));
+ (tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET)));
}
mb();
return 0;
@@ -452,13 +507,14 @@
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
unsigned int reg;
+ int sensor_sw_id = -EINVAL, rc = 0;
if (!tm_sensor || trip < 0 || !temp)
return -EINVAL;
reg = readl_relaxed(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
(tmdev->tsens_addr) +
- (tm_sensor->sensor_num * TSENS_SN_ADDR_OFFSET));
+ (tm_sensor->sensor_hw_num * TSENS_SN_ADDR_OFFSET));
switch (trip) {
case TSENS_TRIP_WARM:
reg = (reg & TSENS_UPPER_THRESHOLD_MASK) >>
@@ -471,7 +527,12 @@
return -EINVAL;
}
- *temp = tsens_tz_code_to_degc(reg, tm_sensor->sensor_num);
+ rc = tsens_get_sw_id_mapping(tm_sensor->sensor_hw_num, &sensor_sw_id);
+ if (rc < 0) {
+ pr_err("tsens mapping index not found\n");
+ return rc;
+ }
+ *temp = tsens_tz_code_to_degc(reg, sensor_sw_id);
return 0;
}
@@ -479,9 +540,8 @@
static int tsens_tz_notify(struct thermal_zone_device *thermal,
int count, enum thermal_trip_type type)
{
- /* TSENS driver does not shutdown the device.
- All Thermal notification are sent to the
- thermal daemon to take appropriate action */
+ /* Critical temperature thresholds are enabled and will
+ * shut down the device once they are crossed. */
pr_debug("%s debug\n", __func__);
return 1;
}
@@ -491,10 +551,14 @@
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
unsigned int reg_cntl;
- int code, hi_code, lo_code, code_err_chk;
+ int code, hi_code, lo_code, code_err_chk, sensor_sw_id = 0, rc = 0;
- code_err_chk = code = tsens_tz_degc_to_code(temp,
- tm_sensor->sensor_num);
+ rc = tsens_get_sw_id_mapping(tm_sensor->sensor_hw_num, &sensor_sw_id);
+ if (rc < 0) {
+ pr_err("tsens mapping index not found\n");
+ return rc;
+ }
+ code_err_chk = code = tsens_tz_degc_to_code(temp, sensor_sw_id);
if (!tm_sensor || trip < 0)
return -EINVAL;
@@ -502,8 +566,8 @@
hi_code = TSENS_THRESHOLD_MAX_CODE;
reg_cntl = readl_relaxed(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
- (tmdev->tsens_addr) +
- (tm_sensor->sensor_num * TSENS_SN_ADDR_OFFSET));
+ (tmdev->tsens_addr) + (tm_sensor->sensor_hw_num *
+ TSENS_SN_ADDR_OFFSET));
switch (trip) {
case TSENS_TRIP_WARM:
code <<= TSENS_UPPER_THRESHOLD_SHIFT;
@@ -526,7 +590,7 @@
writel_relaxed(reg_cntl | code, (TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
(tmdev->tsens_addr) +
- (tm_sensor->sensor_num *
+ (tm_sensor->sensor_hw_num *
TSENS_SN_ADDR_OFFSET)));
mb();
return 0;
@@ -557,35 +621,48 @@
tsens_work);
unsigned int i, status, threshold;
unsigned int sensor_status_addr, sensor_status_ctrl_addr;
+ int sensor_sw_id = -EINVAL, rc = 0;
sensor_status_addr =
(unsigned int)TSENS_S0_STATUS_ADDR(tmdev->tsens_addr);
sensor_status_ctrl_addr =
(unsigned int)TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
(tmdev->tsens_addr);
- for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+ for (i = 0; i < tm->tsens_num_sensor; i++) {
bool upper_thr = false, lower_thr = false;
- status = readl_relaxed(sensor_status_addr);
- threshold = readl_relaxed(sensor_status_ctrl_addr);
+ uint32_t addr_offset;
+
+ addr_offset = tm->sensor[i].sensor_hw_num *
+ TSENS_SN_ADDR_OFFSET;
+ status = readl_relaxed(sensor_status_addr + addr_offset);
+ threshold = readl_relaxed(sensor_status_ctrl_addr +
+ addr_offset);
if (status & TSENS_SN_STATUS_UPPER_STATUS) {
writel_relaxed(threshold | TSENS_UPPER_STATUS_CLR,
- sensor_status_ctrl_addr);
+ TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(
+ tmdev->tsens_addr + addr_offset));
upper_thr = true;
}
if (status & TSENS_SN_STATUS_LOWER_STATUS) {
writel_relaxed(threshold | TSENS_LOWER_STATUS_CLR,
- sensor_status_ctrl_addr);
+ TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(
+ tmdev->tsens_addr + addr_offset));
lower_thr = true;
}
if (upper_thr || lower_thr) {
/* Notify user space */
schedule_work(&tm->sensor[i].work);
- pr_debug("sensor:%d trigger temp (%d degC)\n", i,
+ rc = tsens_get_sw_id_mapping(
+ tm->sensor[i].sensor_hw_num,
+ &sensor_sw_id);
+ if (rc < 0)
+ pr_err("tsens mapping index not found\n");
+ pr_debug("sensor:%d trigger temp (%d degC)\n",
+ tm->sensor[i].sensor_hw_num,
tsens_tz_code_to_degc((status &
- TSENS_SN_STATUS_TEMP_MASK), i));
+ TSENS_SN_STATUS_TEMP_MASK),
+ sensor_sw_id));
}
- sensor_status_addr += TSENS_SN_ADDR_OFFSET;
- sensor_status_ctrl_addr += TSENS_SN_ADDR_OFFSET;
}
mb();
}
@@ -599,17 +676,19 @@
static void tsens_hw_init(void)
{
- unsigned int reg_cntl = 0;
+ unsigned int reg_cntl = 0, sensor_en = 0;
unsigned int i;
if (tmdev->tsens_local_init) {
writel_relaxed(reg_cntl, TSENS_CTRL_ADDR(tmdev->tsens_addr));
writel_relaxed(reg_cntl | TSENS_SW_RST,
TSENS_CTRL_ADDR(tmdev->tsens_addr));
- reg_cntl |= ((TSENS_62_5_MS_MEAS_PERIOD <<
- TSENS_MEAS_PERIOD_SHIFT) |
- (((1 << tmdev->tsens_num_sensor) - 1) << TSENS_SENSOR0_SHIFT) |
- TSENS_EN);
+ reg_cntl |= (TSENS_62_5_MS_MEAS_PERIOD <<
+ TSENS_MEAS_PERIOD_SHIFT);
+ for (i = 0; i < tmdev->tsens_num_sensor; i++)
+ sensor_en |= (1 << tmdev->sensor[i].sensor_hw_num);
+ sensor_en <<= TSENS_SENSOR0_SHIFT;
+ reg_cntl |= (sensor_en | TSENS_EN);
writel_relaxed(reg_cntl, TSENS_CTRL_ADDR(tmdev->tsens_addr));
writel_relaxed(TSENS_GLOBAL_INIT_DATA,
TSENS_GLOBAL_CONFIG(tmdev->tsens_addr));
@@ -618,10 +697,12 @@
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
writel_relaxed(TSENS_SN_MIN_MAX_STATUS_CTRL_DATA,
TSENS_SN_MIN_MAX_STATUS_CTRL(tmdev->tsens_addr)
- + (i * TSENS_SN_ADDR_OFFSET));
+ + (tmdev->sensor[i].sensor_hw_num *
+ TSENS_SN_ADDR_OFFSET));
writel_relaxed(TSENS_SN_REMOTE_CFG_DATA,
TSENS_SN_REMOTE_CONFIG(tmdev->tsens_addr)
- + (i * TSENS_SN_ADDR_OFFSET));
+ + (tmdev->sensor[i].sensor_hw_num *
+ TSENS_SN_ADDR_OFFSET));
}
pr_debug("Local TSENS control initialization\n");
}
@@ -1223,6 +1304,7 @@
const struct device_node *of_node = pdev->dev.of_node;
struct resource *res_mem = NULL;
u32 *tsens_slope_data;
+ u32 *sensor_id;
u32 rc = 0, i, tsens_num_sensors, calib_type;
const char *tsens_calib_mode;
@@ -1280,6 +1362,29 @@
tmdev->tsens_local_init = of_property_read_bool(of_node,
"qcom,tsens-local-init");
+ sensor_id = devm_kzalloc(&pdev->dev,
+ tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+ if (!sensor_id) {
+ dev_err(&pdev->dev, "can not allocate sensor id\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,sensor-id", sensor_id, tsens_num_sensors);
+ if (rc) {
+ pr_debug("Default sensor id mapping\n");
+ for (i = 0; i < tsens_num_sensors; i++) {
+ tmdev->sensor[i].sensor_hw_num = i;
+ tmdev->sensor[i].sensor_sw_id = i;
+ }
+ } else {
+ pr_debug("Use specified sensor id mapping\n");
+ for (i = 0; i < tsens_num_sensors; i++) {
+ tmdev->sensor[i].sensor_hw_num = sensor_id[i];
+ tmdev->sensor[i].sensor_sw_id = i;
+ }
+ }
+
tmdev->tsens_irq = platform_get_irq(pdev, 0);
if (tmdev->tsens_irq < 0) {
pr_err("Invalid get irq\n");
@@ -1422,9 +1527,9 @@
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
char name[18];
- snprintf(name, sizeof(name), "tsens_tz_sensor%d", i);
+ snprintf(name, sizeof(name), "tsens_tz_sensor%d",
+ tmdev->sensor[i].sensor_hw_num);
tmdev->sensor[i].mode = THERMAL_DEVICE_ENABLED;
- tmdev->sensor[i].sensor_num = i;
tmdev->sensor[i].tz_dev = thermal_zone_device_register(name,
TSENS_TRIP_NUM, &tmdev->sensor[i],
&tsens_thermal_zone_ops, 0, 0, 0, 0);
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 5aca48d..12ac3bc 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -23,17 +23,447 @@
#include <linux/msm_thermal.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include <mach/cpufreq.h>
+#include <mach/rpm-regulator.h>
+#include <mach/rpm-regulator-smd.h>
+#include <linux/regulator/consumer.h>
-static int enabled;
+#define MAX_RAILS 5
+
static struct msm_thermal_data msm_thermal_info;
static uint32_t limited_max_freq = MSM_CPUFREQ_NO_LIMIT;
static struct delayed_work check_temp_work;
+static bool core_control_enabled;
+static uint32_t cpus_offlined;
+static DEFINE_MUTEX(core_control_mutex);
+static int enabled;
+static int rails_cnt;
+static int psm_rails_cnt;
static int limit_idx;
static int limit_idx_low;
static int limit_idx_high;
+static int max_tsens_num;
static struct cpufreq_frequency_table *table;
+static uint32_t usefreq;
+static int freq_table_get;
+static bool vdd_rstr_enabled;
+static bool vdd_rstr_nodes_called;
+static bool vdd_rstr_probed;
+static bool psm_enabled;
+static bool psm_nodes_called;
+static bool psm_probed;
+static DEFINE_MUTEX(vdd_rstr_mutex);
+static DEFINE_MUTEX(psm_mutex);
+
+struct rail {
+ const char *name;
+ uint32_t freq_req;
+ uint32_t min_level;
+ uint32_t num_levels;
+ uint32_t curr_level;
+ uint32_t levels[3];
+ struct kobj_attribute value_attr;
+ struct kobj_attribute level_attr;
+ struct regulator *reg;
+ struct attribute_group attr_gp;
+};
+
+struct psm_rail {
+ const char *name;
+ uint8_t init;
+ uint8_t mode;
+ struct kobj_attribute mode_attr;
+ struct rpm_regulator *reg;
+ struct attribute_group attr_gp;
+};
+
+static struct psm_rail *psm_rails;
+static struct rail *rails;
+
+struct vdd_rstr_enable {
+ struct kobj_attribute ko_attr;
+ uint32_t enabled;
+};
+
+/* For SMPS only */
+enum PMIC_SW_MODE {
+ PMIC_AUTO_MODE = RPM_REGULATOR_MODE_AUTO,
+ PMIC_IPEAK_MODE = RPM_REGULATOR_MODE_IPEAK,
+ PMIC_PWM_MODE = RPM_REGULATOR_MODE_HPM,
+};
+
+#define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \
+ ko_attr.attr.name = __stringify(_name); \
+ ko_attr.attr.mode = 0444; \
+ ko_attr.show = vdd_rstr_reg_##_name##_show; \
+ ko_attr.store = NULL; \
+ _rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \
+ ko_attr.attr.name = __stringify(_name); \
+ ko_attr.attr.mode = 0644; \
+ ko_attr.show = vdd_rstr_reg_##_name##_show; \
+ ko_attr.store = vdd_rstr_reg_##_name##_store; \
+ _rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define VDD_RSTR_ENABLE_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct vdd_rstr_enable, ko_attr));
+
+#define VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct rail, value_attr));
+
+#define VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct rail, level_attr));
+
+#define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \
+ ko_attr.attr.name = __stringify(_name); \
+ ko_attr.attr.mode = 0644; \
+ ko_attr.show = psm_reg_##_name##_show; \
+ ko_attr.store = psm_reg_##_name##_store; \
+ _rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define PSM_REG_MODE_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct psm_rail, mode_attr));
+/* If freq table exists, then we can send freq request */
+static int check_freq_table(void)
+{
+ int ret = 0;
+ struct cpufreq_frequency_table *table = NULL;
+
+ table = cpufreq_frequency_get_table(0);
+ if (!table) {
+ pr_debug("%s: error reading cpufreq table\n", __func__);
+ return -EINVAL;
+ }
+ freq_table_get = 1;
+
+ return ret;
+}
+
+static int update_cpu_min_freq_all(uint32_t min)
+{
+ int cpu = 0;
+ int ret = 0;
+
+ if (!freq_table_get) {
+ ret = check_freq_table();
+ if (ret) {
+ pr_err("%s:Fail to get freq table\n", __func__);
+ return ret;
+ }
+ }
+ /* If min is larger than allowed max */
+ if (min != MSM_CPUFREQ_NO_LIMIT &&
+ min > table[limit_idx_high].frequency)
+ min = table[limit_idx_high].frequency;
+
+ for_each_possible_cpu(cpu) {
+ ret = msm_cpufreq_set_freq_limits(cpu, min, limited_max_freq);
+
+ if (ret) {
+ pr_err("%s:Fail to set limits for cpu%d\n",
+ __func__, cpu);
+ return ret;
+ }
+
+ if (cpufreq_update_policy(cpu))
+ pr_debug("%s: Cannot update policy for cpu%d\n",
+ __func__, cpu);
+ }
+
+ return ret;
+}
+
+static int vdd_restriction_apply_freq(struct rail *r, int level)
+{
+ int ret = 0;
+
+ /* level = -1: disable, level = 0,1,2..n: enable */
+ if (level == -1) {
+ ret = update_cpu_min_freq_all(r->min_level);
+ if (ret)
+ return ret;
+ else
+ r->curr_level = -1;
+ } else if (level >= 0 && level < (r->num_levels)) {
+ ret = update_cpu_min_freq_all(r->levels[level]);
+ if (ret)
+ return ret;
+ else
+ r->curr_level = level;
+ } else {
+ pr_err("level input:%d is not within range\n", level);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vdd_restriction_apply_voltage(struct rail *r, int level)
+{
+ int ret = 0;
+
+ if (r->reg == NULL) {
+ pr_info("Do not have regulator handle:%s, can't apply vdd\n",
+ r->name);
+ return -EFAULT;
+ }
+ /* level = -1: disable, level = 0,1,2..n: enable */
+ if (level == -1) {
+ ret = regulator_set_voltage(r->reg, r->min_level,
+ r->levels[r->num_levels - 1]);
+ if (!ret)
+ r->curr_level = -1;
+ } else if (level >= 0 && level < (r->num_levels)) {
+ ret = regulator_set_voltage(r->reg, r->levels[level],
+ r->levels[r->num_levels - 1]);
+ if (!ret)
+ r->curr_level = level;
+ } else {
+ pr_err("level input:%d is not within range\n", level);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+/* 1:enable, 0:disable */
+static int vdd_restriction_apply_all(int en)
+{
+ int i = 0;
+ int fail_cnt = 0;
+ int ret = 0;
+
+ for (i = 0; i < rails_cnt; i++) {
+ if (rails[i].freq_req == 1 && freq_table_get)
+ ret = vdd_restriction_apply_freq(&rails[i],
+ en ? 0 : -1);
+ else
+ ret = vdd_restriction_apply_voltage(&rails[i],
+ en ? 0 : -1);
+ if (ret) {
+ pr_err("Cannot set voltage for %s", rails[i].name);
+ fail_cnt++;
+ }
+ }
+ /* Check fail_cnt to make sure the restriction was applied to all
+ * of the rails successfully */
+ if (fail_cnt)
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Setting all rails the same mode */
+static int psm_set_mode_all(int mode)
+{
+ int i = 0;
+ int fail_cnt = 0;
+ int ret = 0;
+
+ for (i = 0; i < psm_rails_cnt; i++) {
+ if (psm_rails[i].mode != mode) {
+ ret = rpm_regulator_set_mode(psm_rails[i].reg, mode);
+ if (ret) {
+ pr_err("Cannot set mode:%d for %s",
+ mode, psm_rails[i].name);
+ fail_cnt++;
+ } else
+ psm_rails[i].mode = mode;
+ }
+ }
+
+ return fail_cnt ? (-EFAULT) : ret;
+}
+
+static int vdd_rstr_en_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", en->enabled);
+}
+
+static ssize_t vdd_rstr_en_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int i = 0;
+ uint8_t en_cnt = 0;
+ uint8_t dis_cnt = 0;
+ uint32_t val = 0;
+ struct kernel_param kp;
+ struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
+
+ mutex_lock(&vdd_rstr_mutex);
+ kp.arg = &val;
+ ret = param_set_bool(buf, &kp);
+ if (ret) {
+ pr_err("Invalid input %s for enabled\n", buf);
+ goto done_vdd_rstr_en;
+ }
+
+ if ((val == 0) && (en->enabled == 0))
+ goto done_vdd_rstr_en;
+
+ for (i = 0; i < rails_cnt; i++) {
+ if (rails[i].freq_req == 1 && freq_table_get)
+ ret = vdd_restriction_apply_freq(&rails[i],
+ (val) ? 0 : -1);
+ else
+ ret = vdd_restriction_apply_voltage(&rails[i],
+ (val) ? 0 : -1);
+
+ /* Even if one rail fails to be set, still try to set the
+ * others; continue the loop */
+ if (ret)
+ pr_err("Set vdd restriction for %s failed\n",
+ rails[i].name);
+ else {
+ if (val)
+ en_cnt++;
+ else
+ dis_cnt++;
+ }
+ }
+ /* As long as one rail is enabled, vdd rstr is enabled */
+ if (val && en_cnt)
+ en->enabled = 1;
+ else if (!val && (dis_cnt == rails_cnt))
+ en->enabled = 0;
+
+done_vdd_rstr_en:
+ mutex_unlock(&vdd_rstr_mutex);
+ return count;
+}
+
+static struct vdd_rstr_enable vdd_rstr_en = {
+ .ko_attr.attr.name = __stringify(enabled),
+ .ko_attr.attr.mode = 0644,
+ .ko_attr.show = vdd_rstr_en_show,
+ .ko_attr.store = vdd_rstr_en_store,
+ .enabled = 1,
+};
+
+static struct attribute *vdd_rstr_en_attribs[] = {
+ &vdd_rstr_en.ko_attr.attr,
+ NULL,
+};
+
+static struct attribute_group vdd_rstr_en_attribs_gp = {
+ .attrs = vdd_rstr_en_attribs,
+};
+
+static int vdd_rstr_reg_value_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int val = 0;
+ struct rail *reg = VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr);
+ /* -1:disabled, -2:fail to get regulator handle */
+ if (reg->curr_level < 0)
+ val = reg->curr_level;
+ else
+ val = reg->levels[reg->curr_level];
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static int vdd_rstr_reg_level_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reg->curr_level);
+}
+
+static ssize_t vdd_rstr_reg_level_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+
+ struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
+
+ mutex_lock(&vdd_rstr_mutex);
+ if (vdd_rstr_en.enabled == 0)
+ goto done_store_level;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s for level\n", buf);
+ goto done_store_level;
+ }
+
+ if (val < 0 || val > reg->num_levels - 1) {
+ pr_err(" Invalid number %d for level\n", val);
+ goto done_store_level;
+ }
+
+ if (val != reg->curr_level) {
+ if (reg->freq_req == 1 && freq_table_get)
+ update_cpu_min_freq_all(reg->levels[val]);
+ else {
+ ret = vdd_restriction_apply_voltage(reg, val);
+ if (ret) {
+ pr_err( \
+ "Set vdd restriction for regulator %s failed\n",
+ reg->name);
+ goto done_store_level;
+ }
+ }
+ reg->curr_level = val;
+ }
+
+done_store_level:
+ mutex_unlock(&vdd_rstr_mutex);
+ return count;
+}
+
+static int psm_reg_mode_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
+}
+
+static ssize_t psm_reg_mode_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+ struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+
+ mutex_lock(&psm_mutex);
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s for mode\n", buf);
+ goto done_psm_store;
+ }
+
+ if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) {
+ pr_err(" Invalid number %d for mode\n", val);
+ goto done_psm_store;
+ }
+
+ if (val != reg->mode) {
+ ret = rpm_regulator_set_mode(reg->reg, val);
+ if (ret) {
+ pr_err( \
+ "Fail to set PMIC SW Mode:%d for %s\n",
+ val, reg->name);
+ goto done_psm_store;
+ }
+ reg->mode = val;
+ }
+
+done_psm_store:
+ mutex_unlock(&psm_mutex);
+ return count;
+}
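
As a usage note (paths are assumptions derived from KBUILD_MODNAME and the kobject names created later in this patch), the mode attribute is exercised from userspace roughly as:

	/*
	 * cat /sys/module/msm_thermal/pmic_sw_mode/<rail>/mode
	 * echo <mode> > /sys/module/msm_thermal/pmic_sw_mode/<rail>/mode
	 *
	 * where <mode> must equal the numeric value of PMIC_AUTO_MODE or
	 * PMIC_PWM_MODE defined above; any other value is rejected.
	 */
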
static int msm_thermal_get_freq_table(void)
{
@@ -42,7 +472,7 @@
table = cpufreq_frequency_get_table(0);
if (table == NULL) {
- pr_debug("%s: error reading cpufreq table\n", __func__);
+ pr_debug("%s: error reading cpufreq table\n", KBUILD_MODNAME);
ret = -EINVAL;
goto fail;
}
@@ -67,17 +497,166 @@
limited_max_freq = max_freq;
if (max_freq != MSM_CPUFREQ_NO_LIMIT)
- pr_info("msm_thermal: Limiting cpu%d max frequency to %d\n",
- cpu, max_freq);
+ pr_info("%s: Limiting cpu%d max frequency to %d\n",
+ KBUILD_MODNAME, cpu, max_freq);
else
- pr_info("msm_thermal: Max frequency reset for cpu%d\n", cpu);
+ pr_info("%s: Max frequency reset for cpu%d\n",
+ KBUILD_MODNAME, cpu);
ret = cpufreq_update_policy(cpu);
return ret;
}
-static void check_temp(struct work_struct *work)
+static void __cpuinit do_core_control(long temp)
+{
+ int i = 0;
+ int ret = 0;
+
+ if (!core_control_enabled)
+ return;
+
+ mutex_lock(&core_control_mutex);
+ if (msm_thermal_info.core_control_mask &&
+ temp >= msm_thermal_info.core_limit_temp_degC) {
+ for (i = num_possible_cpus(); i > 0; i--) {
+ if (!(msm_thermal_info.core_control_mask & BIT(i)))
+ continue;
+ if (cpus_offlined & BIT(i) && !cpu_online(i))
+ continue;
+ pr_info("%s: Set Offline: CPU%d Temp: %ld\n",
+ KBUILD_MODNAME, i, temp);
+ ret = cpu_down(i);
+ if (ret)
+ pr_err("%s: Error %d offline core %d\n",
+ KBUILD_MODNAME, ret, i);
+ cpus_offlined |= BIT(i);
+ break;
+ }
+ } else if (msm_thermal_info.core_control_mask && cpus_offlined &&
+ temp <= (msm_thermal_info.core_limit_temp_degC -
+ msm_thermal_info.core_temp_hysteresis_degC)) {
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (!(cpus_offlined & BIT(i)))
+ continue;
+ cpus_offlined &= ~BIT(i);
+ pr_info("%s: Allow Online CPU%d Temp: %ld\n",
+ KBUILD_MODNAME, i, temp);
+ /* If this core is already online, then bring up the
+ * next offlined core.
+ */
+ if (cpu_online(i))
+ continue;
+ ret = cpu_up(i);
+ if (ret)
+ pr_err("%s: Error %d online core %d\n",
+ KBUILD_MODNAME, ret, i);
+ break;
+ }
+ }
+ mutex_unlock(&core_control_mutex);
+}
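
A worked example of the hysteresis above, assuming hypothetical platform data:

	/*
	 * With core_limit_temp_degC = 80 and core_temp_hysteresis_degC = 10:
	 *   temp >= 80 degC -> one masked core is taken offline per poll
	 *   temp <= 70 degC -> one previously offlined core is brought back
	 *                      online per poll
	 */
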
+
+static int do_vdd_restriction(void)
+{
+ struct tsens_device tsens_dev;
+ long temp = 0;
+ int ret = 0;
+ int i = 0;
+ int dis_cnt = 0;
+
+ if (!vdd_rstr_enabled)
+ return ret;
+
+ if (usefreq && !freq_table_get) {
+ if (check_freq_table())
+ return ret;
+ }
+
+ mutex_lock(&vdd_rstr_mutex);
+ for (i = 0; i < max_tsens_num; i++) {
+ tsens_dev.sensor_num = i;
+ ret = tsens_get_temp(&tsens_dev, &temp);
+ if (ret) {
+ pr_debug("%s: Unable to read TSENS sensor %d\n",
+ __func__, tsens_dev.sensor_num);
+ dis_cnt++;
+ continue;
+ }
+ if (temp <= msm_thermal_info.vdd_rstr_temp_hyst_degC &&
+ vdd_rstr_en.enabled == 0) {
+ ret = vdd_restriction_apply_all(1);
+ if (ret) {
+ pr_err( \
+ "Enable vdd rstr votlage for all failed\n");
+ goto exit;
+ }
+ vdd_rstr_en.enabled = 1;
+ goto exit;
+ } else if (temp > msm_thermal_info.vdd_rstr_temp_degC &&
+ vdd_rstr_en.enabled == 1)
+ dis_cnt++;
+ }
+ if (dis_cnt == max_tsens_num) {
+ ret = vdd_restriction_apply_all(0);
+ if (ret) {
+ pr_err("Disable vdd rstr votlage for all failed\n");
+ goto exit;
+ }
+ vdd_rstr_en.enabled = 0;
+ }
+exit:
+ mutex_unlock(&vdd_rstr_mutex);
+ return ret;
+}
+
+static int do_psm(void)
+{
+ struct tsens_device tsens_dev;
+ long temp = 0;
+ int ret = 0;
+ int i = 0;
+ int auto_cnt = 0;
+
+ mutex_lock(&psm_mutex);
+ for (i = 0; i < max_tsens_num; i++) {
+ tsens_dev.sensor_num = i;
+ ret = tsens_get_temp(&tsens_dev, &temp);
+ if (ret) {
+ pr_debug("%s: Unable to read TSENS sensor %d\n",
+ __func__, tsens_dev.sensor_num);
+ auto_cnt++;
+ continue;
+ }
+
+ /* As long as one sensor is above the threshold, set PWM mode
+ * on all rails and stop the loop. Set auto mode only when all
+ * sensors are at or below the hysteresis threshold */
+ if (temp > msm_thermal_info.psm_temp_degC) {
+ ret = psm_set_mode_all(PMIC_PWM_MODE);
+ if (ret) {
+ pr_err("Set pwm mode for all failed\n");
+ goto exit;
+ }
+ break;
+ } else if (temp <= msm_thermal_info.psm_temp_hyst_degC)
+ auto_cnt++;
+ }
+
+ if (auto_cnt == max_tsens_num) {
+ ret = psm_set_mode_all(PMIC_AUTO_MODE);
+ if (ret) {
+ pr_err("Set auto mode for all failed\n");
+ goto exit;
+ }
+ }
+
+exit:
+ mutex_unlock(&psm_mutex);
+ return ret;
+}
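
Similarly, a worked example for the PMIC switch-mode policy above, assuming hypothetical thresholds:

	/*
	 * With psm_temp_degC = 85 and psm_temp_hyst_degC = 80:
	 *   any sensor > 85 degC   -> all PSM rails forced to PWM mode
	 *   every sensor <= 80 degC -> all PSM rails returned to AUTO mode
	 * (a failed sensor read also counts toward the AUTO condition)
	 */
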
+
+static void __cpuinit check_temp(struct work_struct *work)
{
static int limit_init;
struct tsens_device tsens_dev;
@@ -85,12 +664,11 @@
uint32_t max_freq = limited_max_freq;
int cpu = 0;
int ret = 0;
-
tsens_dev.sensor_num = msm_thermal_info.sensor_id;
ret = tsens_get_temp(&tsens_dev, &temp);
if (ret) {
- pr_debug("msm_thermal: Unable to read TSENS sensor %d\n",
- tsens_dev.sensor_num);
+ pr_debug("%s: Unable to read TSENS sensor %d\n",
+ KBUILD_MODNAME, tsens_dev.sensor_num);
goto reschedule;
}
@@ -102,6 +680,10 @@
limit_init = 1;
}
+ do_core_control(temp);
+ do_vdd_restriction();
+ do_psm();
+
if (temp >= msm_thermal_info.limit_temp_degC) {
if (limit_idx == limit_idx_low)
goto reschedule;
@@ -129,8 +711,9 @@
for_each_possible_cpu(cpu) {
ret = update_cpu_max_freq(cpu, max_freq);
if (ret)
- pr_debug("Unable to limit cpu%d max freq to %d\n",
- cpu, max_freq);
+ pr_debug(
+ "%s: Unable to limit cpu%d max freq to %d\n",
+ KBUILD_MODNAME, cpu, max_freq);
}
reschedule:
@@ -139,7 +722,36 @@
msecs_to_jiffies(msm_thermal_info.poll_ms));
}
-static void disable_msm_thermal(void)
+static int __cpuinit msm_thermal_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
+ if (core_control_enabled &&
+ (msm_thermal_info.core_control_mask & BIT(cpu)) &&
+ (cpus_offlined & BIT(cpu))) {
+ pr_info(
+ "%s: Preventing cpu%d from coming online.\n",
+ KBUILD_MODNAME, cpu);
+ return NOTIFY_BAD;
+ }
+ }
+
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata msm_thermal_cpu_notifier = {
+ .notifier_call = msm_thermal_cpu_callback,
+};
+
+/**
+ * We will reset the cpu frequency limits here. The core online/offline
+ * status is carried over to the process that stops msm_thermal, as we
+ * don't want to bring a core online and reintroduce thermal issues.
+ */
+static void __cpuinit disable_msm_thermal(void)
{
int cpu = 0;
@@ -155,7 +767,7 @@
}
}
-static int set_enabled(const char *val, const struct kernel_param *kp)
+static int __cpuinit set_enabled(const char *val, const struct kernel_param *kp)
{
int ret = 0;
@@ -163,9 +775,10 @@
if (!enabled)
disable_msm_thermal();
else
- pr_info("msm_thermal: no action for enabled = %d\n", enabled);
+ pr_info("%s: no action for enabled = %d\n",
+ KBUILD_MODNAME, enabled);
- pr_info("msm_thermal: enabled = %d\n", enabled);
+ pr_info("%s: enabled = %d\n", KBUILD_MODNAME, enabled);
return ret;
}
@@ -178,18 +791,561 @@
module_param_cb(enabled, &module_ops, &enabled, 0644);
MODULE_PARM_DESC(enabled, "enforce thermal limit on cpu");
+
+/* Call with core_control_mutex locked */
+static int __cpuinit update_offline_cores(int val)
+{
+ int cpu = 0;
+ int ret = 0;
+
+ cpus_offlined = msm_thermal_info.core_control_mask & val;
+ if (!core_control_enabled)
+ return 0;
+
+ for_each_possible_cpu(cpu) {
+ if (!(cpus_offlined & BIT(cpu)))
+ continue;
+ if (!cpu_online(cpu))
+ continue;
+ ret = cpu_down(cpu);
+ if (ret)
+ pr_err("%s: Unable to offline cpu%d\n",
+ KBUILD_MODNAME, cpu);
+ }
+ return ret;
+}
+
+static ssize_t show_cc_enabled(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", core_control_enabled);
+}
+
+static ssize_t __cpuinit store_cc_enabled(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+
+ mutex_lock(&core_control_mutex);
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
+ goto done_store_cc;
+ }
+
+ if (core_control_enabled == !!val)
+ goto done_store_cc;
+
+ core_control_enabled = !!val;
+ if (core_control_enabled) {
+ pr_info("%s: Core control enabled\n", KBUILD_MODNAME);
+ register_cpu_notifier(&msm_thermal_cpu_notifier);
+ update_offline_cores(cpus_offlined);
+ } else {
+ pr_info("%s: Core control disabled\n", KBUILD_MODNAME);
+ unregister_cpu_notifier(&msm_thermal_cpu_notifier);
+ }
+
+done_store_cc:
+ mutex_unlock(&core_control_mutex);
+ return count;
+}
+
+static ssize_t show_cpus_offlined(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", cpus_offlined);
+}
+
+static ssize_t __cpuinit store_cpus_offlined(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ uint32_t val = 0;
+
+ mutex_lock(&core_control_mutex);
+ ret = kstrtouint(buf, 10, &val);
+ if (ret) {
+ pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
+ goto done_cc;
+ }
+
+ if (enabled) {
+ pr_err("%s: Ignoring request; polling thread is enabled.\n",
+ KBUILD_MODNAME);
+ goto done_cc;
+ }
+
+ if (cpus_offlined == val)
+ goto done_cc;
+
+ update_offline_cores(val);
+done_cc:
+ mutex_unlock(&core_control_mutex);
+ return count;
+}
+
+static __cpuinitdata struct kobj_attribute cc_enabled_attr =
+__ATTR(enabled, 0644, show_cc_enabled, store_cc_enabled);
+
+static __cpuinitdata struct kobj_attribute cpus_offlined_attr =
+__ATTR(cpus_offlined, 0644, show_cpus_offlined, store_cpus_offlined);
+
+static __cpuinitdata struct attribute *cc_attrs[] = {
+ &cc_enabled_attr.attr,
+ &cpus_offlined_attr.attr,
+ NULL,
+};
+
+static __cpuinitdata struct attribute_group cc_attr_group = {
+ .attrs = cc_attrs,
+};
+
+static __init int msm_thermal_add_cc_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *cc_kobj = NULL;
+ int ret = 0;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("%s: cannot find kobject for module\n",
+ KBUILD_MODNAME);
+ ret = -ENOENT;
+ goto done_cc_nodes;
+ }
+
+ cc_kobj = kobject_create_and_add("core_control", module_kobj);
+ if (!cc_kobj) {
+ pr_err("%s: cannot create core control kobj\n",
+ KBUILD_MODNAME);
+ ret = -ENOMEM;
+ goto done_cc_nodes;
+ }
+
+ ret = sysfs_create_group(cc_kobj, &cc_attr_group);
+ if (ret) {
+ pr_err("%s: cannot create group\n", KBUILD_MODNAME);
+ goto done_cc_nodes;
+ }
+
+ return 0;
+
+done_cc_nodes:
+ if (cc_kobj)
+ kobject_del(cc_kobj);
+ return ret;
+}
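
The resulting sysfs layout (illustrative; the module directory name comes from KBUILD_MODNAME):

	/*
	 * /sys/module/msm_thermal/core_control/enabled
	 * /sys/module/msm_thermal/core_control/cpus_offlined
	 */
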
+
int __devinit msm_thermal_init(struct msm_thermal_data *pdata)
{
int ret = 0;
BUG_ON(!pdata);
- BUG_ON(pdata->sensor_id >= TSENS_MAX_SENSORS);
+ tsens_get_max_sensor_num(&max_tsens_num);
+ BUG_ON(msm_thermal_info.sensor_id >= max_tsens_num);
memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
enabled = 1;
+ core_control_enabled = 1;
INIT_DELAYED_WORK(&check_temp_work, check_temp);
schedule_delayed_work(&check_temp_work, 0);
+ register_cpu_notifier(&msm_thermal_cpu_notifier);
+
+ return ret;
+}
+
+static int vdd_restriction_reg_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < rails_cnt; i++) {
+ if (rails[i].freq_req == 1) {
+ usefreq |= BIT(i);
+ check_freq_table();
+ /* Restrict frequency by default until we have made
+ * our first temp reading */
+ if (freq_table_get)
+ ret = vdd_restriction_apply_freq(&rails[i], 0);
+ else
+ pr_info("%s:Defer vdd rstr freq init\n",
+ __func__);
+ } else {
+ rails[i].reg = devm_regulator_get(&pdev->dev,
+ rails[i].name);
+ if (IS_ERR_OR_NULL(rails[i].reg)) {
+ ret = PTR_ERR(rails[i].reg);
+ if (ret != -EPROBE_DEFER) {
+ pr_err( \
+ "%s, could not get regulator: %s\n",
+ rails[i].name, __func__);
+ rails[i].reg = NULL;
+ rails[i].curr_level = -2;
+ return ret;
+ }
+ return ret;
+ }
+ /* Restrict voltage by default until we have made
+ * our first temp reading */
+ ret = vdd_restriction_apply_voltage(&rails[i], 0);
+ }
+ }
+
+ return ret;
+}
+
+static int psm_reg_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+
+ for (i = 0; i < psm_rails_cnt; i++) {
+ psm_rails[i].reg = rpm_regulator_get(&pdev->dev,
+ psm_rails[i].name);
+ if (IS_ERR_OR_NULL(psm_rails[i].reg)) {
+ ret = PTR_ERR(psm_rails[i].reg);
+ if (ret != -EPROBE_DEFER) {
+ pr_err("%s, could not get rpm regulator: %s\n",
+ psm_rails[i].name, __func__);
+ psm_rails[i].reg = NULL;
+ goto psm_reg_exit;
+ }
+ return ret;
+ }
+ /* Apps default vote for PWM mode */
+ psm_rails[i].init = PMIC_PWM_MODE;
+ ret = rpm_regulator_set_mode(psm_rails[i].reg,
+ psm_rails[i].init);
+ if (ret) {
+ pr_err("%s: Cannot set PMIC PWM mode\n", __func__);
+ return ret;
+ } else
+ psm_rails[i].mode = PMIC_PWM_MODE;
+ }
+
+ return ret;
+
+psm_reg_exit:
+ if (ret) {
+ for (j = 0; j < i; j++) {
+ if (psm_rails[j].reg != NULL)
+ rpm_regulator_put(psm_rails[j].reg);
+ }
+ }
+
+ return ret;
+}
+
+static int msm_thermal_add_vdd_rstr_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *vdd_rstr_kobj = NULL;
+ struct kobject *vdd_rstr_reg_kobj[MAX_RAILS] = {0};
+ int rc = 0;
+ int i = 0;
+
+ if (!vdd_rstr_probed) {
+ vdd_rstr_nodes_called = true;
+ return rc;
+ }
+
+ if (vdd_rstr_probed && rails_cnt == 0)
+ return rc;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("%s: cannot find kobject for module %s\n",
+ __func__, KBUILD_MODNAME);
+ rc = -ENOENT;
+ goto thermal_sysfs_add_exit;
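
A hypothetical devicetree fragment matching the parsing above (values are illustrative only):

	/*
	 *	qcom,sensor-id = <0 1 2 7 8>;
	 *
	 * With five sensors, software ids 0..4 would then drive hardware
	 * sensors 0, 1, 2, 7 and 8; if the property is absent, both ids
	 * default to the loop index i.
	 */
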
+ }
+
+ vdd_rstr_kobj = kobject_create_and_add("vdd_restriction", module_kobj);
+ if (!vdd_rstr_kobj) {
+ pr_err("%s: cannot create vdd_restriction kobject\n", __func__);
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ rc = sysfs_create_group(vdd_rstr_kobj, &vdd_rstr_en_attribs_gp);
+ if (rc) {
+ pr_err("%s: cannot create kobject attribute group\n", __func__);
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ for (i = 0; i < rails_cnt; i++) {
+ vdd_rstr_reg_kobj[i] = kobject_create_and_add(rails[i].name,
+ vdd_rstr_kobj);
+ if (!vdd_rstr_reg_kobj[i]) {
+ pr_err("%s: cannot create for kobject for %s\n",
+ __func__, rails[i].name);
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ rails[i].attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 3,
+ GFP_KERNEL);
+ if (!rails[i].attr_gp.attrs) {
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ VDD_RES_RW_ATTRIB(rails[i], rails[i].level_attr, 0, level);
+ VDD_RES_RO_ATTRIB(rails[i], rails[i].value_attr, 1, value);
+ rails[i].attr_gp.attrs[2] = NULL;
+
+ rc = sysfs_create_group(vdd_rstr_reg_kobj[i],
+ &rails[i].attr_gp);
+ if (rc) {
+ pr_err("%s: cannot create attribute group for %s\n",
+ __func__, rails[i].name);
+ goto thermal_sysfs_add_exit;
+ }
+ }
+
+ return rc;
+
+thermal_sysfs_add_exit:
+ if (rc) {
+ for (i = 0; i < rails_cnt; i++) {
+ kobject_del(vdd_rstr_reg_kobj[i]);
+ kfree(rails[i].attr_gp.attrs);
+ }
+ if (vdd_rstr_kobj)
+ kobject_del(vdd_rstr_kobj);
+ }
+ return rc;
+}
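
The resulting sysfs layout for voltage restriction (illustrative):

	/*
	 * /sys/module/msm_thermal/vdd_restriction/enabled
	 * /sys/module/msm_thermal/vdd_restriction/<rail>/level   (read/write)
	 * /sys/module/msm_thermal/vdd_restriction/<rail>/value   (read-only)
	 */
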
+
+static int msm_thermal_add_psm_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *psm_kobj = NULL;
+ struct kobject *psm_reg_kobj[MAX_RAILS] = {0};
+ int rc = 0;
+ int i = 0;
+
+ if (!psm_probed) {
+ psm_nodes_called = true;
+ return rc;
+ }
+
+ if (psm_probed && psm_rails_cnt == 0)
+ return rc;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("%s: cannot find kobject for module %s\n",
+ __func__, KBUILD_MODNAME);
+ rc = -ENOENT;
+ goto psm_node_exit;
+ }
+
+ psm_kobj = kobject_create_and_add("pmic_sw_mode", module_kobj);
+ if (!psm_kobj) {
+ pr_err("%s: cannot create psm kobject\n", KBUILD_MODNAME);
+ rc = -ENOMEM;
+ goto psm_node_exit;
+ }
+
+ for (i = 0; i < psm_rails_cnt; i++) {
+ psm_reg_kobj[i] = kobject_create_and_add(psm_rails[i].name,
+ psm_kobj);
+ if (!psm_reg_kobj[i]) {
+ pr_err("%s: cannot create for kobject for %s\n",
+ KBUILD_MODNAME, psm_rails[i].name);
+ rc = -ENOMEM;
+ goto psm_node_exit;
+ }
+ psm_rails[i].attr_gp.attrs = kzalloc( \
+ sizeof(struct attribute *) * 2, GFP_KERNEL);
+ if (!psm_rails[i].attr_gp.attrs) {
+ rc = -ENOMEM;
+ goto psm_node_exit;
+ }
+
+ PSM_RW_ATTRIB(psm_rails[i], psm_rails[i].mode_attr, 0, mode);
+ psm_rails[i].attr_gp.attrs[1] = NULL;
+
+ rc = sysfs_create_group(psm_reg_kobj[i], &psm_rails[i].attr_gp);
+ if (rc) {
+ pr_err("%s: cannot create attribute group for %s\n",
+ KBUILD_MODNAME, psm_rails[i].name);
+ goto psm_node_exit;
+ }
+ }
+
+ return rc;
+
+psm_node_exit:
+ if (rc) {
+ for (i = 0; i < psm_rails_cnt; i++) {
+ kobject_del(psm_reg_kobj[i]);
+ kfree(psm_rails[i].attr_gp.attrs);
+ }
+ if (psm_kobj)
+ kobject_del(psm_kobj);
+ }
+ return rc;
+}
+
+static int probe_vdd_rstr(struct device_node *node,
+ struct msm_thermal_data *data, struct platform_device *pdev)
+{
+ int ret = 0;
+ int i = 0;
+ int arr_size;
+ char *key = NULL;
+ struct device_node *child_node = NULL;
+
+ key = "qcom,vdd-restriction-temp";
+ ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_degC);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,vdd-restriction-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_hyst_degC);
+ if (ret)
+ goto read_node_fail;
+
+ for_each_child_of_node(node, child_node) {
+ rails_cnt++;
+ }
+
+ if (rails_cnt == 0)
+ goto read_node_fail;
+ if (rails_cnt >= MAX_RAILS) {
+ pr_err("%s: Too many rails.\n", __func__);
+ return -EFAULT;
+ }
+
+ rails = kzalloc(sizeof(struct rail) * rails_cnt,
+ GFP_KERNEL);
+ if (!rails) {
+ pr_err("%s: Fail to allocate memory for rails.\n", __func__);
+ return -ENOMEM;
+ }
+
+ i = 0;
+ for_each_child_of_node(node, child_node) {
+ key = "qcom,vdd-rstr-reg";
+ ret = of_property_read_string(child_node, key, &rails[i].name);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,levels";
+ if (!of_get_property(child_node, key, &arr_size))
+ goto read_node_fail;
+ rails[i].num_levels = arr_size/sizeof(__be32);
+ if (rails[i].num_levels >
+ sizeof(rails[i].levels)/sizeof(uint32_t)) {
+ pr_err("%s: Array size too large\n", __func__);
+ return -EFAULT;
+ }
+ ret = of_property_read_u32_array(child_node, key,
+ rails[i].levels, rails[i].num_levels);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,min-level";
+ ret = of_property_read_u32(child_node, key,
+ &rails[i].min_level);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,freq-req";
+ rails[i].freq_req = of_property_read_bool(child_node, key);
+
+ if (ret)
+ goto read_node_fail;
+ rails[i].curr_level = 0;
+ rails[i].reg = NULL;
+ i++;
+ }
+
+ if (rails_cnt) {
+ ret = vdd_restriction_reg_init(pdev);
+ if (ret) {
+ pr_info("%s:Failed to get regulators. KTM continues.\n",
+ __func__);
+ goto read_node_fail;
+ }
+ vdd_rstr_enabled = true;
+ }
+read_node_fail:
+ vdd_rstr_probed = true;
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. KTM continues\n",
+ __func__, node->full_name, key);
+ kfree(rails);
+ rails_cnt = 0;
+ }
+ if (ret == -EPROBE_DEFER)
+ vdd_rstr_probed = false;
+ return ret;
+}
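
A hypothetical devicetree fragment for the keys parsed above (node name and values are illustrative only):

	/*
	 *	qcom,vdd-restriction-temp = <5>;
	 *	qcom,vdd-restriction-temp-hysteresis = <10>;
	 *	vdd-dig-rstr {
	 *		qcom,vdd-rstr-reg = "vdd-dig";
	 *		qcom,levels = <1150000 1150000 1150000>;
	 *		qcom,min-level = <945000>;
	 *	};
	 */
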
+
+static int probe_psm(struct device_node *node, struct msm_thermal_data *data,
+ struct platform_device *pdev)
+{
+ int ret = 0;
+ int j = 0;
+ char *key = NULL;
+
+ key = "qcom,pmic-sw-mode-temp";
+ ret = of_property_read_u32(node, key, &data->psm_temp_degC);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,pmic-sw-mode-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data->psm_temp_hyst_degC);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,pmic-sw-mode-regs";
+ psm_rails_cnt = of_property_count_strings(node, key);
+ psm_rails = kzalloc(sizeof(struct psm_rail) * psm_rails_cnt,
+ GFP_KERNEL);
+ if (!psm_rails) {
+ pr_err("%s: Fail to allocate memory for psm rails\n", __func__);
+ psm_rails_cnt = 0;
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < psm_rails_cnt; j++) {
+ ret = of_property_read_string_index(node, key, j,
+ &psm_rails[j].name);
+ if (ret)
+ goto read_node_fail;
+ }
+
+ if (psm_rails_cnt) {
+ ret = psm_reg_init(pdev);
+ if (ret) {
+ pr_info("%s:Failed to get regulators. KTM continues.\n",
+ __func__);
+ goto read_node_fail;
+ }
+ psm_enabled = true;
+ }
+
+read_node_fail:
+ psm_probed = true;
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. KTM continues\n",
+ __func__, node->full_name, key);
+ kfree(psm_rails);
+ psm_rails_cnt = 0;
+ }
+ if (ret == -EPROBE_DEFER)
+ psm_probed = false;
return ret;
}
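
And likewise for the PMIC switch-mode keys parsed above (values illustrative only):

	/*
	 *	qcom,pmic-sw-mode-temp = <85>;
	 *	qcom,pmic-sw-mode-temp-hysteresis = <80>;
	 *	qcom,pmic-sw-mode-regs = "vdd-dig";
	 */
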
@@ -198,14 +1354,15 @@
int ret = 0;
char *key = NULL;
struct device_node *node = pdev->dev.of_node;
+
struct msm_thermal_data data;
memset(&data, 0, sizeof(struct msm_thermal_data));
+
key = "qcom,sensor-id";
ret = of_property_read_u32(node, key, &data.sensor_id);
if (ret)
goto fail;
- WARN_ON(data.sensor_id >= TSENS_MAX_SENSORS);
key = "qcom,poll-ms";
ret = of_property_read_u32(node, key, &data.poll_ms);
@@ -224,17 +1381,50 @@
key = "qcom,freq-step";
ret = of_property_read_u32(node, key, &data.freq_step);
+ if (ret)
+ goto fail;
+ key = "qcom,core-limit-temp";
+ ret = of_property_read_u32(node, key, &data.core_limit_temp_degC);
+
+ key = "qcom,core-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data.core_temp_hysteresis_degC);
+
+ key = "qcom,core-control-mask";
+ ret = of_property_read_u32(node, key, &data.core_control_mask);
+
+ /* Probe optional properties below. Call probe_psm before
+ * probe_vdd_rstr because rpm_regulator_get has to be called
+ * before devm_regulator_get*/
+ ret = probe_psm(node, &data, pdev);
+ if (ret == -EPROBE_DEFER)
+ goto fail;
+ ret = probe_vdd_rstr(node, &data, pdev);
+ if (ret == -EPROBE_DEFER)
+ goto fail;
+
+ /* In case sysfs add nodes get called before probe function.
+ * Need to make sure sysfs node is created again */
+ if (psm_nodes_called) {
+ msm_thermal_add_psm_nodes();
+ psm_nodes_called = false;
+ }
+ if (vdd_rstr_nodes_called) {
+ msm_thermal_add_vdd_rstr_nodes();
+ vdd_rstr_nodes_called = false;
+ }
+ ret = msm_thermal_init(&data);
+
+ return ret;
fail:
if (ret)
pr_err("%s: Failed reading node=%s, key=%s\n",
- __func__, node->full_name, key);
- else
- ret = msm_thermal_init(&data);
+ __func__, node->full_name, key);
return ret;
}
+
static struct of_device_id msm_thermal_match_table[] = {
{.compatible = "qcom,msm-thermal"},
{},
@@ -253,3 +1443,14 @@
{
return platform_driver_register(&msm_thermal_device_driver);
}
+
+int __init msm_thermal_late_init(void)
+{
+ msm_thermal_add_cc_nodes();
+ msm_thermal_add_psm_nodes();
+ msm_thermal_add_vdd_rstr_nodes();
+
+ return 0;
+}
+late_initcall(msm_thermal_late_init);
+
diff --git a/drivers/thermal/pm8xxx-tm.c b/drivers/thermal/pm8xxx-tm.c
index 4568933..99a9454 100644
--- a/drivers/thermal/pm8xxx-tm.c
+++ b/drivers/thermal/pm8xxx-tm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -682,6 +682,13 @@
return 0;
}
+static void pm8xxx_tm_shutdown(struct platform_device *pdev)
+{
+ struct pm8xxx_tm_chip *chip = platform_get_drvdata(pdev);
+
+ pm8xxx_tm_write_pwm(chip, TEMP_ALARM_PWM_EN_NEVER);
+}
+
#ifdef CONFIG_PM
static int pm8xxx_tm_suspend(struct device *dev)
{
@@ -719,6 +726,7 @@
static struct platform_driver pm8xxx_tm_driver = {
.probe = pm8xxx_tm_probe,
.remove = __devexit_p(pm8xxx_tm_remove),
+ .shutdown = pm8xxx_tm_shutdown,
.driver = {
.name = PM8XXX_TM_DEV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index e7d2e0f..d848a18 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -36,6 +36,8 @@
/* QPNP VADC TM register definition */
#define QPNP_REVISION3 0x2
+#define QPNP_PERPH_SUBTYPE 0x5
+#define QPNP_PERPH_TYPE2 0x2
#define QPNP_REVISION_EIGHT_CHANNEL_SUPPORT 2
#define QPNP_STATUS1 0x8
#define QPNP_STATUS1_OP_MODE 4
@@ -366,7 +368,7 @@
static int32_t qpnp_adc_tm_check_revision(uint32_t btm_chan_num)
{
- u8 rev;
+ u8 rev, perph_subtype;
int rc = 0;
rc = qpnp_adc_tm_read_reg(QPNP_REVISION3, &rev);
@@ -375,10 +377,18 @@
return rc;
}
- if ((rev < QPNP_REVISION_EIGHT_CHANNEL_SUPPORT) &&
- (btm_chan_num > QPNP_ADC_TM_M4_ADC_CH_SEL_CTL)) {
- pr_debug("Version does not support more than 5 channels\n");
- return -EINVAL;
+ rc = qpnp_adc_tm_read_reg(QPNP_PERPH_SUBTYPE, &perph_subtype);
+ if (rc) {
+ pr_err("adc-tm perph_subtype read failed\n");
+ return rc;
+ }
+
+ if (perph_subtype == QPNP_PERPH_TYPE2) {
+ if ((rev < QPNP_REVISION_EIGHT_CHANNEL_SUPPORT) &&
+ (btm_chan_num > QPNP_ADC_TM_M4_ADC_CH_SEL_CTL)) {
+ pr_debug("Version does not support more than 5 channels\n");
+ return -EINVAL;
+ }
}
return rc;
@@ -1584,6 +1594,8 @@
adc_tm->sensor[sen_idx].sensor_num = sen_idx;
pr_debug("btm_chan:%x, vadc_chan:%x\n", btm_channel_num,
adc_tm->adc->adc_channels[sen_idx].channel_num);
+ thermal_node = of_property_read_bool(child,
+ "qcom,thermal-node");
if (thermal_node) {
/* Register with the thermal zone */
pr_debug("thermal node%x\n", btm_channel_num);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 6619e96..fc8f4b3 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -475,6 +475,7 @@
void *mem;
u8 mode;
+ bool host_only_mode;
mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
if (!mem) {
@@ -548,6 +549,7 @@
dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
+ host_only_mode = of_property_read_bool(node, "host-only-mode");
pm_runtime_no_callbacks(dev);
pm_runtime_set_active(dev);
@@ -561,6 +563,12 @@
mode = DWC3_MODE(dwc->hwparams.hwparams0);
+ /* Override mode if user selects host-only config with DRD core */
+ if (host_only_mode && (mode == DWC3_MODE_DRD)) {
+ dev_dbg(dev, "host only mode selected\n");
+ mode = DWC3_MODE_HOST;
+ }
+
switch (mode) {
case DWC3_MODE_DEVICE:
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 7a6765b..5bbf106 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -177,6 +177,9 @@
struct regulator *hsusb_vddcx;
struct regulator *ssusb_1p8;
struct regulator *ssusb_vddcx;
+
+ /* VBUS regulator if no OTG and running in host only mode */
+ struct regulator *vbus_otg;
struct dwc3_ext_xceiv ext_xceiv;
bool resume_pending;
atomic_t pm_suspended;
@@ -1606,7 +1609,8 @@
0x37, 0x0);
}
- dcp = mdwc->charger.chg_type == DWC3_DCP_CHARGER;
+ dcp = ((mdwc->charger.chg_type == DWC3_DCP_CHARGER) ||
+ (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER));
host_bus_suspend = mdwc->host_mode == 1;
/* Sequence to put SSPHY in low power state:
@@ -1684,15 +1688,19 @@
dwc3_ssusb_ldo_enable(0);
dwc3_ssusb_config_vddcx(0);
- if (!host_bus_suspend)
+ if (!host_bus_suspend && !dcp)
dwc3_hsusb_config_vddcx(0);
wake_unlock(&mdwc->wlock);
atomic_set(&mdwc->in_lpm, 1);
dev_info(mdwc->dev, "DWC3 in low power mode\n");
- if (mdwc->hs_phy_irq)
+ if (mdwc->hs_phy_irq) {
enable_irq(mdwc->hs_phy_irq);
+ /* with DCP we don't require wakeup using HS_PHY_IRQ */
+ if (dcp)
+ disable_irq_wake(mdwc->hs_phy_irq);
+ }
return 0;
}
@@ -1719,7 +1727,8 @@
dev_err(mdwc->dev, "Failed to vote for bus scaling\n");
}
- dcp = mdwc->charger.chg_type == DWC3_DCP_CHARGER;
+ dcp = ((mdwc->charger.chg_type == DWC3_DCP_CHARGER) ||
+ (mdwc->charger.chg_type == DWC3_PROPRIETARY_CHARGER));
host_bus_suspend = mdwc->host_mode == 1;
if (!host_bus_suspend) {
@@ -1728,6 +1737,8 @@
if (ret)
dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
__func__, ret);
+
+ clk_prepare_enable(mdwc->utmi_clk);
}
if (mdwc->otg_xceiv && mdwc->ext_xceiv.otg_capability && !dcp &&
@@ -1737,10 +1748,8 @@
dwc3_ssusb_ldo_enable(1);
dwc3_ssusb_config_vddcx(1);
- if (!host_bus_suspend) {
+ if (!host_bus_suspend && !dcp)
dwc3_hsusb_config_vddcx(1);
- clk_prepare_enable(mdwc->utmi_clk);
- }
clk_prepare_enable(mdwc->ref_clk);
usleep_range(1000, 1200);
@@ -1809,6 +1818,9 @@
enable_irq(mdwc->hs_phy_irq);
mdwc->lpm_irq_seen = false;
}
+ /* it must be a DCP disconnect; re-enable HS_PHY wakeup IRQ */
+ if (mdwc->hs_phy_irq && dcp)
+ enable_irq_wake(mdwc->hs_phy_irq);
dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
@@ -1838,7 +1850,7 @@
if (mdwc->otg_xceiv)
mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
DWC3_EVENT_PHY_RESUME);
- pm_runtime_put_sync(mdwc->dev);
+ pm_runtime_put_noidle(mdwc->dev);
if (mdwc->otg_xceiv && (mdwc->ext_xceiv.otg_capability))
mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
DWC3_EVENT_XCEIV_STATE);
@@ -2529,24 +2541,28 @@
goto disable_hs_ldo;
}
- msm->usb_psy.name = "usb";
- msm->usb_psy.type = POWER_SUPPLY_TYPE_USB;
- msm->usb_psy.supplied_to = dwc3_msm_pm_power_supplied_to;
- msm->usb_psy.num_supplicants = ARRAY_SIZE(
- dwc3_msm_pm_power_supplied_to);
- msm->usb_psy.properties = dwc3_msm_pm_power_props_usb;
- msm->usb_psy.num_properties = ARRAY_SIZE(dwc3_msm_pm_power_props_usb);
- msm->usb_psy.get_property = dwc3_msm_power_get_property_usb;
- msm->usb_psy.set_property = dwc3_msm_power_set_property_usb;
- msm->usb_psy.external_power_changed =
- dwc3_msm_external_power_changed;
+ /* usb_psy required only for vbus_notifications or charging support */
+ if (msm->ext_xceiv.otg_capability || !msm->charger.charging_disabled) {
+ msm->usb_psy.name = "usb";
+ msm->usb_psy.type = POWER_SUPPLY_TYPE_USB;
+ msm->usb_psy.supplied_to = dwc3_msm_pm_power_supplied_to;
+ msm->usb_psy.num_supplicants = ARRAY_SIZE(
+ dwc3_msm_pm_power_supplied_to);
+ msm->usb_psy.properties = dwc3_msm_pm_power_props_usb;
+ msm->usb_psy.num_properties =
+ ARRAY_SIZE(dwc3_msm_pm_power_props_usb);
+ msm->usb_psy.get_property = dwc3_msm_power_get_property_usb;
+ msm->usb_psy.set_property = dwc3_msm_power_set_property_usb;
+ msm->usb_psy.external_power_changed =
+ dwc3_msm_external_power_changed;
- ret = power_supply_register(&pdev->dev, &msm->usb_psy);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "%s:power_supply_register usb failed\n",
- __func__);
- goto disable_hs_ldo;
+ ret = power_supply_register(&pdev->dev, &msm->usb_psy);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "%s:power_supply_register usb failed\n",
+ __func__);
+ goto disable_hs_ldo;
+ }
}
if (node) {
@@ -2571,7 +2587,8 @@
}
msm->otg_xceiv = usb_get_transceiver();
- if (msm->otg_xceiv) {
+ /* Register with OTG if present, ignore USB2 OTG using other PHY */
+ if (msm->otg_xceiv && !(msm->otg_xceiv->flags & ENABLE_SECONDARY_PHY)) {
msm->charger.start_detection = dwc3_start_chg_det;
ret = dwc3_set_charger(msm->otg_xceiv->otg, &msm->charger);
if (ret || !msm->charger.notify_detection_complete) {
@@ -2589,7 +2606,20 @@
goto put_xcvr;
}
} else {
- dev_err(&pdev->dev, "%s: No OTG transceiver found\n", __func__);
+ dev_dbg(&pdev->dev, "No OTG, DWC3 running in host only mode\n");
+ msm->host_mode = 1;
+ msm->vbus_otg = devm_regulator_get(&pdev->dev, "vbus_dwc3");
+ if (IS_ERR(msm->vbus_otg)) {
+ dev_dbg(&pdev->dev, "Failed to get vbus regulator\n");
+ msm->vbus_otg = 0;
+ } else {
+ ret = regulator_enable(msm->vbus_otg);
+ if (ret) {
+ msm->vbus_otg = 0;
+ dev_err(&pdev->dev, "Failed to enable vbus_otg\n");
+ }
+ }
+ msm->otg_xceiv = NULL;
}
wake_lock_init(&msm->wlock, WAKE_LOCK_SUSPEND, "msm_dwc3");
@@ -2601,7 +2631,8 @@
put_xcvr:
usb_put_transceiver(msm->otg_xceiv);
put_psupply:
- power_supply_unregister(&msm->usb_psy);
+ if (msm->usb_psy.dev)
+ power_supply_unregister(&msm->usb_psy);
disable_hs_ldo:
dwc3_hsusb_ldo_enable(0);
free_hs_ldo_init:
@@ -2650,6 +2681,10 @@
dwc3_start_chg_det(&msm->charger, false);
usb_put_transceiver(msm->otg_xceiv);
}
+ if (msm->usb_psy.dev)
+ power_supply_unregister(&msm->usb_psy);
+ if (msm->vbus_otg)
+ regulator_disable(msm->vbus_otg);
pm_runtime_disable(msm->dev);
wake_lock_destroy(&msm->wlock);
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 1d67cee..a3b2617 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -198,8 +198,6 @@
} else {
dev_dbg(otg->phy->dev, "%s: turn off host\n", __func__);
- platform_device_del(dwc->xhci);
-
ret = regulator_disable(dotg->vbus_otg);
if (ret) {
dev_err(otg->phy->dev, "unable to disable vbus_otg\n");
@@ -207,6 +205,7 @@
}
dwc3_otg_notify_host_mode(otg, on);
+ platform_device_del(dwc->xhci);
/*
* Perform USB hardware RESET (both core reset and DBM reset)
* when moving from host to peripheral. This is required for
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 66854b2..8d2ec97 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -393,7 +393,6 @@
u32 recip;
u32 wValue;
u32 wIndex;
- u32 reg;
int ret;
wValue = le16_to_cpu(ctrl->wValue);
@@ -414,13 +413,6 @@
return -EINVAL;
if (dwc->speed != DWC3_DSTS_SUPERSPEED)
return -EINVAL;
-
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (set)
- reg |= DWC3_DCTL_INITU1ENA;
- else
- reg &= ~DWC3_DCTL_INITU1ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
break;
case USB_DEVICE_U2_ENABLE:
@@ -428,13 +420,6 @@
return -EINVAL;
if (dwc->speed != DWC3_DSTS_SUPERSPEED)
return -EINVAL;
-
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (set)
- reg |= DWC3_DCTL_INITU2ENA;
- else
- reg &= ~DWC3_DCTL_INITU2ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
break;
case USB_DEVICE_LTM_ENABLE:
@@ -539,7 +524,6 @@
{
u32 cfg;
int ret;
- u32 reg;
dwc->start_config_issued = false;
cfg = le16_to_cpu(ctrl->wValue);
@@ -554,14 +538,6 @@
/* if the cfg matches and the cfg is non zero */
if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
dwc->dev_state = DWC3_CONFIGURED_STATE;
- /*
- * Enable transition to U1/U2 state when
- * nothing is pending from application.
- */
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-
dwc->resize_fifos = true;
dev_dbg(dwc->dev, "resize fifos flag SET\n");
}
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index d6d8a76..420d030 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -79,6 +79,7 @@
/* Add XHCI device if !OTG, otherwise OTG takes care of this */
if (!dwc->dotg) {
+ xhci->dev.parent = dwc->dev;
ret = platform_device_add(xhci);
if (ret) {
dev_err(dwc->dev, "failed to register xHCI device\n");
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index d4bdf99..705600d 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -53,6 +53,7 @@
#include "f_rmnet_sdio.c"
#include "f_rmnet_smd_sdio.c"
#include "f_rmnet.c"
+#include "f_gps.c"
#ifdef CONFIG_SND_PCM
#include "f_audio_source.c"
#endif
@@ -731,6 +732,47 @@
.attributes = rmnet_function_attributes,
};
+static void gps_function_cleanup(struct android_usb_function *f)
+{
+ gps_cleanup();
+}
+
+static int gps_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ int err;
+ static int gps_initialized;
+
+ if (!gps_initialized) {
+ gps_initialized = 1;
+ err = gps_init_port();
+ if (err) {
+ pr_err("gps: Cannot init gps port");
+ return err;
+ }
+ }
+
+ err = gps_gport_setup();
+ if (err) {
+ pr_err("gps: Cannot setup transports");
+ return err;
+ }
+ err = gps_bind_config(c);
+ if (err) {
+ pr_err("Could not bind gps config\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static struct android_usb_function gps_function = {
+ .name = "gps",
+ .cleanup = gps_function_cleanup,
+ .bind_config = gps_function_bind_config,
+};
+
+
/* ecm transport string */
static char ecm_transports[MAX_XPORT_STR_LEN];
@@ -1783,6 +1825,7 @@
&rmnet_sdio_function,
&rmnet_smd_sdio_function,
&rmnet_function,
+ &gps_function,
&diag_function,
&qdss_function,
&serial_function,
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 569f200..3cad3ce 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -98,6 +98,32 @@
}
}
+static void ci13xxx_msm_reset(void)
+{
+ struct ci13xxx *udc = _udc;
+ struct usb_phy *phy = udc->transceiver;
+ struct device *dev = udc->gadget.dev.parent;
+
+ writel_relaxed(0, USB_AHBBURST);
+ writel_relaxed(0x08, USB_AHBMODE);
+
+ if (phy && (phy->flags & ENABLE_SECONDARY_PHY)) {
+ int temp;
+
+ dev_dbg(dev, "using secondary hsphy\n");
+ temp = readl_relaxed(USB_PHY_CTRL2);
+ temp |= (1<<16);
+ writel_relaxed(temp, USB_PHY_CTRL2);
+
+ /*
+ * Add memory barrier to make sure above LINK writes are
+ * complete before moving ahead with USB peripheral mode
+ * enumeration.
+ */
+ mb();
+ }
+}
+
static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event)
{
struct device *dev = udc->gadget.dev.parent;
@@ -105,8 +131,7 @@
switch (event) {
case CI13XXX_CONTROLLER_RESET_EVENT:
dev_info(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
- writel(0, USB_AHBBURST);
- writel_relaxed(0x08, USB_AHBMODE);
+ ci13xxx_msm_reset();
break;
case CI13XXX_CONTROLLER_DISCONNECT_EVENT:
dev_info(dev, "CI13XXX_CONTROLLER_DISCONNECT_EVENT received\n");
diff --git a/drivers/usb/gadget/f_gps.c b/drivers/usb/gadget/f_gps.c
new file mode 100644
index 0000000..ef08fc5
--- /dev/null
+++ b/drivers/usb/gadget/f_gps.c
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+
+#include <mach/usb_gadget_xport.h>
+
+#include "u_rmnet.h"
+#include "gadget_chips.h"
+
+#define GPS_NOTIFY_INTERVAL 5
+#define GPS_MAX_NOTIFY_SIZE 64
+
+
+#define ACM_CTRL_DTR (1 << 0)
+
+/* TODO: use separate structures for data and
+ * control paths
+ */
+struct f_gps {
+ struct grmnet port;
+ u8 port_num;
+ int ifc_id;
+ atomic_t online;
+ atomic_t ctrl_online;
+ struct usb_composite_dev *cdev;
+
+ spinlock_t lock;
+
+ /* usb eps */
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+
+ /* control info */
+ struct list_head cpkt_resp_q;
+ atomic_t notify_count;
+ unsigned long cpkts_len;
+};
+
+static struct gps_ports {
+ enum transport_type ctrl_xport;
+ struct f_gps *port;
+} gps_port;
+
+static struct usb_interface_descriptor gps_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
+ /* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor gps_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+ .bInterval = 1 << GPS_NOTIFY_INTERVAL,
+};
+
+static struct usb_descriptor_header *gps_fs_function[] = {
+ (struct usb_descriptor_header *) &gps_interface_desc,
+ (struct usb_descriptor_header *) &gps_fs_notify_desc,
+ NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor gps_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+ .bInterval = GPS_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_descriptor_header *gps_hs_function[] = {
+ (struct usb_descriptor_header *) &gps_interface_desc,
+ (struct usb_descriptor_header *) &gps_hs_notify_desc,
+ NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor gps_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+ .bInterval = GPS_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor gps_ss_notify_comp_desc = {
+ .bLength = sizeof gps_ss_notify_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+};
+
+static struct usb_descriptor_header *gps_ss_function[] = {
+ (struct usb_descriptor_header *) &gps_interface_desc,
+ (struct usb_descriptor_header *) &gps_ss_notify_desc,
+ (struct usb_descriptor_header *) &gps_ss_notify_comp_desc,
+ NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string gps_string_defs[] = {
+ [0].s = "GPS",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings gps_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = gps_string_defs,
+};
+
+static struct usb_gadget_strings *gps_strings[] = {
+ &gps_string_table,
+ NULL,
+};
+
+static void gps_ctrl_response_available(struct f_gps *dev);
+
+/* ------- misc functions --------------------*/
+
+static inline struct f_gps *func_to_gps(struct usb_function *f)
+{
+ return container_of(f, struct f_gps, port.func);
+}
+
+static inline struct f_gps *port_to_gps(struct grmnet *r)
+{
+ return container_of(r, struct f_gps, port);
+}
+
+static struct usb_request *
+gps_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->buf = kmalloc(len, flags);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ req->length = len;
+
+ return req;
+}
+
+void gps_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+static struct rmnet_ctrl_pkt *gps_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+ struct rmnet_ctrl_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(len, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->len = len;
+
+ return pkt;
+}
+
+static void gps_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+
+/* -------------------------------------------*/
+
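+/*
+ * Reserve one SMD control port for the GPS client and offset the GPS
+ * port number by the base index returned by the allocator.
+ */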
+static int gps_gport_setup(void)
+{
+ u8 base;
+ int res;
+
+ res = gsmd_ctrl_setup(GPS_CTRL_CLIENT, 1, &base);
+ gps_port.port->port_num += base;
+ return res;
+}
+
+static int gport_ctrl_connect(struct f_gps *dev)
+{
+ return gsmd_ctrl_connect(&dev->port, dev->port_num);
+}
+
+static int gport_gps_disconnect(struct f_gps *dev)
+{
+ gsmd_ctrl_disconnect(&dev->port, dev->port_num);
+ return 0;
+}
+
+static void gps_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_gps *dev = func_to_gps(f);
+
+ pr_debug("%s: portno:%d\n", __func__, dev->port_num);
+
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->descriptors);
+
+ gps_free_req(dev->notify, dev->notify_req);
+
+ kfree(f->name);
+}
+
+static void gps_purge_responses(struct f_gps *dev)
+{
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s: port#%d\n", __func__, dev->port_num);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (!list_empty(&dev->cpkt_resp_q)) {
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+
+ list_del(&cpkt->list);
+ gps_free_ctrl_pkt(cpkt);
+ }
+ atomic_set(&dev->notify_count, 0);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void gps_suspend(struct usb_function *f)
+{
+ struct f_gps *dev = func_to_gps(f);
+
+ gps_purge_responses(dev);
+}
+
+static void gps_disable(struct usb_function *f)
+{
+ struct f_gps *dev = func_to_gps(f);
+
+ usb_ep_disable(dev->notify);
+ dev->notify->driver_data = NULL;
+
+ atomic_set(&dev->online, 0);
+
+ gps_purge_responses(dev);
+
+ gport_gps_disconnect(dev);
+}
+
+static int
+gps_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_gps *dev = func_to_gps(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+ int ret;
+ struct list_head *cpkt;
+
+ pr_debug("%s:dev:%p\n", __func__, dev);
+
+ if (dev->notify->driver_data)
+ usb_ep_disable(dev->notify);
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
+ if (ret) {
+ dev->notify->desc = NULL;
+ ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
+ dev->notify->name, ret);
+ return ret;
+ }
+ ret = usb_ep_enable(dev->notify);
+
+ if (ret) {
+ pr_err("%s: usb ep#%s enable failed, err#%d\n",
+ __func__, dev->notify->name, ret);
+ return ret;
+ }
+ dev->notify->driver_data = dev;
+
+ ret = gport_ctrl_connect(dev);
+
+ atomic_set(&dev->online, 1);
+
+ /* In case notifications were aborted, but there are pending control
+ packets in the response queue, re-add the notifications */
+ list_for_each(cpkt, &dev->cpkt_resp_q)
+ gps_ctrl_response_available(dev);
+
+ return ret;
+}
+
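+/*
+ * Queue a CDC RESPONSE_AVAILABLE notification on the interrupt endpoint.
+ * Only the first pending response triggers a transfer; notify_count keeps
+ * track of responses the host has not read yet.
+ */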
+static void gps_ctrl_response_available(struct f_gps *dev)
+{
+ struct usb_request *req = dev->notify_req;
+ struct usb_cdc_notification *event;
+ unsigned long flags;
+ int ret;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s:dev:%p\n", __func__, dev);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (!atomic_read(&dev->online) || !req || !req->buf) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+
+ if (atomic_inc_return(&dev->notify_count) != 1) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+
+ event = req->buf;
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(dev->ifc_id);
+ event->wLength = cpu_to_le16(0);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+ if (ret) {
+ spin_lock_irqsave(&dev->lock, flags);
+ if (!list_empty(&dev->cpkt_resp_q)) {
+ atomic_dec(&dev->notify_count);
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ gps_free_ctrl_pkt(cpkt);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ pr_debug("ep enqueue error %d\n", ret);
+ }
+}
+
+static void gps_connect(struct grmnet *gr)
+{
+ struct f_gps *dev;
+
+ if (!gr) {
+ pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
+ return;
+ }
+
+ dev = port_to_gps(gr);
+
+ atomic_set(&dev->ctrl_online, 1);
+}
+
+static void gps_disconnect(struct grmnet *gr)
+{
+ struct f_gps *dev;
+ struct usb_cdc_notification *event;
+ int status;
+
+ if (!gr) {
+ pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
+ return;
+ }
+
+ dev = port_to_gps(gr);
+
+ atomic_set(&dev->ctrl_online, 0);
+
+ if (!atomic_read(&dev->online)) {
+ pr_debug("%s: nothing to do\n", __func__);
+ return;
+ }
+
+ usb_ep_fifo_flush(dev->notify);
+
+ event = dev->notify_req->buf;
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(dev->ifc_id);
+ event->wLength = cpu_to_le16(0);
+
+ status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+ if (status < 0) {
+ if (!atomic_read(&dev->online))
+ return;
+ pr_err("%s: gps notify ep enqueue error %d\n",
+ __func__, status);
+ }
+
+ gps_purge_responses(dev);
+}
+
+static int
+gps_send_cpkt_response(void *gr, void *buf, size_t len)
+{
+ struct f_gps *dev;
+ struct rmnet_ctrl_pkt *cpkt;
+ unsigned long flags;
+
+ if (!gr || !buf) {
+ pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
+ __func__, gr, buf);
+ return -ENODEV;
+ }
+ cpkt = gps_alloc_ctrl_pkt(len, GFP_ATOMIC);
+ if (IS_ERR(cpkt)) {
+ pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+ return -ENOMEM;
+ }
+ memcpy(cpkt->buf, buf, len);
+ cpkt->len = len;
+
+ dev = port_to_gps(gr);
+
+ pr_debug("%s: dev:%p\n", __func__, dev);
+
+ if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
+ gps_free_ctrl_pkt(cpkt);
+ return 0;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ gps_ctrl_response_available(dev);
+
+ return 0;
+}
+
+static void
+gps_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_gps *dev = req->context;
+ struct usb_composite_dev *cdev;
+
+ if (!dev) {
+ pr_err("%s: dev is null\n", __func__);
+ return;
+ }
+
+ pr_debug("%s: dev:%p\n", __func__, dev);
+
+ cdev = dev->cdev;
+
+ if (dev->port.send_encap_cmd)
+ dev->port.send_encap_cmd(dev->port_num, req->buf, req->actual);
+}
+
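+/*
+ * Completion handler for the notify request: re-queue the notification
+ * while control responses are still pending, and clear the pending count
+ * when the connection goes away.
+ */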
+static void gps_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_gps *dev = req->context;
+ int status = req->status;
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ atomic_set(&dev->notify_count, 0);
+ break;
+ default:
+ pr_err("gps notify ep error %d\n", status);
+ /* FALLTHROUGH */
+ case 0:
+ if (!atomic_read(&dev->ctrl_online))
+ break;
+
+ if (atomic_dec_and_test(&dev->notify_count))
+ break;
+
+ status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
+ if (status) {
+ spin_lock_irqsave(&dev->lock, flags);
+ if (!list_empty(&dev->cpkt_resp_q)) {
+ atomic_dec(&dev->notify_count);
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ gps_free_ctrl_pkt(cpkt);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ pr_debug("ep enqueue error %d\n", status);
+ }
+ break;
+ }
+}
+
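+/*
+ * Handle class-specific requests on ep0: encapsulated commands from the
+ * host, encapsulated responses back to the host and control line state
+ * (DTR) changes.
+ */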
+static int
+gps_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_gps *dev = func_to_gps(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = cdev->req;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ int ret = -EOPNOTSUPP;
+
+ pr_debug("%s:dev:%p\n", __func__, dev);
+
+ if (!atomic_read(&dev->online)) {
+ pr_debug("%s: usb cable is not connected\n", __func__);
+ return -ENOTCONN;
+ }
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ ret = w_length;
+ req->complete = gps_cmd_complete;
+ req->context = dev;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ if (w_value)
+ goto invalid;
+ else {
+ unsigned len;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ spin_lock(&dev->lock);
+ if (list_empty(&dev->cpkt_resp_q)) {
+ pr_err("%s: ctrl resp queue empty", __func__);
+ spin_unlock(&dev->lock);
+ goto invalid;
+ }
+
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ spin_unlock(&dev->lock);
+
+ len = min_t(unsigned, w_length, cpkt->len);
+ memcpy(req->buf, cpkt->buf, len);
+ ret = len;
+
+ gps_free_ctrl_pkt(cpkt);
+ }
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+ if (dev->port.notify_modem)
+ dev->port.notify_modem(&dev->port,
+ dev->port_num, w_value);
+ ret = 0;
+
+ break;
+ default:
+
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (ret >= 0) {
+ VDBG(cdev, "gps req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (ret < w_length);
+ req->length = ret;
+ ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0)
+ ERROR(cdev, "gps ep0 enqueue err %d\n", ret);
+ }
+
+ return ret;
+}
+
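+/*
+ * Allocate the interface id, auto-configure the interrupt notify endpoint
+ * and build the FS/HS/SS descriptor sets for the GPS function.
+ */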
+static int gps_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_gps *dev = func_to_gps(f);
+ struct usb_ep *ep;
+ struct usb_composite_dev *cdev = c->cdev;
+ int ret = -ENODEV;
+
+ dev->ifc_id = usb_interface_id(c, f);
+ if (dev->ifc_id < 0) {
+ pr_err("%s: unable to allocate ifc id, err:%d",
+ __func__, dev->ifc_id);
+ return dev->ifc_id;
+ }
+ gps_interface_desc.bInterfaceNumber = dev->ifc_id;
+
+ dev->port.in = NULL;
+ dev->port.out = NULL;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &gps_fs_notify_desc);
+ if (!ep) {
+ pr_err("%s: usb epnotify autoconfig failed\n", __func__);
+ ret = -ENODEV;
+ goto ep_auto_notify_fail;
+ }
+ dev->notify = ep;
+ ep->driver_data = cdev;
+
+ dev->notify_req = gps_alloc_req(ep,
+ sizeof(struct usb_cdc_notification),
+ GFP_KERNEL);
+ if (IS_ERR(dev->notify_req)) {
+ pr_err("%s: unable to allocate memory for notify req\n",
+ __func__);
+ ret = -ENOMEM;
+ goto ep_notify_alloc_fail;
+ }
+
+ dev->notify_req->complete = gps_notify_complete;
+ dev->notify_req->context = dev;
+
+ ret = -ENOMEM;
+ f->descriptors = usb_copy_descriptors(gps_fs_function);
+
+ if (!f->descriptors)
+ goto fail;
+
+ if (gadget_is_dualspeed(cdev->gadget)) {
+ gps_hs_notify_desc.bEndpointAddress =
+ gps_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(gps_hs_function);
+
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(cdev->gadget)) {
+ gps_ss_notify_desc.bEndpointAddress =
+ gps_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(gps_ss_function);
+
+ if (!f->ss_descriptors)
+ goto fail;
+ }
+
+ pr_info("%s: GPS(%d) %s Speed\n",
+ __func__, dev->port_num,
+ gadget_is_dualspeed(cdev->gadget) ? "dual" : "full");
+
+ return 0;
+
+fail:
+ if (f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->descriptors)
+ usb_free_descriptors(f->descriptors);
+ if (dev->notify_req)
+ gps_free_req(dev->notify, dev->notify_req);
+ep_notify_alloc_fail:
+ dev->notify->driver_data = NULL;
+ dev->notify = NULL;
+ep_auto_notify_fail:
+ return ret;
+}
+
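+/*
+ * Allocate the interface string id, fill in the function and port
+ * callbacks, and register the GPS function with the configuration.
+ */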
+static int gps_bind_config(struct usb_configuration *c)
+{
+ int status;
+ struct f_gps *dev;
+ struct usb_function *f;
+ unsigned long flags;
+
+ pr_debug("%s: usb config:%p\n", __func__, c);
+
+ if (gps_string_defs[0].id == 0) {
+ status = usb_string_id(c->cdev);
+ if (status < 0) {
+ pr_err("%s: failed to get string id, err:%d\n",
+ __func__, status);
+ return status;
+ }
+ gps_string_defs[0].id = status;
+ }
+
+ dev = gps_port.port;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->cdev = c->cdev;
+ f = &dev->port.func;
+ f->name = kasprintf(GFP_ATOMIC, "gps");
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!f->name) {
+ pr_err("%s: cannot allocate memory for name\n", __func__);
+ return -ENOMEM;
+ }
+
+ f->strings = gps_strings;
+ f->bind = gps_bind;
+ f->unbind = gps_unbind;
+ f->disable = gps_disable;
+ f->set_alt = gps_set_alt;
+ f->setup = gps_setup;
+ f->suspend = gps_suspend;
+ dev->port.send_cpkt_response = gps_send_cpkt_response;
+ dev->port.disconnect = gps_disconnect;
+ dev->port.connect = gps_connect;
+
+ status = usb_add_function(c, f);
+ if (status) {
+ pr_err("%s: usb add function failed: %d\n",
+ __func__, status);
+ kfree(f->name);
+ return status;
+ }
+
+ pr_debug("%s: complete\n", __func__);
+
+ return status;
+}
+
+static void gps_cleanup(void)
+{
+ kfree(gps_port.port);
+}
+
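+/* Allocate the single GPS control port and use SMD as its control transport */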
+static int gps_init_port(void)
+{
+ struct f_gps *dev;
+
+ dev = kzalloc(sizeof(struct f_gps), GFP_KERNEL);
+ if (!dev) {
+ pr_err("%s: Unable to allocate gps device\n", __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&dev->lock);
+ INIT_LIST_HEAD(&dev->cpkt_resp_q);
+ dev->port_num = 0;
+
+ gps_port.port = dev;
+ gps_port.ctrl_xport = USB_GADGET_XPORT_SMD;
+
+ return 0;
+}
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index 5a3d753..22f8dc9 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -1703,6 +1703,7 @@
ntb_parameters.dwNtbInMaxSize =
cpu_to_le32(NTB_DEFAULT_IN_SIZE_IPA);
ntb_parameters.dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE_IPA);
+ ntb_parameters.wNdpInDivisor = 1;
}
INIT_LIST_HEAD(&mbim->cpkt_req_q);
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index 2dccca8..f095efb 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -301,6 +301,7 @@
int ret;
int port_idx;
int i;
+ u8 base;
pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u data hsuart ports: %u"
" smd ports: %u ctrl hsic ports: %u ctrl hsuart ports: %u"
@@ -317,9 +318,13 @@
}
if (no_ctrl_smd_ports) {
- ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
+ ret = gsmd_ctrl_setup(FRMNET_CTRL_CLIENT,
+ no_ctrl_smd_ports, &base);
if (ret)
return ret;
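+ /* shift rmnet ports to the SMD control ports allocated for this client */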
+ for (i = 0; i < nr_rmnet_ports; i++)
+ if (rmnet_ports[i].port)
+ rmnet_ports[i].port->port_num += base;
}
if (no_data_hsic_ports) {
diff --git a/drivers/usb/gadget/u_rmnet.h b/drivers/usb/gadget/u_rmnet.h
index a9cca50..06471a4 100644
--- a/drivers/usb/gadget/u_rmnet.h
+++ b/drivers/usb/gadget/u_rmnet.h
@@ -46,6 +46,13 @@
void (*connect)(struct grmnet *g);
};
+enum ctrl_client {
+ FRMNET_CTRL_CLIENT,
+ GPS_CTRL_CLIENT,
+
+ NR_CTRL_CLIENTS
+};
+
int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port);
int gbam_connect(struct grmnet *gr, u8 port_num,
enum transport_type trans, u8 src_connection_idx,
@@ -56,7 +63,8 @@
void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans);
int gsmd_ctrl_connect(struct grmnet *gr, int port_num);
void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num);
-int gsmd_ctrl_setup(unsigned int count);
+int gsmd_ctrl_setup(enum ctrl_client client_num, unsigned int count,
+ u8 *first_port_idx);
int gqti_ctrl_connect(struct grmnet *gr);
void gqti_ctrl_disconnect(struct grmnet *gr);
diff --git a/drivers/usb/gadget/u_rmnet_ctrl_smd.c b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
index 161634e..caea4ef 100644
--- a/drivers/usb/gadget/u_rmnet_ctrl_smd.c
+++ b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
@@ -24,11 +24,16 @@
#include "u_rmnet.h"
-#define NR_CTRL_SMD_PORTS 3
-static int n_rmnet_ctrl_ports;
-static char *rmnet_ctrl_names[] = {"DATA40_CNTL", "DATA39_CNTL", "DATA38_CNTL"};
+#define MAX_CTRL_PER_CLIENT 3
+#define MAX_CTRL_PORT (MAX_CTRL_PER_CLIENT * NR_CTRL_CLIENTS)
+static char *ctrl_names[NR_CTRL_CLIENTS][MAX_CTRL_PER_CLIENT] = {
+ {"DATA40_CNTL", "DATA39_CNTL", "DATA38_CNTL"},
+ {"DATA39_CNTL"},
+};
static struct workqueue_struct *grmnet_ctrl_wq;
+static u8 online_clients;
+
#define SMD_CH_MAX_LEN 20
#define CH_OPENED 0
#define CH_READY 1
@@ -68,7 +73,7 @@
static struct rmnet_ctrl_ports {
struct rmnet_ctrl_port *port;
struct platform_driver pdrv;
-} ctrl_smd_ports[NR_CTRL_SMD_PORTS];
+} ctrl_smd_ports[MAX_CTRL_PORT];
/*---------------misc functions---------------- */
@@ -172,6 +177,15 @@
}
spin_unlock_irqrestore(&port->port_lock, flags);
}
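+/* A control port index is valid only if it is in range and its port was allocated */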
+static int is_legal_port_num(u8 portno)
+{
+ if (portno >= MAX_CTRL_PORT)
+ return false;
+ if (ctrl_smd_ports[portno].port == NULL)
+ return false;
+
+ return true;
+}
static int
grmnet_ctrl_smd_send_cpkt_tomodem(u8 portno,
@@ -182,7 +196,7 @@
struct smd_ch_info *c;
struct rmnet_ctrl_pkt *cpkt;
- if (portno >= n_rmnet_ctrl_ports) {
+ if (!is_legal_port_num(portno)) {
pr_err("%s: Invalid portno#%d\n", __func__, portno);
return -ENODEV;
}
@@ -225,7 +239,7 @@
int clear_bits = 0;
int temp = 0;
- if (portno >= n_rmnet_ctrl_ports) {
+ if (!is_legal_port_num(portno)) {
pr_err("%s: Invalid portno#%d\n", __func__, portno);
return;
}
@@ -375,8 +389,8 @@
pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
- if (port_num >= n_rmnet_ctrl_ports) {
- pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ if (!is_legal_port_num(port_num)) {
+ pr_err("%s: Invalid port_num#%d\n", __func__, port_num);
return -ENODEV;
}
@@ -431,8 +445,8 @@
pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
- if (port_num >= n_rmnet_ctrl_ports) {
- pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ if (!is_legal_port_num(port_num)) {
+ pr_err("%s: Invalid port_num#%d\n", __func__, port_num);
return;
}
@@ -478,7 +492,10 @@
pr_debug("%s: name:%s\n", __func__, pdev->name);
- for (i = 0; i < n_rmnet_ctrl_ports; i++) {
+ for (i = 0; i < MAX_CTRL_PORT; i++) {
+ if (!ctrl_smd_ports[i].port)
+ continue;
+
port = ctrl_smd_ports[i].port;
c = &port->ctrl_ch;
@@ -508,7 +525,10 @@
pr_debug("%s: name:%s\n", __func__, pdev->name);
- for (i = 0; i < n_rmnet_ctrl_ports; i++) {
+ for (i = 0; i < MAX_CTRL_PORT; i++) {
+ if (!ctrl_smd_ports[i].port)
+ continue;
+
port = ctrl_smd_ports[i].port;
c = &port->ctrl_ch;
@@ -555,7 +575,8 @@
INIT_DELAYED_WORK(&port->disconnect_w, grmnet_ctrl_smd_disconnect_w);
c = &port->ctrl_ch;
- c->name = rmnet_ctrl_names[portno];
+ c->name = ctrl_names[portno / MAX_CTRL_PER_CLIENT]
+ [portno % MAX_CTRL_PER_CLIENT];
c->port = port;
init_waitqueue_head(&c->wait);
INIT_LIST_HEAD(&c->tx_q);
@@ -575,44 +596,54 @@
return 0;
}
-int gsmd_ctrl_setup(unsigned int count)
+int gsmd_ctrl_setup(enum ctrl_client client_num, unsigned int count,
+ u8 *first_port_idx)
{
- int i;
+ int i, start_port, allocated_ports;
int ret;
pr_debug("%s: requested ports:%d\n", __func__, count);
- if (!count || count > NR_CTRL_SMD_PORTS) {
+ if (!count || count > MAX_CTRL_PER_CLIENT) {
pr_err("%s: Invalid num of ports count:%d\n",
__func__, count);
return -EINVAL;
}
- grmnet_ctrl_wq = alloc_workqueue("gsmd_ctrl",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
- if (!grmnet_ctrl_wq) {
- pr_err("%s: Unable to create workqueue grmnet_ctrl\n",
- __func__);
- return -ENOMEM;
+ if (!online_clients) {
+ grmnet_ctrl_wq = alloc_workqueue("gsmd_ctrl",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!grmnet_ctrl_wq) {
+ pr_err("%s: Unable to create workqueue grmnet_ctrl\n",
+ __func__);
+ return -ENOMEM;
+ }
}
+ online_clients++;
- for (i = 0; i < count; i++) {
- n_rmnet_ctrl_ports++;
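+ /* each client owns a fixed window of MAX_CTRL_PER_CLIENT port indices */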
+ start_port = MAX_CTRL_PER_CLIENT * client_num;
+ allocated_ports = 0;
+ for (i = start_port; i < count + start_port; i++) {
+ allocated_ports++;
ret = grmnet_ctrl_smd_port_alloc(i);
if (ret) {
pr_err("%s: Unable to alloc port:%d\n", __func__, i);
- n_rmnet_ctrl_ports--;
+ allocated_ports--;
goto free_ctrl_smd_ports;
}
}
-
+ if (first_port_idx)
+ *first_port_idx = start_port;
return 0;
free_ctrl_smd_ports:
- for (i = 0; i < n_rmnet_ctrl_ports; i++)
- grmnet_ctrl_smd_port_free(i);
+ for (i = 0; i < allocated_ports; i++)
+ grmnet_ctrl_smd_port_free(start_port + i);
- destroy_workqueue(grmnet_ctrl_wq);
+
+ online_clients--;
+ if (!online_clients)
+ destroy_workqueue(grmnet_ctrl_wq);
return ret;
}
@@ -634,10 +665,11 @@
if (!buf)
return -ENOMEM;
- for (i = 0; i < n_rmnet_ctrl_ports; i++) {
- port = ctrl_smd_ports[i].port;
- if (!port)
+ for (i = 0; i < MAX_CTRL_PORT; i++) {
+ if (!ctrl_smd_ports[i].port)
continue;
+ port = ctrl_smd_ports[i].port;
+
spin_lock_irqsave(&port->port_lock, flags);
c = &port->ctrl_ch;
@@ -677,10 +709,10 @@
int i;
unsigned long flags;
- for (i = 0; i < n_rmnet_ctrl_ports; i++) {
- port = ctrl_smd_ports[i].port;
- if (!port)
+ for (i = 0; i < MAX_CTRL_PORT; i++) {
+ if (!ctrl_smd_ports[i].port)
continue;
+ port = ctrl_smd_ports[i].port;
spin_lock_irqsave(&port->port_lock, flags);
@@ -727,6 +759,7 @@
static int __init gsmd_ctrl_init(void)
{
gsmd_ctrl_debugfs_init();
+ online_clients = 0;
return 0;
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 79dcf2f..46b5ce4 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb/otg.h>
+#include <linux/usb/msm_hsusb.h>
#include "xhci.h"
@@ -140,6 +141,10 @@
goto release_mem_region;
}
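+ /* hold a runtime PM reference while the HCDs are set up; dropped at the end of probe */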
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto unmap_registers;
@@ -166,7 +171,8 @@
goto put_usb3_hcd;
phy = usb_get_transceiver();
- if (phy && phy->otg) {
+ /* Register with OTG if present, ignore USB2 OTG using other PHY */
+ if (phy && phy->otg && !(phy->flags & ENABLE_SECONDARY_PHY)) {
dev_dbg(&pdev->dev, "%s otg support available\n", __func__);
ret = otg_set_host(phy->otg, &hcd->self);
if (ret) {
@@ -175,15 +181,12 @@
usb_put_transceiver(phy);
goto put_usb3_hcd;
}
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
} else {
pm_runtime_no_callbacks(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get(&pdev->dev);
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
put_usb3_hcd:
@@ -222,9 +225,6 @@
if (phy && phy->otg) {
otg_set_host(phy->otg, NULL);
usb_put_transceiver(phy);
- } else {
- pm_runtime_put(&dev->dev);
- pm_runtime_disable(&dev->dev);
}
return 0;
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index cae2c17..4865b03 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -478,6 +478,7 @@
u32 val;
int ret;
int retries;
+ struct msm_otg_platform_data *pdata = motg->pdata;
ret = msm_otg_link_clk_reset(motg, 1);
if (ret)
@@ -496,6 +497,9 @@
if (ret)
return ret;
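+ /* route the controller to the secondary HSPHY (USB_PHY_CTRL2 bit 16) when enabled */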
+ if (pdata && pdata->enable_sec_phy)
+ writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
+ USB_PHY_CTRL2);
val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK;
writel(val | PORTSC_PTS_ULPI, USB_PORTSC);
@@ -535,6 +539,7 @@
static int msm_otg_link_reset(struct msm_otg *motg)
{
int cnt = 0;
+ struct msm_otg_platform_data *pdata = motg->pdata;
writel_relaxed(USBCMD_RESET, USB_USBCMD);
while (cnt < LINK_RESET_TIMEOUT_USEC) {
@@ -551,6 +556,9 @@
writel_relaxed(0x0, USB_AHBBURST);
writel_relaxed(0x08, USB_AHBMODE);
+ if (pdata && pdata->enable_sec_phy)
+ writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
+ USB_PHY_CTRL2);
return 0;
}
@@ -3784,8 +3792,6 @@
&pdata->default_mode);
of_property_read_u32(node, "qcom,hsusb-otg-phy-type",
&pdata->phy_type);
- of_property_read_u32(node, "qcom,hsusb-otg-pmic-id-irq",
- &pdata->pmic_id_irq);
pdata->disable_reset_on_disconnect = of_property_read_bool(node,
"qcom,hsusb-otg-disable-reset");
pdata->pnoc_errata_fix = of_property_read_bool(node,
@@ -3798,6 +3804,12 @@
"qcom,hsusb-otg-delay-lpm");
pdata->dp_manual_pullup = of_property_read_bool(node,
"qcom,dp-manual-pullup");
+ pdata->enable_sec_phy = of_property_read_bool(node,
+ "qcom,usb2-enable-hsphy2");
+
+ pdata->pmic_id_irq = platform_get_irq_byname(pdev, "pmic_id_irq");
+ if (pdata->pmic_id_irq < 0)
+ pdata->pmic_id_irq = 0;
return pdata;
}
@@ -4093,6 +4105,9 @@
if (pdata->dp_manual_pullup)
phy->flags |= ENABLE_DP_MANUAL_PULLUP;
+ if (pdata->enable_sec_phy)
+ phy->flags |= ENABLE_SECONDARY_PHY;
+
ret = usb_set_transceiver(&motg->phy);
if (ret) {
dev_err(&pdev->dev, "usb_set_transceiver failed\n");
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index a3d8d7e..b57d0a4 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -945,6 +945,7 @@
void mdp4_writeback_dma_stop(struct msm_fb_data_type *mfd);
int mdp4_writeback_init(struct fb_info *info);
int mdp4_writeback_terminate(struct fb_info *info);
+int mdp4_writeback_set_mirroring_hint(struct fb_info *info, int hint);
uint32_t mdp_block2base(uint32_t block);
int mdp_hist_lut_config(struct mdp_hist_lut_data *data);
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index e8d7489..afa7b97 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -245,7 +245,7 @@
return PTR_ERR(*srcp_ihdl);
}
pr_debug("%s(): ion_hdl %p, ion_buf %d\n", __func__, *srcp_ihdl,
- ion_share_dma_buf(display_iclient, *srcp_ihdl));
+ mem_id);
pr_debug("mixer %u, pipe %u, plane %u\n", pipe->mixer_num,
pipe->pipe_ndx, plane);
if (ion_map_iommu(display_iclient, *srcp_ihdl,
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 7caf0ad..62e89d3 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -807,3 +807,23 @@
mutex_unlock(&mfd->writeback_mutex);
wake_up(&mfd->wait_q);
}
+
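+/*
+ * Report WFD mirroring state changes to userspace through the writeback
+ * switch device; only writeback panels support the hint.
+ */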
+int mdp4_writeback_set_mirroring_hint(struct fb_info *info, int hint)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+ if (mfd->panel.type != WRITEBACK_PANEL)
+ return -ENOTSUPP;
+
+ switch (hint) {
+ case MDP_WRITEBACK_MIRROR_ON:
+ case MDP_WRITEBACK_MIRROR_PAUSE:
+ case MDP_WRITEBACK_MIRROR_RESUME:
+ case MDP_WRITEBACK_MIRROR_OFF:
+ pr_info("wfd state switched to %d\n", hint);
+ switch_set_state(&mfd->writeback_sdev, hint);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
diff --git a/drivers/video/msm/mdp4_wfd_writeback.c b/drivers/video/msm/mdp4_wfd_writeback.c
index d96fc7d..ba6c78b 100644
--- a/drivers/video/msm/mdp4_wfd_writeback.c
+++ b/drivers/video/msm/mdp4_wfd_writeback.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,6 +75,13 @@
platform_set_drvdata(mdp_dev, mfd);
+ mfd->writeback_sdev.name = "wfd";
+ rc = switch_dev_register(&mfd->writeback_sdev);
+ if (rc) {
+ pr_err("Failed to setup switch dev for writeback panel");
+ return rc;
+ }
+
rc = platform_device_add(mdp_dev);
if (rc) {
WRITEBACK_MSG_ERR("failed to add device");
@@ -84,8 +91,16 @@
return rc;
}
+static int writeback_remove(struct platform_device *pdev)
+{
+ struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+ switch_dev_unregister(&mfd->writeback_sdev);
+ return 0;
+}
+
static struct platform_driver writeback_driver = {
.probe = writeback_probe,
+ .remove = writeback_remove,
.driver = {
.name = "writeback",
},
diff --git a/drivers/video/msm/mdss/dsi_host_v2.c b/drivers/video/msm/mdss/dsi_host_v2.c
index 453cbaa..887dde7 100644
--- a/drivers/video/msm/mdss/dsi_host_v2.c
+++ b/drivers/video/msm/mdss/dsi_host_v2.c
@@ -29,6 +29,7 @@
#define DSI_POLL_SLEEP_US 1000
#define DSI_POLL_TIMEOUT_US 16000
#define DSI_ESC_CLK_RATE 19200000
+#define DSI_DMA_CMD_TIMEOUT_MS 200
struct dsi_host_v2_private {
struct completion dma_comp;
@@ -426,18 +427,15 @@
dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
/*If Video enabled, Keep Video and Cmd mode ON */
- if (dsi_ctrl & 0x02)
- dsi_ctrl &= ~0x05;
- else
- dsi_ctrl &= ~0x07;
+
+ dsi_ctrl &= ~0x06;
if (mode == DSI_VIDEO_MODE) {
- dsi_ctrl |= 0x03;
+ dsi_ctrl |= 0x02;
intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK;
} else { /* command mode */
- dsi_ctrl |= 0x05;
- if (pdata->panel_info.type == MIPI_VIDEO_PANEL)
- dsi_ctrl |= 0x02;
+ dsi_ctrl |= 0x04;
intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
DSI_INTR_CMD_MDP_DONE_MASK;
@@ -480,7 +478,7 @@
int msm_dsi_cmd_dma_tx(struct dsi_buf *tp)
{
- int len;
+ int len, rc;
unsigned long size, addr;
unsigned char *ctrl_base = dsi_host_private->dsi_base;
@@ -505,12 +503,17 @@
MIPI_OUTP(ctrl_base + DSI_CMD_MODE_DMA_SW_TRIGGER, 0x01);
wmb();
- wait_for_completion_interruptible(&dsi_host_private->dma_comp);
+ rc = wait_for_completion_timeout(&dsi_host_private->dma_comp,
+ msecs_to_jiffies(DSI_DMA_CMD_TIMEOUT_MS));
+ if (rc == 0) {
+ pr_err("DSI command transaction time out\n");
+ rc = -ETIME;
+ }
dma_unmap_single(&dsi_host_private->dis_dev, tp->dmap, size,
DMA_TO_DEVICE);
tp->dmap = 0;
- return 0;
+ return rc;
}
int msm_dsi_cmd_dma_rx(struct dsi_buf *rp, int rlen)
@@ -723,6 +726,7 @@
static int msm_dsi_cal_clk_rate(struct mdss_panel_data *pdata,
u32 *bitclk_rate,
+ u32 *dsiclk_rate,
u32 *byteclk_rate,
u32 *pclk_rate)
{
@@ -761,10 +765,11 @@
*bitclk_rate /= lanes;
*byteclk_rate = *bitclk_rate / 8;
+ *dsiclk_rate = *byteclk_rate * lanes;
*pclk_rate = *byteclk_rate * lanes * 8 / pdata->panel_info.bpp;
- pr_debug("bitclk=%u, byteclk=%u, pck_=%u\n",
- *bitclk_rate, *byteclk_rate, *pclk_rate);
+ pr_debug("dsiclk_rate=%u, byteclk=%u, pck_=%u\n",
+ *dsiclk_rate, *byteclk_rate, *pclk_rate);
return 0;
}
@@ -777,7 +782,7 @@
u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
u32 ystride, bpp, data;
u32 dummy_xres, dummy_yres;
- u32 bitclk_rate = 0, byteclk_rate = 0, pclk_rate = 0;
+ u32 bitclk_rate = 0, byteclk_rate = 0, pclk_rate = 0, dsiclk_rate = 0;
unsigned char *ctrl_base = dsi_host_private->dsi_base;
pr_debug("msm_dsi_on\n");
@@ -794,8 +799,10 @@
msm_dsi_phy_sw_reset(dsi_host_private->dsi_base);
msm_dsi_phy_init(dsi_host_private->dsi_base, pdata);
- msm_dsi_cal_clk_rate(pdata, &bitclk_rate, &byteclk_rate, &pclk_rate);
- msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, byteclk_rate, pclk_rate);
+ msm_dsi_cal_clk_rate(pdata, &bitclk_rate, &dsiclk_rate,
+ &byteclk_rate, &pclk_rate);
+ msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, dsiclk_rate,
+ byteclk_rate, pclk_rate);
msm_dsi_prepare_clocks();
msm_dsi_clk_enable();
@@ -879,12 +886,11 @@
int ret = 0;
pr_debug("msm_dsi_off\n");
- msm_dsi_clk_set_rate(0, 0, 0);
+ msm_dsi_controller_cfg(0);
+ msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, 0, 0, 0);
msm_dsi_clk_disable();
msm_dsi_unprepare_clocks();
- /* disable DSI controller */
- msm_dsi_controller_cfg(0);
msm_dsi_ahb_ctrl(0);
ret = msm_dsi_regulator_disable();
diff --git a/drivers/video/msm/mdss/dsi_io_v2.c b/drivers/video/msm/mdss/dsi_io_v2.c
index 0486c4c..93f2c76 100644
--- a/drivers/video/msm/mdss/dsi_io_v2.c
+++ b/drivers/video/msm/mdss/dsi_io_v2.c
@@ -29,6 +29,7 @@
struct clk *dsi_esc_clk;
struct clk *dsi_pixel_clk;
struct clk *dsi_ahb_clk;
+ struct clk *dsi_clk;
int msm_dsi_clk_on;
int msm_dsi_ahb_clk_on;
};
@@ -97,6 +98,13 @@
{
int rc = 0;
+ dsi_io_private->dsi_clk = clk_get(&dev->dev, "dsi_clk");
+ if (IS_ERR(dsi_io_private->dsi_clk)) {
+ pr_err("can't find dsi core_clk\n");
+ rc = PTR_ERR(dsi_io_private->dsi_clk);
+ dsi_io_private->dsi_clk = NULL;
+ return rc;
+ }
dsi_io_private->dsi_byte_clk = clk_get(&dev->dev, "byte_clk");
if (IS_ERR(dsi_io_private->dsi_byte_clk)) {
pr_err("can't find dsi byte_clk\n");
@@ -135,6 +143,10 @@
void msm_dsi_clk_deinit(void)
{
+ if (dsi_io_private->dsi_clk) {
+ clk_put(dsi_io_private->dsi_clk);
+ dsi_io_private->dsi_clk = NULL;
+ }
if (dsi_io_private->dsi_byte_clk) {
clk_put(dsi_io_private->dsi_byte_clk);
dsi_io_private->dsi_byte_clk = NULL;
@@ -156,6 +168,7 @@
int msm_dsi_prepare_clocks(void)
{
+ clk_prepare(dsi_io_private->dsi_clk);
clk_prepare(dsi_io_private->dsi_byte_clk);
clk_prepare(dsi_io_private->dsi_esc_clk);
clk_prepare(dsi_io_private->dsi_pixel_clk);
@@ -164,16 +177,24 @@
int msm_dsi_unprepare_clocks(void)
{
+ clk_unprepare(dsi_io_private->dsi_clk);
clk_unprepare(dsi_io_private->dsi_esc_clk);
clk_unprepare(dsi_io_private->dsi_byte_clk);
clk_unprepare(dsi_io_private->dsi_pixel_clk);
return 0;
}
-int msm_dsi_clk_set_rate(unsigned long esc_rate, unsigned long byte_rate,
+int msm_dsi_clk_set_rate(unsigned long esc_rate,
+ unsigned long dsi_rate,
+ unsigned long byte_rate,
unsigned long pixel_rate)
{
int rc;
+ rc = clk_set_rate(dsi_io_private->dsi_clk, dsi_rate);
+ if (rc) {
+ pr_err("dsi_esc_clk - clk_set_rate failed =%d\n", rc);
+ return rc;
+ }
rc = clk_set_rate(dsi_io_private->dsi_esc_clk, esc_rate);
if (rc) {
@@ -202,6 +223,7 @@
return 0;
}
+ clk_enable(dsi_io_private->dsi_clk);
clk_enable(dsi_io_private->dsi_esc_clk);
clk_enable(dsi_io_private->dsi_byte_clk);
clk_enable(dsi_io_private->dsi_pixel_clk);
@@ -217,6 +239,7 @@
return 0;
}
+ clk_disable(dsi_io_private->dsi_clk);
clk_disable(dsi_io_private->dsi_byte_clk);
clk_disable(dsi_io_private->dsi_esc_clk);
clk_disable(dsi_io_private->dsi_pixel_clk);
@@ -246,7 +269,7 @@
void msm_dsi_regulator_deinit(void)
{
- if (dsi_io_private->vdda_vreg) {
+ if (!IS_ERR(dsi_io_private->vdda_vreg)) {
devm_regulator_put(dsi_io_private->vdda_vreg);
dsi_io_private->vdda_vreg = NULL;
}
diff --git a/drivers/video/msm/mdss/dsi_io_v2.h b/drivers/video/msm/mdss/dsi_io_v2.h
index 25ecd7f..285bf30 100644
--- a/drivers/video/msm/mdss/dsi_io_v2.h
+++ b/drivers/video/msm/mdss/dsi_io_v2.h
@@ -29,7 +29,9 @@
int msm_dsi_unprepare_clocks(void);
-int msm_dsi_clk_set_rate(unsigned long esc_rate, unsigned long byte_rate,
+int msm_dsi_clk_set_rate(unsigned long esc_rate,
+ unsigned long dsi_rate,
+ unsigned long byte_rate,
unsigned long pixel_rate);
int msm_dsi_clk_enable(void);
diff --git a/drivers/video/msm/mdss/dsi_panel_v2.c b/drivers/video/msm/mdss/dsi_panel_v2.c
index 6686de3..e46ea3b 100644
--- a/drivers/video/msm/mdss/dsi_panel_v2.c
+++ b/drivers/video/msm/mdss/dsi_panel_v2.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/leds.h>
+#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include "dsi_v2.h"
@@ -32,6 +33,7 @@
int rst_gpio;
int disp_en_gpio;
+ int video_mode_gpio;
char bl_ctrl;
struct regulator *vddio_vreg;
@@ -40,6 +42,9 @@
struct dsi_panel_cmds_list *on_cmds_list;
struct dsi_panel_cmds_list *off_cmds_list;
struct mdss_dsi_phy_ctrl phy_params;
+
+ char *on_cmds;
+ char *off_cmds;
};
static struct dsi_panel_private *panel_private;
@@ -82,15 +87,54 @@
kfree(panel_private->dsi_panel_tx_buf.start);
kfree(panel_private->dsi_panel_rx_buf.start);
- if (panel_private->vddio_vreg)
+ if (!IS_ERR(panel_private->vddio_vreg))
devm_regulator_put(panel_private->vddio_vreg);
- if (panel_private->vdda_vreg)
- devm_regulator_put(panel_private->vddio_vreg);
+ if (!IS_ERR(panel_private->vdda_vreg))
+ devm_regulator_put(panel_private->vdda_vreg);
+ if (panel_private->on_cmds_list) {
+ kfree(panel_private->on_cmds_list->buf);
+ kfree(panel_private->on_cmds_list);
+ }
+ if (panel_private->off_cmds_list) {
+ kfree(panel_private->off_cmds_list->buf);
+ kfree(panel_private->off_cmds_list);
+ }
+
+ kfree(panel_private->on_cmds);
+ kfree(panel_private->off_cmds);
kfree(panel_private);
panel_private = NULL;
}
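+/* Switch the panel vddio and vdda supplies on or off together */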
+int dsi_panel_power(int enable)
+{
+ int ret;
+ if (enable) {
+ ret = regulator_enable(panel_private->vddio_vreg);
+ if (ret) {
+ pr_err("dsi_panel_power regulator enable vddio fail\n");
+ return ret;
+ }
+ ret = regulator_enable(panel_private->vdda_vreg);
+ if (ret) {
+ pr_err("dsi_panel_power regulator enable vdda fail\n");
+ return ret;
+ }
+ } else {
+ ret = regulator_disable(panel_private->vddio_vreg);
+ if (ret) {
+ pr_err("dsi_panel_power regulator disable vddio fail\n");
+ return ret;
+ }
+ ret = regulator_disable(panel_private->vdda_vreg);
+ if (ret) {
+ pr_err("dsi_panel_power regulator dsiable vdda fail\n");
+ return ret;
+ }
+ }
+ return 0;
+}
void dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
{
@@ -113,6 +157,8 @@
pr_debug("%s: enable = %d\n", __func__, enable);
if (enable) {
+ dsi_panel_power(1);
+ gpio_request(panel_private->rst_gpio, "panel_reset");
gpio_set_value(panel_private->rst_gpio, 1);
/*
* these delay values are by experiments currently, will need
@@ -123,12 +169,34 @@
udelay(200);
gpio_set_value(panel_private->rst_gpio, 1);
msleep(20);
- if (gpio_is_valid(panel_private->disp_en_gpio))
+ if (gpio_is_valid(panel_private->disp_en_gpio)) {
+ gpio_request(panel_private->disp_en_gpio,
+ "panel_enable");
gpio_set_value(panel_private->disp_en_gpio, 1);
+ }
+ if (gpio_is_valid(panel_private->video_mode_gpio)) {
+ gpio_request(panel_private->video_mode_gpio,
+ "panel_video_mdoe");
+ if (pdata->panel_info.mipi.mode == DSI_VIDEO_MODE)
+ gpio_set_value(panel_private->video_mode_gpio,
+ 1);
+ else
+ gpio_set_value(panel_private->video_mode_gpio,
+ 0);
+ }
} else {
gpio_set_value(panel_private->rst_gpio, 0);
- if (gpio_is_valid(panel_private->disp_en_gpio))
+ gpio_free(panel_private->rst_gpio);
+
+ if (gpio_is_valid(panel_private->disp_en_gpio)) {
gpio_set_value(panel_private->disp_en_gpio, 0);
+ gpio_free(panel_private->disp_en_gpio);
+ }
+
+ if (gpio_is_valid(panel_private->video_mode_gpio))
+ gpio_free(panel_private->video_mode_gpio);
+
+ dsi_panel_power(0);
}
}
@@ -196,13 +264,42 @@
panel_private->disp_en_gpio = of_get_named_gpio(np,
"qcom,enable-gpio", 0);
panel_private->rst_gpio = of_get_named_gpio(np, "qcom,rst-gpio", 0);
+ panel_private->video_mode_gpio = of_get_named_gpio(np,
+ "qcom,mode-selection-gpio", 0);
return 0;
}
static int dsi_panel_parse_regulator(struct platform_device *pdev)
{
+ int ret;
panel_private->vddio_vreg = devm_regulator_get(&pdev->dev, "vddio");
+ if (IS_ERR(panel_private->vddio_vreg)) {
+ pr_err("%s: could not get vddio vreg, rc=%ld\n",
+ __func__, PTR_ERR(panel_private->vddio_vreg));
+ return PTR_ERR(panel_private->vddio_vreg);
+ }
+ ret = regulator_set_voltage(panel_private->vddio_vreg,
+ 1800000,
+ 1800000);
+ if (ret) {
+ pr_err("%s: set voltage failed on vddio_vreg, rc=%d\n",
+ __func__, ret);
+ return ret;
+ }
panel_private->vdda_vreg = devm_regulator_get(&pdev->dev, "vdda");
+ if (IS_ERR(panel_private->vdda_vreg)) {
+ pr_err("%s: could not get vdda_vreg , rc=%ld\n",
+ __func__, PTR_ERR(panel_private->vdda_vreg));
+ return PTR_ERR(panel_private->vdda_vreg);
+ }
+ ret = regulator_set_voltage(panel_private->vdda_vreg,
+ 2850000,
+ 2850000);
+ if (ret) {
+ pr_err("%s: set voltage failed on vdda_vreg, rc=%d\n",
+ __func__, ret);
+ return ret;
+ }
return 0;
}
@@ -398,180 +495,163 @@
int cmd_plen, data_offset;
const char *data;
const char *on_cmds_state, *off_cmds_state;
- char *on_cmds = NULL, *off_cmds = NULL;
int num_of_on_cmds = 0, num_of_off_cmds = 0;
data = of_get_property(np, "qcom,panel-on-cmds", &len);
if (!data) {
pr_err("%s:%d, Unable to read ON cmds", __func__, __LINE__);
- goto parse_init_cmds_error;
+ return -EINVAL;
}
- on_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
- if (!on_cmds)
- goto parse_init_cmds_error;
+ panel_private->on_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
+ if (!panel_private->on_cmds)
+ return -ENOMEM;
- memcpy(on_cmds, data, len);
+ memcpy(panel_private->on_cmds, data, len);
data_offset = 0;
cmd_plen = 0;
while ((len - data_offset) >= DT_CMD_HDR) {
data_offset += (DT_CMD_HDR - 1);
- cmd_plen = on_cmds[data_offset++];
+ cmd_plen = panel_private->on_cmds[data_offset++];
data_offset += cmd_plen;
num_of_on_cmds++;
}
if (!num_of_on_cmds) {
pr_err("%s:%d, No ON cmds specified", __func__, __LINE__);
- goto parse_init_cmds_error;
+ return -EINVAL;
}
- panel_data->dsi_panel_on_cmds =
+ panel_private->on_cmds_list =
kzalloc(sizeof(struct dsi_panel_cmds_list), GFP_KERNEL);
- if (!panel_data->dsi_panel_on_cmds)
- goto parse_init_cmds_error;
+ if (!panel_private->on_cmds_list)
+ return -ENOMEM;
- (panel_data->dsi_panel_on_cmds)->buf =
+ panel_private->on_cmds_list->buf =
kzalloc((num_of_on_cmds * sizeof(struct dsi_cmd_desc)),
GFP_KERNEL);
- if (!(panel_data->dsi_panel_on_cmds)->buf)
- goto parse_init_cmds_error;
+ if (!panel_private->on_cmds_list->buf)
+ return -ENOMEM;
data_offset = 0;
for (i = 0; i < num_of_on_cmds; i++) {
- panel_data->dsi_panel_on_cmds->buf[i].dtype =
- on_cmds[data_offset++];
- panel_data->dsi_panel_on_cmds->buf[i].last =
- on_cmds[data_offset++];
- panel_data->dsi_panel_on_cmds->buf[i].vc =
- on_cmds[data_offset++];
- panel_data->dsi_panel_on_cmds->buf[i].ack =
- on_cmds[data_offset++];
- panel_data->dsi_panel_on_cmds->buf[i].wait =
- on_cmds[data_offset++];
- panel_data->dsi_panel_on_cmds->buf[i].dlen =
- on_cmds[data_offset++];
- panel_data->dsi_panel_on_cmds->buf[i].payload =
- &on_cmds[data_offset];
- data_offset += (panel_data->dsi_panel_on_cmds->buf[i].dlen);
+ panel_private->on_cmds_list->buf[i].dtype =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].last =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].vc =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].ack =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].wait =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].dlen =
+ panel_private->on_cmds[data_offset++];
+ panel_private->on_cmds_list->buf[i].payload =
+ &panel_private->on_cmds[data_offset];
+ data_offset += (panel_private->on_cmds_list->buf[i].dlen);
}
if (data_offset != len) {
pr_err("%s:%d, Incorrect ON command entries",
__func__, __LINE__);
- goto parse_init_cmds_error;
+ return -EINVAL;
}
- (panel_data->dsi_panel_on_cmds)->size = num_of_on_cmds;
+ panel_private->on_cmds_list->size = num_of_on_cmds;
on_cmds_state = of_get_property(pdev->dev.of_node,
"qcom,on-cmds-dsi-state", NULL);
if (!strncmp(on_cmds_state, "DSI_LP_MODE", 11)) {
- (panel_data->dsi_panel_on_cmds)->ctrl_state = DSI_LP_MODE;
+ panel_private->on_cmds_list->ctrl_state = DSI_LP_MODE;
} else if (!strncmp(on_cmds_state, "DSI_HS_MODE", 11)) {
- (panel_data->dsi_panel_on_cmds)->ctrl_state = DSI_HS_MODE;
+ panel_private->on_cmds_list->ctrl_state = DSI_HS_MODE;
} else {
pr_debug("%s: ON cmds state not specified. Set Default\n",
__func__);
- (panel_data->dsi_panel_on_cmds)->ctrl_state = DSI_LP_MODE;
+ panel_private->on_cmds_list->ctrl_state = DSI_LP_MODE;
}
- panel_private->on_cmds_list = panel_data->dsi_panel_on_cmds;
+ panel_data->dsi_panel_on_cmds = panel_private->on_cmds_list;
+
data = of_get_property(np, "qcom,panel-off-cmds", &len);
if (!data) {
pr_err("%s:%d, Unable to read OFF cmds", __func__, __LINE__);
- goto parse_init_cmds_error;
+ return -EINVAL;
}
- off_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
- if (!off_cmds)
- goto parse_init_cmds_error;
+ panel_private->off_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
+ if (!panel_private->off_cmds)
+ return -ENOMEM;
- memcpy(off_cmds, data, len);
+ memcpy(panel_private->off_cmds, data, len);
data_offset = 0;
cmd_plen = 0;
while ((len - data_offset) >= DT_CMD_HDR) {
data_offset += (DT_CMD_HDR - 1);
- cmd_plen = off_cmds[data_offset++];
+ cmd_plen = panel_private->off_cmds[data_offset++];
data_offset += cmd_plen;
num_of_off_cmds++;
}
if (!num_of_off_cmds) {
pr_err("%s:%d, No OFF cmds specified", __func__, __LINE__);
- goto parse_init_cmds_error;
+ return -ENOMEM;
}
- panel_data->dsi_panel_off_cmds =
+ panel_private->off_cmds_list =
kzalloc(sizeof(struct dsi_panel_cmds_list), GFP_KERNEL);
- if (!panel_data->dsi_panel_off_cmds)
- goto parse_init_cmds_error;
+ if (!panel_private->off_cmds_list)
+ return -ENOMEM;
- (panel_data->dsi_panel_off_cmds)->buf = kzalloc(num_of_off_cmds
+ panel_private->off_cmds_list->buf = kzalloc(num_of_off_cmds
* sizeof(struct dsi_cmd_desc),
GFP_KERNEL);
- if (!(panel_data->dsi_panel_off_cmds)->buf)
- goto parse_init_cmds_error;
+ if (!panel_private->off_cmds_list->buf)
+ return -ENOMEM;
data_offset = 0;
for (i = 0; i < num_of_off_cmds; i++) {
- panel_data->dsi_panel_off_cmds->buf[i].dtype =
- off_cmds[data_offset++];
- panel_data->dsi_panel_off_cmds->buf[i].last =
- off_cmds[data_offset++];
- panel_data->dsi_panel_off_cmds->buf[i].vc =
- off_cmds[data_offset++];
- panel_data->dsi_panel_off_cmds->buf[i].ack =
- off_cmds[data_offset++];
- panel_data->dsi_panel_off_cmds->buf[i].wait =
- off_cmds[data_offset++];
- panel_data->dsi_panel_off_cmds->buf[i].dlen =
- off_cmds[data_offset++];
- panel_data->dsi_panel_off_cmds->buf[i].payload =
- &off_cmds[data_offset];
- data_offset += (panel_data->dsi_panel_off_cmds->buf[i].dlen);
+ panel_private->off_cmds_list->buf[i].dtype =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].last =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].vc =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].ack =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].wait =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].dlen =
+ panel_private->off_cmds[data_offset++];
+ panel_private->off_cmds_list->buf[i].payload =
+ &panel_private->off_cmds[data_offset];
+ data_offset += (panel_private->off_cmds_list->buf[i].dlen);
}
if (data_offset != len) {
pr_err("%s:%d, Incorrect OFF command entries",
__func__, __LINE__);
- goto parse_init_cmds_error;
+ return -EINVAL;
}
- (panel_data->dsi_panel_off_cmds)->size = num_of_off_cmds;
- off_cmds_state = of_get_property(pdev->dev.of_node,
+ panel_private->off_cmds_list->size = num_of_off_cmds;
+ off_cmds_state = of_get_property(pdev->dev.of_node,
"qcom,off-cmds-dsi-state", NULL);
if (!strncmp(off_cmds_state, "DSI_LP_MODE", 11)) {
- (panel_data->dsi_panel_off_cmds)->ctrl_state =
+ panel_private->off_cmds_list->ctrl_state =
DSI_LP_MODE;
} else if (!strncmp(off_cmds_state, "DSI_HS_MODE", 11)) {
- (panel_data->dsi_panel_off_cmds)->ctrl_state = DSI_HS_MODE;
+ panel_private->off_cmds_list->ctrl_state = DSI_HS_MODE;
} else {
pr_debug("%s: ON cmds state not specified. Set Default\n",
__func__);
- (panel_data->dsi_panel_off_cmds)->ctrl_state = DSI_LP_MODE;
+ panel_private->off_cmds_list->ctrl_state = DSI_LP_MODE;
}
- panel_private->off_cmds_list = panel_data->dsi_panel_on_cmds;
- kfree(on_cmds);
- kfree(off_cmds);
+ panel_data->dsi_panel_off_cmds = panel_private->off_cmds_list;
return 0;
-parse_init_cmds_error:
- if (panel_data->dsi_panel_on_cmds) {
- kfree((panel_data->dsi_panel_on_cmds)->buf);
- kfree(panel_data->dsi_panel_on_cmds);
- panel_data->dsi_panel_on_cmds = NULL;
- }
- if (panel_data->dsi_panel_off_cmds) {
- kfree((panel_data->dsi_panel_off_cmds)->buf);
- kfree(panel_data->dsi_panel_off_cmds);
- panel_data->dsi_panel_off_cmds = NULL;
- }
-
- kfree(on_cmds);
- kfree(off_cmds);
- return -EINVAL;
}
static int dsi_panel_parse_backlight(struct platform_device *pdev,
@@ -709,6 +789,7 @@
vendor_pdata.on = dsi_panel_on;
vendor_pdata.off = dsi_panel_off;
+ vendor_pdata.reset = dsi_panel_reset;
vendor_pdata.bl_fnc = dsi_panel_bl_ctrl;
rc = dsi_panel_device_register_v2(pdev, &vendor_pdata,
diff --git a/drivers/video/msm/mdss/dsi_v2.c b/drivers/video/msm/mdss/dsi_v2.c
index 5e46bf5..5833796 100644
--- a/drivers/video/msm/mdss/dsi_v2.c
+++ b/drivers/video/msm/mdss/dsi_v2.c
@@ -29,6 +29,14 @@
if (!panel_common_data || !pdata)
return -ENODEV;
+ if (dsi_intf.op_mode_config)
+ dsi_intf.op_mode_config(DSI_CMD_MODE, pdata);
+
+ pr_debug("panel off commands\n");
+ if (panel_common_data->off)
+ panel_common_data->off(pdata);
+
+ pr_debug("turn off dsi controller\n");
if (dsi_intf.off)
rc = dsi_intf.off(pdata);
@@ -37,9 +45,9 @@
return rc;
}
- pr_debug("dsi_off reset\n");
- if (panel_common_data->off)
- panel_common_data->off(pdata);
+ pr_debug("turn off panel power\n");
+ if (panel_common_data->reset)
+ panel_common_data->reset(pdata, 0);
return rc;
}
@@ -53,8 +61,6 @@
if (!panel_common_data || !pdata)
return -ENODEV;
- if (panel_common_data->reset)
- panel_common_data->reset(1);
pr_debug("dsi_on DSI controller ont\n");
if (dsi_intf.on)
@@ -64,6 +70,9 @@
pr_err("mdss_dsi_on DSI failed %d\n", rc);
return rc;
}
+ pr_debug("dsi_on power on panel\n");
+ if (panel_common_data->reset)
+ panel_common_data->reset(pdata, 1);
pr_debug("dsi_on DSI panel ont\n");
if (panel_common_data->on)
diff --git a/drivers/video/msm/mdss/dsi_v2.h b/drivers/video/msm/mdss/dsi_v2.h
index fa868ab..f68527c 100644
--- a/drivers/video/msm/mdss/dsi_v2.h
+++ b/drivers/video/msm/mdss/dsi_v2.h
@@ -189,7 +189,7 @@
struct mdss_panel_info panel_info;
int (*on) (struct mdss_panel_data *pdata);
int (*off) (struct mdss_panel_data *pdata);
- void (*reset)(int enable);
+ void (*reset)(struct mdss_panel_data *pdata, int enable);
void (*bl_fnc) (struct mdss_panel_data *pdata, u32 bl_level);
struct dsi_panel_cmds_list *dsi_panel_on_cmds;
struct dsi_panel_cmds_list *dsi_panel_off_cmds;
diff --git a/drivers/video/msm/mdss/mdp3.c b/drivers/video/msm/mdss/mdp3.c
index 890b00b..52243eb 100644
--- a/drivers/video/msm/mdss/mdp3.c
+++ b/drivers/video/msm/mdss/mdp3.c
@@ -47,7 +47,7 @@
#include "mdp3_hwio.h"
#include "mdp3_ctrl.h"
-#define MDP_CORE_HW_VERSION 0x03030304
+#define MDP_CORE_HW_VERSION 0x03040310
struct mdp3_hw_resource *mdp3_res;
#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
@@ -302,16 +302,7 @@
return ret;
}
-int mdp3_vsync_clk_enable(int enable)
-{
- int ret = 0;
- pr_debug("vsync clk enable=%d\n", enable);
- mutex_lock(&mdp3_res->res_mutex);
- mdp3_clk_update(MDP3_CLK_VSYNC, enable);
- mutex_unlock(&mdp3_res->res_mutex);
- return ret;
-}
int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate)
{
@@ -400,6 +391,9 @@
if (rc)
return rc;
+ rc = mdp3_clk_register("dsi_clk", MDP3_CLK_DSI);
+ if (rc)
+ return rc;
return rc;
}
@@ -409,6 +403,7 @@
clk_put(mdp3_res->clocks[MDP3_CLK_CORE]);
clk_put(mdp3_res->clocks[MDP3_CLK_VSYNC]);
clk_put(mdp3_res->clocks[MDP3_CLK_LCDC]);
+ clk_put(mdp3_res->clocks[MDP3_CLK_DSI]);
}
int mdp3_clk_enable(int enable)
@@ -421,6 +416,7 @@
rc = mdp3_clk_update(MDP3_CLK_AHB, enable);
rc |= mdp3_clk_update(MDP3_CLK_CORE, enable);
rc |= mdp3_clk_update(MDP3_CLK_VSYNC, enable);
+ rc |= mdp3_clk_update(MDP3_CLK_DSI, enable);
mutex_unlock(&mdp3_res->res_mutex);
return rc;
}
@@ -577,12 +573,14 @@
int rc;
rc = mdp3_clk_update(MDP3_CLK_AHB, 1);
+ rc |= mdp3_clk_update(MDP3_CLK_CORE, 1);
if (rc)
return rc;
mdp3_res->mdp_rev = MDP3_REG_READ(MDP3_REG_HW_VERSION);
rc = mdp3_clk_update(MDP3_CLK_AHB, 0);
+ rc |= mdp3_clk_update(MDP3_CLK_CORE, 0);
if (rc)
pr_err("fail to turn off the MDP3_CLK_AHB clk\n");
diff --git a/drivers/video/msm/mdss/mdp3.h b/drivers/video/msm/mdss/mdp3.h
index c853664..5774e5a 100644
--- a/drivers/video/msm/mdss/mdp3.h
+++ b/drivers/video/msm/mdss/mdp3.h
@@ -29,6 +29,7 @@
MDP3_CLK_CORE,
MDP3_CLK_VSYNC,
MDP3_CLK_LCDC,
+ MDP3_CLK_DSI,
MDP3_MAX_CLK
};
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index e07c0a4..99b7604 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -28,7 +28,7 @@
void vsync_notify_handler(void *arg)
{
- struct mdp3_session_data *session = (struct mdp3_session_data *)session;
+ struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
complete(&session->vsync_comp);
}
@@ -100,18 +100,30 @@
.attrs = vsync_fs_attrs,
};
-static int mdp3_ctrl_res_req_dma(struct msm_fb_data_type *mfd, int status)
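+/*
+ * Vote bus bandwidth for the DMA_P client: ab is one RGBA frame per
+ * refresh (xres * yres * 4 * fps), ib is 1.5 times ab.
+ */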
+static int mdp3_ctrl_res_req_bus(struct msm_fb_data_type *mfd, int status)
{
int rc = 0;
if (status) {
struct mdss_panel_info *panel_info = mfd->panel_info;
int ab = 0;
int ib = 0;
- unsigned long core_clk = 0;
- int vtotal = 0;
ab = panel_info->xres * panel_info->yres * 4;
ab *= panel_info->mipi.frame_rate;
ib = (ab * 3) / 2;
+ rc = mdp3_bus_scale_set_quota(MDP3_BW_CLIENT_DMA_P, ab, ib);
+ } else {
+ rc = mdp3_bus_scale_set_quota(MDP3_BW_CLIENT_DMA_P, 0, 0);
+ }
+ return rc;
+}
+
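+/*
+ * Derive the MDP core clock requirement from the panel timing and turn
+ * the MDP clocks on or off accordingly.
+ */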
+static int mdp3_ctrl_res_req_clk(struct msm_fb_data_type *mfd, int status)
+{
+ int rc = 0;
+ if (status) {
+ struct mdss_panel_info *panel_info = mfd->panel_info;
+ unsigned long core_clk;
+ int vtotal;
vtotal = panel_info->lcdc.v_back_porch +
panel_info->lcdc.v_front_porch +
panel_info->lcdc.v_pulse_width +
@@ -126,10 +138,8 @@
if (rc)
return rc;
- mdp3_bus_scale_set_quota(MDP3_BW_CLIENT_DMA_P, ab, ib);
} else {
rc = mdp3_clk_enable(false);
- rc |= mdp3_bus_scale_set_quota(MDP3_BW_CLIENT_DMA_P, 0, 0);
}
return rc;
}
@@ -285,13 +295,25 @@
}
mutex_lock(&mdp3_session->lock);
if (mdp3_session->status) {
- pr_info("fb%d is on already", mfd->index);
+ pr_debug("fb%d is on already", mfd->index);
goto on_error;
}
- rc = mdp3_ctrl_res_req_dma(mfd, 1);
+ /* request bus bandwidth before DSI DMA traffic */
+ rc = mdp3_ctrl_res_req_bus(mfd, 1);
+ if (rc)
+ pr_err("fail to request bus resource\n");
+
+ panel = mdp3_session->panel;
+ if (panel->event_handler)
+ rc = panel->event_handler(panel, MDSS_EVENT_PANEL_ON, NULL);
if (rc) {
- pr_err("resource request for dma on failed\n");
+ pr_err("fail to turn on the panel\n");
+ goto on_error;
+ }
+ rc = mdp3_ctrl_res_req_clk(mfd, 1);
+ if (rc) {
+ pr_err("fail to request mdp clk resource\n");
goto on_error;
}
@@ -304,16 +326,9 @@
rc = mdp3_ctrl_intf_init(mfd, mdp3_session->intf);
if (rc) {
pr_err("display interface init failed\n");
- goto on_error;
- }
- panel = mdp3_session->panel;
- if (panel->event_handler)
- rc = panel->event_handler(panel, MDSS_EVENT_PANEL_ON, NULL);
- if (rc) {
- pr_err("fail to turn on the panel\n");
goto on_error;
}
@@ -348,26 +363,31 @@
mutex_lock(&mdp3_session->lock);
if (!mdp3_session->status) {
- pr_info("fb%d is off already", mfd->index);
+ pr_debug("fb%d is off already", mfd->index);
goto off_error;
}
- panel = mdp3_session->panel;
- if (panel->event_handler)
- rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
-
- if (rc)
- pr_err("fail to turn off the panel\n");
+ pr_debug("mdp3_ctrl_off stop mdp3 dma engine\n");
rc = mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
if (rc)
pr_err("fail to stop the MDP3 dma\n");
- rc = mdp3_ctrl_res_req_dma(mfd, 0);
+ pr_debug("mdp3_ctrl_off stop dsi panel and controller\n");
+ panel = mdp3_session->panel;
+ if (panel->event_handler)
+ rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
if (rc)
- pr_err("resource release for dma on failed\n");
+ pr_err("fail to turn off the panel\n");
+ pr_debug("mdp3_ctrl_off release bus and clock\n");
+ rc = mdp3_ctrl_res_req_bus(mfd, 0);
+ if (rc)
+ pr_err("mdp bus resource release failed\n");
+ rc = mdp3_ctrl_res_req_clk(mfd, 0);
+ if (rc)
+ pr_err("mdp clock resource release failed\n");
off_error:
mdp3_session->status = 0;
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index 763c7f6..04217f5 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -73,6 +73,8 @@
u32 irq_mask;
u32 irq_ena;
u32 irq_buzy;
+ u32 has_bwc;
+ u32 has_decimation;
u32 mdp_irq_mask;
u32 mdp_hist_irq_mask;
diff --git a/drivers/video/msm/mdss/mdss_debug.c b/drivers/video/msm/mdss/mdss_debug.c
index 0b2a7c0..0918db1 100644
--- a/drivers/video/msm/mdss/mdss_debug.c
+++ b/drivers/video/msm/mdss/mdss_debug.c
@@ -286,6 +286,96 @@
return -ENODEV;
}
+
+static int mdss_debug_stat_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int mdss_debug_stat_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int mdss_debug_stat_ctl_dump(struct mdss_mdp_ctl *ctl,
+ char *bp, int len)
+{
+ int tot = 0;
+
+ if (!ctl->ref_cnt)
+ return 0;
+
+ if (ctl->intf_num) {
+ tot = scnprintf(bp, len,
+ "intf%d: play: %08u \tvsync: %08u \tunderrun: %08u\n",
+ ctl->intf_num, ctl->play_cnt,
+ ctl->vsync_cnt, ctl->underrun_cnt);
+ } else {
+ tot = scnprintf(bp, len, "wb: \tmode=%x \tplay: %08u\n",
+ ctl->opmode, ctl->play_cnt);
+ }
+
+ return tot;
+}
+
+static ssize_t mdss_debug_stat_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ struct mdss_data_type *mdata = file->private_data;
+ struct mdss_mdp_pipe *pipe;
+ int i, len, tot;
+ char bp[512];
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = sizeof(bp);
+
+ tot = scnprintf(bp, len, "\nmdp:\n");
+
+ for (i = 0; i < mdata->nctl; i++)
+ tot += mdss_debug_stat_ctl_dump(mdata->ctl_off + i,
+ bp + tot, len - tot);
+ tot += scnprintf(bp + tot, len - tot, "\n");
+
+ for (i = 0; i < mdata->nvig_pipes; i++) {
+ pipe = mdata->vig_pipes + i;
+ tot += scnprintf(bp + tot, len - tot,
+ "VIG%d : %08u\t", i, pipe->play_cnt);
+ }
+ tot += scnprintf(bp + tot, len - tot, "\n");
+
+ for (i = 0; i < mdata->nrgb_pipes; i++) {
+ pipe = mdata->rgb_pipes + i;
+ tot += scnprintf(bp + tot, len - tot,
+ "RGB%d : %08u\t", i, pipe->play_cnt);
+ }
+ tot += scnprintf(bp + tot, len - tot, "\n");
+
+ for (i = 0; i < mdata->ndma_pipes; i++) {
+ pipe = mdata->dma_pipes + i;
+ tot += scnprintf(bp + tot, len - tot,
+ "DMA%d : %08u\t", i, pipe->play_cnt);
+ }
+ tot += scnprintf(bp + tot, len - tot, "\n");
+
+ if (copy_to_user(buff, bp, tot))
+ return -EFAULT;
+
+ *ppos += tot; /* increase offset */
+
+ return tot;
+}
+
+static const struct file_operations mdss_stat_fops = {
+ .open = mdss_debug_stat_open,
+ .release = mdss_debug_stat_release,
+ .read = mdss_debug_stat_read,
+};
+
static int mdss_debugfs_cleanup(struct mdss_debug_data *mdd)
{
struct mdss_debug_base *base, *tmp;
@@ -330,6 +420,7 @@
mdss_debugfs_cleanup(mdd);
return -ENODEV;
}
+ debugfs_create_file("stat", 0644, mdd->root, mdata, &mdss_stat_fops);
debugfs_create_u32("min_mdp_clk", 0644, mdd->root,
(u32 *)&mdata->min_mdp_clk);
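Aside on the "stat" node registered above: it dumps per-interface play/vsync/underrun counters and per-pipe play counts into a bounded buffer. A minimal userspace sketch for reading it; the debugfs mount point and the "mdp" directory name are assumptions, since the root directory is created outside this hunk.

/* Illustrative only: read the new debugfs "stat" node from userspace.
 * Both the debugfs mount point and the "mdp" directory name are
 * assumed, not taken from this diff. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t n;
	int fd = open("/sys/kernel/debug/mdp/stat", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* play/vsync/underrun counters */
	}
	close(fd);
	return 0;
}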
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 8bf8c95..acac6b9 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -326,19 +326,6 @@
snprintf(mp->vreg_config[i].vreg_name,
ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st);
- /* vreg-type */
- rc = of_property_read_string_index(of_node, "qcom,supply-type",
- i, &st);
- if (rc) {
- pr_err("%s: error reading vreg type. rc=%d\n",
- __func__, rc);
- goto error;
- }
- if (!strncmp(st, "regulator", 9))
- mp->vreg_config[i].type = 0;
- else if (!strncmp(st, "switch", 6))
- mp->vreg_config[i].type = 1;
-
/* vreg-min-voltage */
memset(val_array, 0, sizeof(u32) * dt_vreg_total);
rc = of_property_read_u32_array(of_node,
@@ -373,14 +360,13 @@
__func__, rc);
goto error;
}
- mp->vreg_config[i].optimum_voltage = val_array[i];
+ mp->vreg_config[i].peak_current = val_array[i];
- pr_debug("%s: %s type=%d, min=%d, max=%d, op=%d\n",
- __func__, mp->vreg_config[i].vreg_name,
- mp->vreg_config[i].type,
+ pr_debug("%s: %s min=%d, max=%d, pc=%d\n", __func__,
+ mp->vreg_config[i].vreg_name,
mp->vreg_config[i].min_voltage,
mp->vreg_config[i].max_voltage,
- mp->vreg_config[i].optimum_voltage);
+ mp->vreg_config[i].peak_current);
}
devm_kfree(dev, val_array);
diff --git a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
index 2e20787..ef17229 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
@@ -618,7 +618,7 @@
/* Wait until READY bit is set in BCAPS */
timeout_count = 50;
- while (!(bcaps && BIT(5)) && timeout_count) {
+ while (!(bcaps & BIT(5)) && timeout_count) {
msleep(100);
timeout_count--;
/* Read BCAPS at offset 0x40 */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index 213fcff..9a90b88 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -3021,20 +3021,6 @@
}
snprintf(mp->vreg_config[j].vreg_name, 32, "%s", st);
- /* vreg-type */
- memset(prop_name, 0, sizeof(prop_name));
- snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
- "supply-type");
- memset(val_array, 0, sizeof(u32) * dt_vreg_total);
- rc = of_property_read_u32_array(of_node,
- prop_name, val_array, dt_vreg_total);
- if (rc) {
- DEV_ERR("%s: error read '%s' vreg type. rc=%d\n",
- __func__, hdmi_tx_pm_name(module_type), rc);
- goto error;
- }
- mp->vreg_config[j].type = val_array[i];
-
/* vreg-min-voltage */
memset(prop_name, 0, sizeof(prop_name));
snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
@@ -3068,24 +3054,23 @@
/* vreg-op-mode */
memset(prop_name, 0, sizeof(prop_name));
snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
- "op-mode");
+ "peak-current");
memset(val_array, 0, sizeof(u32) * dt_vreg_total);
rc = of_property_read_u32_array(of_node,
prop_name, val_array,
dt_vreg_total);
if (rc) {
- DEV_ERR("%s: error read '%s' min volt. rc=%d\n",
+ DEV_ERR("%s: error read '%s' peak current. rc=%d\n",
__func__, hdmi_tx_pm_name(module_type), rc);
goto error;
}
- mp->vreg_config[j].optimum_voltage = val_array[i];
+ mp->vreg_config[j].peak_current = val_array[i];
- DEV_DBG("%s: %s type=%d, min=%d, max=%d, op=%d\n",
- __func__, mp->vreg_config[j].vreg_name,
- mp->vreg_config[j].type,
+ DEV_DBG("%s: %s min=%d, max=%d, pc=%d\n", __func__,
+ mp->vreg_config[j].vreg_name,
mp->vreg_config[j].min_voltage,
mp->vreg_config[j].max_voltage,
- mp->vreg_config[j].optimum_voltage);
+ mp->vreg_config[j].peak_current);
ndx_mask >>= 1;
j++;
diff --git a/drivers/video/msm/mdss/mdss_io_util.c b/drivers/video/msm/mdss/mdss_io_util.c
index c38eaa4..ff52e4c 100644
--- a/drivers/video/msm/mdss/mdss_io_util.c
+++ b/drivers/video/msm/mdss/mdss_io_util.c
@@ -131,6 +131,7 @@
{
int i = 0, rc = 0;
struct dss_vreg *curr_vreg = NULL;
+ enum dss_vreg_type type;
if (config) {
for (i = 0; i < num_vreg; i++) {
@@ -145,7 +146,9 @@
curr_vreg->vreg = NULL;
goto vreg_get_fail;
}
- if (curr_vreg->type == DSS_REG_LDO) {
+ type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ if (type == DSS_REG_LDO) {
rc = regulator_set_voltage(
curr_vreg->vreg,
curr_vreg->min_voltage,
@@ -157,10 +160,10 @@
curr_vreg->vreg_name);
goto vreg_set_voltage_fail;
}
- if (curr_vreg->optimum_voltage >= 0) {
+ if (curr_vreg->peak_current >= 0) {
rc = regulator_set_optimum_mode(
curr_vreg->vreg,
- curr_vreg->optimum_voltage);
+ curr_vreg->peak_current);
if (rc < 0) {
DEV_ERR(
"%pS->%s: %s set opt m fail\n",
@@ -176,8 +179,11 @@
for (i = num_vreg-1; i >= 0; i--) {
curr_vreg = &in_vreg[i];
if (curr_vreg->vreg) {
- if (curr_vreg->type == DSS_REG_LDO) {
- if (curr_vreg->optimum_voltage >= 0) {
+ type = (regulator_count_voltages(
+ curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ if (type == DSS_REG_LDO) {
+ if (curr_vreg->peak_current >= 0) {
regulator_set_optimum_mode(
curr_vreg->vreg, 0);
}
@@ -192,11 +198,11 @@
return 0;
vreg_unconfig:
-if (curr_vreg->type == DSS_REG_LDO)
+if (type == DSS_REG_LDO)
regulator_set_optimum_mode(curr_vreg->vreg, 0);
vreg_set_opt_mode_fail:
-if (curr_vreg->type == DSS_REG_LDO)
+if (type == DSS_REG_LDO)
regulator_set_voltage(curr_vreg->vreg, 0, curr_vreg->max_voltage);
vreg_set_voltage_fail:
@@ -206,6 +212,8 @@
vreg_get_fail:
for (i--; i >= 0; i--) {
curr_vreg = &in_vreg[i];
+ type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
goto vreg_unconfig;
}
return rc;
diff --git a/drivers/video/msm/mdss/mdss_io_util.h b/drivers/video/msm/mdss/mdss_io_util.h
index 0ae62a3..23341d6 100644
--- a/drivers/video/msm/mdss/mdss_io_util.h
+++ b/drivers/video/msm/mdss/mdss_io_util.h
@@ -50,10 +50,9 @@
struct dss_vreg {
struct regulator *vreg; /* vreg handle */
char vreg_name[32];
- enum dss_vreg_type type;
int min_voltage;
int max_voltage;
- int optimum_voltage;
+ int peak_current;
};
struct dss_gpio {
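The mdss_io_util rework above drops the explicit dss_vreg_type field and classifies each supply at runtime: a regulator that reports a voltage range via regulator_count_voltages() is treated as an LDO (voltage and load programmed), anything else as a plain switch. A condensed sketch of that classification, assuming only the standard regulator consumer API; the helper name and flat parameters are hypothetical.

/* Sketch of the runtime LDO-vs-switch classification used above.
 * Mirrors the reworked struct dss_vreg fields (min/max voltage,
 * peak_current); the helper itself is not part of the patch. */
#include <linux/regulator/consumer.h>

static int dss_vreg_classify_and_config(struct regulator *vreg,
					int min_uV, int max_uV, int peak_uA)
{
	int rc = 0;

	if (regulator_count_voltages(vreg) > 0) {
		/* behaves like DSS_REG_LDO: program voltage and load */
		rc = regulator_set_voltage(vreg, min_uV, max_uV);
		if (rc)
			return rc;
		if (peak_uA >= 0)
			rc = regulator_set_optimum_mode(vreg, peak_uA);
	}
	/* otherwise DSS_REG_VS: nothing beyond enable/disable */
	return rc < 0 ? rc : 0;
}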
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 51776db..a3be773 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -330,7 +330,6 @@
hw->hw_ndx, mdss_res->mdp_irq_mask,
mdss_res->mdp_hist_irq_mask);
} else {
- mdss_irq_handlers[hw->hw_ndx] = NULL;
mdss_res->irq_mask &= ~ndx_bit;
if (mdss_res->irq_mask == 0) {
mdss_res->irq_ena = false;
@@ -878,6 +877,7 @@
{
int i, j;
char *offset;
+ struct mdss_mdp_pipe *vig;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
mdata->mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
@@ -899,7 +899,16 @@
writel_relaxed(j, offset);
/* swap */
- writel_relaxed(i, offset + 4);
+ writel_relaxed(1, offset + 4);
+ }
+ vig = mdata->vig_pipes;
+ for (i = 0; i < mdata->nvig_pipes; i++) {
+ offset = vig[i].base +
+ MDSS_MDP_REG_VIG_HIST_LUT_BASE;
+ for (j = 0; j < ENHIST_LUT_ENTRIES; j++)
+ writel_relaxed(j, offset);
+ /* swap */
+ writel_relaxed(1, offset + 16);
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
pr_debug("MDP hw init done\n");
@@ -1498,6 +1507,10 @@
&data);
mdata->rot_block_size = (!rc ? data : 128);
+ mdata->has_bwc = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mdss-has-bwc");
+ mdata->has_decimation = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mdss-has-decimation");
return 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 07b083a..daa7f1a 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -38,6 +38,7 @@
#define MAX_PLANES 4
#define MAX_DOWNSCALE_RATIO 4
#define MAX_UPSCALE_RATIO 20
+#define MAX_DECIMATION 4
#define C3_ALPHA 3 /* alpha */
#define C2_R_Cr 2 /* R/Cr */
@@ -122,6 +123,7 @@
u32 flush_bits;
u32 play_cnt;
+ u32 vsync_cnt;
u32 underrun_cnt;
u16 width;
@@ -217,6 +219,21 @@
struct mdss_mdp_img_data p[MAX_PLANES];
};
+struct pp_hist_col_info {
+ u32 col_state;
+ u32 col_en;
+ u32 read_request;
+ u32 hist_cnt_read;
+ u32 hist_cnt_sent;
+ u32 hist_cnt_time;
+ u32 frame_cnt;
+ u32 is_kick_ready;
+ struct completion comp;
+ u32 data[HIST_V_SIZE];
+ struct mutex hist_mutex;
+ spinlock_t hist_lock;
+};
+
struct pp_sts_type {
u32 pa_sts;
u32 pcc_sts;
@@ -233,6 +250,8 @@
struct mdss_pipe_pp_res {
u32 igc_c0_c1[IGC_LUT_ENTRIES];
u32 igc_c2[IGC_LUT_ENTRIES];
+ u32 hist_lut[ENHIST_LUT_ENTRIES];
+ struct pp_hist_col_info hist;
struct pp_sts_type pp_sts;
};
@@ -250,6 +269,8 @@
u16 img_width;
u16 img_height;
+ u8 horz_deci;
+ u8 vert_deci;
struct mdss_mdp_img_rect src;
struct mdss_mdp_img_rect dst;
@@ -421,13 +442,14 @@
struct mdp_histogram_start_req *req);
int mdss_mdp_histogram_stop(struct mdss_mdp_ctl *ctl, u32 block);
int mdss_mdp_hist_collect(struct mdss_mdp_ctl *ctl,
- struct mdp_histogram_data *hist,
- u32 *hist_data_addr);
+ struct mdp_histogram_data *hist);
void mdss_mdp_hist_intr_done(u32 isr);
struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
u32 type);
struct mdss_mdp_pipe *mdss_mdp_pipe_get(struct mdss_data_type *mdata, u32 ndx);
+struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
+ u32 ndx);
int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe);
void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe);
struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_dma(struct mdss_mdp_mixer *mixer);
@@ -449,6 +471,8 @@
struct mdss_mdp_plane_sizes *ps, u32 bwc_mode);
int mdss_mdp_get_rau_strides(u32 w, u32 h, struct mdss_mdp_format_params *fmt,
struct mdss_mdp_plane_sizes *ps);
+void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
+ struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt);
struct mdss_mdp_format_params *mdss_mdp_get_format_params(u32 format);
int mdss_mdp_put_img(struct mdss_mdp_img_data *data);
int mdss_mdp_get_img(struct msmfb_data *img, struct mdss_mdp_img_data *data);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 4e51100..5bc077c 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -279,8 +279,6 @@
return -EINVAL;
}
- mutex_lock(&mdss_mdp_ctl_lock);
- ctl->ref_cnt--;
if (ctl->mixer_left) {
mdss_mdp_mixer_free(ctl->mixer_left);
ctl->mixer_left = NULL;
@@ -289,6 +287,8 @@
mdss_mdp_mixer_free(ctl->mixer_right);
ctl->mixer_right = NULL;
}
+ mutex_lock(&mdss_mdp_ctl_lock);
+ ctl->ref_cnt--;
ctl->power_on = false;
ctl->start_fnc = NULL;
ctl->stop_fnc = NULL;
@@ -437,7 +437,6 @@
if (ctl->stop_fnc)
ctl->stop_fnc(ctl);
- mdss_mdp_mixer_free(mixer);
mdss_mdp_ctl_free(ctl);
mdss_mdp_ctl_perf_commit(ctl->mdata, MDSS_MDP_PERF_UPDATE_ALL);
@@ -1236,6 +1235,7 @@
{
struct mdss_mdp_ctl *ctl;
struct mdss_mdp_mixer *mixer;
+ int i;
if (!pipe)
return -EINVAL;
@@ -1259,7 +1259,12 @@
if (params_changed) {
mixer->params_changed++;
- mixer->stage_pipe[pipe->mixer_stage] = pipe;
+ for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
+ if (i == pipe->mixer_stage)
+ mixer->stage_pipe[i] = pipe;
+ else if (mixer->stage_pipe[i] == pipe)
+ mixer->stage_pipe[i] = NULL;
+ }
}
if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA)
diff --git a/drivers/video/msm/mdss/mdss_mdp_formats.h b/drivers/video/msm/mdss/mdss_mdp_formats.h
index c6d5fb9..acb8dc2 100644
--- a/drivers/video/msm/mdss/mdss_mdp_formats.h
+++ b/drivers/video/msm/mdss/mdss_mdp_formats.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -108,6 +108,8 @@
FMT_YUV_COMMON(fmt), \
.fetch_planes = MDSS_MDP_PLANE_PLANAR, \
.chroma_sample = samp, \
+ .bpp = 1, \
+ .unpack_count = 1, \
.element = { (e0), (e1) } \
}
@@ -134,9 +136,9 @@
FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V2_VENUS, MDSS_MDP_CHROMA_420,
C1_B_Cb, C2_R_Cr),
- FMT_YUV_PLANR(MDP_Y_CR_CB_H2V2, MDSS_MDP_CHROMA_420, C2_R_Cr, C1_B_Cb),
- FMT_YUV_PLANR(MDP_Y_CB_CR_H2V2, MDSS_MDP_CHROMA_420, C1_B_Cb, C2_R_Cr),
- FMT_YUV_PLANR(MDP_Y_CR_CB_GH2V2, MDSS_MDP_CHROMA_420, C2_R_Cr, C1_B_Cb),
+ FMT_YUV_PLANR(MDP_Y_CB_CR_H2V2, MDSS_MDP_CHROMA_420, C2_R_Cr, C1_B_Cb),
+ FMT_YUV_PLANR(MDP_Y_CR_CB_H2V2, MDSS_MDP_CHROMA_420, C1_B_Cb, C2_R_Cr),
+ FMT_YUV_PLANR(MDP_Y_CR_CB_GH2V2, MDSS_MDP_CHROMA_420, C1_B_Cb, C2_R_Cr),
{
FMT_YUV_COMMON(MDP_YCBCR_H1V1),
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index d50f47e..ecbdaf0 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -190,8 +190,7 @@
#define MDSS_MDP_REG_SSPP_CURRENT_SRC1_ADDR 0x0A8
#define MDSS_MDP_REG_SSPP_CURRENT_SRC2_ADDR 0x0AC
#define MDSS_MDP_REG_SSPP_CURRENT_SRC3_ADDR 0x0B0
-#define MDSS_MDP_REG_SSPP_LINE_SKIP_STEP_C03 0x0B4
-#define MDSS_MDP_REG_SSPP_LINE_SKIP_STEP_C12 0x0B8
+#define MDSS_MDP_REG_SSPP_DECIMATION_CONFIG 0x0B4
#define MDSS_MDP_REG_VIG_OP_MODE 0x200
#define MDSS_MDP_REG_VIG_QSEED2_CONFIG 0x204
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index d6b0fb2..006a8dd 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -180,6 +180,7 @@
pr_debug("%s: ctl=%d intf_num=%d\n", __func__, ctl->num, ctl->intf_num);
vsync_time = ktime_get();
+ ctl->vsync_cnt++;
spin_lock(&ctx->vsync_lock);
if (ctx->vsync_handler)
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 0426784..6e631e9 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -282,8 +282,9 @@
}
vsync_time = ktime_get();
+ ctl->vsync_cnt++;
- pr_debug("intr ctl=%d\n", ctl->num);
+ pr_debug("intr ctl=%d vsync cnt=%u\n", ctl->num, ctl->vsync_cnt);
complete_all(&ctx->vsync_comp);
spin_lock(&ctx->vsync_lock);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index 7fbb031..d86527b 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -33,9 +33,11 @@
u32 intf_num;
u32 opmode;
- u32 format;
+ struct mdss_mdp_format_params *dst_fmt;
u16 width;
u16 height;
+ struct mdss_mdp_img_rect dst_rect;
+
u8 rot90;
u32 bwc_mode;
int initialized;
@@ -81,48 +83,55 @@
}
static int mdss_mdp_writeback_addr_setup(struct mdss_mdp_writeback_ctx *ctx,
- struct mdss_mdp_data *data)
+ const struct mdss_mdp_data *in_data)
{
int ret;
+ struct mdss_mdp_data data;
- if (!data)
+ if (!in_data)
return -EINVAL;
+ data = *in_data;
- pr_debug("wb_num=%d addr=0x%x\n", ctx->wb_num, data->p[0].addr);
+ pr_debug("wb_num=%d addr=0x%x\n", ctx->wb_num, data.p[0].addr);
if (ctx->bwc_mode)
- data->bwc_enabled = 1;
+ data.bwc_enabled = 1;
- ret = mdss_mdp_data_check(data, &ctx->dst_planes);
+ ret = mdss_mdp_data_check(&data, &ctx->dst_planes);
if (ret)
return ret;
- mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST0_ADDR, data->p[0].addr);
- mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST1_ADDR, data->p[1].addr);
- mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST2_ADDR, data->p[2].addr);
- mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST3_ADDR, data->p[3].addr);
+ mdss_mdp_data_calc_offset(&data, ctx->dst_rect.x, ctx->dst_rect.y,
+ &ctx->dst_planes, ctx->dst_fmt);
+
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST0_ADDR, data.p[0].addr);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST1_ADDR, data.p[1].addr);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST2_ADDR, data.p[2].addr);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST3_ADDR, data.p[3].addr);
return 0;
}
-static int mdss_mdp_writeback_format_setup(struct mdss_mdp_writeback_ctx *ctx)
+static int mdss_mdp_writeback_format_setup(struct mdss_mdp_writeback_ctx *ctx,
+ u32 format)
{
struct mdss_mdp_format_params *fmt;
u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
u32 opmode = ctx->opmode;
struct mdss_data_type *mdata;
- pr_debug("wb_num=%d format=%d\n", ctx->wb_num, ctx->format);
+ pr_debug("wb_num=%d format=%d\n", ctx->wb_num, format);
- mdss_mdp_get_plane_sizes(ctx->format, ctx->width, ctx->height,
+ mdss_mdp_get_plane_sizes(format, ctx->width, ctx->height,
&ctx->dst_planes,
ctx->opmode & MDSS_MDP_OP_BWC_EN);
- fmt = mdss_mdp_get_format_params(ctx->format);
+ fmt = mdss_mdp_get_format_params(format);
if (!fmt) {
- pr_err("wb format=%d not supported\n", ctx->format);
+ pr_err("wb format=%d not supported\n", format);
return -EINVAL;
}
+ ctx->dst_fmt = fmt;
chroma_samp = fmt->chroma_sample;
@@ -164,33 +173,29 @@
dst_format |= BIT(14); /* DST_ALPHA_X */
}
- if (fmt->fetch_planes != MDSS_MDP_PLANE_PLANAR) {
- mdata = mdss_mdp_get_mdata();
- if (mdata && mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
- pattern = (fmt->element[3] << 24) |
- (fmt->element[2] << 16) |
- (fmt->element[1] << 8) |
- (fmt->element[0] << 0);
- } else {
- pattern = (fmt->element[3] << 24) |
- (fmt->element[2] << 15) |
- (fmt->element[1] << 8) |
- (fmt->element[0] << 0);
- }
-
- dst_format |= (fmt->unpack_align_msb << 18) |
- (fmt->unpack_tight << 17) |
- ((fmt->unpack_count - 1) << 12) |
- ((fmt->bpp - 1) << 9);
+ mdata = mdss_mdp_get_mdata();
+ if (mdata && mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
+ pattern = (fmt->element[3] << 24) |
+ (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) |
+ (fmt->element[0] << 0);
} else {
- pattern = 0;
+ pattern = (fmt->element[3] << 24) |
+ (fmt->element[2] << 15) |
+ (fmt->element[1] << 8) |
+ (fmt->element[0] << 0);
}
+ dst_format |= (fmt->unpack_align_msb << 18) |
+ (fmt->unpack_tight << 17) |
+ ((fmt->unpack_count - 1) << 12) |
+ ((fmt->bpp - 1) << 9);
+
ystride0 = (ctx->dst_planes.ystride[0]) |
(ctx->dst_planes.ystride[1] << 16);
ystride1 = (ctx->dst_planes.ystride[2]) |
(ctx->dst_planes.ystride[3] << 16);
- outsize = (ctx->height << 16) | ctx->width;
+ outsize = (ctx->dst_rect.h << 16) | ctx->dst_rect.w;
mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_FORMAT, dst_format);
mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_OP_MODE, opmode);
@@ -217,11 +222,14 @@
pr_debug("wfd setup ctl=%d\n", ctl->num);
ctx->opmode = 0;
- ctx->format = ctl->dst_format;
ctx->width = ctl->width;
ctx->height = ctl->height;
+ ctx->dst_rect.x = 0;
+ ctx->dst_rect.y = 0;
+ ctx->dst_rect.w = ctx->width;
+ ctx->dst_rect.h = ctx->height;
- ret = mdss_mdp_writeback_format_setup(ctx);
+ ret = mdss_mdp_writeback_format_setup(ctx, ctl->dst_format);
if (ret) {
pr_err("format setup failed\n");
return ret;
@@ -237,6 +245,7 @@
struct mdss_mdp_writeback_ctx *ctx;
struct mdss_mdp_writeback_arg *wb_args;
struct mdss_mdp_rotator_session *rot;
+ u32 format;
ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
if (!ctx)
@@ -259,24 +268,26 @@
ctx->bwc_mode = rot->bwc_mode;
ctx->opmode |= ctx->bwc_mode;
- ctx->width = rot->src_rect.w;
- ctx->height = rot->src_rect.h;
-
- ctx->format = rot->format;
+ ctx->width = rot->dst.w;
+ ctx->height = rot->dst.h;
+ ctx->dst_rect.x = rot->dst.x;
+ ctx->dst_rect.y = rot->dst.y;
+ ctx->dst_rect.w = rot->src_rect.w;
+ ctx->dst_rect.h = rot->src_rect.h;
ctx->rot90 = !!(rot->flags & MDP_ROT_90);
if (ctx->bwc_mode || ctx->rot90)
- ctx->format = mdss_mdp_get_rotator_dst_format(rot->format);
+ format = mdss_mdp_get_rotator_dst_format(rot->format);
else
- ctx->format = rot->format;
+ format = rot->format;
if (ctx->rot90) {
ctx->opmode |= BIT(5); /* ROT 90 */
- swap(ctx->width, ctx->height);
+ swap(ctx->dst_rect.w, ctx->dst_rect.h);
}
- return mdss_mdp_writeback_format_setup(ctx);
+ return mdss_mdp_writeback_format_setup(ctx, format);
}
static int mdss_mdp_writeback_stop(struct mdss_mdp_ctl *ctl)
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index dae3e05..5fc4cc4 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -92,15 +92,26 @@
return -EOVERFLOW;
}
- if (req->dst_rect.w < min_dst_size || req->dst_rect.h < min_dst_size ||
- req->dst_rect.w > MAX_DST_W || req->dst_rect.h > MAX_DST_H) {
+ if (req->dst_rect.w < min_dst_size || req->dst_rect.h < min_dst_size) {
pr_err("invalid destination resolution (%dx%d)",
req->dst_rect.w, req->dst_rect.h);
return -EOVERFLOW;
}
+ if (req->horz_deci || req->vert_deci) {
+ if (!mdata->has_decimation) {
+ pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
+ return -EINVAL;
+ } else if ((req->horz_deci > MAX_DECIMATION) ||
+ (req->vert_deci > MAX_DECIMATION)) {
+ pr_err("Invalid decimation factors horz=%d vert=%d\n",
+ req->horz_deci, req->vert_deci);
+ return -EINVAL;
+ }
+ }
+
if (!(req->flags & MDSS_MDP_ROT_ONLY)) {
- u32 dst_w, dst_h;
+ u32 src_w, src_h, dst_w, dst_h;
if ((CHECK_BOUNDS(req->dst_rect.x, req->dst_rect.w, xres) ||
CHECK_BOUNDS(req->dst_rect.y, req->dst_rect.h, yres))) {
@@ -118,27 +129,36 @@
dst_h = req->dst_rect.h;
}
- if ((req->src_rect.w * MAX_UPSCALE_RATIO) < dst_w) {
+ src_w = req->src_rect.w >> req->horz_deci;
+ src_h = req->src_rect.h >> req->vert_deci;
+
+ if (src_w > MAX_MIXER_WIDTH) {
+ pr_err("invalid source width=%d HDec=%d\n",
+ req->src_rect.w, req->horz_deci);
+ return -EINVAL;
+ }
+
+ if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
pr_err("too much upscaling Width %d->%d\n",
req->src_rect.w, req->dst_rect.w);
return -EINVAL;
}
- if ((req->src_rect.h * MAX_UPSCALE_RATIO) < dst_h) {
+ if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
pr_err("too much upscaling. Height %d->%d\n",
req->src_rect.h, req->dst_rect.h);
return -EINVAL;
}
- if (req->src_rect.w > (dst_w * MAX_DOWNSCALE_RATIO)) {
- pr_err("too much downscaling. Width %d->%d\n",
- req->src_rect.w, req->dst_rect.w);
+ if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
+ pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
+ src_w, req->dst_rect.w, req->horz_deci);
return -EINVAL;
}
- if (req->src_rect.h > (dst_h * MAX_DOWNSCALE_RATIO)) {
- pr_err("too much downscaling. Height %d->%d\n",
- req->src_rect.h, req->dst_rect.h);
+ if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
+ pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
+ src_h, req->dst_rect.h, req->vert_deci);
return -EINVAL;
}
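Each decimation step halves the fetched dimension before the scale-ratio checks run, so the total reduction is (1 << deci) from the decimator multiplied by up to MAX_DOWNSCALE_RATIO from the scaler. A small standalone example of the width check above, using hypothetical numbers.

/* Worked example of the decimated downscale check above; the values
 * are hypothetical and only meant to show the arithmetic. */
#include <stdio.h>

#define MAX_DOWNSCALE_RATIO	4

int main(void)
{
	unsigned int src_w = 4096, dst_w = 480, horz_deci = 2;
	unsigned int deci_w = src_w >> horz_deci;	/* 4096 -> 1024 */

	/* decimation gives 1 << horz_deci, the scaler up to 4x more */
	if (deci_w > dst_w * MAX_DOWNSCALE_RATIO)
		printf("rejected: %u -> %u needs more than %ux scaling\n",
		       deci_w, dst_w, MAX_DOWNSCALE_RATIO);
	else
		printf("ok: decimate %ux then scale %u -> %u\n",
		       1u << horz_deci, deci_w, dst_w);
	return 0;
}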
@@ -177,6 +197,7 @@
struct mdss_mdp_rotator_session *rot;
struct mdss_mdp_format_params *fmt;
int ret = 0;
+ u32 bwc_enabled;
pr_debug("rot ctl=%u req id=%x\n", mdp5_data->ctl->num, req->id);
@@ -213,7 +234,14 @@
rot->flags = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD |
MDP_SECURE_OVERLAY_SESSION);
- rot->bwc_mode = (req->flags & MDP_BWC_EN) ? 1 : 0;
+ bwc_enabled = req->flags & MDP_BWC_EN;
+ if (bwc_enabled && !mdp5_data->mdata->has_bwc) {
+ pr_err("BWC is not supported in MDP version %x\n",
+ mdp5_data->mdata->mdp_rev);
+ rot->bwc_mode = 0;
+ } else {
+ rot->bwc_mode = bwc_enabled ? 1 : 0;
+ }
rot->format = fmt->format;
rot->img_width = req->src.width;
rot->img_height = req->src.height;
@@ -227,9 +255,13 @@
rot->src_rect.h /= 2;
}
- rot->params_changed++;
-
- req->id = rot->session_id;
+ ret = mdss_mdp_rotator_setup(rot);
+ if (ret == 0) {
+ req->id = rot->session_id;
+ } else {
+ pr_err("Unable to setup rotator session\n");
+ mdss_mdp_rotator_release(rot->session_id);
+ }
return ret;
}
@@ -243,11 +275,24 @@
struct mdss_mdp_mixer *mixer = NULL;
u32 pipe_type, mixer_mux, len, src_format;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ struct mdp_histogram_start_req hist;
int ret;
+ u32 bwc_enabled;
if (mdp5_data->ctl == NULL)
return -ENODEV;
+ if (req->flags & MDP_ROT_90) {
+ pr_err("unsupported inline rotation\n");
+ return -ENOTSUPP;
+ }
+
+ if ((req->dst_rect.w > MAX_DST_W) || (req->dst_rect.h > MAX_DST_H)) {
+ pr_err("exceeded max mixer supported resolution %dx%d\n",
+ req->dst_rect.w, req->dst_rect.h);
+ return -EOVERFLOW;
+ }
+
if (req->flags & MDSS_MDP_RIGHT_MIXER)
mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
else
@@ -256,11 +301,6 @@
pr_debug("pipe ctl=%u req id=%x mux=%d\n", mdp5_data->ctl->num, req->id,
mixer_mux);
- if (req->flags & MDP_ROT_90) {
- pr_err("unsupported inline rotation\n");
- return -ENOTSUPP;
- }
-
src_format = req->src.format;
if (req->flags & (MDP_SOURCE_ROTATED_90 | MDP_BWC_EN))
src_format = mdss_mdp_get_rotator_dst_format(src_format);
@@ -346,8 +386,15 @@
}
pipe->flags = req->flags;
- pipe->bwc_mode = pipe->mixer->rotator_mode ?
- 0 : (req->flags & MDP_BWC_EN ? 1 : 0) ;
+ bwc_enabled = req->flags & MDP_BWC_EN;
+ if (bwc_enabled && !mdp5_data->mdata->has_bwc) {
+ pr_err("BWC is not supported in MDP version %x\n",
+ mdp5_data->mdata->mdp_rev);
+ pipe->bwc_mode = 0;
+ } else {
+ pipe->bwc_mode = pipe->mixer->rotator_mode ?
+ 0 : (bwc_enabled ? 1 : 0) ;
+ }
pipe->img_width = req->src.width & 0x3fff;
pipe->img_height = req->src.height & 0x3fff;
pipe->src.x = req->src_rect.x;
@@ -358,14 +405,16 @@
pipe->dst.y = req->dst_rect.y;
pipe->dst.w = req->dst_rect.w;
pipe->dst.h = req->dst_rect.h;
-
+ pipe->horz_deci = req->horz_deci;
+ pipe->vert_deci = req->vert_deci;
pipe->src_fmt = fmt;
pipe->mixer_stage = req->z_order;
pipe->is_fg = req->is_fg;
pipe->alpha = req->alpha;
pipe->transp = req->transp_mask;
- pipe->overfetch_disable = fmt->is_yuv;
+ pipe->overfetch_disable = fmt->is_yuv &&
+ !(pipe->flags & MDP_SOURCE_ROTATED_90);
pipe->req_data = *req;
@@ -389,6 +438,31 @@
pipe->pp_res.igc_c0_c1;
pipe->pp_cfg.igc_cfg.c2_data = pipe->pp_res.igc_c2;
}
+ if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_CFG) {
+ if (pipe->pp_cfg.hist_cfg.ops & MDP_PP_OPS_ENABLE) {
+ hist.block = pipe->pp_cfg.hist_cfg.block;
+ hist.frame_cnt =
+ pipe->pp_cfg.hist_cfg.frame_cnt;
+ hist.bit_mask = pipe->pp_cfg.hist_cfg.bit_mask;
+ hist.num_bins = pipe->pp_cfg.hist_cfg.num_bins;
+ mdss_mdp_histogram_start(pipe->mixer->ctl,
+ &hist);
+ } else if (pipe->pp_cfg.hist_cfg.ops &
+ MDP_PP_OPS_DISABLE) {
+ mdss_mdp_histogram_stop(pipe->mixer->ctl,
+ pipe->pp_cfg.hist_cfg.block);
+ }
+ }
+ len = pipe->pp_cfg.hist_lut_cfg.len;
+ if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) &&
+ (len == ENHIST_LUT_ENTRIES)) {
+ ret = copy_from_user(pipe->pp_res.hist_lut,
+ pipe->pp_cfg.hist_lut_cfg.data,
+ sizeof(uint32_t) * len);
+ if (ret)
+ return -ENOMEM;
+ pipe->pp_cfg.hist_lut_cfg.data = pipe->pp_res.hist_lut;
+ }
}
if (pipe->flags & MDP_DEINTERLACE) {
@@ -1457,7 +1531,7 @@
int ret = -ENOSYS;
struct mdp_histogram_data hist;
struct mdp_histogram_start_req hist_req;
- u32 block, hist_data_addr = 0;
+ u32 block;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
switch (cmd) {
@@ -1488,15 +1562,9 @@
if (ret)
return ret;
- ret = mdss_mdp_hist_collect(mdp5_data->ctl, &hist,
- &hist_data_addr);
- if ((ret == 0) && hist_data_addr) {
- ret = copy_to_user(hist.c0, (u32 *)hist_data_addr,
- sizeof(u32) * hist.bin_cnt);
- if (ret == 0)
- ret = copy_to_user(argp, &hist,
- sizeof(hist));
- }
+ ret = mdss_mdp_hist_collect(mdp5_data->ctl, &hist);
+ if (!ret)
+ ret = copy_to_user(argp, &hist, sizeof(hist));
break;
default:
break;
@@ -1532,6 +1600,10 @@
caps->vig_pipes = mdata->nvig_pipes;
caps->rgb_pipes = mdata->nrgb_pipes;
caps->dma_pipes = mdata->ndma_pipes;
+ if (mdata->has_bwc)
+ caps->features |= MDP_BWC_EN;
+ if (mdata->has_decimation)
+ caps->features |= MDP_DECIMATION_EN;
return 0;
}
@@ -1583,10 +1655,8 @@
ret = copy_to_user(argp, &req, sizeof(req));
}
- if (ret) {
+ if (ret)
pr_debug("OVERLAY_GET failed (%d)\n", ret);
- ret = -EFAULT;
- }
break;
case MSMFB_OVERLAY_SET:
@@ -1597,10 +1667,8 @@
if (!IS_ERR_VALUE(ret))
ret = copy_to_user(argp, &req, sizeof(req));
}
- if (ret) {
+ if (ret)
pr_debug("OVERLAY_SET failed (%d)\n", ret);
- ret = -EFAULT;
- }
break;
@@ -1629,10 +1697,8 @@
mdss_fb_update_backlight(mfd);
}
- if (ret) {
+ if (ret)
pr_debug("OVERLAY_PLAY failed (%d)\n", ret);
- ret = -EFAULT;
- }
} else {
ret = 0;
}
@@ -1646,10 +1712,8 @@
if (!ret)
ret = mdss_mdp_overlay_play_wait(mfd, &data);
- if (ret) {
+ if (ret)
pr_err("OVERLAY_PLAY_WAIT failed (%d)\n", ret);
- ret = -EFAULT;
- }
} else {
ret = 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index b169c43..746705d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -27,8 +27,6 @@
static DEFINE_MUTEX(mdss_mdp_smp_lock);
static DECLARE_BITMAP(mdss_mdp_smp_mmb_pool, MDSS_MDP_SMP_MMB_BLOCKS);
-static struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
- u32 ndx);
static int mdss_mdp_pipe_free(struct mdss_mdp_pipe *pipe);
static inline void mdss_mdp_pipe_write(struct mdss_mdp_pipe *pipe,
@@ -128,11 +126,10 @@
return rc;
pr_debug("BWC SMP strides ystride0=%x ystride1=%x\n",
ps.ystride[0], ps.ystride[1]);
- } else if ((mdata->mdp_rev >= MDSS_MDP_HW_REV_102) &&
- pipe->src_fmt->is_yuv) {
+ } else if (mdata->has_decimation && pipe->src_fmt->is_yuv) {
ps.num_planes = 2;
- ps.ystride[0] = pipe->src.w;
- ps.ystride[1] = pipe->src.w;
+ ps.ystride[0] = pipe->src.w >> pipe->horz_deci;
+ ps.ystride[1] = pipe->src.w >> pipe->horz_deci;
} else {
rc = mdss_mdp_get_plane_sizes(pipe->src_fmt->format,
pipe->src.w, pipe->src.h, &ps, 0);
@@ -257,10 +254,13 @@
pipe = NULL;
}
- if (pipe)
+ if (pipe) {
pr_debug("type=%x pnum=%d\n", pipe->type, pipe->num);
- else
+ mutex_init(&pipe->pp_res.hist.hist_mutex);
+ spin_lock_init(&pipe->pp_res.hist.hist_lock);
+ } else {
pr_err("no %d type pipes available\n", type);
+ }
return pipe;
}
@@ -318,7 +318,7 @@
return pipe;
}
-static struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
+struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
u32 ndx)
{
u32 i;
@@ -376,6 +376,7 @@
{
u32 img_size, src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
u32 width, height;
+ u32 decimation;
pr_debug("pnum=%d wh=%dx%d src={%d,%d,%d,%d} dst={%d,%d,%d,%d}\n",
pipe->num, pipe->img_width, pipe->img_height,
@@ -396,6 +397,12 @@
height /= 2;
}
+ decimation = ((1 << pipe->horz_deci) - 1) << 8;
+ decimation |= ((1 << pipe->vert_deci) - 1);
+ if (decimation)
+ pr_debug("Image decimation h=%d v=%d\n",
+ pipe->horz_deci, pipe->vert_deci);
+
img_size = (height << 16) | width;
src_size = (pipe->src.h << 16) | pipe->src.w;
src_xy = (pipe->src.y << 16) | pipe->src.x;
@@ -418,6 +425,8 @@
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_XY, dst_xy);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_YSTRIDE0, ystride0);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_YSTRIDE1, ystride1);
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_DECIMATION_CONFIG,
+ decimation);
return 0;
}
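The decimation register value packs the horizontal factor at bit 8 and the vertical factor in the low byte, each encoded as (1 << factor) - 1. A helper expressing that encoding; the layout is inferred from the hunk above, not from a register manual.

/* Encoding of MDSS_MDP_REG_SSPP_DECIMATION_CONFIG as written above:
 * horizontal factor in bits [15:8], vertical in bits [7:0], each as
 * (1 << deci) - 1.  Helper name is hypothetical. */
#include <linux/types.h>

static inline u32 mdp_decimation_config(u8 horz_deci, u8 vert_deci)
{
	u32 val = ((1 << horz_deci) - 1) << 8;

	val |= (1 << vert_deci) - 1;
	return val;	/* e.g. horz=1, vert=2 -> 0x0103 */
}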
@@ -465,17 +474,12 @@
fmt->fetch_planes != MDSS_MDP_PLANE_INTERLEAVED)
src_format |= BIT(8); /* SRCC3_EN */
- if (fmt->fetch_planes != MDSS_MDP_PLANE_PLANAR) {
- unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
(fmt->element[1] << 8) | (fmt->element[0] << 0);
-
- src_format |= ((fmt->unpack_count - 1) << 12) |
- (fmt->unpack_tight << 17) |
- (fmt->unpack_align_msb << 18) |
- ((fmt->bpp - 1) << 9);
- } else {
- unpack = 0;
- }
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
mdss_mdp_pipe_sspp_setup(pipe, &opmode);
@@ -487,28 +491,6 @@
return 0;
}
-static void mdss_mdp_addr_add_offset(struct mdss_mdp_pipe *pipe,
- struct mdss_mdp_data *data)
-{
- data->p[0].addr += pipe->src.x +
- (pipe->src.y * pipe->src_planes.ystride[0]);
- if (data->num_planes > 1) {
- u8 hmap[] = { 1, 2, 1, 2 };
- u8 vmap[] = { 1, 1, 2, 2 };
- u16 xoff = pipe->src.x / hmap[pipe->src_fmt->chroma_sample];
- u16 yoff = pipe->src.y / vmap[pipe->src_fmt->chroma_sample];
-
- if (data->num_planes == 2) /* pseudo planar */
- xoff *= 2;
- data->p[1].addr += xoff + (yoff * pipe->src_planes.ystride[1]);
-
- if (data->num_planes > 2) { /* planar */
- data->p[2].addr += xoff +
- (yoff * pipe->src_planes.ystride[2]);
- }
- }
-}
-
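The per-pipe offset helper deleted above is replaced by the shared mdss_mdp_data_calc_offset() declared in the mdss_mdp.h hunk earlier; its body is not part of this diff, but it presumably performs the same math for an arbitrary (x, y) origin. A sketch based on the deleted code, with the chroma sample passed explicitly; the helper name here is hypothetical.

/* Same offset math as the deleted helper, expressed for an arbitrary
 * (x, y) origin; chroma_sample indexes the h/v subsampling maps. */
static void calc_plane_offsets(struct mdss_mdp_data *data, u16 x, u16 y,
			       struct mdss_mdp_plane_sizes *ps,
			       u32 chroma_sample)
{
	data->p[0].addr += x + (y * ps->ystride[0]);
	if (data->num_planes > 1) {
		u8 hmap[] = { 1, 2, 1, 2 };
		u8 vmap[] = { 1, 1, 2, 2 };
		u16 xoff = x / hmap[chroma_sample];
		u16 yoff = y / vmap[chroma_sample];

		if (data->num_planes == 2)	/* pseudo planar */
			xoff *= 2;
		data->p[1].addr += xoff + (yoff * ps->ystride[1]);
		if (data->num_planes > 2)	/* planar */
			data->p[2].addr += xoff + (yoff * ps->ystride[2]);
	}
}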
int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata, u32 *offsets,
u32 *ftch_id, u32 type, u32 num_base, u32 len)
{
@@ -559,7 +541,7 @@
static int mdss_mdp_src_addr_setup(struct mdss_mdp_pipe *pipe,
struct mdss_mdp_data *data)
{
- int is_rot = pipe->mixer->rotator_mode;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int ret = 0;
pr_debug("pnum=%d\n", pipe->num);
@@ -571,11 +553,13 @@
return ret;
if (pipe->overfetch_disable)
- mdss_mdp_addr_add_offset(pipe, data);
+ mdss_mdp_data_calc_offset(data, pipe->src.x, pipe->src.y,
+ &pipe->src_planes, pipe->src_fmt);
/* planar format expects YCbCr, swap chroma planes if YCrCb */
- if (!is_rot && (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR) &&
- (pipe->src_fmt->element[0] == C2_R_Cr))
+ if (mdata->mdp_rev < MDSS_MDP_HW_REV_102 &&
+ (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR)
+ && (pipe->src_fmt->element[0] == C1_B_Cb))
swap(data->p[1].addr, data->p[2].addr);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC0_ADDR, data->p[0].addr);
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
index 2c0d5e0..5a0c27e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pp.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -78,7 +78,7 @@
#define MDSS_BLOCK_DISP_NUM (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0)
-#define HIST_WAIT_TIMEOUT(frame) ((60 * HZ * (frame)) / 1000)
+#define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000)
/* hist collect state */
enum {
HIST_UNKNOWN,
@@ -88,18 +88,6 @@
HIST_READY,
};
-struct pp_hist_col_info {
- u32 col_state;
- u32 col_en;
- u32 read_request;
- u32 hist_cnt_read;
- u32 hist_cnt_sent;
- u32 frame_cnt;
- u32 is_kick_ready;
- struct completion comp;
- u32 data[HIST_V_SIZE];
-};
-
static u32 dither_matrix[16] = {
15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
static u32 dither_depth_map[9] = {
@@ -166,11 +154,13 @@
};
static DEFINE_MUTEX(mdss_pp_mutex);
-static DEFINE_SPINLOCK(mdss_hist_lock);
-static DEFINE_MUTEX(mdss_mdp_hist_mutex);
static struct mdss_pp_res_type *mdss_pp_res;
-static void pp_hist_read(u32 v_base, struct pp_hist_col_info *hist_info);
+static void pp_hist_read(char __iomem *v_base,
+ struct pp_hist_col_info *hist_info);
+static int pp_histogram_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix);
+static int pp_histogram_disable(struct pp_hist_col_info *hist_info,
+ u32 done_bit, char __iomem *ctl_base);
static void pp_update_pcc_regs(u32 offset,
struct mdp_pcc_cfg_data *cfg_ptr);
static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
@@ -179,7 +169,8 @@
struct mdp_ar_gc_lut_data *lut_data);
static void pp_update_argc_lut(u32 offset,
struct mdp_pgc_lut_data *config);
-static void pp_update_hist_lut(u32 offset, struct mdp_hist_lut_data *cfg);
+static void pp_update_hist_lut(char __iomem *base,
+ struct mdp_hist_lut_data *cfg);
static void pp_pa_config(unsigned long flags, u32 base,
struct pp_sts_type *pp_sts,
struct mdp_pa_cfg *pa_config);
@@ -190,7 +181,7 @@
struct pp_sts_type *pp_sts,
struct mdp_igc_lut_data *igc_config,
u32 pipe_num);
-static void pp_enhist_config(unsigned long flags, u32 base,
+static void pp_enhist_config(unsigned long flags, char __iomem *base,
struct pp_sts_type *pp_sts,
struct mdp_hist_lut_data *enhist_cfg);
static void pp_sharp_config(char __iomem *offset,
@@ -391,7 +382,7 @@
}
}
-static void pp_enhist_config(unsigned long flags, u32 base,
+static void pp_enhist_config(unsigned long flags, char __iomem *base,
struct pp_sts_type *pp_sts,
struct mdp_hist_lut_data *enhist_cfg)
{
@@ -431,6 +422,7 @@
{
u32 opmode = 0, base = 0;
unsigned long flags = 0;
+ char __iomem *offset;
pr_debug("pnum=%x\n", pipe->num);
@@ -464,6 +456,8 @@
}
}
+ pp_histogram_setup(&opmode, MDSS_PP_SSPP_CFG | pipe->num, pipe->mixer);
+
if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_CFG) {
flags = PP_FLAGS_DIRTY_PA;
@@ -475,6 +469,26 @@
if (pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)
opmode |= (1 << 4); /* PA_EN */
}
+
+ if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) {
+ pp_enhist_config(PP_FLAGS_DIRTY_ENHIST,
+ pipe->base + MDSS_MDP_REG_VIG_HIST_LUT_BASE,
+ &pipe->pp_res.pp_sts,
+ &pipe->pp_cfg.hist_lut_cfg);
+ }
+ }
+
+ if (pipe->pp_res.pp_sts.enhist_sts & PP_STS_ENABLE) {
+ /* Enable HistLUT and PA */
+ opmode |= BIT(10) | BIT(4);
+ if (!(pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)) {
+ /* Program default value */
+ offset = pipe->base + MDSS_MDP_REG_VIG_PA_BASE;
+ writel_relaxed(0, offset);
+ writel_relaxed(0, offset + 4);
+ writel_relaxed(0, offset + 8);
+ writel_relaxed(0, offset + 12);
+ }
}
*op = opmode;
@@ -522,6 +536,7 @@
u32 chroma_sample;
u32 filter_mode;
struct mdss_data_type *mdata;
+ u32 src_w, src_h;
mdata = mdss_mdp_get_mdata();
if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102 && pipe->src_fmt->is_yuv)
@@ -538,6 +553,9 @@
}
}
+ src_w = pipe->src.w >> pipe->horz_deci;
+ src_h = pipe->src.h >> pipe->vert_deci;
+
chroma_sample = pipe->src_fmt->chroma_sample;
if (pipe->flags & MDP_SOURCE_ROTATED_90) {
if (chroma_sample == MDSS_MDP_CHROMA_H1V2)
@@ -556,23 +574,22 @@
}
if ((pipe->src_fmt->is_yuv) &&
- !((pipe->dst.w < pipe->src.w) || (pipe->dst.h < pipe->src.h))) {
+ !((pipe->dst.w < src_w) || (pipe->dst.h < src_h))) {
pp_sharp_config(pipe->base +
MDSS_MDP_REG_VIG_QSEED2_SHARP,
&pipe->pp_res.pp_sts,
&pipe->pp_cfg.sharp_cfg);
}
- if ((pipe->src.h != pipe->dst.h) ||
+ if ((src_h != pipe->dst.h) ||
(pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE) ||
(chroma_sample == MDSS_MDP_CHROMA_420) ||
(chroma_sample == MDSS_MDP_CHROMA_H1V2)) {
- pr_debug("scale y - src_h=%d dst_h=%d\n",
- pipe->src.h, pipe->dst.h);
+ pr_debug("scale y - src_h=%d dst_h=%d\n", src_h, pipe->dst.h);
- if ((pipe->src.h / MAX_DOWNSCALE_RATIO) > pipe->dst.h) {
+ if ((src_h / MAX_DOWNSCALE_RATIO) > pipe->dst.h) {
pr_err("too much downscaling height=%d->%d",
- pipe->src.h, pipe->dst.h);
+ src_h, pipe->dst.h);
return -EINVAL;
}
@@ -580,11 +597,12 @@
if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
u32 chr_dst_h = pipe->dst.h;
- if ((chroma_sample == MDSS_MDP_CHROMA_420) ||
- (chroma_sample == MDSS_MDP_CHROMA_H1V2))
+ if (!pipe->vert_deci &&
+ ((chroma_sample == MDSS_MDP_CHROMA_420) ||
+ (chroma_sample == MDSS_MDP_CHROMA_H1V2)))
chr_dst_h *= 2; /* 2x upsample chroma */
- if (pipe->src.h <= pipe->dst.h) {
+ if (src_h <= pipe->dst.h) {
scale_config |= /* G/Y, A */
(filter_mode << 10) |
(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
@@ -593,7 +611,7 @@
(MDSS_MDP_SCALE_FILTER_PCMN << 10) |
(MDSS_MDP_SCALE_FILTER_PCMN << 18);
- if (pipe->src.h <= chr_dst_h)
+ if (src_h <= chr_dst_h)
scale_config |= /* CrCb */
(MDSS_MDP_SCALE_FILTER_BIL << 14);
else
@@ -601,12 +619,12 @@
(MDSS_MDP_SCALE_FILTER_PCMN << 14);
phasey_step = mdss_mdp_scale_phase_step(
- PHASE_STEP_SHIFT, pipe->src.h, chr_dst_h);
+ PHASE_STEP_SHIFT, src_h, chr_dst_h);
writel_relaxed(phasey_step, pipe->base +
MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
} else {
- if (pipe->src.h <= pipe->dst.h)
+ if (src_h <= pipe->dst.h)
scale_config |= /* RGB, A */
(MDSS_MDP_SCALE_FILTER_BIL << 10) |
(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
@@ -617,19 +635,18 @@
}
phasey_step = mdss_mdp_scale_phase_step(
- PHASE_STEP_SHIFT, pipe->src.h, pipe->dst.h);
+ PHASE_STEP_SHIFT, src_h, pipe->dst.h);
}
- if ((pipe->src.w != pipe->dst.w) ||
+ if ((src_w != pipe->dst.w) ||
(pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE) ||
(chroma_sample == MDSS_MDP_CHROMA_420) ||
(chroma_sample == MDSS_MDP_CHROMA_H2V1)) {
- pr_debug("scale x - src_w=%d dst_w=%d\n",
- pipe->src.w, pipe->dst.w);
+ pr_debug("scale x - src_w=%d dst_w=%d\n", src_w, pipe->dst.w);
- if ((pipe->src.w / MAX_DOWNSCALE_RATIO) > pipe->dst.w) {
+ if ((src_w / MAX_DOWNSCALE_RATIO) > pipe->dst.w) {
pr_err("too much downscaling width=%d->%d",
- pipe->src.w, pipe->dst.w);
+ src_w, pipe->dst.w);
return -EINVAL;
}
@@ -638,11 +655,12 @@
if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
u32 chr_dst_w = pipe->dst.w;
- if ((chroma_sample == MDSS_MDP_CHROMA_420) ||
- (chroma_sample == MDSS_MDP_CHROMA_H2V1))
+ if (!pipe->horz_deci &&
+ ((chroma_sample == MDSS_MDP_CHROMA_420) ||
+ (chroma_sample == MDSS_MDP_CHROMA_H2V1)))
chr_dst_w *= 2; /* 2x upsample chroma */
- if (pipe->src.w <= pipe->dst.w) {
+ if (src_w <= pipe->dst.w) {
scale_config |= /* G/Y, A */
(filter_mode << 8) |
(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
@@ -651,7 +669,7 @@
(MDSS_MDP_SCALE_FILTER_PCMN << 8) |
(MDSS_MDP_SCALE_FILTER_PCMN << 16);
- if (pipe->src.w <= chr_dst_w)
+ if (src_w <= chr_dst_w)
scale_config |= /* CrCb */
(MDSS_MDP_SCALE_FILTER_BIL << 12);
else
@@ -659,11 +677,11 @@
(MDSS_MDP_SCALE_FILTER_PCMN << 12);
phasex_step = mdss_mdp_scale_phase_step(
- PHASE_STEP_SHIFT, pipe->src.w, chr_dst_w);
+ PHASE_STEP_SHIFT, src_w, chr_dst_w);
writel_relaxed(phasex_step, pipe->base +
MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
} else {
- if (pipe->src.w <= pipe->dst.w)
+ if (src_w <= pipe->dst.w)
scale_config |= /* RGB, A */
(MDSS_MDP_SCALE_FILTER_BIL << 8) |
(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
@@ -674,7 +692,7 @@
}
phasex_step = mdss_mdp_scale_phase_step(
- PHASE_STEP_SHIFT, pipe->src.w, pipe->dst.w);
+ PHASE_STEP_SHIFT, src_w, pipe->dst.w);
}
writel_relaxed(scale_config, pipe->base +
@@ -704,6 +722,17 @@
void mdss_mdp_pipe_sspp_term(struct mdss_mdp_pipe *pipe)
{
+ u32 done_bit;
+ struct pp_hist_col_info *hist_info;
+ char __iomem *ctl_base;
+
+ if (pipe && pipe->pp_res.hist.col_en) {
+ done_bit = 3 << (pipe->num * 4);
+ hist_info = &pipe->pp_res.hist;
+ ctl_base = pipe->base +
+ MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ pp_histogram_disable(hist_info, done_bit, ctl_base);
+ }
memset(&pipe->pp_cfg, 0, sizeof(struct mdp_overlay_pp_params));
memset(&pipe->pp_res, 0, sizeof(struct mdss_pipe_pp_res));
}
@@ -796,16 +825,86 @@
return 0;
}
+static char __iomem *mdss_mdp_get_dspp_addr_off(u32 dspp_num)
+{
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_mixer *mixer;
+
+ mdata = mdss_mdp_get_mdata();
+ if (mdata->nmixers_intf <= dspp_num) {
+ pr_err("Invalid dspp_num=%d", dspp_num);
+ return ERR_PTR(-EINVAL);
+ }
+ mixer = mdata->mixer_intf + dspp_num;
+ return mixer->dspp_base;
+}
+
+/* Assumes that function will be called from within clock enabled space*/
+static int pp_histogram_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix)
+{
+ int ret = -EINVAL;
+ char __iomem *base;
+ u32 op_flags, kick_base, col_state;
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_pipe *pipe;
+ struct pp_hist_col_info *hist_info;
+ unsigned long flag;
+
+ if (mix && (PP_LOCAT(block) == MDSS_PP_DSPP_CFG)) {
+ /* HIST_EN & AUTO_CLEAR */
+ op_flags = BIT(16) | BIT(17);
+ hist_info = &mdss_pp_res->dspp_hist[mix->num];
+ base = mdss_mdp_get_dspp_addr_off(PP_BLOCK(block));
+ kick_base = MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+ } else if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG) {
+ mdata = mdss_mdp_get_mdata();
+ pipe = mdss_mdp_pipe_get(mdata, BIT(PP_BLOCK(block)));
+ if (IS_ERR_OR_NULL(pipe)) {
+ pr_debug("pipe DNE (%d)", (u32) BIT(PP_BLOCK(block)));
+ ret = -ENODEV;
+ goto error;
+ }
+ /* HIST_EN & AUTO_CLEAR */
+ op_flags = BIT(8) + BIT(9);
+ hist_info = &pipe->pp_res.hist;
+ base = pipe->base;
+ kick_base = MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ mdss_mdp_pipe_unmap(pipe);
+ } else {
+ pr_warn("invalid histogram location (%d)", block);
+ goto error;
+ }
+
+ if (hist_info->col_en) {
+ *op |= op_flags;
+ mutex_lock(&hist_info->hist_mutex);
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ col_state = hist_info->col_state;
+ if (hist_info->is_kick_ready &&
+ ((col_state == HIST_IDLE) ||
+ ((false == hist_info->read_request) &&
+ col_state == HIST_READY))) {
+ /* Kick off collection */
+ writel_relaxed(1, base + kick_base);
+ hist_info->col_state = HIST_START;
+ }
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ mutex_unlock(&hist_info->hist_mutex);
+ }
+ ret = 0;
+error:
+ return ret;
+}
+
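Each histogram instance now carries its own hist_mutex (serialising enable, disable and read paths) and hist_lock spinlock (guarding col_state, which the histogram-done interrupt also updates); the kick-off path above takes them in that order. A stripped-down sketch of the same pattern; the helper name is hypothetical and the types mirror struct pp_hist_col_info from this patch.

/* Locking sketch for the per-histogram state used above. */
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>

static void pp_hist_try_kick(struct pp_hist_col_info *hist_info,
			     char __iomem *kick_reg)
{
	unsigned long flag;

	mutex_lock(&hist_info->hist_mutex);
	spin_lock_irqsave(&hist_info->hist_lock, flag);
	if (hist_info->is_kick_ready &&
	    (hist_info->col_state == HIST_IDLE ||
	     (!hist_info->read_request &&
	      hist_info->col_state == HIST_READY))) {
		writel_relaxed(1, kick_reg);	/* start collection */
		hist_info->col_state = HIST_START;
	}
	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
	mutex_unlock(&hist_info->hist_mutex);
}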
static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_ctl *ctl,
struct mdss_mdp_mixer *mixer)
{
u32 flags, base, offset, dspp_num, opmode = 0;
struct mdp_dither_cfg_data *dither_cfg;
- struct pp_hist_col_info *hist_info;
struct mdp_pgc_lut_data *pgc_config;
struct pp_sts_type *pp_sts;
- u32 data, col_state;
- unsigned long flag;
+ u32 data;
+ char __iomem *basel;
int i, ret = 0;
if (!mixer || !ctl)
@@ -817,28 +916,13 @@
(dspp_num >= MDSS_MDP_MAX_DSPP))
return -EINVAL;
base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num);
- hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+ basel = mdss_mdp_get_dspp_addr_off(dspp_num);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- if (hist_info->col_en) {
- /* HIST_EN & AUTO_CLEAR */
- opmode |= (1 << 16) | (1 << 17);
- mutex_lock(&mdss_mdp_hist_mutex);
- spin_lock_irqsave(&mdss_hist_lock, flag);
- col_state = hist_info->col_state;
- if (hist_info->is_kick_ready &&
- ((col_state == HIST_IDLE) ||
- ((false == hist_info->read_request) &&
- col_state == HIST_READY))) {
- /* Kick off collection */
- MDSS_MDP_REG_WRITE(base +
- MDSS_MDP_REG_DSPP_HIST_CTL_BASE, 1);
- hist_info->col_state = HIST_START;
- }
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- mutex_unlock(&mdss_mdp_hist_mutex);
- }
+ ret = pp_histogram_setup(&opmode, MDSS_PP_DSPP_CFG | dspp_num, mixer);
+ if (ret)
+ goto dspp_exit;
if (disp_num < MDSS_BLOCK_DISP_NUM)
flags = mdss_pp_res->pp_disp_flags[disp_num];
@@ -846,7 +930,7 @@
flags = 0;
/* nothing to update */
- if ((!flags) && (!(hist_info->col_en)))
+ if ((!flags) && (!(opmode)))
goto dspp_exit;
pp_sts = &mdss_pp_res->pp_dspp_sts[dspp_num];
@@ -860,7 +944,7 @@
pp_igc_config(flags, MDSS_MDP_REG_IGC_DSPP_BASE, pp_sts,
&mdss_pp_res->igc_disp_cfg[disp_num], dspp_num);
- pp_enhist_config(flags, base + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
+ pp_enhist_config(flags, basel + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
pp_sts, &mdss_pp_res->enhist_disp_cfg[disp_num]);
if (pp_sts->pa_sts & PP_STS_ENABLE)
@@ -934,7 +1018,7 @@
if (pp_sts->pgc_sts & PP_STS_ENABLE)
opmode |= (1 << 22);
- MDSS_MDP_REG_WRITE(base + MDSS_MDP_REG_DSPP_OP_MODE, opmode);
+ writel_relaxed(opmode, basel + MDSS_MDP_REG_DSPP_OP_MODE);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, BIT(13 + dspp_num));
wmb();
dspp_exit:
@@ -1068,7 +1152,9 @@
int mdss_mdp_pp_init(struct device *dev)
{
- int ret = 0;
+ int i, ret = 0;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_mdp_pipe *vig;
mutex_lock(&mdss_pp_mutex);
if (!mdss_pp_res) {
@@ -1078,6 +1164,18 @@
pr_err("%s mdss_pp_res allocation failed!", __func__);
ret = -ENOMEM;
}
+
+ for (i = 0; i < MDSS_MDP_MAX_DSPP; i++) {
+ mutex_init(&mdss_pp_res->dspp_hist[i].hist_mutex);
+ spin_lock_init(&mdss_pp_res->dspp_hist[i].hist_lock);
+ }
+ }
+ if (mdata) {
+ vig = mdata->vig_pipes;
+ for (i = 0; i < mdata->nvig_pipes; i++) {
+ mutex_init(&vig[i].pp_res.hist.hist_mutex);
+ spin_lock_init(&vig[i].pp_res.hist.hist_lock);
+ }
}
mutex_unlock(&mdss_pp_mutex);
return ret;
@@ -1545,13 +1643,17 @@
}
/* Note: Assumes that its inputs have been checked by calling function */
-static void pp_update_hist_lut(u32 offset, struct mdp_hist_lut_data *cfg)
+static void pp_update_hist_lut(char __iomem *offset,
+ struct mdp_hist_lut_data *cfg)
{
int i;
for (i = 0; i < ENHIST_LUT_ENTRIES; i++)
- MDSS_MDP_REG_WRITE(offset, cfg->data[i]);
+ writel_relaxed(cfg->data[i], offset);
/* swap */
- MDSS_MDP_REG_WRITE(offset + 4, 1);
+ if (PP_LOCAT(cfg->block) == MDSS_PP_DSPP_CFG)
+ writel_relaxed(1, offset + 4);
+ else
+ writel_relaxed(1, offset + 16);
}
int mdss_mdp_argc_config(struct mdss_mdp_ctl *ctl,
@@ -1574,7 +1676,7 @@
mutex_lock(&mdss_pp_mutex);
disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
- switch (config->block & MDSS_PP_LOCATION_MASK) {
+ switch (PP_LOCAT(config->block)) {
case MDSS_PP_LM_CFG:
argc_offset = MDSS_MDP_REG_LM_OFFSET(dspp_num) +
MDSS_MDP_REG_LM_GC_LUT_BASE;
@@ -1670,12 +1772,12 @@
if (!ctl)
return -EINVAL;
- if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
- (config->block >= MDP_BLOCK_MAX))
+ if ((PP_BLOCK(config->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+ (PP_BLOCK(config->block) >= MDP_BLOCK_MAX))
return -EINVAL;
mutex_lock(&mdss_pp_mutex);
- disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+ disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
if (config->ops & MDP_PP_OPS_READ) {
ret = pp_get_dspp_num(disp_num, &dspp_num);
@@ -1833,124 +1935,205 @@
mdss_mdp_pp_setup(ctl);
return ret;
}
-static void pp_hist_read(u32 v_base, struct pp_hist_col_info *hist_info)
+static void pp_hist_read(char __iomem *v_base,
+ struct pp_hist_col_info *hist_info)
{
int i, i_start;
u32 data;
- data = MDSS_MDP_REG_READ(v_base);
+ data = readl_relaxed(v_base);
i_start = data >> 24;
hist_info->data[i_start] = data & 0xFFFFFF;
for (i = i_start + 1; i < HIST_V_SIZE; i++)
- hist_info->data[i] = MDSS_MDP_REG_READ(v_base) & 0xFFFFFF;
+ hist_info->data[i] = readl_relaxed(v_base) & 0xFFFFFF;
for (i = 0; i < i_start - 1; i++)
- hist_info->data[i] = MDSS_MDP_REG_READ(v_base) & 0xFFFFFF;
+ hist_info->data[i] = readl_relaxed(v_base) & 0xFFFFFF;
hist_info->hist_cnt_read++;
}
-int mdss_mdp_histogram_start(struct mdss_mdp_ctl *ctl,
- struct mdp_histogram_start_req *req)
+/* Assumes that relevant clocks are enabled */
+static int pp_histogram_enable(struct pp_hist_col_info *hist_info,
+ struct mdp_histogram_start_req *req,
+ u32 shift_bit, char __iomem *ctl_base)
{
- u32 ctl_base, done_shift_bit;
+ unsigned long flag;
+ int ret = 0;
+ mutex_lock(&hist_info->hist_mutex);
+ /* check if it is idle */
+ if (hist_info->col_en) {
+ pr_info("%s Hist collection has already been enabled %d",
+ __func__, (u32) ctl_base);
+ ret = -EINVAL;
+ goto exit;
+ }
+ hist_info->frame_cnt = req->frame_cnt;
+ init_completion(&hist_info->comp);
+ hist_info->hist_cnt_read = 0;
+ hist_info->hist_cnt_sent = 0;
+ hist_info->hist_cnt_time = 0;
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ hist_info->read_request = false;
+ hist_info->col_state = HIST_RESET;
+ hist_info->col_en = true;
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ hist_info->is_kick_ready = false;
+ mdss_mdp_hist_irq_enable(3 << shift_bit);
+ writel_relaxed(req->frame_cnt, ctl_base + 8);
+ /* Kick out reset start */
+ writel_relaxed(1, ctl_base + 4);
+exit:
+ mutex_unlock(&hist_info->hist_mutex);
+ return ret;
+}
+
+int mdss_mdp_histogram_start(struct mdss_mdp_ctl *ctl,
+ struct mdp_histogram_start_req *req)
+{
+ u32 done_shift_bit;
+ char __iomem *ctl_base;
struct pp_hist_col_info *hist_info;
int i, ret = 0;
u32 disp_num, dspp_num = 0;
u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
- unsigned long flag;
-
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
if (!ctl)
return -EINVAL;
- if ((req->block < MDP_LOGICAL_BLOCK_DISP_0) ||
- (req->block >= MDP_BLOCK_MAX))
+ if ((PP_BLOCK(req->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+ (PP_BLOCK(req->block) >= MDP_BLOCK_MAX))
return -EINVAL;
- mutex_lock(&mdss_mdp_hist_mutex);
- disp_num = req->block - MDP_LOGICAL_BLOCK_DISP_0;
+ disp_num = PP_BLOCK(req->block) - MDP_LOGICAL_BLOCK_DISP_0;
mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
if (!mixer_cnt) {
pr_err("%s, no dspp connects to disp %d",
__func__, disp_num);
ret = -EPERM;
- goto hist_start_exit;
+ goto hist_exit;
}
if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
pr_err("%s, Too many dspp connects to disp %d",
__func__, mixer_cnt);
ret = -EPERM;
- goto hist_start_exit;
+ goto hist_exit;
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- for (i = 0; i < mixer_cnt; i++) {
- dspp_num = mixer_id[i];
- hist_info = &mdss_pp_res->dspp_hist[dspp_num];
- done_shift_bit = (dspp_num * 4) + 12;
- /* check if it is idle */
- if (hist_info->col_en) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
- pr_info("%s Hist collection has already been enabled %d",
- __func__, dspp_num);
- goto hist_start_exit;
+
+ if (PP_LOCAT(req->block) == MDSS_PP_SSPP_CFG) {
+ i = MDSS_PP_ARG_MASK & req->block;
+ if (!i) {
+ ret = -EINVAL;
+ pr_warn("Must pass pipe arguments, %d", i);
+ goto hist_exit;
}
- spin_lock_irqsave(&mdss_hist_lock, flag);
- hist_info->frame_cnt = req->frame_cnt;
- init_completion(&hist_info->comp);
- hist_info->hist_cnt_read = 0;
- hist_info->hist_cnt_sent = 0;
- hist_info->read_request = false;
- hist_info->col_state = HIST_RESET;
- hist_info->col_en = true;
- hist_info->is_kick_ready = false;
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- mdss_pp_res->hist_col[disp_num][i] =
- &mdss_pp_res->dspp_hist[dspp_num];
- mdss_mdp_hist_irq_enable(3 << done_shift_bit);
- ctl_base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
- MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
- MDSS_MDP_REG_WRITE(ctl_base + 8, req->frame_cnt);
- /* Kick out reset start */
- MDSS_MDP_REG_WRITE(ctl_base + 4, 1);
+
+ for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, req->block))
+ continue;
+ pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ if (IS_ERR_OR_NULL(pipe))
+ continue;
+ if (!pipe || pipe->num > MDSS_MDP_SSPP_VIG2) {
+ ret = -EINVAL;
+ pr_warn("Invalid Hist pipe (%d)", i);
+ goto hist_exit;
+ }
+ done_shift_bit = (pipe->num * 4);
+ hist_info = &pipe->pp_res.hist;
+ ctl_base = pipe->base +
+ MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ ret = pp_histogram_enable(hist_info, req,
+ done_shift_bit, ctl_base);
+ mdss_mdp_pipe_unmap(pipe);
+ }
+ } else if (PP_LOCAT(req->block) == MDSS_PP_DSPP_CFG) {
+ for (i = 0; i < mixer_cnt; i++) {
+ dspp_num = mixer_id[i];
+ done_shift_bit = (dspp_num * 4) + 12;
+ hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+ ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
+ MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+ ret = pp_histogram_enable(hist_info, req,
+ done_shift_bit, ctl_base);
+ mdss_pp_res->pp_disp_flags[disp_num] |=
+ PP_FLAGS_DIRTY_HIST_COL;
+ }
}
- for (i = mixer_cnt; i < MDSS_MDP_MAX_DSPP; i++)
- mdss_pp_res->hist_col[disp_num][i] = 0;
- mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_HIST_COL;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
-hist_start_exit:
- mutex_unlock(&mdss_mdp_hist_mutex);
- if (!ret) {
+
+hist_exit:
+ if (!ret && (PP_LOCAT(req->block) == MDSS_PP_DSPP_CFG)) {
mdss_mdp_pp_setup(ctl);
		/* wait for a frame to let histogram enable itself */
+ /* TODO add hysteresis value to be able to remove this sleep */
usleep(41666);
for (i = 0; i < mixer_cnt; i++) {
dspp_num = mixer_id[i];
hist_info = &mdss_pp_res->dspp_hist[dspp_num];
- mutex_lock(&mdss_mdp_hist_mutex);
- spin_lock_irqsave(&mdss_hist_lock, flag);
+ mutex_lock(&hist_info->hist_mutex);
hist_info->is_kick_ready = true;
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- mutex_unlock(&mdss_mdp_hist_mutex);
+ mutex_unlock(&hist_info->hist_mutex);
+ }
+ } else if (!ret) {
+ for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, req->block))
+ continue;
+ pr_info("PP_ARG(%d) = %d", i, PP_ARG(i, req->block));
+ pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ if (IS_ERR_OR_NULL(pipe))
+ continue;
+ hist_info = &pipe->pp_res.hist;
+ hist_info->is_kick_ready = true;
+ mdss_mdp_pipe_unmap(pipe);
}
}
return ret;
}
+static int pp_histogram_disable(struct pp_hist_col_info *hist_info,
+ u32 done_bit, char __iomem *ctl_base)
+{
+ int ret = 0;
+ unsigned long flag;
+ mutex_lock(&hist_info->hist_mutex);
+ if (hist_info->col_en == false) {
+ pr_debug("Histogram already disabled (%d)", (u32) ctl_base);
+ ret = -EINVAL;
+ goto exit;
+ }
+ complete_all(&hist_info->comp);
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ hist_info->col_en = false;
+ hist_info->col_state = HIST_UNKNOWN;
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ hist_info->is_kick_ready = false;
+ mdss_mdp_hist_irq_disable(done_bit);
+	writel_relaxed(BIT(1), ctl_base); /* cancel */
+ ret = 0;
+exit:
+ mutex_unlock(&hist_info->hist_mutex);
+ return ret;
+}
+
int mdss_mdp_histogram_stop(struct mdss_mdp_ctl *ctl, u32 block)
{
int i, ret = 0;
- u32 dspp_num, disp_num, ctl_base, done_bit;
+ char __iomem *ctl_base;
+ u32 dspp_num, disp_num, done_bit;
struct pp_hist_col_info *hist_info;
u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
- unsigned long flag;
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
if (!ctl)
return -EINVAL;
- if ((block < MDP_LOGICAL_BLOCK_DISP_0) ||
- (block >= MDP_BLOCK_MAX))
+ if ((PP_BLOCK(block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+ (PP_BLOCK(block) >= MDP_BLOCK_MAX))
return -EINVAL;
- mutex_lock(&mdss_mdp_hist_mutex);
- disp_num = block - MDP_LOGICAL_BLOCK_DISP_0;
+ disp_num = PP_BLOCK(block) - MDP_LOGICAL_BLOCK_DISP_0;
mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
if (!mixer_cnt) {
@@ -1966,162 +2149,312 @@
goto hist_stop_exit;
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- for (i = 0; i < mixer_cnt; i++) {
- dspp_num = mixer_id[i];
- hist_info = &mdss_pp_res->dspp_hist[dspp_num];
- done_bit = 3 << ((dspp_num * 4) + 12);
- ctl_base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
- MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
- if (hist_info->col_en == false) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG) {
+ i = MDSS_PP_ARG_MASK & block;
+ if (!i) {
+ pr_warn("Must pass pipe arguments, %d", i);
goto hist_stop_exit;
}
- complete_all(&hist_info->comp);
- spin_lock_irqsave(&mdss_hist_lock, flag);
- hist_info->col_en = false;
- hist_info->col_state = HIST_UNKNOWN;
- hist_info->is_kick_ready = false;
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- mdss_mdp_hist_irq_disable(done_bit);
- MDSS_MDP_REG_WRITE(ctl_base, (1 << 1));/* cancel */
+
+ for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, block))
+ continue;
+ pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ if (IS_ERR_OR_NULL(pipe) ||
+ pipe->num > MDSS_MDP_SSPP_VIG2) {
+ pr_warn("Invalid Hist pipe (%d)", i);
+ continue;
+ }
+ done_bit = 3 << (pipe->num * 4);
+ hist_info = &pipe->pp_res.hist;
+ ctl_base = pipe->base +
+ MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ ret = pp_histogram_disable(hist_info, done_bit,
+ ctl_base);
+ mdss_mdp_pipe_unmap(pipe);
+ if (ret)
+ goto hist_stop_exit;
+ }
+ } else if (PP_LOCAT(block) == MDSS_PP_DSPP_CFG) {
+ for (i = 0; i < mixer_cnt; i++) {
+ dspp_num = mixer_id[i];
+ done_bit = 3 << ((dspp_num * 4) + 12);
+ hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+ ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
+ MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+ ret = pp_histogram_disable(hist_info, done_bit,
+ ctl_base);
+ if (ret)
+ goto hist_stop_exit;
+ mdss_pp_res->pp_disp_flags[disp_num] |=
+ PP_FLAGS_DIRTY_HIST_COL;
+ }
}
- for (i = 0; i < MDSS_MDP_MAX_DSPP; i++)
- mdss_pp_res->hist_col[disp_num][i] = 0;
- mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_HIST_COL;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
hist_stop_exit:
- mutex_unlock(&mdss_mdp_hist_mutex);
- if (!ret)
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ if (!ret && (PP_LOCAT(block) == MDSS_PP_DSPP_CFG))
mdss_mdp_pp_setup(ctl);
return ret;
}
-int mdss_mdp_hist_collect(struct mdss_mdp_ctl *ctl,
- struct mdp_histogram_data *hist,
- u32 *hist_data_addr)
+static int pp_hist_collect(struct mdss_mdp_ctl *ctl,
+ struct mdp_histogram_data *hist,
+ struct pp_hist_col_info *hist_info,
+ char __iomem *ctl_base)
{
- int i, j, wait_ret, ret = 0;
- u32 timeout, v_base;
- struct pp_hist_col_info *hist_info;
- u32 dspp_num, disp_num, ctl_base;
- u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+ int wait_ret, ret = 0;
+ u32 timeout;
+ char __iomem *v_base;
unsigned long flag;
+ struct mdss_pipe_pp_res *res;
+ struct mdss_mdp_pipe *pipe;
+
+ mutex_lock(&hist_info->hist_mutex);
+ if ((hist_info->col_en == 0) ||
+ (hist_info->col_state == HIST_UNKNOWN)) {
+ ret = -EINVAL;
+ goto hist_collect_exit;
+ }
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ /* wait for hist done if cache has no data */
+ if (hist_info->col_state != HIST_READY) {
+ hist_info->read_request = true;
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ timeout = HIST_WAIT_TIMEOUT(hist_info->frame_cnt);
+ mutex_unlock(&hist_info->hist_mutex);
+		/* flush updates before wait */
+ if (PP_LOCAT(hist->block) == MDSS_PP_DSPP_CFG)
+ mdss_mdp_pp_setup(ctl);
+ if (PP_LOCAT(hist->block) == MDSS_PP_SSPP_CFG) {
+ res = container_of(hist_info, struct mdss_pipe_pp_res,
+ hist);
+ pipe = container_of(res, struct mdss_mdp_pipe, pp_res);
+ pipe->params_changed++;
+ }
+ wait_ret = wait_for_completion_killable_timeout(
+ &(hist_info->comp), timeout);
+
+ mutex_lock(&hist_info->hist_mutex);
+ if (wait_ret == 0) {
+ ret = -ETIMEDOUT;
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+			pr_debug("bin collection timed out, state %d",
+ hist_info->col_state);
+ /*
+ * When the histogram has timed out (usually
+ * underrun) change the SW state back to idle
+ * since histogram hardware will have done the
+ * same. Histogram data also needs to be
+ * cleared in this case, which is done by the
+ * histogram being read (triggered by READY
+ * state, which also moves the histogram SW back
+ * to IDLE).
+ */
+ hist_info->hist_cnt_time++;
+ hist_info->col_state = HIST_READY;
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ } else if (wait_ret < 0) {
+ ret = -EINTR;
+ pr_debug("%s: bin collection interrupted",
+ __func__);
+ goto hist_collect_exit;
+ }
+ if (hist_info->col_state != HIST_READY) {
+ ret = -ENODATA;
+ pr_debug("%s: state is not ready: %d",
+ __func__, hist_info->col_state);
+ goto hist_collect_exit;
+ }
+ } else {
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ }
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ if (hist_info->col_state == HIST_READY) {
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ v_base = ctl_base + 0x1C;
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ pp_hist_read(v_base, hist_info);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ hist_info->read_request = false;
+ hist_info->col_state = HIST_IDLE;
+ }
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+hist_collect_exit:
+ mutex_unlock(&hist_info->hist_mutex);
+ return ret;
+}
+
+int mdss_mdp_hist_collect(struct mdss_mdp_ctl *ctl,
+ struct mdp_histogram_data *hist)
+{
+ int i, j, off, ret = 0;
+ struct pp_hist_col_info *hist_info;
+ u32 dspp_num, disp_num;
+ char __iomem *ctl_base;
+ u32 hist_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+ u32 *hist_concat = NULL;
+ u32 *hist_data_addr;
+ u32 pipe_cnt = 0;
+ u32 pipe_num = MDSS_MDP_SSPP_VIG0;
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
if (!ctl)
return -EINVAL;
- if ((hist->block < MDP_LOGICAL_BLOCK_DISP_0) ||
- (hist->block >= MDP_BLOCK_MAX))
+ if ((PP_BLOCK(hist->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+ (PP_BLOCK(hist->block) >= MDP_BLOCK_MAX))
return -EINVAL;
- mutex_lock(&mdss_mdp_hist_mutex);
- disp_num = hist->block - MDP_LOGICAL_BLOCK_DISP_0;
- mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+ disp_num = PP_BLOCK(hist->block) - MDP_LOGICAL_BLOCK_DISP_0;
+ hist_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
- if (!mixer_cnt) {
+ if (!hist_cnt) {
pr_err("%s, no dspp connects to disp %d",
__func__, disp_num);
ret = -EPERM;
goto hist_collect_exit;
}
- if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
+ if (hist_cnt >= MDSS_MDP_MAX_DSPP) {
pr_err("%s, Too many dspp connects to disp %d",
- __func__, mixer_cnt);
+ __func__, hist_cnt);
ret = -EPERM;
goto hist_collect_exit;
}
- hist_info = &mdss_pp_res->dspp_hist[0];
- for (i = 0; i < mixer_cnt; i++) {
- dspp_num = mixer_id[i];
- hist_info = &mdss_pp_res->dspp_hist[dspp_num];
- ctl_base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
- MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
- if ((hist_info->col_en == 0) ||
- (hist_info->col_state == HIST_UNKNOWN)) {
- ret = -EINVAL;
- goto hist_collect_exit;
- }
- spin_lock_irqsave(&mdss_hist_lock, flag);
- /* wait for hist done if cache has no data */
- if (hist_info->col_state != HIST_READY) {
- hist_info->read_request = true;
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- timeout = HIST_WAIT_TIMEOUT(hist_info->frame_cnt);
- mutex_unlock(&mdss_mdp_hist_mutex);
- /* flush updates before wait*/
- mdss_mdp_pp_setup(ctl);
- wait_ret = wait_for_completion_killable_timeout(
- &(hist_info->comp), timeout);
-
- mutex_lock(&mdss_mdp_hist_mutex);
- if (wait_ret == 0) {
- ret = -ETIMEDOUT;
- spin_lock_irqsave(&mdss_hist_lock, flag);
- pr_debug("bin collection timedout, state %d",
- hist_info->col_state);
- /*
- * When the histogram has timed out (usually
- * underrun) change the SW state back to idle
- * since histogram hardware will have done the
- * same. Histogram data also needs to be
- * cleared in this case, which is done by the
- * histogram being read (triggered by READY
- * state, which also moves the histogram SW back
- * to IDLE).
- */
- hist_info->col_state = HIST_READY;
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- } else if (wait_ret < 0) {
- ret = -EINTR;
- pr_debug("%s: bin collection interrupted",
- __func__);
- goto hist_collect_exit;
- }
- if (hist_info->col_state != HIST_READY) {
- ret = -ENODATA;
- pr_debug("%s: state is not ready: %d",
- __func__, hist_info->col_state);
- goto hist_collect_exit;
- }
- } else {
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- }
- spin_lock_irqsave(&mdss_hist_lock, flag);
- if (hist_info->col_state == HIST_READY) {
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- v_base = ctl_base + 0x1C;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- pp_hist_read(v_base, hist_info);
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
- spin_lock_irqsave(&mdss_hist_lock, flag);
- hist_info->read_request = false;
- hist_info->col_state = HIST_IDLE;
- }
- spin_unlock_irqrestore(&mdss_hist_lock, flag);
- }
- if (mixer_cnt > 1) {
- memset(&mdss_pp_res->hist_data[disp_num][0],
- 0, HIST_V_SIZE * sizeof(u32));
- for (i = 0; i < mixer_cnt; i++) {
+ if (PP_LOCAT(hist->block) == MDSS_PP_DSPP_CFG) {
+ hist_info = &mdss_pp_res->dspp_hist[disp_num];
+ for (i = 0; i < hist_cnt; i++) {
dspp_num = mixer_id[i];
hist_info = &mdss_pp_res->dspp_hist[dspp_num];
- for (j = 0; j < HIST_V_SIZE; j++)
- mdss_pp_res->hist_data[disp_num][i] +=
- hist_info->data[i];
+ ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
+ MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+ ret = pp_hist_collect(ctl, hist, hist_info, ctl_base);
+ if (ret)
+ goto hist_collect_exit;
}
- *hist_data_addr = (u32)&mdss_pp_res->hist_data[disp_num][0];
+ if (hist_cnt > 1) {
+ if (hist->bin_cnt != HIST_V_SIZE) {
+ pr_err("User not expecting size %d output",
+ HIST_V_SIZE);
+ ret = -EINVAL;
+ goto hist_collect_exit;
+ }
+ hist_concat = kmalloc(HIST_V_SIZE * sizeof(u32),
+ GFP_KERNEL);
+ if (!hist_concat) {
+ ret = -ENOMEM;
+ goto hist_collect_exit;
+ }
+ memset(hist_concat, 0, HIST_V_SIZE * sizeof(u32));
+ for (i = 0; i < hist_cnt; i++) {
+ dspp_num = mixer_id[i];
+ hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+ mutex_lock(&hist_info->hist_mutex);
+ for (j = 0; j < HIST_V_SIZE; j++)
+ hist_concat[i] += hist_info->data[i];
+ mutex_unlock(&hist_info->hist_mutex);
+ }
+ hist_data_addr = hist_concat;
+ } else {
+ hist_data_addr = hist_info->data;
+ }
+ hist_info = &mdss_pp_res->dspp_hist[disp_num];
+ hist_info->hist_cnt_sent++;
+ } else if (PP_LOCAT(hist->block) == MDSS_PP_SSPP_CFG) {
+
+ hist_cnt = MDSS_PP_ARG_MASK & hist->block;
+ if (!hist_cnt) {
+ pr_warn("Must pass pipe arguments, %d", hist_cnt);
+ goto hist_collect_exit;
+ }
+
+ /* Find the first pipe requested */
+ for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
+ if (PP_ARG(i, hist_cnt)) {
+ pipe_num = i;
+ break;
+ }
+ }
+
+ pipe = mdss_mdp_pipe_get(mdata, BIT(pipe_num));
+ if (IS_ERR_OR_NULL(pipe)) {
+ pr_warn("Invalid starting hist pipe, %d", pipe_num);
+ ret = -ENODEV;
+ goto hist_collect_exit;
+ }
+ hist_info = &pipe->pp_res.hist;
+ mdss_mdp_pipe_unmap(pipe);
+ for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, hist->block))
+ continue;
+ pipe_cnt++;
+ pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ if (IS_ERR_OR_NULL(pipe) ||
+ pipe->num > MDSS_MDP_SSPP_VIG2) {
+ pr_warn("Invalid Hist pipe (%d)", i);
+ continue;
+ }
+ hist_info = &pipe->pp_res.hist;
+ ctl_base = pipe->base +
+ MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ ret = pp_hist_collect(ctl, hist, hist_info, ctl_base);
+ mdss_mdp_pipe_unmap(pipe);
+ if (ret)
+ goto hist_collect_exit;
+ }
+ if (pipe_cnt > 1) {
+ if (hist->bin_cnt != (HIST_V_SIZE * pipe_cnt)) {
+ pr_err("User not expecting size %d output",
+ pipe_cnt * HIST_V_SIZE);
+ ret = -EINVAL;
+ goto hist_collect_exit;
+ }
+ hist_concat = kmalloc(HIST_V_SIZE * pipe_cnt *
+ sizeof(u32), GFP_KERNEL);
+ if (!hist_concat) {
+ ret = -ENOMEM;
+ goto hist_collect_exit;
+ }
+
+ memset(hist_concat, 0, pipe_cnt * HIST_V_SIZE *
+ sizeof(u32));
+ for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, hist->block))
+ continue;
+ pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ hist_info = &pipe->pp_res.hist;
+ off = HIST_V_SIZE * i;
+ mutex_lock(&hist_info->hist_mutex);
+ for (j = off; j < off + HIST_V_SIZE; j++)
+ hist_concat[j] =
+ hist_info->data[j - off];
+ hist_info->hist_cnt_sent++;
+ mutex_unlock(&hist_info->hist_mutex);
+ mdss_mdp_pipe_unmap(pipe);
+ }
+
+ hist_data_addr = hist_concat;
+ } else {
+ hist_data_addr = hist_info->data;
+ }
} else {
- *hist_data_addr = (u32)hist_info->data;
+ pr_info("No Histogram at location %d", PP_LOCAT(hist->block));
+ goto hist_collect_exit;
}
- hist_info->hist_cnt_sent++;
+ ret = copy_to_user(hist->c0, hist_data_addr, sizeof(u32) *
+ hist->bin_cnt);
hist_collect_exit:
- mutex_unlock(&mdss_mdp_hist_mutex);
+ kfree(hist_concat);
+
return ret;
}
void mdss_mdp_hist_intr_done(u32 isr)
{
u32 isr_blk, blk_idx;
- struct pp_hist_col_info *hist_info;
+ struct pp_hist_col_info *hist_info = NULL;
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
isr &= 0x333333;
while (isr != 0) {
if (isr & 0xFFF000) {
@@ -2141,36 +2474,40 @@
hist_info = &mdss_pp_res->dspp_hist[blk_idx];
} else {
if (isr & 0x3) {
- blk_idx = 0;
+ blk_idx = MDSS_MDP_SSPP_VIG0;
isr_blk = isr & 0x3;
isr &= ~0x3;
} else if (isr & 0x30) {
- blk_idx = 1;
+ blk_idx = MDSS_MDP_SSPP_VIG1;
isr_blk = (isr >> 4) & 0x3;
isr &= ~0x30;
} else {
- blk_idx = 2;
+ blk_idx = MDSS_MDP_SSPP_VIG2;
isr_blk = (isr >> 8) & 0x3;
isr &= ~0x300;
}
- /* SSPP block, not support yet*/
- continue;
+ pipe = mdss_mdp_pipe_search(mdata, BIT(blk_idx));
+ if (IS_ERR_OR_NULL(pipe)) {
+ pr_debug("pipe DNE, %d", blk_idx);
+ continue;
+ }
+ hist_info = &pipe->pp_res.hist;
}
/* Histogram Done Interrupt */
- if ((isr_blk & 0x1) &&
+ if (hist_info && (isr_blk & 0x1) &&
(hist_info->col_en)) {
- spin_lock(&mdss_hist_lock);
+ spin_lock(&hist_info->hist_lock);
hist_info->col_state = HIST_READY;
- spin_unlock(&mdss_hist_lock);
+ spin_unlock(&hist_info->hist_lock);
if (hist_info->read_request)
complete(&hist_info->comp);
}
/* Histogram Reset Done Interrupt */
if ((isr_blk & 0x2) &&
(hist_info->col_en)) {
- spin_lock(&mdss_hist_lock);
+ spin_lock(&hist_info->hist_lock);
hist_info->col_state = HIST_IDLE;
- spin_unlock(&mdss_hist_lock);
+ spin_unlock(&hist_info->hist_lock);
}
};
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index 5711653..ce4c28f 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -27,6 +27,8 @@
static struct mdss_mdp_rotator_session rotator_session[MAX_ROTATOR_SESSIONS];
static LIST_HEAD(rotator_queue);
+static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot);
+
struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_alloc(void)
{
struct mdss_mdp_rotator_session *rot;
@@ -166,27 +168,21 @@
return 0;
}
-int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
+static int mdss_mdp_rotator_queue_sub(struct mdss_mdp_rotator_session *rot,
struct mdss_mdp_data *src_data,
struct mdss_mdp_data *dst_data)
{
struct mdss_mdp_pipe *rot_pipe = NULL;
struct mdss_mdp_ctl *ctl;
- int ret, need_wait = false;
+ int ret;
- ret = mutex_lock_interruptible(&rotator_lock);
- if (ret)
- return ret;
-
- if (!rot || !rot->ref_cnt) {
- mutex_unlock(&rotator_lock);
- return -ENODEV;
- }
+ if (!rot || !rot->ref_cnt)
+ return -ENOENT;
ret = mdss_mdp_rotator_pipe_dequeue(rot);
if (ret) {
pr_err("unable to acquire rotator\n");
- goto done;
+ return ret;
}
rot_pipe = rot->pipe;
@@ -203,31 +199,141 @@
rot_pipe->img_height = rot->img_height;
rot_pipe->src = rot->src_rect;
rot_pipe->dst = rot->src_rect;
+ rot_pipe->dst.x = 0;
+ rot_pipe->dst.y = 0;
rot_pipe->params_changed++;
}
ret = mdss_mdp_pipe_queue_data(rot->pipe, src_data);
if (ret) {
pr_err("unable to queue rot data\n");
- goto done;
+ return ret;
}
ret = mdss_mdp_rotator_kickoff(ctl, rot, dst_data);
- if (ret == 0 && !rot->no_wait)
- need_wait = true;
-done:
+ return ret;
+}
+
+int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
+ struct mdss_mdp_data *src_data,
+ struct mdss_mdp_data *dst_data)
+{
+ int ret;
+ struct mdss_mdp_rotator_session *tmp = rot;
+
+ ret = mutex_lock_interruptible(&rotator_lock);
+ if (ret)
+ return ret;
+
+ pr_debug("rotator session=%x start\n", rot->session_id);
+
+ for (ret = 0, tmp = rot; ret == 0 && tmp; tmp = tmp->next)
+ ret = mdss_mdp_rotator_queue_sub(tmp, src_data, dst_data);
+
mutex_unlock(&rotator_lock);
- if (need_wait)
- mdss_mdp_rotator_busy_wait(rot);
+ if (ret) {
+ pr_err("rotation failed %d for rot=%d\n", ret, rot->session_id);
+ return ret;
+ }
- if (rot_pipe)
- pr_debug("end of rotator pnum=%d enqueue\n", rot_pipe->num);
+ for (tmp = rot; tmp; tmp = tmp->next)
+ mdss_mdp_rotator_busy_wait(tmp);
+
+ pr_debug("rotator session=%x queue done\n", rot->session_id);
return ret;
}
+int mdss_mdp_rotator_setup(struct mdss_mdp_rotator_session *rot)
+{
+
+ rot->dst = rot->src_rect;
+ /*
+ * by default, rotator output should be placed directly on
+ * output buffer address without any offset.
+ */
+ rot->dst.x = 0;
+ rot->dst.y = 0;
+
+ if (rot->flags & MDP_ROT_90)
+ swap(rot->dst.w, rot->dst.h);
+
+ if (rot->src_rect.w > MAX_MIXER_WIDTH) {
+ struct mdss_mdp_rotator_session *tmp;
+ u32 width;
+
+ if (rot->bwc_mode) {
+ pr_err("Unable to do split rotation with bwc set\n");
+ return -EINVAL;
+ }
+
+ width = rot->src_rect.w;
+
+ pr_debug("setting up split rotation src=%dx%d\n",
+ rot->src_rect.w, rot->src_rect.h);
+
+ if (width > (MAX_MIXER_WIDTH * 2)) {
+ pr_err("unsupported source width %d\n", width);
+ return -EOVERFLOW;
+ }
+
+ if (!rot->next) {
+ tmp = mdss_mdp_rotator_session_alloc();
+ if (!tmp) {
+ pr_err("unable to allocate rot dual session\n");
+ return -ENOMEM;
+ }
+ rot->next = tmp;
+ }
+ tmp = rot->next;
+
+ tmp->session_id = rot->session_id & ~MDSS_MDP_ROT_SESSION_MASK;
+ tmp->flags = rot->flags;
+ tmp->format = rot->format;
+ tmp->img_width = rot->img_width;
+ tmp->img_height = rot->img_height;
+ tmp->src_rect = rot->src_rect;
+
+ tmp->src_rect.w = width / 2;
+ width -= tmp->src_rect.w;
+ tmp->src_rect.x += width;
+
+ tmp->dst = rot->dst;
+ rot->src_rect.w = width;
+
+ if (rot->flags & MDP_ROT_90) {
+ /*
+			 * If rotated by 90, the first half should be on top,
+			 * but if horizontally flipped it should be on the bottom.
+ */
+ if (rot->flags & MDP_FLIP_LR)
+ rot->dst.y = tmp->src_rect.w;
+ else
+ tmp->dst.y = rot->src_rect.w;
+ } else {
+ /*
+ * If not rotated, first half should be the left part
+			 * of the frame, unless it is horizontally flipped.
+ */
+ if (rot->flags & MDP_FLIP_LR)
+ rot->dst.x = tmp->src_rect.w;
+ else
+ tmp->dst.x = rot->src_rect.w;
+ }
+
+ tmp->params_changed++;
+ } else if (rot->next) {
+ mdss_mdp_rotator_finish(rot->next);
+ rot->next = NULL;
+ }
+
+ rot->params_changed++;
+
+ return 0;
+}
+
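To make the split placement above concrete, a brief worked example (illustrative only; MAX_MIXER_WIDTH is platform defined and assumed to be 2048 here):

	/*
	 * A 2560-wide source with MDP_ROT_90 set and no MDP_FLIP_LR splits as:
	 *
	 *   rot->src_rect: x = x0,        w = 1280  ->  rot->dst.y = 0
	 *   tmp->src_rect: x = x0 + 1280, w = 1280  ->  tmp->dst.y = 1280
	 *
	 * i.e. the left half of the source lands on top of the rotated output
	 * and the right half directly below it, matching the comments above.
	 */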
static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
{
struct mdss_mdp_pipe *rot_pipe;
@@ -237,6 +343,9 @@
pr_debug("finish rot id=%x\n", rot->session_id);
+ if (rot->next)
+ mdss_mdp_rotator_finish(rot->next);
+
rot_pipe = rot->pipe;
if (rot_pipe) {
mdss_mdp_rotator_busy_wait(rot);
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
index 21ee9bb..c50d710 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.h
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -30,6 +30,7 @@
u16 img_width;
u16 img_height;
struct mdss_mdp_img_rect src_rect;
+ struct mdss_mdp_img_rect dst;
u32 bwc_mode;
struct mdss_mdp_pipe *pipe;
@@ -40,6 +41,7 @@
u8 no_wait;
struct list_head head;
+ struct mdss_mdp_rotator_session *next;
};
static inline u32 mdss_mdp_get_rotator_dst_format(u32 in_format)
@@ -61,6 +63,7 @@
struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_alloc(void);
struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_get(u32 session_id);
+int mdss_mdp_rotator_setup(struct mdss_mdp_rotator_session *rot);
int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
struct mdss_mdp_data *src_data,
struct mdss_mdp_data *dst_data);
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 5915f61..4de6d03 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -387,6 +387,31 @@
return 0;
}
+void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
+ struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt)
+{
+ if ((x == 0) && (y == 0))
+ return;
+
+ data->p[0].addr += y * ps->ystride[0];
+
+ if (data->num_planes == 1) {
+ data->p[0].addr += x * fmt->bpp;
+ } else {
+ u8 hmap[] = { 1, 2, 1, 2 };
+ u8 vmap[] = { 1, 1, 2, 2 };
+ u16 xoff = x / hmap[fmt->chroma_sample];
+ u16 yoff = y / vmap[fmt->chroma_sample];
+
+ data->p[0].addr += x;
+ data->p[1].addr += xoff + (yoff * ps->ystride[1]);
+ if (data->num_planes == 2) /* pseudo planar */
+ data->p[1].addr += xoff;
+ else /* planar */
+ data->p[2].addr += xoff + (yoff * ps->ystride[2]);
+ }
+}
+
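A short worked example of the offset math (a sketch only; the 2x2 subsampling case corresponds to hmap/vmap index 3, e.g. an NV12-style two-plane layout):

	/*
	 * For x = 8, y = 4 on a two-plane, 2x2 subsampled format:
	 *
	 *   p[0].addr += 4 * ystride[0] + 8		(luma)
	 *   p[1].addr += 2 * ystride[1] + 4 + 4	(interleaved chroma)
	 *
	 * so the chroma plane advances by y/2 rows and 2 * (x/2) bytes, as
	 * expected for pseudo-planar data.
	 */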
int mdss_mdp_put_img(struct mdss_mdp_img_data *data)
{
struct ion_client *iclient = mdss_get_ionclient();
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index 88e7605..c19e07a 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -25,6 +25,7 @@
#include "mdss_mdp.h"
#include "mdss_fb.h"
+#include "mdss_wb.h"
enum mdss_mdp_wb_state {
@@ -535,11 +536,37 @@
return ret;
}
+int mdss_mdp_wb_set_mirr_hint(struct msm_fb_data_type *mfd, int hint)
+{
+ struct mdss_panel_data *pdata = NULL;
+ struct mdss_wb_ctrl *wb_ctrl = NULL;
+
+ if (!mfd) {
+ pr_err("No panel data!\n");
+ return -EINVAL;
+ }
+
+ pdata = mfd->pdev->dev.platform_data;
+ wb_ctrl = container_of(pdata, struct mdss_wb_ctrl, pdata);
+
+ switch (hint) {
+ case MDP_WRITEBACK_MIRROR_ON:
+ case MDP_WRITEBACK_MIRROR_PAUSE:
+ case MDP_WRITEBACK_MIRROR_RESUME:
+ case MDP_WRITEBACK_MIRROR_OFF:
+ pr_info("wfd state switched to %d\n", hint);
+ switch_set_state(&wb_ctrl->sdev, hint);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd,
void *arg)
{
struct msmfb_data data;
- int ret = -ENOSYS;
+ int ret = -ENOSYS, hint = 0;
switch (cmd) {
case MSMFB_WRITEBACK_INIT:
@@ -570,6 +597,14 @@
case MSMFB_WRITEBACK_TERMINATE:
ret = mdss_mdp_wb_terminate(mfd);
break;
+ case MSMFB_WRITEBACK_SET_MIRRORING_HINT:
+ if (!copy_from_user(&hint, arg, sizeof(hint))) {
+ ret = mdss_mdp_wb_set_mirr_hint(mfd, hint);
+ } else {
+ pr_err("set mirroring hint failed on copy_from_user\n");
+ ret = -EFAULT;
+ }
+ break;
}
return ret;
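For context, a minimal sketch of how a userspace client might drive the new hint; fb_fd is a hypothetical, already-open framebuffer descriptor, and the ioctl and hint values come from the msm_mdp.h hunk further below. On success the hint is also reflected through the "wfd" switch device registered by mdss_wb.c:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/msm_mdp.h>

	int hint = MDP_WRITEBACK_MIRROR_ON;

	if (ioctl(fb_fd, MSMFB_WRITEBACK_SET_MIRRORING_HINT, &hint) < 0)
		perror("MSMFB_WRITEBACK_SET_MIRRORING_HINT");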
diff --git a/drivers/video/msm/mdss/mdss_qpic.c b/drivers/video/msm/mdss/mdss_qpic.c
index be02113..fa6bd3d 100644
--- a/drivers/video/msm/mdss/mdss_qpic.c
+++ b/drivers/video/msm/mdss/mdss_qpic.c
@@ -428,7 +428,7 @@
param[0]);
param++;
bytes_left -= 4;
- space++;
+ space--;
} else if (bytes_left == 2) {
QPIC_OUTPW(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
*(u16 *)param);
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index 1b398d3..a169302 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -24,6 +24,7 @@
#include <linux/version.h>
#include "mdss_panel.h"
+#include "mdss_wb.h"
/**
* mdss_wb_check_params - check new panel info params
@@ -87,22 +88,62 @@
return 0;
}
+static int mdss_wb_dev_init(struct mdss_wb_ctrl *wb_ctrl)
+{
+ int rc = 0;
+ if (!wb_ctrl) {
+ pr_err("%s: no driver data\n", __func__);
+ return -ENODEV;
+ }
+
+ wb_ctrl->sdev.name = "wfd";
+ rc = switch_dev_register(&wb_ctrl->sdev);
+ if (rc) {
+		pr_err("Failed to set up switch dev for writeback panel\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int mdss_wb_dev_uninit(struct mdss_wb_ctrl *wb_ctrl)
+{
+ if (!wb_ctrl) {
+ pr_err("%s: no driver data\n", __func__);
+ return -ENODEV;
+ }
+
+ switch_dev_unregister(&wb_ctrl->sdev);
+ return 0;
+}
+
static int mdss_wb_probe(struct platform_device *pdev)
{
struct mdss_panel_data *pdata = NULL;
+ struct mdss_wb_ctrl *wb_ctrl = NULL;
int rc = 0;
if (!pdev->dev.of_node)
return -ENODEV;
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
+ wb_ctrl = devm_kzalloc(&pdev->dev, sizeof(*wb_ctrl), GFP_KERNEL);
+ if (!wb_ctrl)
return -ENOMEM;
+ pdata = &wb_ctrl->pdata;
+ wb_ctrl->pdev = pdev;
+ platform_set_drvdata(pdev, wb_ctrl);
+
rc = !mdss_wb_parse_dt(pdev, pdata);
if (!rc)
return rc;
+ rc = mdss_wb_dev_init(wb_ctrl);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to set up device nodes for writeback panel\n");
+ return rc;
+ }
+
pdata->panel_info.type = WRITEBACK_PANEL;
pdata->panel_info.clk_rate = 74250000;
pdata->panel_info.pdest = DISPLAY_3;
@@ -120,6 +161,19 @@
return rc;
}
+static int mdss_wb_remove(struct platform_device *pdev)
+{
+ struct mdss_wb_ctrl *wb_ctrl = platform_get_drvdata(pdev);
+ if (!wb_ctrl) {
+ pr_err("%s: no driver data\n", __func__);
+ return -ENODEV;
+ }
+
+ mdss_wb_dev_uninit(wb_ctrl);
+ devm_kfree(&wb_ctrl->pdev->dev, wb_ctrl);
+ return 0;
+}
+
static const struct of_device_id mdss_wb_match[] = {
{ .compatible = "qcom,mdss_wb", },
{ { 0 } }
@@ -127,6 +181,7 @@
static struct platform_driver mdss_wb_driver = {
.probe = mdss_wb_probe,
+ .remove = mdss_wb_remove,
.driver = {
.name = "mdss_wb",
.of_match_table = mdss_wb_match,
diff --git a/drivers/video/msm/mdss/mdss_wb.h b/drivers/video/msm/mdss/mdss_wb.h
new file mode 100644
index 0000000..3b0c52a
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_wb.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_WB_H
+#define MDSS_WB_H
+
+#include <linux/switch.h>
+
+struct mdss_wb_ctrl {
+ struct platform_device *pdev;
+ struct mdss_panel_data pdata;
+ struct switch_dev sdev;
+};
+
+#endif
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 797d4a3..18c63a0 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -3238,6 +3238,29 @@
return mdp4_writeback_terminate(info);
}
+static int msmfb_overlay_ioctl_writeback_set_mirr_hint(struct fb_info *
+ info, void *argp)
+{
+ int ret = 0, hint;
+
+ if (!info) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = copy_from_user(&hint, argp, sizeof(hint));
+ if (ret)
+ goto error;
+
+ ret = mdp4_writeback_set_mirroring_hint(info, hint);
+ if (ret)
+ goto error;
+error:
+ if (ret)
+ pr_err("%s: ioctl failed\n", __func__);
+ return ret;
+}
+
#else
static int msmfb_overlay_ioctl_writeback_init(struct fb_info *info)
{
@@ -3270,6 +3293,12 @@
{
return -ENOTSUPP;
}
+
+static int msmfb_overlay_ioctl_writeback_set_mirr_hint(struct fb_info *
+ info, void *argp)
+{
+ return -ENOTSUPP;
+}
#endif
static int msmfb_overlay_3d_sbys(struct fb_info *info, unsigned long *argp)
@@ -3745,6 +3774,10 @@
case MSMFB_WRITEBACK_TERMINATE:
ret = msmfb_overlay_ioctl_writeback_terminate(info);
break;
+ case MSMFB_WRITEBACK_SET_MIRRORING_HINT:
+ ret = msmfb_overlay_ioctl_writeback_set_mirr_hint(
+ info, argp);
+ break;
#endif
case MSMFB_VSYNC_CTRL:
case MSMFB_OVERLAY_VSYNC_CTRL:
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index 7519ac7..a02a108 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -37,7 +37,9 @@
#include <linux/fb.h>
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/switch.h>
#include <linux/msm_mdp.h>
+
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
@@ -180,6 +182,7 @@
struct list_head writeback_busy_queue;
struct list_head writeback_free_queue;
struct list_head writeback_register_queue;
+ struct switch_dev writeback_sdev;
wait_queue_head_t wait_q;
struct ion_client *iclient;
unsigned long display_iova;
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 969b400..2f77d29 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -118,10 +118,10 @@
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
#define MSG_MASK_TBL_CNT 24
-#define EVENT_LAST_ID 0x09AB
+#define EVENT_LAST_ID 0x09B2
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 94
+#define MSG_SSID_0_LAST 97
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -287,6 +287,9 @@
MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
MSG_LVL_LOW
};
@@ -722,7 +725,7 @@
/* LOG CODES */
#define LOG_0 0x0
-#define LOG_1 0x1755
+#define LOG_1 0x17F4
#define LOG_2 0x0
#define LOG_3 0x0
#define LOG_4 0x4910
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index c737eb7..2a144e6 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -39,6 +39,12 @@
void kmap_flush_unused(void);
+#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
+void kmap_atomic_flush_unused(void);
+#else
+static inline void kmap_atomic_flush_unused(void) { }
+#endif
+
#else /* CONFIG_HIGHMEM */
static inline unsigned int nr_free_highpages(void) { return 0; }
@@ -72,6 +78,7 @@
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#define kmap_flush_unused() do {} while(0)
+#define kmap_atomic_flush_unused() do {} while (0)
#endif
#endif /* CONFIG_HIGHMEM */
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 88ad9a0..4983316 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -35,6 +35,7 @@
ION_HEAP_TYPE_SYSTEM,
ION_HEAP_TYPE_SYSTEM_CONTIG,
ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_CHUNK,
ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
are at the end of this enum */
ION_NUM_HEAPS,
@@ -44,8 +45,10 @@
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8
+
/**
- * heap flags - the lower 16 bits are used by core ion, the upper 16
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
@@ -74,8 +77,9 @@
/**
* struct ion_platform_heap - defines a heap in the given platform
* @type: type of the heap from ion_heap_type enum
- * @id: unique identifier for heap. When allocating (lower numbers
- * will be allocated from first)
+ * @id:		unique identifier for heap. When allocating, heaps with
+ *		higher id numbers are tried first. At allocation these are
+ *		passed as a bit mask and therefore cannot exceed
+ *		ION_NUM_HEAP_IDS.
* @name: used for debug purposes
* @base: base address of heap in physical memory if applicable
* @size: size of the heap in bytes if applicable
@@ -83,6 +87,10 @@
* @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
* @extra_data: Extra data specific to each heap type
* @priv: heap private data
+ * @align: required alignment in physical memory if applicable
+ * @priv: private info passed from the board file
+ *
+ * Provided by the board file.
*/
struct ion_platform_heap {
enum ion_heap_type type;
@@ -93,6 +101,7 @@
enum ion_memory_types memory_type;
unsigned int has_outer_cache;
void *extra_data;
+ ion_phys_addr_t align;
void *priv;
};
@@ -125,12 +134,12 @@
/**
* ion_client_create() - allocate a client and returns it
- * @dev: the global ion device
- * @heap_mask: mask of heaps this client can allocate from
- * @name: used for debugging
+ * @dev: the global ion device
+ * @heap_type_mask: mask of heaps this client can allocate from
+ * @name: used for debugging
*/
struct ion_client *ion_client_create(struct ion_device *dev,
- unsigned int heap_mask, const char *name);
+ const char *name);
/**
* ion_client_destroy() - free's a client and all it's handles
@@ -143,21 +152,22 @@
/**
* ion_alloc - allocate ion memory
- * @client: the client
- * @len: size of the allocation
- * @align: requested allocation alignment, lots of hardware blocks have
- * alignment requirements of some kind
- * @heap_mask: mask of heaps to allocate from, if multiple bits are set
- * heaps will be tried in order from lowest to highest order bit
- * @flags: heap flags, the low 16 bits are consumed by ion, the high 16
- * bits are passed on to the respective heap and can be heap
- * custom
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks
+ * have alignment requirements of some kind
+ * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from highest to lowest
+ * id
+ * @flags: heap flags, the low 16 bits are consumed by ion, the
+ * high 16 bits are passed on to the respective heap and
+ * can be heap custom
*
* Allocate memory in one of the heaps provided in heap mask and return
* an opaque handle to it.
*/
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_mask,
+ size_t align, unsigned int heap_id_mask,
unsigned int flags);
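Heaps are now selected by id at allocation time (highest id tried first). A hedged sketch of the updated flow, where my_ion_dev and my_heap_id are hypothetical board-specific values and error handling is omitted; ion_share_dma_buf_fd() is declared further down in this header:

	struct ion_client *client = ion_client_create(my_ion_dev, "example");
	struct ion_handle *handle = ion_alloc(client, SZ_1M, SZ_4K,
					      1 << my_heap_id, ION_FLAG_CACHED);
	int fd = ion_share_dma_buf_fd(client, handle);

	/* ... hand fd to the importer, then tear down ... */
	ion_free(client, handle);
	ion_client_destroy(client);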
/**
@@ -217,11 +227,19 @@
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_share_dma_buf() - given an ion client, create a dma-buf fd
+ * ion_share_dma_buf() - share buffer as dma-buf
* @client: the client
* @handle: the handle
*/
-int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client: the client
+ * @handle: the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
/**
* ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle
@@ -310,12 +328,12 @@
/**
* struct ion_allocation_data - metadata passed from userspace for allocations
- * @len: size of the allocation
- * @align: required alignment of the allocation
- * @heap_mask: mask of heaps to allocate from
- * @flags: flags passed to heap
- * @handle: pointer that will be populated with a cookie to use to refer
- * to this allocation
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @heap_id_mask: mask of heap ids to allocate from
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to
+ * refer to this allocation
*
* Provided by userspace as an argument to the ioctl
*/
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 8aa758d..b882fe2 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -40,8 +40,12 @@
might_sleep_if(timeout_us); \
for (;;) { \
(val) = readl(addr); \
- if ((cond) || (timeout_us && time_after(jiffies, timeout))) \
+ if (cond) \
break; \
+ if (timeout_us && time_after(jiffies, timeout)) { \
+ (val) = readl(addr); \
+ break; \
+ } \
if (sleep_us) \
usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
} \
diff --git a/include/linux/memory_alloc.h b/include/linux/memory_alloc.h
index b649451..8097949 100644
--- a/include/linux/memory_alloc.h
+++ b/include/linux/memory_alloc.h
@@ -20,7 +20,7 @@
struct mem_pool {
struct mutex pool_mutex;
struct gen_pool *gpool;
- unsigned long paddr;
+ phys_addr_t paddr;
unsigned long size;
unsigned long free;
unsigned int id;
@@ -28,29 +28,34 @@
struct alloc {
struct rb_node rb_node;
- void *vaddr;
- unsigned long paddr;
+ /*
+ * The physical address may be used for lookup in the tree so the
+	 * 'virtual address' needs to be able to accommodate larger physical
+ * addresses.
+ */
+ phys_addr_t vaddr;
+ phys_addr_t paddr;
struct mem_pool *mpool;
unsigned long len;
void *caller;
};
-struct mem_pool *initialize_memory_pool(unsigned long start,
+struct mem_pool *initialize_memory_pool(phys_addr_t start,
unsigned long size, int mem_type);
void *allocate_contiguous_memory(unsigned long size,
int mem_type, unsigned long align, int cached);
-unsigned long _allocate_contiguous_memory_nomap(unsigned long size,
+phys_addr_t _allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align, void *caller);
-unsigned long allocate_contiguous_memory_nomap(unsigned long size,
+phys_addr_t allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align);
void free_contiguous_memory(void *addr);
-void free_contiguous_memory_by_paddr(unsigned long paddr);
+void free_contiguous_memory_by_paddr(phys_addr_t paddr);
-unsigned long memory_pool_node_paddr(void *vaddr);
+phys_addr_t memory_pool_node_paddr(void *vaddr);
unsigned long memory_pool_node_len(void *vaddr);
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index 1c67b1e..5439fd1 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -165,6 +165,7 @@
unsigned int warm_bat_chg_current;
unsigned int cool_bat_voltage;
unsigned int warm_bat_voltage;
+ int hysteresis_temp;
unsigned int (*get_batt_capacity_percent) (void);
int64_t batt_id_min;
int64_t batt_id_max;
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index 37a12fb..3a9b1b9 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -150,6 +150,17 @@
#define WCD9XXX_CH(xport, xshift) \
{.port = xport, .shift = xshift}
+struct wcd9xxx_codec_type {
+ u16 id_major;
+ u16 id_minor;
+ struct mfd_cell *dev;
+ int size;
+ int num_irqs;
+	int version; /* -1 to retrieve version from chip version register */
+ enum wcd9xxx_slim_slave_addr_type slim_slave_type;
+ u16 i2c_chip_status;
+};
+
struct wcd9xxx {
struct device *dev;
struct slim_device *slim;
@@ -181,14 +192,14 @@
struct pm_qos_request pm_qos_req;
int wlock_holders;
- u8 idbyte[4];
+ u16 id_minor;
+ u16 id_major;
unsigned int irq_base;
unsigned int irq;
u8 irq_masks_cur[WCD9XXX_NUM_IRQ_REGS];
u8 irq_masks_cache[WCD9XXX_NUM_IRQ_REGS];
bool irq_level_high[WCD9XXX_MAX_NUM_IRQS];
- int num_irqs;
/* Slimbus or I2S port */
u32 num_rx_port;
u32 num_tx_port;
@@ -196,7 +207,7 @@
struct wcd9xxx_ch *tx_chs;
u32 mclk_rate;
- enum wcd9xxx_slim_slave_addr_type slim_slave_type;
+ const struct wcd9xxx_codec_type *codec_type;
};
int wcd9xxx_reg_read(struct wcd9xxx *wcd9xxx, unsigned short reg);
diff --git a/include/linux/mfd/wcd9xxx/pdata.h b/include/linux/mfd/wcd9xxx/pdata.h
index 813cac3..c6e4ab3 100644
--- a/include/linux/mfd/wcd9xxx/pdata.h
+++ b/include/linux/mfd/wcd9xxx/pdata.h
@@ -136,7 +136,7 @@
unsigned int hph_ocp_limit:3; /* Headphone OCP current limit */
};
-#define MAX_REGULATOR 7
+#define WCD9XXX_MAX_REGULATOR 8
/*
* format : TABLA_<POWER_SUPPLY_PIN_NAME>_CUR_MAX
*
@@ -151,11 +151,14 @@
#define WCD9XXX_VDDD_CDC_D_CUR_MAX 5000
#define WCD9XXX_VDDD_CDC_A_CUR_MAX 5000
+#define WCD9XXX_VDD_SPKDRV_NAME "cdc-vdd-spkdrv"
+
struct wcd9xxx_regulator {
const char *name;
int min_uV;
int max_uV;
int optimum_uA;
+ bool ondemand;
struct regulator *regulator;
};
@@ -168,7 +171,7 @@
struct slim_device slimbus_slave_device;
struct wcd9xxx_micbias_setting micbias;
struct wcd9xxx_ocp_setting ocp;
- struct wcd9xxx_regulator regulator[MAX_REGULATOR];
+ struct wcd9xxx_regulator regulator[WCD9XXX_MAX_REGULATOR];
u32 mclk_rate;
u32 dmic_sample_rate;
};
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 538185f..4e30082 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -122,6 +122,25 @@
* secure discard kind of operations to complete.
*/
#define SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE (1<<5)
+/*
+ * Ignore data timeout error for R1B commands as there will be no
+ * data associated and the busy timeout value for these commands
+ * could be lager than the maximum timeout value that controller
+ * can handle.
+ */
+#define SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD (1<<6)
+/*
+ * The preset value registers are not properly initialized by
+ * some hardware and hence preset value must not be enabled for
+ * such controllers.
+ */
+#define SDHCI_QUIRK2_BROKEN_PRESET_VALUE (1<<7)
+/*
+ * Some controllers define the usage of 0xF in data timeout counter
+ * register (0x2E) which is actually a reserved bit as per
+ * specification.
+ */
+#define SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT (1<<8)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index f932494..f9e483c 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -76,6 +76,8 @@
struct mdp_display_commit)
#define MSMFB_METADATA_SET _IOW(MSMFB_IOCTL_MAGIC, 165, struct msmfb_metadata)
#define MSMFB_METADATA_GET _IOW(MSMFB_IOCTL_MAGIC, 166, struct msmfb_metadata)
+#define MSMFB_WRITEBACK_SET_MIRRORING_HINT _IOW(MSMFB_IOCTL_MAGIC, 167, \
+ unsigned int)
#define FB_TYPE_3D_PANEL 0x10101010
#define MDP_IMGTYPE2_START 0x10000
@@ -171,6 +173,7 @@
#define MDP_OV_PIPE_FORCE_DMA 0x00004000
#define MDP_MEMORY_ID_TYPE_FB 0x00001000
#define MDP_BWC_EN 0x00000400
+#define MDP_DECIMATION_EN 0x00000800
#define MDP_TRANSP_NOP 0xffffffff
#define MDP_ALPHA_NOP 0xff
@@ -289,14 +292,19 @@
#define MDP_PP_IGC_FLAG_ROM0 0x10
#define MDP_PP_IGC_FLAG_ROM1 0x20
-#define MDSS_PP_DSPP_CFG 0x0000
-#define MDSS_PP_SSPP_CFG 0x4000
-#define MDSS_PP_LM_CFG 0x8000
-#define MDSS_PP_WB_CFG 0xC000
+#define MDSS_PP_DSPP_CFG 0x000
+#define MDSS_PP_SSPP_CFG 0x100
+#define MDSS_PP_LM_CFG 0x200
+#define MDSS_PP_WB_CFG 0x300
-#define MDSS_PP_LOCATION_MASK 0xC000
-#define MDSS_PP_LOGICAL_MASK 0x3FFF
+#define MDSS_PP_ARG_MASK 0x3C00
+#define MDSS_PP_ARG_NUM 4
+#define MDSS_PP_ARG_SHIFT 10
+#define MDSS_PP_LOCATION_MASK 0x0300
+#define MDSS_PP_LOGICAL_MASK 0x00FF
+#define MDSS_PP_ADD_ARG(var, arg) ((var) | (0x1 << (MDSS_PP_ARG_SHIFT + (arg))))
+#define PP_ARG(x, var) ((var) & (0x1 << (MDSS_PP_ARG_SHIFT + (x))))
#define PP_LOCAT(var) ((var) & MDSS_PP_LOCATION_MASK)
#define PP_BLOCK(var) ((var) & MDSS_PP_LOGICAL_MASK)
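A short sketch of how a caller composes a block identifier with the argument macros above (the display and pipe indices are purely illustrative):

	/* Per-pipe (SSPP) histogram request on display 0, VIG pipes 0 and 1. */
	u32 block = MDSS_PP_SSPP_CFG | MDP_LOGICAL_BLOCK_DISP_0;

	block = MDSS_PP_ADD_ARG(block, 0);
	block = MDSS_PP_ADD_ARG(block, 1);

	/*
	 * The driver decodes PP_LOCAT(block) == MDSS_PP_SSPP_CFG,
	 * PP_BLOCK(block) == MDP_LOGICAL_BLOCK_DISP_0, and sees
	 * PP_ARG(0, block) and PP_ARG(1, block) as non-zero.
	 */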
@@ -326,6 +334,8 @@
#define MDP_OVERLAY_PP_PA_CFG 0x4
#define MDP_OVERLAY_PP_IGC_CFG 0x8
#define MDP_OVERLAY_PP_SHARP_CFG 0x10
+#define MDP_OVERLAY_PP_HIST_CFG 0x20
+#define MDP_OVERLAY_PP_HIST_LUT_CFG 0x40
#define MDP_CSC_FLAG_ENABLE 0x1
#define MDP_CSC_FLAG_YUV_IN 0x2
@@ -361,6 +371,21 @@
uint32_t *c2_data;
};
+struct mdp_histogram_cfg {
+ uint32_t ops;
+ uint32_t block;
+ uint8_t frame_cnt;
+ uint8_t bit_mask;
+ uint16_t num_bins;
+};
+
+struct mdp_hist_lut_data {
+ uint32_t block;
+ uint32_t ops;
+ uint32_t len;
+ uint32_t *data;
+};
+
struct mdp_overlay_pp_params {
uint32_t config_ops;
struct mdp_csc_cfg csc_cfg;
@@ -368,6 +393,8 @@
struct mdp_pa_cfg pa_cfg;
struct mdp_igc_lut_data igc_cfg;
struct mdp_sharp_cfg sharp_cfg;
+ struct mdp_histogram_cfg hist_cfg;
+ struct mdp_hist_lut_data hist_lut_cfg;
};
struct mdp_overlay {
@@ -380,7 +407,9 @@
uint32_t transp_mask;
uint32_t flags;
uint32_t id;
- uint32_t user_data[8];
+ uint32_t user_data[7];
+ uint8_t horz_deci;
+ uint8_t vert_deci;
struct mdp_overlay_pp_params overlay_pp_cfg;
};
@@ -433,7 +462,7 @@
MDP_BLOCK_DMA_S,
MDP_BLOCK_DMA_E,
MDP_BLOCK_OVERLAY_2,
- MDP_LOGICAL_BLOCK_DISP_0 = 0x1000,
+ MDP_LOGICAL_BLOCK_DISP_0 = 0x10,
MDP_LOGICAL_BLOCK_DISP_1,
MDP_LOGICAL_BLOCK_DISP_2,
MDP_BLOCK_MAX,
@@ -502,13 +531,6 @@
};
-struct mdp_hist_lut_data {
- uint32_t block;
- uint32_t ops;
- uint32_t len;
- uint32_t *data;
-};
-
struct mdp_lut_cfg_data {
uint32_t lut_type;
union {
@@ -677,6 +699,7 @@
uint8_t rgb_pipes;
uint8_t vig_pipes;
uint8_t dma_pipes;
+ uint32_t features;
};
struct msmfb_metadata {
@@ -748,6 +771,13 @@
MDP_IOMMU_DOMAIN_NS,
};
+enum {
+ MDP_WRITEBACK_MIRROR_OFF,
+ MDP_WRITEBACK_MIRROR_ON,
+ MDP_WRITEBACK_MIRROR_PAUSE,
+ MDP_WRITEBACK_MIRROR_RESUME,
+};
+
#ifdef __KERNEL__
int msm_fb_get_iommu_domain(struct fb_info *info, int domain);
/* get the framebuffer physical address information */
diff --git a/include/linux/msm_thermal.h b/include/linux/msm_thermal.h
index 2c9a613..f14cc52 100644
--- a/include/linux/msm_thermal.h
+++ b/include/linux/msm_thermal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,9 +17,16 @@
struct msm_thermal_data {
uint32_t sensor_id;
uint32_t poll_ms;
- uint32_t limit_temp_degC;
- uint32_t temp_hysteresis_degC;
+ int32_t limit_temp_degC;
+ int32_t temp_hysteresis_degC;
uint32_t freq_step;
+ int32_t core_limit_temp_degC;
+ int32_t core_temp_hysteresis_degC;
+ uint32_t core_control_mask;
+ int32_t vdd_rstr_temp_degC;
+ int32_t vdd_rstr_temp_hyst_degC;
+ int32_t psm_temp_degC;
+ int32_t psm_temp_hyst_degC;
};
#ifdef CONFIG_THERMAL_MONITOR
diff --git a/include/linux/msm_tsens.h b/include/linux/msm_tsens.h
index 757f1dc..35eacf1 100644
--- a/include/linux/msm_tsens.h
+++ b/include/linux/msm_tsens.h
@@ -44,9 +44,17 @@
#if defined(CONFIG_THERMAL_TSENS8974)
int __init tsens_tm_init_driver(void);
+int tsens_get_sw_id_mapping(int sensor_num, int *sensor_sw_idx);
+int tsens_get_hw_id_mapping(int sensor_sw_id, int *sensor_hw_num);
#else
static inline int __init tsens_tm_init_driver(void)
{ return -ENXIO; }
+static inline int tsens_get_sw_id_mapping(
+ int sensor_num, int *sensor_sw_idx)
+{ return -ENXIO; }
+static inline int tsens_get_hw_id_mapping(
+ int sensor_sw_id, int *sensor_hw_num)
+{ return -ENXIO; }
#endif
#if defined(CONFIG_THERMAL_TSENS8974) || defined(CONFIG_THERMAL_TSENS8960)
diff --git a/include/linux/qseecom.h b/include/linux/qseecom.h
index c399b81..294c881 100644
--- a/include/linux/qseecom.h
+++ b/include/linux/qseecom.h
@@ -138,6 +138,25 @@
enum qseecom_key_management_usage_type usage;
};
+#define SHA256_DIGEST_LENGTH (256/8)
+/*
+ * struct qseecom_save_partition_hash_req
+ * @partition_id - partition id.
+ * @hash[SHA256_DIGEST_LENGTH] - sha256 digest.
+ */
+struct qseecom_save_partition_hash_req {
+ int partition_id; /* in */
+ char digest[SHA256_DIGEST_LENGTH]; /* in */
+};
+
+/*
+ * struct qseecom_is_es_activated_req
+ * @is_activated - 1=true , 0=false
+ */
+struct qseecom_is_es_activated_req {
+ int is_activated; /* out */
+};
+
#define QSEECOM_IOC_MAGIC 0x97
@@ -195,5 +214,10 @@
#define QSEECOM_IOCTL_WIPE_KEY_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 18, struct qseecom_wipe_key_req)
+#define QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 19, struct qseecom_save_partition_hash_req)
+
+#define QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 20, struct qseecom_is_es_activated_req)
#endif /* __QSEECOM_H_ */
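A hedged userspace sketch of the new enterprise-security query; the /dev/qseecom node name and the surrounding boilerplate are assumptions, not part of this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/qseecom.h>

	struct qseecom_is_es_activated_req req = { 0 };
	int fd = open("/dev/qseecom", O_RDWR);

	if (fd >= 0 && ioctl(fd, QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ, &req) == 0)
		printf("ES activated: %d\n", req.is_activated);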
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index c8a20da..5e73bd9 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -201,6 +201,7 @@
* USB enters LPM.
* @delay_lpm_on_disconnect: Use a delay before entering LPM
* upon USB cable disconnection.
+ * @enable_sec_phy: Use second HSPHY with USB2 core
* @bus_scale_table: parameters for bus bandwidth requirements
* @mhl_dev_name: MHL device name used to register with MHL driver.
*/
@@ -222,12 +223,14 @@
bool core_clk_always_on_workaround;
bool delay_lpm_on_disconnect;
bool dp_manual_pullup;
+ bool enable_sec_phy;
struct msm_bus_scale_pdata *bus_scale_table;
const char *mhl_dev_name;
};
/* phy related flags */
#define ENABLE_DP_MANUAL_PULLUP BIT(0)
+#define ENABLE_SECONDARY_PHY BIT(1)
/* Timeout (in msec) values (min - max) associated with OTG timers */
diff --git a/lib/memory_alloc.c b/lib/memory_alloc.c
index cc7424f..03f1944 100644
--- a/lib/memory_alloc.c
+++ b/lib/memory_alloc.c
@@ -67,7 +67,7 @@
struct rb_node *r = p;
struct alloc *node = rb_entry(r, struct alloc, rb_node);
- seq_printf(m, "0x%lx 0x%p %ld %u %pS\n", node->paddr, node->vaddr,
+ seq_printf(m, "0x%pa 0x%pa %ld %u %pS\n", &node->paddr, &node->vaddr,
node->len, node->mpool->id, node->caller);
return 0;
}
@@ -84,7 +84,7 @@
return seq_open(file, &mempool_op);
}
-static struct alloc *find_alloc(void *addr)
+static struct alloc *find_alloc(phys_addr_t addr)
{
struct rb_root *root = &alloc_root;
struct rb_node *p = root->rb_node;
@@ -126,7 +126,7 @@
else if (node->vaddr > tmp->vaddr)
p = &(*p)->rb_right;
else {
- WARN(1, "memory at %p already allocated", tmp->vaddr);
+ WARN(1, "memory at %pa already allocated", &tmp->vaddr);
mutex_unlock(&alloc_mutex);
return -EINVAL;
}
@@ -149,7 +149,7 @@
return 0;
}
-static struct gen_pool *initialize_gpool(unsigned long start,
+static struct gen_pool *initialize_gpool(phys_addr_t start,
unsigned long size)
{
struct gen_pool *gpool;
@@ -194,7 +194,12 @@
if (!vaddr)
goto out_kfree;
- node->vaddr = vaddr;
+ /*
+ * Just cast to an unsigned long to avoid warnings about casting from a
+ * pointer to an integer of different size. The pointer is only 32-bits
+	 * pointer to an integer of different size. The pointer is only 32 bits
+ */
+ node->vaddr = (unsigned long)vaddr;
node->paddr = paddr;
node->len = aligned_size;
node->mpool = mpool;
@@ -216,13 +221,19 @@
static void __free(void *vaddr, bool unmap)
{
- struct alloc *node = find_alloc(vaddr);
+ struct alloc *node = find_alloc((unsigned long)vaddr);
if (!node)
return;
if (unmap)
- iounmap(node->vaddr);
+ /*
+ * We need the double cast because otherwise gcc complains about
+ * cast to pointer of different size. This is technically a down
+ * cast but if unmap is being called, this had better be an
+ * actual 32-bit pointer anyway.
+ */
+ iounmap((void *)(unsigned long)node->vaddr);
gen_pool_free(node->mpool->gpool, node->paddr, node->len);
node->mpool->free += node->len;
@@ -248,7 +259,7 @@
return mpool;
}
-struct mem_pool *initialize_memory_pool(unsigned long start,
+struct mem_pool *initialize_memory_pool(phys_addr_t start,
unsigned long size, int mem_type)
{
int id = mem_type;
@@ -264,8 +275,8 @@
mpools[id].id = id;
mutex_unlock(&mpools[id].pool_mutex);
- pr_info("memory pool %d (start %lx size %lx) initialized\n",
- id, start, size);
+ pr_info("memory pool %d (start %pa size %lx) initialized\n",
+ id, &start, size);
return &mpools[id];
}
EXPORT_SYMBOL_GPL(initialize_memory_pool);
@@ -285,10 +296,10 @@
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory);
-unsigned long _allocate_contiguous_memory_nomap(unsigned long size,
+phys_addr_t _allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align, void *caller)
{
- unsigned long paddr;
+ phys_addr_t paddr;
unsigned long aligned_size;
struct alloc *node;
@@ -317,7 +328,7 @@
* are disjoint, so there won't be any chance of
* a duplicate node->vaddr value.
*/
- node->vaddr = (void *)paddr;
+ node->vaddr = paddr;
node->len = aligned_size;
node->mpool = mpool;
node->caller = caller;
@@ -334,7 +345,7 @@
}
EXPORT_SYMBOL_GPL(_allocate_contiguous_memory_nomap);
-unsigned long allocate_contiguous_memory_nomap(unsigned long size,
+phys_addr_t allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align)
{
return _allocate_contiguous_memory_nomap(size, mem_type, align,
@@ -351,18 +362,18 @@
}
EXPORT_SYMBOL_GPL(free_contiguous_memory);
-void free_contiguous_memory_by_paddr(unsigned long paddr)
+void free_contiguous_memory_by_paddr(phys_addr_t paddr)
{
if (!paddr)
return;
- __free((void *)paddr, false);
+ __free((void *)(unsigned long)paddr, false);
return;
}
EXPORT_SYMBOL_GPL(free_contiguous_memory_by_paddr);
-unsigned long memory_pool_node_paddr(void *vaddr)
+phys_addr_t memory_pool_node_paddr(void *vaddr)
{
- struct alloc *node = find_alloc(vaddr);
+ struct alloc *node = find_alloc((unsigned long)vaddr);
if (!node)
return -EINVAL;
@@ -373,7 +384,7 @@
unsigned long memory_pool_node_len(void *vaddr)
{
- struct alloc *node = find_alloc(vaddr);
+ struct alloc *node = find_alloc((unsigned long)vaddr);
if (!node)
return -EINVAL;
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index fd3e0dc..a7069a6 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -193,6 +193,7 @@
s32 dmic_5_6_clk_cnt;
u32 anc_slot;
+ bool anc_func;
/*track tapan interface type*/
u8 intf_type;
@@ -348,6 +349,58 @@
return 0;
}
+static int tapan_get_anc_func(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = (tapan->anc_func == true ? 1 : 0);
+ return 0;
+}
+
+static int tapan_put_anc_func(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+ mutex_lock(&dapm->codec->mutex);
+ tapan->anc_func = (!ucontrol->value.integer.value[0] ? false : true);
+
+ dev_err(codec->dev, "%s: anc_func %x", __func__, tapan->anc_func);
+
+ if (tapan->anc_func == true) {
+ pr_info("enable anc virtual widgets");
+ snd_soc_dapm_enable_pin(dapm, "ANC HPHR");
+ snd_soc_dapm_enable_pin(dapm, "ANC HPHL");
+ snd_soc_dapm_enable_pin(dapm, "ANC HEADPHONE");
+ snd_soc_dapm_enable_pin(dapm, "ANC EAR PA");
+ snd_soc_dapm_enable_pin(dapm, "ANC EAR");
+ snd_soc_dapm_disable_pin(dapm, "HPHR");
+ snd_soc_dapm_disable_pin(dapm, "HPHL");
+ snd_soc_dapm_disable_pin(dapm, "HEADPHONE");
+ snd_soc_dapm_disable_pin(dapm, "EAR PA");
+ snd_soc_dapm_disable_pin(dapm, "EAR");
+ } else {
+ pr_info("disable anc virtual widgets");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+ snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE");
+ snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
+ snd_soc_dapm_disable_pin(dapm, "ANC EAR");
+ snd_soc_dapm_enable_pin(dapm, "HPHR");
+ snd_soc_dapm_enable_pin(dapm, "HPHL");
+ snd_soc_dapm_enable_pin(dapm, "HEADPHONE");
+ snd_soc_dapm_enable_pin(dapm, "EAR PA");
+ snd_soc_dapm_enable_pin(dapm, "EAR");
+ }
+ snd_soc_dapm_sync(dapm);
+ mutex_unlock(&dapm->codec->mutex);
+ return 0;
+}
+
static int tapan_pa_gain_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -713,6 +766,10 @@
SOC_ENUM_SINGLE_EXT(2, tapan_ear_pa_gain_text),
};
+static const char *const tapan_anc_func_text[] = {"OFF", "ON"};
+static const struct soc_enum tapan_anc_func_enum =
+ SOC_ENUM_SINGLE_EXT(2, tapan_anc_func_text);
+
/*cut of frequency for high pass filter*/
static const char * const cf_text[] = {
"MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
@@ -770,11 +827,11 @@
SOC_SINGLE_TLV("SPK DRV Volume", TAPAN_A_SPKR_DRV_GAIN, 3, 7, 1,
line_gain),
- SOC_SINGLE_TLV("ADC1 Volume", TAPAN_A_TX_1_EN, 2, 13, 0, analog_gain),
- SOC_SINGLE_TLV("ADC2 Volume", TAPAN_A_TX_2_EN, 2, 13, 0, analog_gain),
- SOC_SINGLE_TLV("ADC3 Volume", TAPAN_A_TX_3_EN, 2, 13, 0, analog_gain),
- SOC_SINGLE_TLV("ADC4 Volume", TAPAN_A_TX_4_EN, 2, 13, 0, analog_gain),
- SOC_SINGLE_TLV("ADC5 Volume", TAPAN_A_TX_5_EN, 2, 13, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC1 Volume", TAPAN_A_TX_1_EN, 2, 19, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC2 Volume", TAPAN_A_TX_2_EN, 2, 19, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC3 Volume", TAPAN_A_TX_3_EN, 2, 19, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC4 Volume", TAPAN_A_TX_4_EN, 2, 19, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC5 Volume", TAPAN_A_TX_5_EN, 2, 19, 0, analog_gain),
SOC_SINGLE_S8_TLV("RX1 Digital Volume", TAPAN_A_CDC_RX1_VOL_CTL_B2_CTL,
-84, 40, digital_gain),
@@ -803,9 +860,10 @@
SOC_SINGLE_S8_TLV("IIR1 INP4 Volume", TAPAN_A_CDC_IIR1_GAIN_B4_CTL, -84,
40, digital_gain),
- SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 0, 100, tapan_get_anc_slot,
+ SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 100, 0, tapan_get_anc_slot,
tapan_put_anc_slot),
-
+ SOC_ENUM_EXT("ANC Function", tapan_anc_func_enum, tapan_get_anc_func,
+ tapan_put_anc_func),
SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
SOC_ENUM("TX3 HPF cut off", cf_dec3_enum),
@@ -943,6 +1001,10 @@
"RSVD", "RSVD"
};
+static const char * const anc1_fb_mux_text[] = {
+ "ZERO", "EAR_HPH_L", "EAR_LINE_1",
+};
+
static const char * const iir1_inp1_text[] = {
"ZERO", "DEC1", "DEC2", "DEC3", "DEC4",
"RX1", "RX2", "RX3", "RX4", "RX5"
@@ -1031,6 +1093,9 @@
static const struct soc_enum anc2_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_ANC_B1_CTL, 4, 15, anc_mux_text);
+static const struct soc_enum anc1_fb_mux_enum =
+ SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_ANC_B2_CTL, 0, 3, anc1_fb_mux_text);
+
static const struct soc_enum iir1_inp1_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_EQ1_B1_CTL, 0, 10, iir1_inp1_text);
@@ -1200,6 +1265,9 @@
static const struct snd_kcontrol_new anc2_mux =
SOC_DAPM_ENUM("ANC2 MUX Mux", anc2_mux_enum);
+static const struct snd_kcontrol_new anc1_fb_mux =
+ SOC_DAPM_ENUM("ANC1 FB MUX Mux", anc1_fb_mux_enum);
+
static const struct snd_kcontrol_new dac1_switch[] = {
SOC_DAPM_SINGLE("Switch", TAPAN_A_RX_EAR_EN, 5, 1, 0)
};
@@ -1322,11 +1390,11 @@
return 0;
}
break;
- default:
- dev_err(codec->dev, "Unknown AIF %d\n", dai_id);
- mutex_unlock(&codec->mutex);
- return -EINVAL;
- }
+ default:
+ dev_err(codec->dev, "Unknown AIF %d\n", dai_id);
+ mutex_unlock(&codec->mutex);
+ return -EINVAL;
+ }
dev_dbg(codec->dev, "%s: name %s sname %s updated value %u shift %d\n",
__func__, widget->name, widget->sname,
widget->value, widget->shift);
@@ -1392,14 +1460,14 @@
break;
case 2:
if (wcd9xxx_rx_vport_validation(port_id + core->num_tx_port,
- &tapan_p->dai[AIF1_PB].wcd9xxx_ch_list))
+ &tapan_p->dai[AIF2_PB].wcd9xxx_ch_list))
goto pr_err;
list_add_tail(&core->rx_chs[port_id].list,
&tapan_p->dai[AIF2_PB].wcd9xxx_ch_list);
break;
case 3:
if (wcd9xxx_rx_vport_validation(port_id + core->num_tx_port,
- &tapan_p->dai[AIF1_PB].wcd9xxx_ch_list))
+ &tapan_p->dai[AIF3_PB].wcd9xxx_ch_list))
goto pr_err;
list_add_tail(&core->rx_chs[port_id].list,
&tapan_p->dai[AIF3_PB].wcd9xxx_ch_list);
@@ -1668,9 +1736,11 @@
int anc_size_remaining;
u32 *anc_ptr;
u16 reg;
- u8 mask, val;
+ u8 mask, val, old_val;
dev_dbg(codec->dev, "%s %d\n", __func__, event);
+ if (tapan->anc_func == 0)
+ return 0;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -1737,14 +1807,21 @@
for (i = 0; i < anc_writes_size; i++) {
TAPAN_CODEC_UNPACK_ENTRY(anc_ptr[i], reg,
mask, val);
- snd_soc_write(codec, reg, val);
+ old_val = snd_soc_read(codec, reg);
+ snd_soc_write(codec, reg, (old_val & ~mask) |
+ (val & mask));
}
release_firmware(fw);
break;
case SND_SOC_DAPM_POST_PMD:
- snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_RESET_CTL, 0xFF);
+ msleep(40);
+ snd_soc_update_bits(codec, TAPAN_A_CDC_ANC1_B1_CTL, 0x01, 0x00);
+ snd_soc_update_bits(codec, TAPAN_A_CDC_ANC2_B1_CTL, 0x02, 0x00);
+ msleep(20);
+ snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_RESET_CTL, 0x0F);
snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_CLK_EN_CTL, 0);
+ snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_RESET_CTL, 0xFF);
break;
}
return 0;
@@ -2137,12 +2214,12 @@
dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
if (w->shift == 5) {
- e_pre_on = WCD9XXX_EVENT_PRE_HPHL_PA_ON;
- e_post_off = WCD9XXX_EVENT_POST_HPHL_PA_OFF;
- req_clsh_state = WCD9XXX_CLSH_STATE_HPHL;
- } else if (w->shift == 4) {
e_pre_on = WCD9XXX_EVENT_PRE_HPHR_PA_ON;
e_post_off = WCD9XXX_EVENT_POST_HPHR_PA_OFF;
+ req_clsh_state = WCD9XXX_CLSH_STATE_HPHL;
+ } else if (w->shift == 4) {
+ e_pre_on = WCD9XXX_EVENT_PRE_HPHL_PA_ON;
+ e_post_off = WCD9XXX_EVENT_POST_HPHL_PA_OFF;
req_clsh_state = WCD9XXX_CLSH_STATE_HPHR;
} else {
pr_err("%s: Invalid w->shift %d\n", __func__, w->shift);
@@ -2182,6 +2259,46 @@
return 0;
}
+static int tapan_codec_enable_anc_hph(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ int ret = 0;
+
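+ /* Sequence ANC enable/disable around the normal HPH PA event handling */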
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ ret = tapan_hph_pa_event(w, kcontrol, event);
+ if (w->shift == 4) {
+ ret |= tapan_codec_enable_anc(w, kcontrol, event);
+ msleep(50);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ if (w->shift == 4) {
+ snd_soc_update_bits(codec,
+ TAPAN_A_RX_HPH_CNP_EN, 0x30, 0x30);
+ msleep(30);
+ }
+ ret = tapan_hph_pa_event(w, kcontrol, event);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ if (w->shift == 5) {
+ snd_soc_update_bits(codec,
+ TAPAN_A_RX_HPH_CNP_EN, 0x30, 0x00);
+ msleep(40);
+ }
+ if (w->shift == 5) {
+ snd_soc_update_bits(codec,
+ TAPAN_A_TX_7_MBHC_EN, 0x80, 0x00);
+ ret |= tapan_codec_enable_anc(w, kcontrol, event);
+ }
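+ /* no break: fall through so the HPH PA event handler also runs for PRE_PMD */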
+ case SND_SOC_DAPM_POST_PMD:
+ ret = tapan_hph_pa_event(w, kcontrol, event);
+ break;
+ }
+ return ret;
+}
+
static const struct snd_soc_dapm_widget tapan_dapm_i2s_widgets[] = {
SND_SOC_DAPM_SUPPLY("I2S_CLK", TAPAN_A_CDC_CLK_I2S_CTL,
4, 0, NULL, 0),
@@ -2296,6 +2413,11 @@
{"EAR_PA_MIXER", NULL, "DAC1"},
{"DAC1", NULL, "RX_BIAS"},
+ {"ANC EAR", NULL, "ANC EAR PA"},
+ {"ANC EAR PA", NULL, "EAR_PA_MIXER"},
+ {"ANC1 FB MUX", "EAR_HPH_L", "RX1 MIX2"},
+ {"ANC1 FB MUX", "EAR_LINE_1", "RX2 MIX2"},
+
/* Headset (RX MIX1 and RX MIX2) */
{"HEADPHONE", NULL, "HPHL"},
{"HEADPHONE", NULL, "HPHR"},
@@ -2308,6 +2430,33 @@
{"HPHR_PA_MIXER", NULL, "HPHR DAC"},
{"HPHR DAC", NULL, "RX_BIAS"},
+ {"ANC HEADPHONE", NULL, "ANC HPHL"},
+ {"ANC HEADPHONE", NULL, "ANC HPHR"},
+
+ {"ANC HPHL", NULL, "HPHL_PA_MIXER"},
+ {"ANC HPHR", NULL, "HPHR_PA_MIXER"},
+
+ {"ANC1 MUX", "ADC1", "ADC1"},
+ {"ANC1 MUX", "ADC2", "ADC2"},
+ {"ANC1 MUX", "ADC3", "ADC3"},
+ {"ANC1 MUX", "ADC4", "ADC4"},
+ {"ANC1 MUX", "ADC5", "ADC5"},
+ {"ANC1 MUX", "DMIC1", "DMIC1"},
+ {"ANC1 MUX", "DMIC2", "DMIC2"},
+ {"ANC1 MUX", "DMIC3", "DMIC3"},
+ {"ANC1 MUX", "DMIC4", "DMIC4"},
+ {"ANC2 MUX", "ADC1", "ADC1"},
+ {"ANC2 MUX", "ADC2", "ADC2"},
+ {"ANC2 MUX", "ADC3", "ADC3"},
+ {"ANC2 MUX", "ADC4", "ADC4"},
+ {"ANC2 MUX", "ADC5", "ADC5"},
+ {"ANC2 MUX", "DMIC1", "DMIC1"},
+ {"ANC2 MUX", "DMIC2", "DMIC2"},
+ {"ANC2 MUX", "DMIC3", "DMIC3"},
+ {"ANC2 MUX", "DMIC4", "DMIC4"},
+
+ {"ANC HPHR", NULL, "CDC_CONN"},
+
{"DAC1", "Switch", "CLASS_H_DSM MUX"},
{"HPHL DAC", "Switch", "CLASS_H_DSM MUX"},
{"HPHR DAC", NULL, "RX2 CHAIN"},
@@ -2336,6 +2485,8 @@
{"RX1 CHAIN", NULL, "RX1 MIX2"},
{"RX2 CHAIN", NULL, "RX2 MIX2"},
{"CLASS_H_DSM MUX", "RX_HPHL", "RX1 CHAIN"},
+ {"RX1 MIX2", NULL, "ANC1 MUX"},
+ {"RX2 MIX2", NULL, "ANC2 MUX"},
{"LINEOUT1 DAC", NULL, "RX_BIAS"},
{"LINEOUT2 DAC", NULL, "RX_BIAS"},
@@ -2564,6 +2715,14 @@
(reg <= TAPAN_A_CDC_IIR2_COEF_B2_CTL))
return 1;
+ /* ANC filter registers are not cacheable */
+ if ((reg >= TAPAN_A_CDC_ANC1_IIR_B1_CTL) &&
+ (reg <= TAPAN_A_CDC_ANC1_LPF_B2_CTL))
+ return 1;
+ if ((reg >= TAPAN_A_CDC_ANC2_IIR_B1_CTL) &&
+ (reg <= TAPAN_A_CDC_ANC2_LPF_B2_CTL))
+ return 1;
+
/* Digital gain register is not cacheable so we have to write
* the setting even it is the same
*/
@@ -2850,7 +3009,7 @@
tapan->comp_fs[comp_rx_path[j]]
= compander_fs;
}
- if (j <= 2)
+ if (j <= 1)
rx_mix_1_reg_1 += 3;
else
rx_mix_1_reg_1 += 2;
@@ -3419,6 +3578,33 @@
return 0;
}
+static int tapan_codec_enable_anc_ear(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ int ret = 0;
+
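+ /* Bring ANC up before the EAR PA enables and tear it down around PA disable */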
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ ret = tapan_codec_enable_anc(w, kcontrol, event);
+ msleep(50);
+ snd_soc_update_bits(codec, TAPAN_A_RX_EAR_EN, 0x10, 0x10);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ ret = tapan_codec_enable_ear_pa(w, kcontrol, event);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, TAPAN_A_RX_EAR_EN, 0x10, 0x00);
+ msleep(40);
+ ret |= tapan_codec_enable_anc(w, kcontrol, event);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ ret = tapan_codec_enable_ear_pa(w, kcontrol, event);
+ break;
+ }
+ return ret;
+}
+
/* Todo: Have seperate dapm widgets for I2S and Slimbus.
* Might Need to have callbacks registered only for slimbus
@@ -3695,9 +3881,21 @@
SND_SOC_DAPM_MUX("ANC1 MUX", SND_SOC_NOPM, 0, 0, &anc1_mux),
SND_SOC_DAPM_MUX("ANC2 MUX", SND_SOC_NOPM, 0, 0, &anc2_mux),
- SND_SOC_DAPM_MIXER_E("ANC", SND_SOC_NOPM, 0, 0, NULL, 0,
- tapan_codec_enable_anc, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_OUTPUT("ANC HEADPHONE"),
+ SND_SOC_DAPM_PGA_E("ANC HPHL", SND_SOC_NOPM, 5, 0, NULL, 0,
+ tapan_codec_enable_anc_hph,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PGA_E("ANC HPHR", SND_SOC_NOPM, 4, 0, NULL, 0,
+ tapan_codec_enable_anc_hph, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_OUTPUT("ANC EAR"),
+ SND_SOC_DAPM_PGA_E("ANC EAR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+ tapan_codec_enable_anc_ear,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX("ANC1 FB MUX", SND_SOC_NOPM, 0, 0, &anc1_fb_mux),
SND_SOC_DAPM_INPUT("AMIC2"),
SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 External", TAPAN_A_MICB_2_CTL, 7, 0,
@@ -4304,6 +4502,14 @@
(void) tapan_setup_irqs(tapan);
atomic_set(&kp_tapan_priv, (unsigned long)tapan);
+ mutex_lock(&dapm->codec->mutex);
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
+ snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE");
+ snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
+ snd_soc_dapm_disable_pin(dapm, "ANC EAR");
+ snd_soc_dapm_sync(dapm);
+ mutex_unlock(&dapm->codec->mutex);
codec->ignore_pmdown_time = 1;
return ret;
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index cc1e8eb..7ba43c0 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -24,6 +24,7 @@
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9xxx/wcd9320_registers.h>
#include <linux/mfd/wcd9xxx/pdata.h>
+#include <linux/regulator/consumer.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -41,6 +42,9 @@
#define TAIKO_MAD_SLIMBUS_TX_PORT 12
#define TAIKO_MAD_AUDIO_FIRMWARE_PATH "wcd9320/wcd9320_mad_audio.bin"
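+/* HPH PA settle times in microseconds, with the compander enabled/disabled */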
+#define TAIKO_HPH_PA_SETTLE_COMP_ON 3000
+#define TAIKO_HPH_PA_SETTLE_COMP_OFF 13000
+
static atomic_t kp_taiko_priv;
static int spkr_drv_wrnd_param_set(const char *val,
const struct kernel_param *kp);
@@ -394,6 +398,7 @@
u8 aux_r_gain;
bool spkr_pa_widget_on;
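+ /* Speaker driver VDD supply, enabled/disabled around the SPK PA DAPM event */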
+ struct regulator *spkdrv_reg;
struct afe_param_cdc_slimbus_slave_cfg slimbus_slave_cfg;
@@ -404,7 +409,6 @@
/* class h specific data */
struct wcd9xxx_clsh_cdc_data clsh_d;
-
};
static const u32 comp_shift[] = {
@@ -427,39 +431,39 @@
static const struct comp_sample_dependent_params comp_samp_params[] = {
{
/* 8 Khz */
- .peak_det_timeout = 0x02,
+ .peak_det_timeout = 0x06,
.rms_meter_div_fact = 0x09,
.rms_meter_resamp_fact = 0x06,
},
{
/* 16 Khz */
- .peak_det_timeout = 0x03,
+ .peak_det_timeout = 0x07,
.rms_meter_div_fact = 0x0A,
.rms_meter_resamp_fact = 0x0C,
},
{
/* 32 Khz */
- .peak_det_timeout = 0x05,
+ .peak_det_timeout = 0x08,
.rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x1E,
},
{
/* 48 Khz */
- .peak_det_timeout = 0x05,
+ .peak_det_timeout = 0x09,
.rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x28,
},
{
/* 96 Khz */
- .peak_det_timeout = 0x06,
+ .peak_det_timeout = 0x0A,
.rms_meter_div_fact = 0x0C,
.rms_meter_resamp_fact = 0x50,
},
{
/* 192 Khz */
- .peak_det_timeout = 0x07,
- .rms_meter_div_fact = 0xD,
- .rms_meter_resamp_fact = 0xA0,
+ .peak_det_timeout = 0x0B,
+ .rms_meter_div_fact = 0x0C,
+ .rms_meter_resamp_fact = 0x50,
},
};
@@ -809,6 +813,36 @@
pr_debug("%s: Compander %d enable current %d, new %d\n",
__func__, comp, taiko->comp_enabled[comp], value);
taiko->comp_enabled[comp] = value;
+
+ if (comp == COMPANDER_1 &&
+ taiko->comp_enabled[comp] == 1) {
+ /* Wavegen to 5 msec */
+ snd_soc_write(codec, TAIKO_A_RX_HPH_CNP_WG_CTL, 0xDA);
+ snd_soc_write(codec, TAIKO_A_RX_HPH_CNP_WG_TIME, 0x15);
+ snd_soc_write(codec, TAIKO_A_RX_HPH_BIAS_WG_OCP, 0x2A);
+
+ /* Enable Chopper */
+ snd_soc_update_bits(codec,
+ TAIKO_A_RX_HPH_CHOP_CTL, 0x80, 0x80);
+
+ snd_soc_write(codec, TAIKO_A_NCP_DTEST, 0x20);
+ pr_debug("%s: Enabled Chopper and set wavegen to 5 msec\n",
+ __func__);
+ } else if (comp == COMPANDER_1 &&
+ taiko->comp_enabled[comp] == 0) {
+ /* Wavegen to 20 msec */
+ snd_soc_write(codec, TAIKO_A_RX_HPH_CNP_WG_CTL, 0xDB);
+ snd_soc_write(codec, TAIKO_A_RX_HPH_CNP_WG_TIME, 0x58);
+ snd_soc_write(codec, TAIKO_A_RX_HPH_BIAS_WG_OCP, 0x1A);
+
+ /* Disable CHOPPER block */
+ snd_soc_update_bits(codec,
+ TAIKO_A_RX_HPH_CHOP_CTL, 0x80, 0x00);
+
+ snd_soc_write(codec, TAIKO_A_NCP_DTEST, 0x10);
+ pr_debug("%s: Disabled Chopper and set wavegen to 20 msec\n",
+ __func__);
+ }
return 0;
}
@@ -848,26 +882,50 @@
static void taiko_discharge_comp(struct snd_soc_codec *codec, int comp)
{
- /* Update RSM to 1, DIVF to 5 */
- snd_soc_write(codec, TAIKO_A_CDC_COMP0_B3_CTL + (comp * 8), 1);
+ /* Level meter DIV factor to 5 */
snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0,
- 1 << 5);
- /* Wait for 1ms */
- usleep_range(1000, 1000);
+ 0x05 << 4);
+ /* RMS meter Sampling to 0x01 */
+ snd_soc_write(codec, TAIKO_A_CDC_COMP0_B3_CTL + (comp * 8), 0x01);
+
+ /* Wait out the worst-case compander CnP discharge time */
+ usleep_range(3000, 3000);
+}
+
+static enum wcd9xxx_buck_volt taiko_codec_get_buck_mv(
+ struct snd_soc_codec *codec)
+{
+ int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
+ struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
+ struct wcd9xxx_pdata *pdata = taiko->resmgr.pdata;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
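+ /* Report the buck supply voltage only when it matches a supported level */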
+ if (!strncmp(pdata->regulator[i].name,
+ WCD9XXX_SUPPLY_BUCK_NAME,
+ sizeof(WCD9XXX_SUPPLY_BUCK_NAME))) {
+ if ((pdata->regulator[i].min_uV ==
+ WCD9XXX_CDC_BUCK_MV_1P8) ||
+ (pdata->regulator[i].min_uV ==
+ WCD9XXX_CDC_BUCK_MV_2P15))
+ buck_volt = pdata->regulator[i].min_uV;
+ break;
+ }
+ }
+ return buck_volt;
}
static int taiko_config_compander(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- int mask, emask;
- bool timedout;
- unsigned long timeout;
+ int mask, enable_mask;
struct snd_soc_codec *codec = w->codec;
struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
const int comp = w->shift;
const u32 rate = taiko->comp_fs[comp];
const struct comp_sample_dependent_params *comp_params =
&comp_samp_params[rate];
+ enum wcd9xxx_buck_volt buck_mv;
pr_debug("%s: %s event %d compander %d, enabled %d", __func__,
w->name, event, comp, taiko->comp_enabled[comp]);
@@ -877,72 +935,73 @@
/* Compander 0 has single channel */
mask = (comp == COMPANDER_0 ? 0x01 : 0x03);
- emask = (comp == COMPANDER_0 ? 0x02 : 0x03);
+ enable_mask = (comp == COMPANDER_0 ? 0x02 : 0x03);
+ buck_mv = taiko_codec_get_buck_mv(codec);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- /* Set gain source to compander */
- taiko_config_gain_compander(codec, comp, true);
- /* Enable RX interpolation path clocks */
+ /* Set compander Sample rate */
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_FS_CFG + (comp * 8),
+ 0x07, rate);
+ /* Set the static gain offset */
+ if (comp == COMPANDER_1
+ && buck_mv == WCD9XXX_CDC_BUCK_MV_2P15) {
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_B4_CTL + (comp * 8),
+ 0x80, 0x80);
+ } else {
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_B4_CTL + (comp * 8),
+ 0x80, 0x00);
+ }
+ /* Enable RX interpolation path compander clocks */
snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RX_B2_CTL,
mask << comp_shift[comp],
mask << comp_shift[comp]);
-
- taiko_discharge_comp(codec, comp);
-
- /* Clear compander halt */
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B1_CTL +
- (comp * 8),
- 1 << 2, 0);
/* Toggle compander reset bits */
snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
mask << comp_shift[comp],
mask << comp_shift[comp]);
snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
mask << comp_shift[comp], 0);
- break;
- case SND_SOC_DAPM_POST_PMU:
+
+ /* Set gain source to compander */
+ taiko_config_gain_compander(codec, comp, true);
+
+ /* Compander enable */
+ snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B1_CTL +
+ (comp * 8), enable_mask, enable_mask);
+
+ taiko_discharge_comp(codec, comp);
+
/* Set sample rate dependent paramater */
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_COMP0_FS_CFG + (comp * 8),
- 0x07, rate);
snd_soc_write(codec, TAIKO_A_CDC_COMP0_B3_CTL + (comp * 8),
comp_params->rms_meter_resamp_fact);
snd_soc_update_bits(codec,
TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8),
- 0x0F, comp_params->peak_det_timeout);
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8),
0xF0, comp_params->rms_meter_div_fact << 4);
- /* Compander enable */
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B1_CTL +
- (comp * 8), emask, emask);
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8),
+ 0x0F, comp_params->peak_det_timeout);
break;
case SND_SOC_DAPM_PRE_PMD:
- /* Halt compander */
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_COMP0_B1_CTL + (comp * 8),
- 1 << 2, 1 << 2);
- /* Wait up to a second for shutdown complete */
- timeout = jiffies + HZ;
- do {
- if ((snd_soc_read(codec,
- TAIKO_A_CDC_COMP0_SHUT_DOWN_STATUS +
- (comp * 8)) & mask) == mask)
- break;
- } while (!(timedout = time_after(jiffies, timeout)));
- pr_debug("%s: Compander %d shutdown %s in %dms\n", __func__,
- comp, timedout ? "timedout" : "completed",
- jiffies_to_msecs(timeout - HZ - jiffies));
- break;
- case SND_SOC_DAPM_POST_PMD:
/* Disable compander */
snd_soc_update_bits(codec,
TAIKO_A_CDC_COMP0_B1_CTL + (comp * 8),
- emask, 0x00);
+ enable_mask, 0x00);
+
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
+ mask << comp_shift[comp],
+ mask << comp_shift[comp]);
+ snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
+ mask << comp_shift[comp], 0);
+
/* Turn off the clock for compander in pair */
snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RX_B2_CTL,
mask << comp_shift[comp], 0);
+
/* Set gain source to register */
taiko_config_gain_compander(codec, comp, false);
break;
@@ -1082,13 +1141,6 @@
40, digital_gain),
SOC_SINGLE_S8_TLV("IIR2 INP4 Volume", TAIKO_A_CDC_IIR2_GAIN_B4_CTL, -84,
40, digital_gain),
- SOC_SINGLE_TLV("ADC1 Volume", TAIKO_A_TX_1_2_EN, 5, 3, 0, analog_gain),
- SOC_SINGLE_TLV("ADC2 Volume", TAIKO_A_TX_1_2_EN, 1, 3, 0, analog_gain),
- SOC_SINGLE_TLV("ADC3 Volume", TAIKO_A_TX_3_4_EN, 5, 3, 0, analog_gain),
- SOC_SINGLE_TLV("ADC4 Volume", TAIKO_A_TX_3_4_EN, 1, 3, 0, analog_gain),
- SOC_SINGLE_TLV("ADC5 Volume", TAIKO_A_TX_5_6_EN, 5, 3, 0, analog_gain),
- SOC_SINGLE_TLV("ADC6 Volume", TAIKO_A_TX_5_6_EN, 1, 3, 0, analog_gain),
-
SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 100, 0, taiko_get_anc_slot,
taiko_put_anc_slot),
@@ -2707,10 +2759,20 @@
int ret = 0;
struct snd_soc_codec *codec = w->codec;
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
+ struct taiko_priv *priv = snd_soc_codec_get_drvdata(codec);
pr_debug("%s: %d %s\n", __func__, event, w->name);
+
+ WARN_ONCE(!priv->spkdrv_reg, "SPKDRV supply %s isn't defined\n",
+ WCD9XXX_VDD_SPKDRV_NAME);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (priv->spkdrv_reg) {
+ ret = regulator_enable(priv->spkdrv_reg);
+ if (ret)
+ pr_err("%s: Failed to enable spkdrv_reg %s\n",
+ __func__, WCD9XXX_VDD_SPKDRV_NAME);
+ }
if (spkr_drv_wrnd > 0) {
WARN_ON(!(snd_soc_read(codec, TAIKO_A_SPKR_DRV_EN) &
0x80));
@@ -2731,6 +2793,12 @@
snd_soc_update_bits(codec, TAIKO_A_SPKR_DRV_EN, 0x80,
0x80);
}
+ if (priv->spkdrv_reg) {
+ ret = regulator_disable(priv->spkdrv_reg);
+ if (ret)
+ pr_err("%s: Failed to disable spkdrv_reg %s\n",
+ __func__, WCD9XXX_VDD_SPKDRV_NAME);
+ }
break;
}
@@ -2955,6 +3023,7 @@
struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
enum wcd9xxx_notify_event e_pre_on, e_post_off;
u8 req_clsh_state;
+ u32 pa_settle_time = TAIKO_HPH_PA_SETTLE_COMP_OFF;
pr_debug("%s: %s event = %d\n", __func__, w->name, event);
if (w->shift == 5) {
@@ -2970,6 +3039,9 @@
return -EINVAL;
}
+ if (taiko->comp_enabled[COMPANDER_1])
+ pa_settle_time = TAIKO_HPH_PA_SETTLE_COMP_ON;
+
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
/* Let MBHC module know PA is turning on */
@@ -2977,16 +3049,21 @@
break;
case SND_SOC_DAPM_POST_PMU:
+ usleep_range(pa_settle_time, pa_settle_time + 1000);
+ pr_debug("%s: sleep %d us after %s PA enable\n", __func__,
+ pa_settle_time, w->name);
wcd9xxx_clsh_fsm(codec, &taiko->clsh_d,
req_clsh_state,
WCD9XXX_CLSH_REQ_ENABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
-
- usleep_range(5000, 5000);
break;
case SND_SOC_DAPM_POST_PMD:
+ usleep_range(pa_settle_time, pa_settle_time + 1000);
+ pr_debug("%s: sleep %d us after %s PA disable\n", __func__,
+ pa_settle_time, w->name);
+
/* Let MBHC module know PA turned off */
wcd9xxx_resmgr_notifier_call(&taiko->resmgr, e_post_off);
@@ -2995,9 +3072,6 @@
WCD9XXX_CLSH_REQ_DISABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
- pr_debug("%s: sleep 10 ms after %s PA disable.\n", __func__,
- w->name);
- usleep_range(5000, 5000);
break;
}
return 0;
@@ -4955,13 +5029,13 @@
SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0,
taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0,
taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0,
taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_INPUT("AMIC1"),
@@ -5378,7 +5452,8 @@
}
for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
- if (!strncmp(pdata->regulator[i].name, "CDC_VDDA_RX", 11)) {
+ if (pdata->regulator[i].name &&
+ !strncmp(pdata->regulator[i].name, "CDC_VDDA_RX", 11)) {
if (pdata->regulator[i].min_uV == 1800000 &&
pdata->regulator[i].max_uV == 1800000) {
snd_soc_write(codec, TAIKO_A_BIAS_REF_CTL,
@@ -5739,6 +5814,21 @@
{TAIKO_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F},
{TAIKO_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F},
{TAIKO_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F},
+
+ /*
+ * Setup wavegen timer to 20msec and disable chopper
+ * as default. This corresponds to Compander OFF
+ */
+ {TAIKO_A_RX_HPH_CNP_WG_CTL, 0xFF, 0xDB},
+ {TAIKO_A_RX_HPH_CNP_WG_TIME, 0xFF, 0x58},
+ {TAIKO_A_RX_HPH_BIAS_WG_OCP, 0xFF, 0x1A},
+ {TAIKO_A_RX_HPH_CHOP_CTL, 0xFF, 0x24},
+
+ /* Choose max non-overlap time for NCP */
+ {TAIKO_A_NCP_CLK, 0xFF, 0xFC},
+
+ /* Program the 0.85 volt VBG_REFERENCE */
+ {TAIKO_A_BIAS_CURR_CTL_2, 0xFF, 0x04},
};
static void taiko_codec_init_reg(struct snd_soc_codec *codec)
@@ -5877,24 +5967,6 @@
return 0;
}
-static int taiko_codec_get_buck_mv(struct snd_soc_codec *codec)
-{
- int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
- struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_pdata *pdata = taiko->resmgr.pdata;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
- if (!strncmp(pdata->regulator[i].name,
- WCD9XXX_SUPPLY_BUCK_NAME,
- sizeof(WCD9XXX_SUPPLY_BUCK_NAME))) {
- buck_volt = pdata->regulator[i].min_uV;
- break;
- }
- }
- return buck_volt;
-}
-
static const struct snd_soc_dapm_widget taiko_1_dapm_widgets[] = {
SND_SOC_DAPM_ADC_E("ADC1", NULL, TAIKO_A_TX_1_2_EN, 7, 0,
taiko_codec_enable_adc,
@@ -5945,6 +6017,21 @@
SND_SOC_DAPM_POST_PMU),
};
+static struct regulator *taiko_codec_find_regulator(struct snd_soc_codec *codec,
+ const char *name)
+{
+ int i;
+ struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
+
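+ /* Find the named supply among the regulators already held by the wcd9xxx core */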
+ for (i = 0; i < core->num_of_supplies; i++) {
+ if (core->supplies[i].supply &&
+ !strcmp(core->supplies[i].supply, name))
+ return core->supplies[i].consumer;
+ }
+
+ return NULL;
+}
+
static int taiko_codec_probe(struct snd_soc_codec *codec)
{
struct wcd9xxx *control;
@@ -6023,6 +6110,9 @@
goto err_pdata;
}
+ taiko->spkdrv_reg = taiko_codec_find_regulator(codec,
+ WCD9XXX_VDD_SPKDRV_NAME);
+
if (spkr_drv_wrnd > 0) {
WCD9XXX_BCL_LOCK(&taiko->resmgr);
wcd9xxx_resmgr_get_bandgap(&taiko->resmgr,
@@ -6127,6 +6217,8 @@
/* cleanup resmgr */
wcd9xxx_resmgr_deinit(&taiko->resmgr);
+ taiko->spkdrv_reg = NULL;
+
kfree(taiko);
return 0;
}
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index aacc9df..1d01d2e 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -89,7 +89,7 @@
#define WCD9XXX_MEAS_INVALD_RANGE_LOW_MV 20
#define WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV 80
#define WCD9XXX_GM_SWAP_THRES_MIN_MV 150
-#define WCD9XXX_GM_SWAP_THRES_MAX_MV 500
+#define WCD9XXX_GM_SWAP_THRES_MAX_MV 650
#define WCD9XXX_USLEEP_RANGE_MARGIN_US 1000
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.c b/sound/soc/codecs/wcd9xxx-resmgr.c
index 18614d8..60a76a2 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr.c
@@ -391,8 +391,6 @@
} else {
snd_soc_update_bits(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x1, 0);
snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0);
- /* clk source to ext clk and clk buff ref to VBG */
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x0C, 0x04);
}
return 0;
@@ -423,9 +421,14 @@
snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02);
wcd9xxx_resmgr_enable_config_mode(codec, 0);
}
+ /* clk source to ext clk and clk buff ref to VBG */
+ snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x0C, 0x04);
}
snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x01, 0x01);
+ /* sleep time required by codec hardware to enable clock buffer */
+ usleep_range(1000, 1200);
+
snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x00);
/* on MCLK */
diff --git a/sound/soc/msm/mdm9625.c b/sound/soc/msm/mdm9625.c
index 2bef1b7..f3ccb33 100644
--- a/sound/soc/msm/mdm9625.c
+++ b/sound/soc/msm/mdm9625.c
@@ -761,6 +761,21 @@
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
},
{
+ .name = "MDM9625 Media2",
+ .stream_name = "MultiMedia2",
+ .cpu_dai_name = "MultiMedia2",
+ .platform_name = "msm-pcm-dsp.0",
+ .dynamic = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dainlink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA2,
+ },
+ {
.name = "MSM VoIP",
.stream_name = "VoIP",
.cpu_dai_name = "VoIP",
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 8affc09..778dc10 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -196,7 +196,7 @@
static int msm_proxy_rx_ch = 2;
static struct mutex cdc_mclk_mutex;
-static struct q_clkdiv *codec_clk;
+static struct clk *codec_clk;
static int clk_users;
static atomic_t prim_auxpcm_rsc_ref;
static atomic_t sec_auxpcm_rsc_ref;
@@ -515,23 +515,24 @@
if (clk_users != 1)
goto exit;
- ret = qpnp_clkdiv_enable(codec_clk);
- if (ret) {
- dev_err(codec->dev, "%s: Error enabling taiko MCLK\n",
- __func__);
- ret = -ENODEV;
+ if (codec_clk) {
+ clk_set_rate(codec_clk, TAIKO_EXT_CLK_RATE);
+ clk_prepare_enable(codec_clk);
+ taiko_mclk_enable(codec, 1, dapm);
+ } else {
+ pr_err("%s: Error setting Taiko MCLK\n", __func__);
+ clk_users--;
goto exit;
}
- taiko_mclk_enable(codec, 1, dapm);
} else {
if (clk_users > 0) {
clk_users--;
if (clk_users == 0) {
taiko_mclk_enable(codec, 0, dapm);
- qpnp_clkdiv_disable(codec_clk);
+ clk_disable_unprepare(codec_clk);
}
} else {
- pr_err("%s: Error releasing Tabla MCLK\n", __func__);
+ pr_err("%s: Error releasing Taiko MCLK\n", __func__);
ret = -EINVAL;
goto exit;
}
@@ -1276,6 +1277,8 @@
snd_soc_dapm_sync(dapm);
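+ /* Codec MCLK is now sourced from the CPU DAI's OSR clock instead of the PMIC clock divider */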
+ codec_clk = clk_get(cpu_dai->dev, "osr_clk");
+
snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch),
tx_ch, ARRAY_SIZE(rx_ch), rx_ch);
@@ -1285,7 +1288,7 @@
if (err) {
pr_err("%s: Failed to set codec registers config %d\n",
__func__, err);
- return err;
+ goto out;
}
config_data = taiko_get_afe_config(codec, AFE_SLIMBUS_SLAVE_CONFIG);
@@ -1293,7 +1296,7 @@
if (err) {
pr_err("%s: Failed to set slimbus slave config %d\n", __func__,
err);
- return err;
+ goto out;
}
config_data = taiko_get_afe_config(codec, AFE_AANC_VERSION);
@@ -1301,16 +1304,23 @@
if (err) {
pr_err("%s: Failed to set aanc version %d\n",
__func__, err);
- return err;
+ goto out;
}
/* start mbhc */
mbhc_cfg.calibration = def_taiko_mbhc_cal();
- if (mbhc_cfg.calibration)
+ if (mbhc_cfg.calibration) {
err = taiko_hs_detect(codec, &mbhc_cfg);
- else
+ if (err)
+ goto out;
+ else
+ return err;
+ } else {
err = -ENOMEM;
-
+ goto out;
+ }
+out:
+ clk_put(codec_clk);
return err;
}
@@ -2234,20 +2244,6 @@
}
}
- codec_clk = qpnp_clkdiv_get(card->dev, "taiko-mclk");
- if (IS_ERR(codec_clk)) {
- dev_err(card->dev,
- "%s: Failed to request taiko mclk from pmic %ld\n",
- __func__, PTR_ERR(codec_clk));
- return -ENODEV ;
- }
-
- ret = qpnp_clkdiv_config(codec_clk, Q_CLKDIV_XO_DIV_2);
- if (ret) {
- dev_err(card->dev, "%s: Failed to set taiko mclk to %u\n",
- __func__, pdata->mclk_gpio);
- return ret;
- }
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index c14cb74..ad96ae3 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -20,18 +20,58 @@
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/of_device.h>
+#include <linux/memory_alloc.h>
#include <asm/mach-types.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <mach/ocmem.h>
+#include <mach/subsystem_notif.h>
+#include <mach/subsystem_restart.h>
+#include <mach/msm_memtypes.h>
+#include <mach/ramdump.h>
#include "q6core.h"
#include "audio_ocmem.h"
+
#define AUDIO_OCMEM_BUF_SIZE (512 * SZ_1K)
+/*
+ * Exercise an OCMEM dump only if the audio OCMEM state is
+ * one of the following. All other states indicate that audio
+ * data is not mapped from DDR to OCMEM, so there is nothing
+ * to dump.
+ */
+#define _DO_OCMEM_DUMP_BIT_MASK_\
+ ((1 << OCMEM_STATE_MAP_COMPL) |\
+ (1 << OCMEM_STATE_MAP_TRANSITION) |\
+ (1 << OCMEM_STATE_UNMAP_TRANSITION) |\
+ (1 << OCMEM_STATE_SHRINK) |\
+ (1 << OCMEM_STATE_GROW))
+
+/*
+ * Wait for the OCMEM driver to process and respond to any
+ * ongoing map/unmap request before calling the OCMEM dump.
+ */
+#define _WAIT_BFR_DUMP_BIT_MASK_\
+ ((1 << OCMEM_STATE_MAP_COMPL) |\
+ (1 << OCMEM_STATE_UNMAP_COMPL) |\
+ (1 << OCMEM_STATE_MAP_FAIL) |\
+ (1 << OCMEM_STATE_UNMAP_FAIL))
+
+#define _MAP_RESPONSE_BIT_MASK_\
+ ((1 << OCMEM_STATE_MAP_COMPL) |\
+ (1 << OCMEM_STATE_MAP_FAIL))
+
+
+#define _UNMAP_RESPONSE_BIT_MASK_\
+ ((1 << OCMEM_STATE_UNMAP_COMPL) |\
+ (1 << OCMEM_STATE_UNMAP_FAIL))
+
#define _BIT_MASK_\
- ((1 << OCMEM_STATE_EXIT) |\
+ ((1 << OCMEM_STATE_SSR) |\
+ (1 << OCMEM_STATE_EXIT) |\
(1 << OCMEM_STATE_GROW) |\
(1 << OCMEM_STATE_SHRINK))
@@ -89,6 +129,10 @@
struct workqueue_struct *audio_ocmem_workqueue;
struct workqueue_struct *voice_ocmem_workqueue;
bool ocmem_en;
+ bool audio_ocmem_running;
+ void *ocmem_ramdump_dev;
+ struct ramdump_segment ocmem_ramdump_segment;
+ unsigned long ocmem_dump_addr;
};
static struct audio_ocmem_prv audio_ocmem_lcl;
@@ -114,7 +158,9 @@
break;
case OCMEM_MAP_FAIL:
pr_debug("%s: map fail\n", __func__);
- atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_FAIL);
+ clear_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_MAP_TRANSITION);
+ set_bit_pos(audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_FAIL);
break;
case OCMEM_UNMAP_DONE:
pr_debug("%s: unmap done\n", __func__);
@@ -125,8 +171,10 @@
break;
case OCMEM_UNMAP_FAIL:
pr_debug("%s: unmap fail\n", __func__);
- atomic_set(&audio_ocmem_lcl.audio_state,
- OCMEM_STATE_UNMAP_FAIL);
+ clear_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_TRANSITION);
+ set_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_FAIL);
break;
case OCMEM_ALLOC_GROW:
rbuf = data;
@@ -170,6 +218,9 @@
} else if (test_bit_pos((*state), OCMEM_STATE_EXIT)) {
pr_debug("%s: returning exit state\n", __func__);
return OCMEM_STATE_EXIT;
+ } else if (test_bit_pos((*state), OCMEM_STATE_SSR)) {
+ pr_debug("%s: returning ssr state\n", __func__);
+ return OCMEM_STATE_SSR;
} else
return -EINVAL;
@@ -278,8 +329,8 @@
}
wait_event_interruptible(audio_ocmem_lcl.audio_wait,
- test_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_MAP_COMPL) != 0);
+ (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _MAP_RESPONSE_BIT_MASK_) != 0);
atomic_set(&audio_ocmem_lcl.audio_cond, 1);
mutex_unlock(&audio_ocmem_lcl.protect_lock);
@@ -322,8 +373,9 @@
}
wait_event_interruptible(audio_ocmem_lcl.audio_wait,
- test_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_UNMAP_COMPL) != 0);
+ (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _UNMAP_RESPONSE_BIT_MASK_)
+ != 0);
ret = ocmem_shrink(cid, audio_ocmem_lcl.buf, 0);
if (ret) {
pr_err("%s: ocmem_shrink failed, state[%d]\n",
@@ -353,8 +405,8 @@
goto fail_cmd;
}
wait_event_interruptible(audio_ocmem_lcl.audio_wait,
- test_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_MAP_COMPL) != 0);
+ (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _MAP_RESPONSE_BIT_MASK_) != 0);
clear_bit_pos(audio_ocmem_lcl.audio_state,
OCMEM_STATE_GROW);
@@ -377,8 +429,8 @@
}
wait_event_interruptible(
audio_ocmem_lcl.audio_wait,
- test_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_UNMAP_COMPL) != 0);
+ (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _UNMAP_RESPONSE_BIT_MASK_) != 0);
}
if (test_bit_pos(audio_ocmem_lcl.audio_state,
@@ -434,14 +486,15 @@
goto fail_cmd;
}
pr_debug("%s: ocmem_free success\n", __func__);
+ /* Fall through */
+ case OCMEM_STATE_SSR:
msm_bus_scale_client_update_request(
audio_ocmem_lcl.audio_ocmem_bus_client,
0);
set_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_DISABLE);
+ OCMEM_STATE_DISABLE);
break;
-
case -EINVAL:
pr_info("%s: audio_cond[%d] audio_state[0x%x]\n",
__func__,
@@ -453,6 +506,7 @@
ret = 0;
fail_cmd:
pr_debug("%s: exit\n", __func__);
+ audio_ocmem_lcl.audio_ocmem_running = false;
return ret;
}
@@ -470,8 +524,11 @@
pr_debug("%s: audio_cond[0x%x], audio_state[0x%x]\n", __func__,
atomic_read(&audio_ocmem_lcl.audio_cond),
atomic_read(&audio_ocmem_lcl.audio_state));
- set_bit_pos(audio_ocmem_lcl.audio_state,
- OCMEM_STATE_EXIT);
+ if (!test_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_SSR))
+ set_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_EXIT);
+
wake_up(&audio_ocmem_lcl.audio_wait);
mutex_unlock(&audio_ocmem_lcl.protect_lock);
@@ -652,6 +709,7 @@
}
workdata->id = id;
workdata->en = enable;
+ audio_ocmem_lcl.audio_ocmem_running = true;
INIT_WORK(&workdata->work, audio_ocmem_process_workdata);
queue_work(audio_ocmem_lcl.audio_ocmem_workqueue,
@@ -684,12 +742,130 @@
return 0;
}
+
+static void do_ocmem_ramdump(void)
+{
+ int ret = 0;
+ void *virt = NULL;
+
+ virt = ioremap(audio_ocmem_lcl.ocmem_dump_addr, AUDIO_OCMEM_BUF_SIZE);
+ ret = ocmem_dump(OCMEM_LP_AUDIO,
+ audio_ocmem_lcl.buf,
+ (unsigned long)virt);
+ iounmap(virt);
+
+ if (ret)
+ pr_err("%s: ocmem_dump failed\n", __func__);
+
+ audio_ocmem_lcl.ocmem_ramdump_segment.address
+ = (unsigned long)audio_ocmem_lcl.ocmem_dump_addr;
+ audio_ocmem_lcl.ocmem_ramdump_segment.size
+ = AUDIO_OCMEM_BUF_SIZE;
+ ret = do_ramdump(audio_ocmem_lcl.ocmem_ramdump_dev,
+ &audio_ocmem_lcl.ocmem_ramdump_segment,
+ 1);
+ if (ret < 0)
+ pr_err("%s: do_ramdump failed\n", __func__);
+}
+
+static void process_ocmem_dump(void)
+{
+ int ret = 0;
+
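+ /* Flag SSR first so the teardown path knows a subsystem restart is in progress */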
+ set_bit_pos(audio_ocmem_lcl.audio_state, OCMEM_STATE_SSR);
+
+ if (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _DO_OCMEM_DUMP_BIT_MASK_) {
+
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ (atomic_read(&audio_ocmem_lcl.audio_state) &
+ _WAIT_BFR_DUMP_BIT_MASK_) != 0);
+
+ if (test_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_MAP_COMPL) ||
+ test_bit_pos(audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_FAIL)) {
+
+ if (audio_ocmem_lcl.ocmem_dump_addr &&
+ audio_ocmem_lcl.ocmem_ramdump_dev)
+ do_ocmem_ramdump();
+ else
+ pr_err("%s: Error calling ocmem ramdump\n",
+ __func__);
+
+ ret = ocmem_drop(OCMEM_LP_AUDIO, audio_ocmem_lcl.buf,
+ &audio_ocmem_lcl.mlist);
+ if (ret)
+ pr_err("%s: ocmem_drop failed\n", __func__);
+ }
+ }
+
+ ret = ocmem_free(OCMEM_LP_AUDIO, audio_ocmem_lcl.buf);
+ if (ret)
+ pr_err("%s: ocmem_free failed\n", __func__);
+}
+
+static int lpass_notifier_cb(struct notifier_block *this, unsigned long code,
+ void *_cmd)
+{
+ int ret = NOTIFY_DONE;
+
+ switch (code) {
+ case SUBSYS_BEFORE_SHUTDOWN:
+ pr_debug("AO-Notify: Shutdown started\n");
+ break;
+ case SUBSYS_AFTER_SHUTDOWN:
+ pr_debug("AO-Notify: Shutdown Completed\n");
+ break;
+ case SUBSYS_RAMDUMP_NOTIFICATION:
+ pr_debug("AO-Notify: OCMEM dump\n");
+ if (audio_ocmem_lcl.ocmem_en &&
+ audio_ocmem_lcl.audio_ocmem_running)
+ process_ocmem_dump();
+ pr_debug("AO-Notify: OCMEM dump done\n");
+ break;
+ case SUBSYS_BEFORE_POWERUP:
+ pr_debug("AO-Notify: Powerup started\n");
+ break;
+ case SUBSYS_AFTER_POWERUP:
+ pr_debug("AO-Notify: Powerup completed\n");
+ break;
+ default:
+ pr_err("AO-Notify: Generel: %lu\n", code);
+ break;
+ }
+ return ret;
+}
+
+static struct notifier_block anb = {
+ .notifier_call = lpass_notifier_cb,
+};
+
static int ocmem_audio_client_probe(struct platform_device *pdev)
{
int ret;
struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
pr_debug("%s\n", __func__);
+
+ subsys_notif_register_notifier("adsp", &anb);
+
+ audio_ocmem_lcl.ocmem_dump_addr =
+ allocate_contiguous_memory_nomap(AUDIO_OCMEM_BUF_SIZE,
+ MEMTYPE_EBI1,
+ AUDIO_OCMEM_BUF_SIZE);
+
+ if (audio_ocmem_lcl.ocmem_dump_addr) {
+ audio_ocmem_lcl.ocmem_ramdump_dev =
+ create_ramdump_device("audio-ocmem", &pdev->dev);
+
+ if (!audio_ocmem_lcl.ocmem_ramdump_dev)
+ pr_err("%s: audio-ocmem ramdump device failed\n",
+ __func__);
+ } else {
+ pr_err("%s: ocmem dump memory alloc failed\n", __func__);
+ }
+
audio_ocmem_lcl.audio_ocmem_workqueue =
alloc_workqueue("ocmem_audio_client_driver_audio",
WQ_NON_REENTRANT | WQ_UNBOUND, 0);
@@ -715,6 +891,7 @@
spin_lock_init(&audio_ocmem_lcl.audio_lock);
mutex_init(&audio_ocmem_lcl.protect_lock);
audio_ocmem_lcl.ocmem_en = true;
+ audio_ocmem_lcl.audio_ocmem_running = false;
/* populate platform data */
ret = audio_ocmem_platform_data_populate(pdev);
@@ -753,6 +930,7 @@
msm_bus_cl_clear_pdata(audio_ocmem_bus_scale_pdata);
ocmem_notifier_unregister(audio_ocmem_lcl.audio_hdl,
&audio_ocmem_client_nb);
+ free_contiguous_memory_by_paddr(audio_ocmem_lcl.ocmem_dump_addr);
return 0;
}
static const struct of_device_id msm_ocmem_audio_dt_match[] = {
@@ -771,11 +949,9 @@
.remove = ocmem_audio_client_remove,
};
-
static int __init ocmem_audio_client_init(void)
{
int rc;
-
rc = platform_driver_register(&audio_ocmem_driver);
if (rc)
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
index b43c3bd..f77ec0f 100644
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
@@ -305,6 +305,7 @@
}
if (idx >= NUM_DOLBY_ENDP_DEVICE) {
pr_err("%s: device is not set accordingly\n", __func__);
+ kfree(params_value);
return -EINVAL;
}
for (i = 0; i < DOLBY_ENDDEP_PARAM_LENGTH; i++) {
@@ -367,6 +368,7 @@
params_length);
if (rc) {
pr_err("%s: send dolby params failed\n", __func__);
+ kfree(params_value);
return -EINVAL;
}
for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
@@ -584,7 +586,7 @@
if (rc) {
pr_err("%s: get parameters failed\n", __func__);
kfree(params_value);
- rc = -EINVAL;
+ return -EINVAL;
}
update_params_value = (int *)params_value;
ucontrol->value.integer.value[0] = dolby_dap_params_get.device_id;
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 9d6896e..63af271 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -56,6 +56,10 @@
#define TIMEOUT_MS 1000
#define Q6AFE_MAX_VOLUME 0x3FFF
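+/* Open counts for the RT proxy PCM and proxy AFE ports, indexed by port_id & 0x1 */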
+static int pcm_afe_instance[2];
+static int proxy_afe_instance[2];
+bool afe_close_done[2] = {true, true};
+
#define SIZEOF_CFG_CMD(y) \
(sizeof(struct apr_hdr) + sizeof(u16) + (sizeof(struct y)))
@@ -943,7 +947,7 @@
int i;
i = port_id - SLIMBUS_0_RX;
- if (i < 0 || i > ARRAY_SIZE(afe_ports_mad_type)) {
+ if (i < 0 || i >= ARRAY_SIZE(afe_ports_mad_type)) {
pr_debug("%s: Non Slimbus port_id 0x%x\n", __func__, port_id);
return MAD_HW_NONE;
}
@@ -1061,11 +1065,30 @@
}
if ((port_id == RT_PROXY_DAI_001_RX) ||
- (port_id == RT_PROXY_DAI_002_TX))
- return 0;
- if ((port_id == RT_PROXY_DAI_002_RX) ||
- (port_id == RT_PROXY_DAI_001_TX))
+ (port_id == RT_PROXY_DAI_002_TX)) {
+ pr_debug("%s: before incrementing pcm_afe_instance %d"\
+ " port_id %d\n", __func__,
+ pcm_afe_instance[port_id & 0x1], port_id);
port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ pcm_afe_instance[port_id & 0x1]++;
+ return 0;
+ }
+ if ((port_id == RT_PROXY_DAI_002_RX) ||
+ (port_id == RT_PROXY_DAI_001_TX)) {
+ pr_debug("%s: before incrementing proxy_afe_instance %d"\
+ " port_id %d\n", __func__,
+ proxy_afe_instance[port_id & 0x1], port_id);
+
+ if (!afe_close_done[port_id & 0x1]) {
+ /*close pcm dai corresponding to the proxy dai*/
+ afe_close(port_id - 0x10);
+ pcm_afe_instance[port_id & 0x1]++;
+ pr_debug("%s: reconfigure afe port again\n", __func__);
+ }
+ proxy_afe_instance[port_id & 0x1]++;
+ afe_close_done[port_id & 0x1] = false;
+ port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ }
pr_debug("%s: port id: %#x\n", __func__, port_id);
@@ -2618,6 +2641,31 @@
goto fail_cmd;
}
pr_debug("%s: port_id=%d\n", __func__, port_id);
+ if ((port_id == RT_PROXY_DAI_001_RX) ||
+ (port_id == RT_PROXY_DAI_002_TX)) {
+ pr_debug("%s: before decrementing pcm_afe_instance %d\n",
+ __func__, pcm_afe_instance[port_id & 0x1]);
+ port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ pcm_afe_instance[port_id & 0x1]--;
+ if (!(pcm_afe_instance[port_id & 0x1] == 0 &&
+ proxy_afe_instance[port_id & 0x1] == 0))
+ return 0;
+ else
+ afe_close_done[port_id & 0x1] = true;
+ }
+
+ if ((port_id == RT_PROXY_DAI_002_RX) ||
+ (port_id == RT_PROXY_DAI_001_TX)) {
+ pr_debug("%s: before decrementing proxy_afe_instance %d\n",
+ __func__, proxy_afe_instance[port_id & 0x1]);
+ port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ proxy_afe_instance[port_id & 0x1]--;
+ if (!(pcm_afe_instance[port_id & 0x1] == 0 &&
+ proxy_afe_instance[port_id & 0x1] == 0))
+ return 0;
+ else
+ afe_close_done[port_id & 0x1] = true;
+ }
port_id = q6audio_convert_virtual_to_portid(port_id);
index = q6audio_get_port_index(port_id);