Merge "input: sensor: Add cm36283 light/proximity sensor driver"
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
index 195a98d..f57d928 100644
--- a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
@@ -3,10 +3,8 @@
The application processor in MSM can do a variety of C-States for low power
management. These C-States are invoked by the CPUIdle framework when the core
becomes idle. But based on the time available until the next scheduled wakeup,
-the system can do a combination of low power modes of different resources -
-L2, XO, Vdd Dig and Vdd Mem. The combination is captured in the device tree as
-lpm-level. The units for voltage are dependent on the PMIC used on the target
-and are in uV.
+the system can enter one of several low power modes. The combination is
+captured in the device tree as an lpm-level.
The required nodes for lpm-levels are:
@@ -20,20 +18,11 @@
"retention" - Retention
"pc_suspend" - Suspended Power Collapse
"pc_no_xo_shutdown" - Power Collapse with no XO shutdown
-- qcom,xo: The state of XO clock. Values are "xo_on" and "xo_off"
- qcom,l2: The state of L2 cache. Values are:
"l2_cache_pc" - L2 cache in power collapse
"l2_cache_retenetion" - L2 cache in retention
"l2_cache_gdhs" - L2 cache in GDHS
"l2_cache_active" - L2 cache in active mode
-- qcom,vdd-mem-upper-bound: The upper bound value of mem voltage in uV
-- qcom,vdd-mem-lower-bound: The lower bound value of mem voltage in uV
-- qcom,vdd-dig-upper-bound: The upper bound value of dig voltage in uV
- or an RBCPR (Rapid Bridge Core Power Reduction)
- corner voltage.
-- qcom,vdd-dig-lower-bound: The lower bound value of dig voltage in uV
- or an RBCPR (Rapid Bridge Core Power Reduction)
- corner voltage.
- qcom,latency-us: The latency in handling the interrupt if this level was
chosen, in uSec
- qcom,ss-power: The steady state power expelled when the processor is in this
@@ -42,29 +31,18 @@
in mWatts.uSec
- qcom,time-overhead: The time spent in entering and exiting this level in uS
-Optional properties
-- qcom,irqs-detectable: The field indicates whether the IRQs are detectable by
- the GIC controller when entering a low power mode.
-- qcom,gpio-detectable: The field indicates whether the GPIOs can be detected
- by the GPIO interrupt controller during a given low
- power mode.
-- qcom,use-qtimer: Indicates whether the target uses the synchronized QTimer.
+The optional nodes for lpm-levels are:
+- qcom,no-l2-saw: Indicates that this target does not have an L2 SAW (SPM and AVS wrapper).
+- qcom,default-l2-state: Indicates the default low power state of the L2 SAW. This property is used only when there is an L2 SAW.
Example:
qcom,lpm-levels {
- qcom,use-qtimer;
+ qcom,no-l2-saw;
qcom,lpm-level@0 {
reg = <0>;
qcom,mode = "wfi";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
- qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <5>; /* MAX */
- qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
qcom,latency-us = <100>;
qcom,ss-power = <650>;
qcom,energy-overhead = <801>;
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt b/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
deleted file mode 100644
index 7b5fda3..0000000
--- a/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-* Low Power Management Resources
-
-The application processor in the MSM can enter several different low power
-states depending on the sleep time and on the required system resources. The
-MSM cannot enter a given low power state if that state involves turning off
-some shared resources which are required by some components of the
-system.The lpm-resources device tree node represents the shared resources
-that need to be monitored for usage requirement to check if a given low power
-state can be entered.Each resource is identified by a combination of the name,
-id,type and key which is also used by the RPM to identify a shared resource.
-The name and resource-type are required nodes; the type, id and key are
-optional nodes which are needed if the resource type is RPM shared resource
-(MSM_LPM_RPM_RS_TYPE).
-
-The nodes for lpm-resources are:
-
-Required Nodes:
-
-- compatible: "qcom,lpm-resources"
-- reg: The numeric level id
-- qcom,name: The name of the low power resource represented
- as a string.
-- qcom,init-value: Initialization value of the LPM resource represented as
- decimal value for vdd-dig and vdd-mem resources and
- as string for pxo and l2 resources.
-
-
-Optional Nodes:
-
-- qcom,type: The type of resource used like smps or pxo
- represented as a hex value.
-- qcom,id: The id representing a device within a resource type.
-- qcom,key: The key is the specific attribute of the resource being
- monitored represented as a hex value.
-- qcom,local-resource-type: The property exists only for locally managed
- resource and is represented as a bool.
-
-Example:
- qcom,lpm-resources@0 {
- reg = <0x0>;
- qcom,name = "vdd-dig";
- qcom,type = <0x62706d73>; /* "smpb" */
- qcom,id = <0x02>;
- qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value= <5>; /* Active Corner*/
- };
-
diff --git a/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt b/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt
index 6dac1b7..bf5e544 100644
--- a/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt
+++ b/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt
@@ -26,12 +26,13 @@
add the appropriate binding:
Required parameters:
-- qcom,memblock-remove: base and size of block to be removed
+- qcom,memblock-remove: array of the base and size of blocks to be removed
qcom,a-driver {
compatible = "qcom,a-driver";
- /* Remove 4MB at 0x200000*/
- qcom,memblock-remove = <0x200000 0x400000>;
+	/* Remove 4MB at 0x200000 and 2MB at 0x800000 */
+ qcom,memblock-remove = <0x200000 0x400000
+ 0x800000 0x200000>;
};
In order to ensure memory is only reserved when a driver is actually enabled,
diff --git a/Documentation/devicetree/bindings/coresight/coresight.txt b/Documentation/devicetree/bindings/coresight/coresight.txt
index d24e671..7f7ee25 100644
--- a/Documentation/devicetree/bindings/coresight/coresight.txt
+++ b/Documentation/devicetree/bindings/coresight/coresight.txt
@@ -118,6 +118,12 @@
- qcom,setb-gpios-pull : active pull configuration for set B gpios
- qcom,setb-gpios-dir : active direction for set B gpios
- qcom,hwevent-clks : list of clocks required by hardware event driver
+- qcom,byte-cntr-absent : specifies if the byte counter feature is absent on
+			  the device. Only relevant for the tmc-etr device.
+- interrupts : <a b c> where a is 0 or 1 depending on whether the interrupt is
+		   an SPI or a PPI, b is the interrupt number and c is the mask.
+- interrupt-names : a list of strings that map in order to the list of
+ interrupts specified in the 'interrupts' property.
Examples:
@@ -128,6 +134,11 @@
<0xfc37c000 0x3000>;
reg-names = "tmc-base", "bam-base";
+ interrupts = <0 166 0>;
+ interrupt-names = "byte-cntr-irq";
+
+ qcom,byte-cntr-absent;
+
coresight-id = <0>;
coresight-name = "coresight-tmc-etr";
coresight-nr-inports = <1>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-edp.txt b/Documentation/devicetree/bindings/fb/mdss-edp.txt
index 578b07c..3d7e5a2 100644
--- a/Documentation/devicetree/bindings/fb/mdss-edp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-edp.txt
@@ -20,6 +20,7 @@
specific platforms.
- qcom,mdss-fb-map: pHandle that specifies the framebuffer to which the
interface is mapped.
+- gpio-panel-hpd : gpio pin used for eDP HPD
Example:
mdss_edp: qcom,mdss_edp@fd923400 {
@@ -32,6 +33,8 @@
qcom,panel-lpg-channel = <7>; /* LPG Channel 8 */
qcom,panel-pwm-period = <53>;
status = "disable";
+ qcom,mdss-fb-map = <&mdss_fb0>;
+ gpio-panel-hpd = <&msmgpio 102 0>;
};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
index 0f35e73..3720172 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_i2c_rmi4.txt
@@ -20,6 +20,7 @@
- synaptics,panel-x : panel x dimension
- synaptics,panel-y : panel y dimension
- synaptics,fw-image-name : name of firmware .img file in /etc/firmware
+ - synaptics,power-down : fully power down regulators in suspend
Example:
i2c@f9927000 { /* BLSP1 QUP5 */
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp.txt b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
index e9c5061..b60760e 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
@@ -107,10 +107,13 @@
- qcom,mode: mode the led should operate in, options "pwm" and "lpg"
- qcom,pwm-channel: pwm channel the led will operate on
- qcom,pwm-us: time the pwm device will modulate at (us)
-- qcom,row-src-sel-val: select source for rows. One bit is used for each row.
- Specify 0 for vph_pwr and 1 for vbst for each row.
-- qcom,row-scan-val: select rows for scanning
-- qcom,row-scan-en: row scan enable
+- qcom,row-id: specify the id of the row. Supported values are 0 to 3.
+
+Optional properties for keypad backlight:
+- qcom,row-src-vbst: select the source for the rows. Specify this property to
+			use vbst; omit it to use vph_pwr.
+- qcom,row-src-en: specify this property to enable the row source
+- qcom,always-on: specify this property if the module has to be always on
Required properties for PWM mode only:
- qcom,pwm-us: time the pwm device will modulate at (us)
@@ -259,16 +262,33 @@
qcom,leds@e200 {
status = "okay";
- qcom,kpdbl {
+
+ qcom,kpdbl1 {
label = "kpdbl";
- linux,name = "button-backlight";
+ linux,name = "kpdbl-pwm-1";
qcom,mode = <0>;
qcom,pwm-channel = <8>;
qcom,pwm-us = <1000>;
qcom,id = <7>;
qcom,max-current = <20>;
- qcom,row-src-sel-val = <0x00>;
- qcom,row-scan-en = <0x01>;
- qcom,row-scan-val = <0x01>;
+ qcom,row-id = <0>;
+ qcom,row-src-en;
+ qcom,always-on;
};
+
+ qcom,kpdbl2 {
+ label = "kpdbl";
+ linux,name = "kpdbl-lut-2";
+ qcom,mode = <1>;
+ qcom,pwm-channel = <9>;
+ qcom,pwm-us = <1000>;
+ qcom,start-idx = <1>;
+ qcom,duty-pcts = [00 00 00 00 64
+ 64 00 00 00 00];
+ qcom,id = <7>;
+ qcom,max-current = <20>;
+ qcom,row-id = <1>;
+ qcom,row-src-en;
+ };
+
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt
index 8e988be..b8156c3 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt
@@ -18,6 +18,17 @@
qcom,gpio-req-tbl-num property (in the same order)
- qcom,gpio-req-tbl-label : should contain name of gpios present in
qcom,gpio-req-tbl-num property (in the same order)
+
+Optional properties:
+- master0: qcom,cci-master0 - node should contain clock settings for
+ cci master 0 bus
+- master1: qcom,cci-master1 - node should contain clock settings for
+ cci master 1 bus
+
+[Second level nodes]
+* Qualcomm CCI clock settings
+
+Optional properties:
- qcom,hw-thigh : should contain high period of the SCL clock in terms of CCI
clock cycle
- qcom,hw-tlow : should contain high period of the SCL clock in terms of CCI
@@ -31,7 +42,6 @@
- qcom,hw-trdhld : should contain internal hold time for SDA
- qcom,hw-tsp : should contain filtering of glitches
-[Second level nodes]
* Qualcomm MSM Sensor
MSM sensor node contains properties of camera sensor
@@ -175,16 +185,12 @@
"CCI_I2C_CLK0",
"CCI_I2C_DATA1",
"CCI_I2C_CLK1";
- qcom,hw-thigh = <78>;
- qcom,hw-tlow = <114>;
- qcom,hw-tsu-sto = <28>;
- qcom,hw-tsu-sta = <28>;
- qcom,hw-thd-dat = <10>;
- qcom,hw-thd-sta = <77>;
- qcom,hw-tbuf = <118>;
- qcom,hw-scl-stretch-en = <0>;
- qcom,hw-trdhld = <6>;
- qcom,hw-tsp = <1>;
+ master0: qcom,cci-master0 {
+ status = "disabled";
+ };
+ master1: qcom,cci-master1 {
+ status = "disabled";
+ };
actuator0: qcom,actuator@18 {
cell-index = <0>;
@@ -235,3 +241,31 @@
qcom,sensor-mode = <1>;
};
};
+
+ &master0 {
+ qcom,hw-thigh = <78>;
+ qcom,hw-tlow = <114>;
+ qcom,hw-tsu-sto = <28>;
+ qcom,hw-tsu-sta = <28>;
+ qcom,hw-thd-dat = <10>;
+ qcom,hw-thd-sta = <77>;
+ qcom,hw-tbuf = <118>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <1>;
+ status = "ok";
+ };
+
+ &master1 {
+ qcom,hw-thigh = <78>;
+ qcom,hw-tlow = <114>;
+ qcom,hw-tsu-sto = <28>;
+ qcom,hw-tsu-sta = <28>;
+ qcom,hw-thd-dat = <10>;
+ qcom,hw-thd-sta = <77>;
+ qcom,hw-tbuf = <118>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <1>;
+ status = "ok";
+ };
diff --git a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
index e724c62..f2707f6 100644
--- a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
@@ -73,6 +73,11 @@
programs the CERR to 3 by default. When this flag is true, CERR is set to
zero and transaction errors are ignored.
+- hsic,reset-delay: If present, add the given delay time (in ms) between the
+  bus reset and enumeration. Some devices may need more than 100 ms to
+  initialize after receiving the bus reset, so this delay avoids starting
+  enumeration before device initialization is done.
+
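  A minimal illustrative sketch (not part of this patch) of a HSIC host node
  using the new property; the node name, compatible string, register address
  and interrupt value below are placeholders:

	hsic_host: hsic@f9a00000 {
		compatible = "qcom,hsic-host";
		reg = <0xf9a00000 0x400>;
		interrupts = <0 136 0>;
		/* wait 200 ms between bus reset and enumeration */
		hsic,reset-delay = <200>;
	};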
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
below optional properties:
- qcom,msm_bus,name
diff --git a/arch/arm/boot/dts/apq8074-v2-cdp.dts b/arch/arm/boot/dts/apq8074-v2.0-1-cdp.dts
similarity index 92%
rename from arch/arm/boot/dts/apq8074-v2-cdp.dts
rename to arch/arm/boot/dts/apq8074-v2.0-1-cdp.dts
index 1dc0912..0489b55 100644
--- a/arch/arm/boot/dts/apq8074-v2-cdp.dts
+++ b/arch/arm/boot/dts/apq8074-v2.0-1-cdp.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.0-1.dtsi"
/include/ "msm8974-cdp.dtsi"
/ {
- model = "Qualcomm APQ 8074v2 CDP";
+ model = "Qualcomm APQ 8074v2.0-1 CDP";
compatible = "qcom,apq8074-cdp", "qcom,apq8074", "qcom,cdp";
qcom,msm-id = <184 1 0x20000>;
};
diff --git a/arch/arm/boot/dts/apq8074-v2-dragonboard.dts b/arch/arm/boot/dts/apq8074-v2.0-1-dragonboard.dts
similarity index 89%
rename from arch/arm/boot/dts/apq8074-v2-dragonboard.dts
rename to arch/arm/boot/dts/apq8074-v2.0-1-dragonboard.dts
index 5a6f5f3..128d8bd 100644
--- a/arch/arm/boot/dts/apq8074-v2-dragonboard.dts
+++ b/arch/arm/boot/dts/apq8074-v2.0-1-dragonboard.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.0-1.dtsi"
/include/ "apq8074-dragonboard.dtsi"
/ {
- model = "Qualcomm APQ 8074v2 DRAGONBOARD";
+ model = "Qualcomm APQ 8074v2.0-1 DRAGONBOARD";
compatible = "qcom,apq8074-dragonboard", "qcom,apq8074", "qcom,dragonboard";
qcom,msm-id = <184 10 0x20000>;
};
diff --git a/arch/arm/boot/dts/apq8074-v2-liquid.dts b/arch/arm/boot/dts/apq8074-v2.0-1-liquid.dts
similarity index 89%
rename from arch/arm/boot/dts/apq8074-v2-liquid.dts
rename to arch/arm/boot/dts/apq8074-v2.0-1-liquid.dts
index a0ecb50..63c32f3 100644
--- a/arch/arm/boot/dts/apq8074-v2-liquid.dts
+++ b/arch/arm/boot/dts/apq8074-v2.0-1-liquid.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.0-1.dtsi"
/include/ "msm8974-liquid.dtsi"
/ {
- model = "Qualcomm APQ 8074v2 LIQUID";
+ model = "Qualcomm APQ 8074v2.0-1 LIQUID";
compatible = "qcom,apq8074-liquid", "qcom,apq8074", "qcom,liquid";
qcom,msm-id = <184 9 0x20000>;
};
diff --git a/arch/arm/boot/dts/apq8074-v2.dtsi b/arch/arm/boot/dts/apq8074-v2.0-1.dtsi
similarity index 97%
rename from arch/arm/boot/dts/apq8074-v2.dtsi
rename to arch/arm/boot/dts/apq8074-v2.0-1.dtsi
index c700a5c..8314fab 100644
--- a/arch/arm/boot/dts/apq8074-v2.dtsi
+++ b/arch/arm/boot/dts/apq8074-v2.0-1.dtsi
@@ -16,7 +16,7 @@
* msm8974.dtsi file.
*/
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
&soc {
qcom,qseecom@a700000 {
diff --git a/arch/arm/boot/dts/apq8084-coresight.dtsi b/arch/arm/boot/dts/apq8084-coresight.dtsi
index 610d80b..6cd238a 100644
--- a/arch/arm/boot/dts/apq8084-coresight.dtsi
+++ b/arch/arm/boot/dts/apq8084-coresight.dtsi
@@ -23,6 +23,7 @@
coresight-id = <0>;
coresight-name = "coresight-tmc-etr";
coresight-nr-inports = <1>;
+ coresight-ctis = <&cti0 &cti8>;
};
replicator: replicator@fc324000 {
@@ -50,6 +51,7 @@
coresight-child-list = <&replicator>;
coresight-child-ports = <0>;
coresight-default-sink;
+ coresight-ctis = <&cti0 &cti8>;
};
funnel_merg: funnel@fc323000 {
@@ -131,6 +133,70 @@
coresight-child-ports = <7>;
};
+ etm0: etm@fc34c000 {
+ compatible = "arm,coresight-etm";
+ reg = <0xfc34c000 0x1000>;
+ reg-names = "etm-base";
+
+ coresight-id = <10>;
+ coresight-name = "coresight-etm0";
+ coresight-nr-inports = <0>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_kpss>;
+ coresight-child-ports = <0>;
+
+ qcom,pc-save;
+ qcom,round-robin;
+ };
+
+ etm1: etm@fc34d000 {
+ compatible = "arm,coresight-etm";
+ reg = <0xfc34d000 0x1000>;
+ reg-names = "etm-base";
+
+ coresight-id = <11>;
+ coresight-name = "coresight-etm1";
+ coresight-nr-inports = <0>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_kpss>;
+ coresight-child-ports = <1>;
+
+ qcom,pc-save;
+ qcom,round-robin;
+ };
+
+ etm2: etm@fc34e000 {
+ compatible = "arm,coresight-etm";
+ reg = <0xfc34e000 0x1000>;
+ reg-names = "etm-base";
+
+ coresight-id = <12>;
+ coresight-name = "coresight-etm2";
+ coresight-nr-inports = <0>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_kpss>;
+ coresight-child-ports = <2>;
+
+ qcom,pc-save;
+ qcom,round-robin;
+ };
+
+ etm3: etm@fc34f000 {
+ compatible = "arm,coresight-etm";
+ reg = <0xfc34f000 0x1000>;
+ reg-names = "etm-base";
+
+ coresight-id = <13>;
+ coresight-name = "coresight-etm3";
+ coresight-nr-inports = <0>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_kpss>;
+ coresight-child-ports = <3>;
+
+ qcom,pc-save;
+ qcom,round-robin;
+ };
+
csr: csr@fc301000 {
compatible = "qcom,coresight-csr";
reg = <0xfc301000 0x1000>;
@@ -142,4 +208,144 @@
qcom,blk-size = <3>;
};
+
+ cti0: cti@fc310000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc310000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <15>;
+ coresight-name = "coresight-cti0";
+ coresight-nr-inports = <0>;
+ };
+
+ cti1: cti@fc311000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc311000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <16>;
+ coresight-name = "coresight-cti1";
+ coresight-nr-inports = <0>;
+ };
+
+ cti2: cti@fc312000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc312000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <17>;
+ coresight-name = "coresight-cti2";
+ coresight-nr-inports = <0>;
+ };
+
+ cti3: cti@fc313000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc313000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <18>;
+ coresight-name = "coresight-cti3";
+ coresight-nr-inports = <0>;
+ };
+
+ cti4: cti@fc314000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc314000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <19>;
+ coresight-name = "coresight-cti4";
+ coresight-nr-inports = <0>;
+ };
+
+ cti5: cti@fc315000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc315000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <20>;
+ coresight-name = "coresight-cti5";
+ coresight-nr-inports = <0>;
+ };
+
+ cti6: cti@fc316000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc316000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <21>;
+ coresight-name = "coresight-cti6";
+ coresight-nr-inports = <0>;
+ };
+
+ cti7: cti@fc317000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc317000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <22>;
+ coresight-name = "coresight-cti7";
+ coresight-nr-inports = <0>;
+ };
+
+ cti8: cti@fc318000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc318000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <23>;
+ coresight-name = "coresight-cti8";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_l2: cti@fc340000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc340000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <24>;
+ coresight-name = "coresight-cti-l2";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu0: cti@fc341000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc341000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <25>;
+ coresight-name = "coresight-cti-cpu0";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu1: cti@fc342000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc342000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <26>;
+ coresight-name = "coresight-cti-cpu1";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu2: cti@fc343000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc343000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <27>;
+ coresight-name = "coresight-cti-cpu2";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu3: cti@fc344000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc344000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-id = <28>;
+ coresight-name = "coresight-cti-cpu3";
+ coresight-nr-inports = <0>;
+ };
};
diff --git a/arch/arm/boot/dts/apq8084-regulator.dtsi b/arch/arm/boot/dts/apq8084-regulator.dtsi
index 998b469..0c9ca7d 100644
--- a/arch/arm/boot/dts/apq8084-regulator.dtsi
+++ b/arch/arm/boot/dts/apq8084-regulator.dtsi
@@ -349,6 +349,16 @@
qcom,regulator-type = <1>;
qcom,hpm-min-load = <100000>;
+ pma8084_s2_corner: regulator-s2-corner {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8084_s2_corner";
+ qcom,set = <3>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-corner;
+ qcom,consumer-supplies = "vdd_dig", "";
+ };
+
pma8084_s2_corner_ao: regulator-s2-corner-ao {
compatible = "qcom,rpm-regulator-smd";
regulator-name = "8084_s2_corner_ao";
diff --git a/arch/arm/boot/dts/apq8084.dtsi b/arch/arm/boot/dts/apq8084.dtsi
index 3014813..943f2a3 100644
--- a/arch/arm/boot/dts/apq8084.dtsi
+++ b/arch/arm/boot/dts/apq8084.dtsi
@@ -164,6 +164,37 @@
qcom,scl-gpio = <&msmgpio 11 0>;
};
+ qcom,usbbam@f9304000 {
+ compatible = "qcom,usb-bam-msm";
+ reg = <0xf9304000 0x5000>,
+ <0xf92f880c 0x4>;
+ reg-names = "ssusb", "qscratch_ram1_reg";
+ interrupts = <0 132 0>;
+ interrupt-names = "ssusb";
+ qcom,usb-bam-num-pipes = <16>;
+ qcom,usb-bam-fifo-baseaddr = <0x00000000 0xf9200000>;
+ qcom,ignore-core-reset-ack;
+ qcom,disable-clk-gating;
+
+ qcom,pipe0 {
+ label = "ssusb-qdss-in-0";
+ qcom,usb-bam-mem-type = <1>;
+ qcom,bam-type = <0>;
+ qcom,dir = <1>;
+ qcom,pipe-num = <0>;
+ qcom,peer-bam = <1>;
+ qcom,src-bam-physical-address = <0xfc37C000>;
+ qcom,src-bam-pipe-index = <0>;
+ qcom,dst-bam-physical-address = <0xf9304000>;
+ qcom,dst-bam-pipe-index = <2>;
+ qcom,data-fifo-offset = <0xf0000>;
+ qcom,data-fifo-size = <0x1800>;
+ qcom,descriptor-fifo-offset = <0xf4000>;
+ qcom,descriptor-fifo-size = <0x1400>;
+ qcom,reset-bam-on-connect;
+ };
+ };
+
usb3: qcom,ssusb@f9200000 {
compatible = "qcom,dwc-usb3-msm";
reg = <0xf9200000 0xfc000>,
@@ -304,6 +335,21 @@
interrupts = <0 28 0>;
status = "disabled";
};
+
+ qcom,wdt@f9017000 {
+ compatible = "qcom,msm-watchdog";
+ reg = <0xf9017000 0x1000>;
+ interrupts = <0 3 0>, <0 4 0>;
+ qcom,bark-time = <11000>;
+ qcom,pet-time = <10000>;
+ qcom,ipi-ping;
+ };
+
+ qcom,msm-rng@f9bff000{
+ compatible = "qcom,msm-rng";
+ reg = <0xf9bff000 0x200>;
+ qcom,msm-rng-iface-clk;
+ };
};
&gdsc_venus {
diff --git a/arch/arm/boot/dts/dsi-v2-panel-otm8018b-fwvga-video.dtsi b/arch/arm/boot/dts/dsi-v2-panel-otm8018b-fwvga-video.dtsi
new file mode 100644
index 0000000..c2c64da
--- /dev/null
+++ b/arch/arm/boot/dts/dsi-v2-panel-otm8018b-fwvga-video.dtsi
@@ -0,0 +1,261 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ qcom,dsi_v2_otm8018b_fwvga_video {
+ compatible = "qcom,dsi-panel-v2";
+ label = "OTM8018B FWVGA video mode dsi panel";
+ qcom,dsi-ctrl-phandle = <&mdss_dsi0>;
+ qcom,rst-gpio = <&msmgpio 41 0>;
+ vdda-supply = <&pm8110_l19>;
+		vddio-supply = <&pm8110_l14>;
+ qcom,mdss-pan-res = <480 854>;
+ qcom,mdss-pan-bpp = <24>;
+ qcom,mdss-pan-dest = "display_1";
+ qcom,mdss-pan-porch-values = <54 8 80 16 2 12>;
+ qcom,mdss-pan-underflow-clr = <0xff>;
+ qcom,mdss-pan-bl-levels = <1 255>;
+ qcom,mdss-pan-bl-ctrl = "bl_ctrl_wled";
+ qcom,mdss-pan-dsi-mode = <0>;
+ qcom,mdss-pan-dsi-h-pulse-mode = <0>;
+ qcom,mdss-pan-dsi-h-power-stop = <0 0 0>;
+ qcom,mdss-pan-dsi-bllp-power-stop = <1 1>;
+ qcom,mdss-pan-dsi-traffic-mode = <1>;
+ qcom,mdss-pan-dsi-dst-format = <3>;
+ qcom,mdss-pan-dsi-vc = <0>;
+ qcom,mdss-pan-dsi-rgb-swap = <0>;
+ qcom,mdss-pan-dsi-data-lanes = <1 1 0 0>;
+ qcom,mdss-pan-dsi-dlane-swap = <1>;
+ qcom,mdss-pan-dsi-t-clk = <0x1b 0x04>;
+ qcom,mdss-pan-dsi-stream = <0>;
+ qcom,mdss-pan-dsi-mdp-tr = <0x0>;/*todo*/
+ qcom,mdss-pan-dsi-dma-tr = <0x04>;
+ qcom,mdss-pan-dsi-frame-rate = <60>;
+		qcom,panel-phy-regulatorSettings = [02 08 05 00 20 03];
+ qcom,panel-phy-timingSettings = [8B 1F 14 00 45 4A
+ 19 23 23 03 04 00];
+ qcom,panel-phy-strengthCtrl = [ff 06];
+ qcom,panel-phy-bistCtrl = [03 03 00 00 0f 00];
+ qcom,panel-phy-laneConfig =
+			[80 45 00 00 01 66 /*lane0*/
+ 80 45 00 00 01 66 /*lane1*/
+ 80 45 00 00 01 66 /*lane2*/
+ 80 45 00 00 01 66 /*lane3*/
+ 40 67 00 00 01 88]; /*Clk*/
+
+ qcom,on-cmds-dsi-state = "DSI_LP_MODE";
+ qcom,panel-on-cmds = [
+ 29 01 00 00 00 02
+ 00 00
+ 29 01 00 00 00 04
+ ff 80 09 01
+ 29 01 00 00 00 02
+ 00 80
+ 29 01 00 00 00 03
+ ff 80 09
+ 29 01 00 00 00 02
+ 00 80
+ 29 01 00 00 00 02
+ d6 48
+ 29 01 00 00 00 02
+ 00 03
+ 29 01 00 00 00 02
+ ff 01
+ 29 01 00 00 00 02
+ 00 B4
+ 29 01 00 00 00 02
+ C0 10
+ 29 01 00 00 00 02
+ 00 82
+ 29 01 00 00 00 02
+ C5 A3
+ 29 01 00 00 00 02
+ 00 90
+ 29 01 00 00 00 03
+ C5 96 87
+ 29 01 00 00 00 02
+ 00 00
+ 29 01 00 00 00 03
+ D8 74 72
+ 29 01 00 00 00 02
+ 00 00
+ 29 01 00 00 00 02
+ D9 56
+ 29 01 00 00 00 02
+ 00 00
+ 29 01 00 00 00 11
+ E1 00 06 0A
+ 07 03 16 08
+ 0A 04 06 07
+ 08 0F 23 22
+ 05
+ 29 01 00 00 00 02
+ 00 00
+ 29 01 00 00 00 11
+ E2 00 06 0A
+ 07 03 16 08
+ 0A 04 06 07
+ 08 0F 23 22
+ 05
+ 29 01 00 00 00 02
+ 00 81
+ 29 01 00 00 00 02
+ C1 77
+ 29 01 00 00 00 02
+ 00 A0
+ 29 01 00 00 00 02
+ C1 EA
+ 29 01 00 00 00 02
+ 00 A1
+ 29 01 00 00 00 02
+ C1 08
+ 29 01 00 00 00 02
+ 00 89
+ 29 01 00 00 00 02
+ C4 08
+ 29 01 00 00 00 02
+ 00 81
+ 29 01 00 00 00 02
+ C4 83
+ 29 01 00 00 00 02
+ 00 92
+ 29 01 00 00 00 02
+ C5 01
+ 29 01 00 00 00 02
+ 00 B1
+ 29 01 00 00 00 02
+ C5 A9
+ 29 01 00 00 00 02
+ 00 92
+ 29 01 00 00 00 02
+ B3 45
+ 29 01 00 00 00 02
+ 00 90
+ 29 01 00 00 00 02
+ B3 02
+ 29 01 00 00 00 02
+ 00 80
+ 29 01 00 00 00 06
+ C0 00 58 00
+ 14 16
+ 29 01 00 00 00 02
+ 00 80
+ 29 01 00 00 00 02
+ C4 30
+ 29 01 00 00 00 02
+ 00 90
+ 29 01 00 00 00 07
+ C0 00 44 00
+ 00 00 03
+ 29 01 00 00 00 02
+ 00 A6
+ 29 01 00 00 00 04
+ C1 01 00 00
+ 29 01 00 00 00 02
+ 00 80
+ 29 01 00 00 00 0D
+ CE 87 03 00
+ 85 03 00 86
+ 03 00 84 03
+ 00
+ 29 01 00 00 00 02
+ 00 A0
+ 29 01 00 00 00 0f
+ CE 38 03 03
+ 58 00 00 00
+ 38 02 03 59
+ 00 00 00
+ 29 01 00 00 00 02
+ 00 B0
+ 29 01 00 00 00 0f
+ CE 38 01 03
+ 5A 00 00 00
+ 38 00 03 5B
+ 00 00 00
+ 29 01 00 00 00 02
+ 00 C0
+ 29 01 00 00 00 0f
+ CE 30 00 03
+ 5C 00 00 00
+ 30 01 03 5D
+ 00 00 00
+ 29 01 00 00 00 02
+ 00 D0
+ 29 01 00 00 00 0f
+ CE 30 02 03
+ 5E 00 00 00
+ 30 03 03 5F
+ 00 00 00
+ 29 01 00 00 00 02
+ 00 C7
+ 29 01 00 00 00 02
+ CF 00
+ 29 01 00 00 00 02
+ 00 C9
+ 29 01 00 00 00 02
+ CF 00
+ 29 01 00 00 00 02
+ 00 D0
+ 29 01 00 00 00 02
+ CF 00
+ 29 01 00 00 00 02
+ 00 C4
+ 29 01 00 00 00 07
+ CB 04 04 04
+ 04 04 04
+ 29 01 00 00 00 02
+ 00 D9
+ 29 01 00 00 00 07
+ CB 04 04 04
+ 04 04 04
+ 29 01 00 00 00 02
+ 00 84
+ 29 01 00 00 00 07
+ CC 0C 0A 10
+ 0E 03 04
+ 29 01 00 00 00 02
+ 00 9E
+ 29 01 00 00 00 02
+ CC 0B
+ 29 01 00 00 00 02
+ 00 A0
+ 29 01 00 00 00 06
+ CC 09 0F 0D
+ 01 02
+ 29 01 00 00 00 02
+ 00 B4
+ 29 01 00 00 00 07
+ CC 0D 0F 09
+ 0B 02 01
+ 29 01 00 00 00 02
+ 00 CE
+ 29 01 00 00 00 02
+ CC 0E
+ 29 01 00 00 00 02
+ 00 D0
+ 29 01 00 00 00 06
+ CC 10 0A 0C
+ 04 03
+ 29 01 00 00 00 02
+ 00 00
+ 29 01 00 00 00 04
+ ff ff ff ff
+ 05 01 00 00 78 02
+ 11 00
+ 05 01 00 00 32 02
+ 29 00
+ ];
+ qcom,panel-off-cmds = [05 01 00 00 32 02 28 00
+ 05 01 00 00 78 02 10 00];
+ qcom,off-cmds-dsi-state = "DSI_LP_MODE";
+ };
+};
diff --git a/arch/arm/boot/dts/mpq8092-iommu-domains.dtsi b/arch/arm/boot/dts/mpq8092-iommu-domains.dtsi
new file mode 100644
index 0000000..25fca2a
--- /dev/null
+++ b/arch/arm/boot/dts/mpq8092-iommu-domains.dtsi
@@ -0,0 +1,31 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,iommu-domains {
+ compatible = "qcom,iommu-domains";
+
+ venus_domain_ns: qcom,iommu-domain1 {
+ label = "venus_ns";
+ qcom,iommu-contexts = <&venus_ns>;
+ qcom,virtual-addr-pool = <0x40000000 0x3f000000
+ 0x7f000000 0x1000000>;
+ };
+
+ venus_domain_cp: qcom,iommu-domain2 {
+ label = "venus_cp";
+ qcom,iommu-contexts = <&venus_cp>;
+ qcom,virtual-addr-pool = <0x1000000 0x3f000000>;
+ qcom,secure-domain;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/mpq8092.dtsi b/arch/arm/boot/dts/mpq8092.dtsi
index 04f0945..2f67f3e 100644
--- a/arch/arm/boot/dts/mpq8092.dtsi
+++ b/arch/arm/boot/dts/mpq8092.dtsi
@@ -21,6 +21,7 @@
};
/include/ "mpq8092-iommu.dtsi"
+/include/ "mpq8092-iommu-domains.dtsi"
/include/ "msm-gdsc.dtsi"
/include/ "mpq8092-ion.dtsi"
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index fc828b7..2460377 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -1329,6 +1329,12 @@
label = "wled";
};
+ qcom,leds@e200 {
+ compatible = "qcom,leds-qpnp";
+ reg = <0xe200 0x100>;
+ label = "kpdbl";
+ };
+
pwm@b100 {
compatible = "qcom,qpnp-pwm";
reg = <0xb100 0x100>,
@@ -1392,4 +1398,36 @@
reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
qcom,channel-id = <7>;
};
+
+ pwm@e400 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xe400 0x100>,
+ <0xe342 0x1e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <8>;
+ };
+
+ pwm@e500 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xe500 0x100>,
+ <0xe342 0x1e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <9>;
+ };
+
+ pwm@e600 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xe600 0x100>,
+ <0xe342 0x1e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <10>;
+ };
+
+ pwm@e700 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xe700 0x100>,
+ <0xe342 0x1e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <11>;
+ };
};
diff --git a/arch/arm/boot/dts/msm8226-camera.dtsi b/arch/arm/boot/dts/msm8226-camera.dtsi
index ec0092d..617d738 100644
--- a/arch/arm/boot/dts/msm8226-camera.dtsi
+++ b/arch/arm/boot/dts/msm8226-camera.dtsi
@@ -125,15 +125,39 @@
qcom,gpio-tbl-flags = <1 1>;
qcom,gpio-tbl-label = "CCI_I2C_DATA0",
"CCI_I2C_CLK0";
- qcom,hw-thigh = <78>;
- qcom,hw-tlow = <114>;
- qcom,hw-tsu-sto = <28>;
- qcom,hw-tsu-sta = <28>;
- qcom,hw-thd-dat = <10>;
- qcom,hw-thd-sta = <77>;
- qcom,hw-tbuf = <118>;
- qcom,hw-scl-stretch-en = <0>;
- qcom,hw-trdhld = <6>;
- qcom,hw-tsp = <1>;
+ master0: qcom,cci-master0 {
+ status = "disabled";
+ };
+ master1: qcom,cci-master1 {
+ status = "disabled";
+ };
};
};
+
+&master0 {
+ qcom,hw-thigh = <78>;
+ qcom,hw-tlow = <114>;
+ qcom,hw-tsu-sto = <28>;
+ qcom,hw-tsu-sta = <28>;
+ qcom,hw-thd-dat = <10>;
+ qcom,hw-thd-sta = <77>;
+ qcom,hw-tbuf = <118>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <1>;
+ status = "ok";
+};
+
+&master1 {
+ qcom,hw-thigh = <78>;
+ qcom,hw-tlow = <114>;
+ qcom,hw-tsu-sto = <28>;
+ qcom,hw-tsu-sta = <28>;
+ qcom,hw-thd-dat = <10>;
+ qcom,hw-thd-sta = <77>;
+ qcom,hw-tbuf = <118>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <1>;
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/msm8226-cdp.dtsi b/arch/arm/boot/dts/msm8226-cdp.dtsi
index 7acb1fe..d94b41d 100644
--- a/arch/arm/boot/dts/msm8226-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8226-cdp.dtsi
@@ -35,6 +35,7 @@
synaptics,irq-gpio = <&msmgpio 17 0x2008>;
synaptics,button-map = <139 102 158>;
synaptics,i2c-pull-up;
+ synaptics,power-down;
};
};
@@ -75,7 +76,7 @@
compatible = "micrel,ks8851";
reg = <3>;
interrupt-parent = <&msmgpio>;
- interrupts = <0 115 0>;
+ interrupts = <115 0x8>;
spi-max-frequency = <4800000>;
rst-gpio = <&msmgpio 114 0>;
vdd-io-supply = <&pm8226_lvs1>;
diff --git a/arch/arm/boot/dts/msm8226-coresight.dtsi b/arch/arm/boot/dts/msm8226-coresight.dtsi
index 11f6369..cbfdfc9 100644
--- a/arch/arm/boot/dts/msm8226-coresight.dtsi
+++ b/arch/arm/boot/dts/msm8226-coresight.dtsi
@@ -16,6 +16,8 @@
reg = <0xfc322000 0x1000>,
<0xfc37c000 0x3000>;
reg-names = "tmc-base", "bam-base";
+ interrupts = <0 166 0>;
+ interrupt-names = "byte-cntr-irq";
qcom,memory-reservation-type = "EBI1";
qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
diff --git a/arch/arm/boot/dts/msm8226-mtp.dtsi b/arch/arm/boot/dts/msm8226-mtp.dtsi
index ddb61bc..825e853 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8226-mtp.dtsi
@@ -35,6 +35,7 @@
synaptics,irq-gpio = <&msmgpio 17 0x2008>;
synaptics,button-map = <139 102 158>;
synaptics,i2c-pull-up;
+ synaptics,power-down;
};
};
@@ -86,7 +87,7 @@
compatible = "micrel,ks8851";
reg = <3>;
interrupt-parent = <&msmgpio>;
- interrupts = <0 115 0>;
+ interrupts = <115 0x8>;
spi-max-frequency = <4800000>;
rst-gpio = <&msmgpio 114 0>;
vdd-io-supply = <&pm8226_lvs1>;
diff --git a/arch/arm/boot/dts/msm8226-pm.dtsi b/arch/arm/boot/dts/msm8226-pm.dtsi
index 0fc6af4..ef0a55e 100644
--- a/arch/arm/boot/dts/msm8226-pm.dtsi
+++ b/arch/arm/boot/dts/msm8226-pm.dtsi
@@ -101,62 +101,16 @@
50 f0 0f]; /*APCS_PMIC_OFF_L2RAM_OFF*/
};
- qcom,lpm-resources {
- compatible = "qcom,lpm-resources";
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,lpm-resources@0 {
- reg = <0x0>;
- qcom,name = "vdd-dig";
- qcom,type = <0x61706d73>; /* "smpa" */
- qcom,id = <0x01>;
- qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value = <3>; /* SVS SOC */
- };
-
- qcom,lpm-resources@1 {
- reg = <0x1>;
- qcom,name = "vdd-mem";
- qcom,type = <0x616F646C>; /* "ldoa" */
- qcom,id = <0x03>;
- qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value = <3>; /* SVS SOC */
- };
-
- qcom,lpm-resources@2 {
- reg = <0x2>;
- qcom,name = "pxo";
- qcom,type = <0x306b6c63>; /* "clk0" */
- qcom,id = <0x00>;
- qcom,key = <0x62616e45>; /* "Enab" */
- qcom,init-value = "xo_on";
- };
-
- qcom,lpm-resources@3 {
- reg = <0x3>;
- qcom,name = "l2";
- qcom,local-resource-type;
- qcom,init-value = "l2_cache_active";
- };
- };
-
qcom,lpm-levels {
compatible = "qcom,lpm-levels";
+ qcom,default-l2-state = "l2_cache_active";
#address-cells = <1>;
#size-cells = <0>;
qcom,lpm-level@0 {
reg = <0x0>;
qcom,mode = "wfi";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <4>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
qcom,latency-us = <1>;
qcom,ss-power = <784>;
qcom,energy-overhead = <190000>;
@@ -166,14 +120,7 @@
qcom,lpm-level@1 {
reg = <0x1>;
qcom,mode = "standalone_pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <4>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
qcom,latency-us = <3000>;
qcom,ss-power = <725>;
qcom,energy-overhead = <99500>;
@@ -183,15 +130,8 @@
qcom,lpm-level@2 {
reg = <0x2>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_retention";
- qcom,vdd-mem-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <4>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
- qcom,latency-us = <8000>;
+ qcom,latency-us = <20000>;
qcom,ss-power = <138>;
qcom,energy-overhead = <1208400>;
qcom,time-overhead = <9200>;
@@ -200,64 +140,12 @@
qcom,lpm-level@3 {
reg = <0x3>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <4>; /* NORMAL */
- qcom,vdd-mem-lower-bound = <3>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
- qcom,latency-us = <9000>;
+ qcom,latency-us = <30000>;
qcom,ss-power = <110>;
qcom,energy-overhead = <1250300>;
qcom,time-overhead = <9500>;
};
-
- qcom,lpm-level@4 {
- reg = <0x4>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <4>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,latency-us = <16300>;
- qcom,ss-power = <63>;
- qcom,energy-overhead = <2128000>;
- qcom,time-overhead = <24200>;
- };
-
- qcom,lpm-level@5 {
- reg = <0x5>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <4>; /* NORMAL */
- qcom,vdd-mem-lower-bound = <3>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,latency-us = <24000>;
- qcom,ss-power = <10>;
- qcom,energy-overhead = <3202600>;
- qcom,time-overhead = <33000>;
- };
-
- qcom,lpm-level@6 {
- reg = <0x6>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <3>; /* SVS SOC */
- qcom,vdd-mem-lower-bound = <1>; /* RETENTION */
- qcom,vdd-dig-upper-bound = <3>; /* SVS SOC */
- qcom,vdd-dig-lower-bound = <1>; /* RETENTION */
- qcom,latency-us = <26000>;
- qcom,ss-power = <2>;
- qcom,energy-overhead = <4252000>;
- qcom,time-overhead = <38000>;
- };
};
qcom,pm-boot {
diff --git a/arch/arm/boot/dts/msm8226-qrd.dtsi b/arch/arm/boot/dts/msm8226-qrd.dtsi
index 9bf37af..55d8691 100644
--- a/arch/arm/boot/dts/msm8226-qrd.dtsi
+++ b/arch/arm/boot/dts/msm8226-qrd.dtsi
@@ -31,6 +31,7 @@
synaptics,irq-gpio = <&msmgpio 17 0x2008>;
synaptics,button-map = <139 102 158>;
synaptics,i2c-pull-up;
+ synaptics,power-down;
};
focaltech@38 {
compatible = "focaltech,5x06";
@@ -98,7 +99,7 @@
compatible = "micrel,ks8851";
reg = <3>;
interrupt-parent = <&msmgpio>;
- interrupts = <0 115 0>;
+ interrupts = <115 0x8>;
spi-max-frequency = <4800000>;
rst-gpio = <&msmgpio 114 0>;
vdd-io-supply = <&pm8226_lvs1>;
diff --git a/arch/arm/boot/dts/msm8226-regulator.dtsi b/arch/arm/boot/dts/msm8226-regulator.dtsi
index 571ddc3..d587b77 100644
--- a/arch/arm/boot/dts/msm8226-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8226-regulator.dtsi
@@ -43,7 +43,7 @@
qcom,pvs-bin-process = <0 0 0 0 0 1 1 1 1 1 2 2 2 2 2 2
2 2 2 2 3 3 3 3 3 3 3 3 0 0 0 0>;
- qcom,pvs-corner-ceiling-slow = <1155000 1160000 1275000>;
+ qcom,pvs-corner-ceiling-slow = <1050000 1160000 1275000>;
qcom,pvs-corner-ceiling-nom = <1050000 1075000 1200000>;
qcom,pvs-corner-ceiling-fast = <1050000 1050000 1140000>;
vdd-apc-supply = <&pm8226_s2>;
diff --git a/arch/arm/boot/dts/msm8226-v1-qrd-dvt.dts b/arch/arm/boot/dts/msm8226-v1-qrd-dvt.dts
index d36e93e..45c26c5 100644
--- a/arch/arm/boot/dts/msm8226-v1-qrd-dvt.dts
+++ b/arch/arm/boot/dts/msm8226-v1-qrd-dvt.dts
@@ -27,5 +27,6 @@
&soc {
qcom,mdss_dsi_hx8394a_720p_video {
status = "ok";
+ qcom,cont-splash-enabled;
};
};
diff --git a/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts b/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts
index f35e2e4..0a3148b 100644
--- a/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts
+++ b/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts
@@ -13,7 +13,6 @@
/dts-v1/;
/include/ "msm8226-v2.dtsi"
/include/ "msm8226-qrd.dtsi"
-/include/ "msm8226-camera-sensor-cdp.dtsi"
/include/ "dsi-panel-hx8394a-720p-video.dtsi"
/ {
@@ -29,5 +28,6 @@
&soc {
qcom,mdss_dsi_hx8394a_720p_video {
status = "ok";
+ qcom,cont-splash-enabled;
};
};
diff --git a/arch/arm/boot/dts/msm8226-v2.dtsi b/arch/arm/boot/dts/msm8226-v2.dtsi
index b44cb681..1dab78a 100644
--- a/arch/arm/boot/dts/msm8226-v2.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2.dtsi
@@ -42,7 +42,7 @@
&apc_vreg_corner {
qcom,pvs-bin-process = <1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2
2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3>;
- qcom,pvs-corner-ceiling-slow = <1160000 1160000 1280000>;
+ qcom,pvs-corner-ceiling-slow = <1050000 1160000 1280000>;
qcom,pvs-corner-ceiling-nom = <1050000 1080000 1200000>;
qcom,pvs-corner-ceiling-fast = <1050000 1050000 1140000>;
qcom,cpr-step-quotient = <30>;
diff --git a/arch/arm/boot/dts/msm8610-ion.dtsi b/arch/arm/boot/dts/msm8610-ion.dtsi
index d625b95..77cd582 100644
--- a/arch/arm/boot/dts/msm8610-ion.dtsi
+++ b/arch/arm/boot/dts/msm8610-ion.dtsi
@@ -38,14 +38,14 @@
compatible = "qcom,msm-ion-reserve";
reg = <23>;
qcom,heap-align = <0x1000>;
- qcom,memory-fixed = <0x0bf00000 0x1A00000>;
+ qcom,memory-fixed = <0x0c500000 0x1300000>;
};
qcom,ion-heap@26 { /* MODEM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <26>;
qcom,heap-align = <0x1000>;
- qcom,memory-fixed = <0x08000000 0x3F00000>;
+ qcom,memory-fixed = <0x08800000 0x3d00000>;
};
};
diff --git a/arch/arm/boot/dts/msm8610-pm.dtsi b/arch/arm/boot/dts/msm8610-pm.dtsi
index c531740..d31a65c 100644
--- a/arch/arm/boot/dts/msm8610-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-pm.dtsi
@@ -101,62 +101,16 @@
50 f0 0f]; /*APCS_PMIC_OFF_L2RAM_OFF*/
};
- qcom,lpm-resources {
- compatible = "qcom,lpm-resources";
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,lpm-resources@0 {
- reg = <0x0>;
- qcom,name = "vdd-dig";
- qcom,type = <0x61706d73>; /* "smpa" */
- qcom,id = <0x01>;
- qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value = <3>; /* SVS SOC */
- };
-
- qcom,lpm-resources@1 {
- reg = <0x1>;
- qcom,name = "vdd-mem";
- qcom,type = <0x616F646C>; /* "ldoa" */
- qcom,id = <0x03>;
- qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value = <3>; /* SVS SOC */
- };
-
- qcom,lpm-resources@2 {
- reg = <0x2>;
- qcom,name = "pxo";
- qcom,type = <0x306b6c63>; /* "clk0" */
- qcom,id = <0x00>;
- qcom,key = <0x62616e45>; /* "Enab" */
- qcom,init-value = "xo_on";
- };
-
- qcom,lpm-resources@3 {
- reg = <0x3>;
- qcom,name = "l2";
- qcom,local-resource-type;
- qcom,init-value = "l2_cache_active";
- };
- };
-
qcom,lpm-levels {
compatible = "qcom,lpm-levels";
+ qcom,default-l2-state = "l2_cache_active";
#address-cells = <1>;
#size-cells = <0>;
qcom,lpm-level@0 {
reg = <0x0>;
qcom,mode = "wfi";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <4>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
qcom,latency-us = <1>;
qcom,ss-power = <784>;
qcom,energy-overhead = <190000>;
@@ -166,14 +120,7 @@
qcom,lpm-level@1 {
reg = <0x1>;
qcom,mode = "standalone_pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <4>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
qcom,latency-us = <3000>;
qcom,ss-power = <725>;
qcom,energy-overhead = <99500>;
@@ -183,15 +130,8 @@
qcom,lpm-level@2 {
reg = <0x2>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_retention";
- qcom,vdd-mem-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <4>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
- qcom,latency-us = <8000>;
+ qcom,latency-us = <20000>;
qcom,ss-power = <138>;
qcom,energy-overhead = <1208400>;
qcom,time-overhead = <9200>;
@@ -200,64 +140,12 @@
qcom,lpm-level@3 {
reg = <0x3>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <4>; /* NORMAL */
- qcom,vdd-mem-lower-bound = <3>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
- qcom,latency-us = <9000>;
+ qcom,latency-us = <30000>;
qcom,ss-power = <110>;
qcom,energy-overhead = <1250300>;
qcom,time-overhead = <9500>;
};
-
- qcom,lpm-level@4 {
- reg = <0x4>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <4>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,latency-us = <16300>;
- qcom,ss-power = <63>;
- qcom,energy-overhead = <2128000>;
- qcom,time-overhead = <24200>;
- };
-
- qcom,lpm-level@5 {
- reg = <0x5>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <4>; /* NORMAL */
- qcom,vdd-mem-lower-bound = <3>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,latency-us = <24000>;
- qcom,ss-power = <10>;
- qcom,energy-overhead = <3202600>;
- qcom,time-overhead = <33000>;
- };
-
- qcom,lpm-level@6 {
- reg = <0x6>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <3>; /* SVS SOC */
- qcom,vdd-mem-lower-bound = <1>; /* RETENTION */
- qcom,vdd-dig-upper-bound = <3>; /* SVS SOC */
- qcom,vdd-dig-lower-bound = <1>; /* RETENTION */
- qcom,latency-us = <26000>;
- qcom,ss-power = <2>;
- qcom,energy-overhead = <4252000>;
- qcom,time-overhead = <38000>;
- };
};
qcom,pm-boot {
diff --git a/arch/arm/boot/dts/msm8610-qrd-skuaa.dts b/arch/arm/boot/dts/msm8610-qrd-skuaa.dts
index 220f642..aeaf8ca 100644
--- a/arch/arm/boot/dts/msm8610-qrd-skuaa.dts
+++ b/arch/arm/boot/dts/msm8610-qrd-skuaa.dts
@@ -40,3 +40,8 @@
qcom,ext-spk-amp-gpio = <&msmgpio 92 0x0>;
};
};
+
+&pm8110_bms {
+ status = "ok";
+ qcom,batt-type = <5>;
+};
diff --git a/arch/arm/boot/dts/msm8610-qrd-skuab.dts b/arch/arm/boot/dts/msm8610-qrd-skuab.dts
index 24c2490..947a312 100644
--- a/arch/arm/boot/dts/msm8610-qrd-skuab.dts
+++ b/arch/arm/boot/dts/msm8610-qrd-skuab.dts
@@ -13,6 +13,7 @@
/dts-v1/;
/include/ "msm8610-qrd.dtsi"
+/include/ "dsi-v2-panel-otm8018b-fwvga-video.dtsi"
/include/ "msm8612-qrd-camera-sensor.dtsi"
/ {
@@ -75,7 +76,11 @@
vdd-supply = <&pm8110_l19>;
vio-supply = <&pm8110_l14>;
fsl,irq-gpio = <&msmgpio 81 0x00>;
- fsl,sensors-position = <5>;
+ fsl,sensors-position = <1>;
};
};
+
+ qcom,dsi_v2_otm8018b_fwvga_video {
+ status = "ok";
+ };
};
diff --git a/arch/arm/boot/dts/msm8610-regulator.dtsi b/arch/arm/boot/dts/msm8610-regulator.dtsi
index cf54098..34cbd99 100644
--- a/arch/arm/boot/dts/msm8610-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8610-regulator.dtsi
@@ -43,9 +43,9 @@
qcom,pvs-bin-process = <1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1>;
- qcom,pvs-corner-ceiling-slow = <1150000 1150000 1275000>;
- qcom,pvs-corner-ceiling-nom = <1075000 1075000 1200000>;
- qcom,pvs-corner-ceiling-fast = <1000000 1000000 1140000>;
+ qcom,pvs-corner-ceiling-slow = <1050000 1150000 1275000>;
+ qcom,pvs-corner-ceiling-nom = <1050000 1075000 1200000>;
+ qcom,pvs-corner-ceiling-fast = <1050000 1050000 1140000>;
vdd-apc-supply = <&pm8110_s2>;
vdd-mx-supply = <&pm8110_l3_ao>;
@@ -54,12 +54,12 @@
qcom,cpr-ref-clk = <19200>;
qcom,cpr-timer-delay = <5000>;
- qcom,cpr-timer-cons-up = <1>;
+ qcom,cpr-timer-cons-up = <0>;
qcom,cpr-timer-cons-down = <2>;
qcom,cpr-irq-line = <0>;
qcom,cpr-step-quotient = <15>;
- qcom,cpr-up-threshold = <1>;
- qcom,cpr-down-threshold = <2>;
+ qcom,cpr-up-threshold = <0>;
+ qcom,cpr-down-threshold = <10>;
qcom,cpr-idle-clocks = <5>;
qcom,cpr-gcnt-time = <1>;
qcom,vdd-apc-step-up-limit = <1>;
@@ -77,6 +77,8 @@
qcom,cpr-fuse-redun-bp-scheme = <25>;
qcom,cpr-fuse-redun-target-quot = <32 12 0>;
qcom,cpr-fuse-redun-ro-sel = <44 26 29>;
+
+ qcom,cpr-enable;
};
};
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index ee444e3..0078861 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -215,9 +215,10 @@
reg = <0xf9a55000 0x400>;
interrupts = <0 134 0>, <0 140 0>;
interrupt-names = "core_irq", "async_irq";
- HSUSB_VDDCX-supply = <&pm8110_s1>;
+ hsusb_vdd_dig-supply = <&pm8110_s1_corner>;
HSUSB_1p8-supply = <&pm8110_l10>;
HSUSB_3p3-supply = <&pm8110_l20>;
+ qcom,vdd-voltage-level = <1 5 7>;
qcom,hsusb-otg-phy-init-seq =
<0x44 0x80 0x68 0x81 0x24 0x82 0x13 0x83 0xffffffff>;
@@ -419,7 +420,7 @@
qcom,msm-mem-hole {
compatible = "qcom,msm-mem-hole";
- qcom,memblock-remove = <0x07b00000 0x6400000>; /* Address and Size of Hole */
+ qcom,memblock-remove = <0x08800000 0x5600000>; /* Address and Size of Hole */
};
qcom,wdt@f9017000 {
@@ -802,9 +803,9 @@
interrupts = <0 29 1>;
};
- qcom,qseecom@7B00000 {
+ qcom,qseecom@da00000 {
compatible = "qcom,qseecom";
- reg = <0x7B00000 0x500000>;
+ reg = <0xda00000 0x100000>;
reg-names = "secapp-region";
qcom,disk-encrypt-pipe-pair = <2>;
qcom,hlos-ce-hw-instance = <0>;
diff --git a/arch/arm/boot/dts/msm8926.dtsi b/arch/arm/boot/dts/msm8926.dtsi
index d15459c..f46b714 100644
--- a/arch/arm/boot/dts/msm8926.dtsi
+++ b/arch/arm/boot/dts/msm8926.dtsi
@@ -73,3 +73,8 @@
qcom,cpr-down-threshold = <5>;
qcom,cpr-apc-volt-step = <10000>;
};
+
+&tsens {
+ qcom,sensors = <6>;
+ qcom,slope = <2901 2846 3038 2955 2901 2846>;
+};
diff --git a/arch/arm/boot/dts/msm8974-camera.dtsi b/arch/arm/boot/dts/msm8974-camera.dtsi
index 456b079..4be2b38 100644
--- a/arch/arm/boot/dts/msm8974-camera.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera.dtsi
@@ -196,18 +196,42 @@
qcom,gpio-tbl-num = <0 1 2 3>;
qcom,gpio-tbl-flags = <1 1 1 1>;
qcom,gpio-tbl-label = "CCI_I2C_DATA0",
- "CCI_I2C_CLK0",
- "CCI_I2C_DATA1",
- "CCI_I2C_CLK1";
- qcom,hw-thigh = <78>;
- qcom,hw-tlow = <114>;
- qcom,hw-tsu-sto = <28>;
- qcom,hw-tsu-sta = <28>;
- qcom,hw-thd-dat = <10>;
- qcom,hw-thd-sta = <77>;
- qcom,hw-tbuf = <118>;
- qcom,hw-scl-stretch-en = <0>;
- qcom,hw-trdhld = <6>;
- qcom,hw-tsp = <1>;
+ "CCI_I2C_CLK0",
+ "CCI_I2C_DATA1",
+ "CCI_I2C_CLK1";
+ master0: qcom,cci-master0 {
+ status = "disabled";
+ };
+ master1: qcom,cci-master1 {
+ status = "disabled";
+ };
};
};
+
+&master0 {
+ qcom,hw-thigh = <78>;
+ qcom,hw-tlow = <114>;
+ qcom,hw-tsu-sto = <28>;
+ qcom,hw-tsu-sta = <28>;
+ qcom,hw-thd-dat = <10>;
+ qcom,hw-thd-sta = <77>;
+ qcom,hw-tbuf = <118>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <1>;
+ status = "ok";
+};
+
+&master1 {
+ qcom,hw-thigh = <78>;
+ qcom,hw-tlow = <114>;
+ qcom,hw-tsu-sto = <28>;
+ qcom,hw-tsu-sta = <28>;
+ qcom,hw-thd-dat = <10>;
+ qcom,hw-thd-sta = <77>;
+ qcom,hw-tbuf = <118>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <1>;
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index 8a3a77e..e66ea25 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -15,6 +15,12 @@
/include/ "msm8974-leds.dtsi"
/include/ "msm8974-camera-sensor-cdp.dtsi"
+/ {
+ aliases {
+ serial0 = &blsp1_uart1;
+ };
+};
+
&soc {
serial@f991e000 {
status = "ok";
diff --git a/arch/arm/boot/dts/msm8974-coresight.dtsi b/arch/arm/boot/dts/msm8974-coresight.dtsi
index e41adac..9fd0cd9 100644
--- a/arch/arm/boot/dts/msm8974-coresight.dtsi
+++ b/arch/arm/boot/dts/msm8974-coresight.dtsi
@@ -16,6 +16,8 @@
reg = <0xfc322000 0x1000>,
<0xfc37c000 0x3000>;
reg-names = "tmc-base", "bam-base";
+ interrupts = <0 166 0>;
+ interrupt-names = "byte-cntr-irq";
qcom,memory-reservation-type = "EBI1";
qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index bbbe3bd..7f714e8 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -14,6 +14,12 @@
/include/ "msm8974-camera-sensor-fluid.dtsi"
/include/ "msm8974-leds.dtsi"
+/ {
+ aliases {
+ serial0 = &blsp1_uart1;
+ };
+};
+
&soc {
serial@f991e000 {
status = "ok";
diff --git a/arch/arm/boot/dts/msm8974-leds.dtsi b/arch/arm/boot/dts/msm8974-leds.dtsi
index ab57468..06abbd8 100644
--- a/arch/arm/boot/dts/msm8974-leds.dtsi
+++ b/arch/arm/boot/dts/msm8974-leds.dtsi
@@ -123,4 +123,60 @@
qcom,leds@d700 {
status = "disabled";
};
+
+ qcom,leds@e200 {
+ status = "okay";
+
+ qcom,kpdbl1 {
+ label = "kpdbl";
+ linux,name = "kpdbl-pwm-1";
+ qcom,mode = "pwm";
+ qcom,pwm-channel = <8>;
+ qcom,pwm-us = <1000>;
+ qcom,id = <7>;
+ qcom,max-current = <20>;
+ qcom,row-id = <0>;
+ qcom,row-src-en;
+ qcom,always-on;
+ };
+
+ qcom,kpdbl2 {
+ label = "kpdbl";
+ linux,name = "kpdbl-lut-2";
+ qcom,mode = "lpg";
+ qcom,pwm-channel = <9>;
+ qcom,pwm-us = <1000>;
+ qcom,start-idx = <1>;
+ qcom,duty-pcts = [00 00 00 00 64
+ 64 00 00 00 00];
+ qcom,id = <7>;
+ qcom,max-current = <20>;
+ qcom,row-id = <1>;
+ qcom,row-src-en;
+ };
+
+ qcom,kpdbl3 {
+ label = "kpdbl";
+ linux,name = "kpdbl-pwm-3";
+ qcom,mode = "pwm";
+ qcom,pwm-channel = <10>;
+ qcom,pwm-us = <1000>;
+ qcom,id = <7>;
+ qcom,max-current = <20>;
+ qcom,row-id = <2>;
+ qcom,row-src-en;
+ };
+
+ qcom,kpdbl4 {
+ label = "kpdbl";
+ linux,name = "kpdbl-pwm-4";
+ qcom,mode = "pwm";
+ qcom,pwm-channel = <11>;
+ qcom,pwm-us = <1000>;
+ qcom,id = <7>;
+ qcom,max-current = <20>;
+ qcom,row-id = <3>;
+ qcom,row-src-en;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index e5336e6..51cb226 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -13,6 +13,12 @@
/include/ "msm8974-leds.dtsi"
/include/ "msm8974-camera-sensor-liquid.dtsi"
+/ {
+ aliases {
+ serial0 = &blsp1_uart1;
+ };
+};
+
&soc {
serial@f991e000 {
status = "ok";
diff --git a/arch/arm/boot/dts/msm8974-mdss.dtsi b/arch/arm/boot/dts/msm8974-mdss.dtsi
index 52d730d..f8a8fa6 100644
--- a/arch/arm/boot/dts/msm8974-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8974-mdss.dtsi
@@ -189,5 +189,6 @@
qcom,panel-pwm-period = <53>;
status = "disable";
qcom,mdss-fb-map = <&mdss_fb0>;
+ gpio-panel-hpd = <&msmgpio 102 0>;
};
};
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index 8e3b7cc..4ee56ad 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -14,6 +14,12 @@
/include/ "msm8974-camera-sensor-mtp.dtsi"
/include/ "msm8974-leds.dtsi"
+/ {
+ aliases {
+ serial0 = &blsp1_uart1;
+ };
+};
+
&soc {
serial@f991e000 {
status = "ok";
diff --git a/arch/arm/boot/dts/msm8974-v1-pm.dtsi b/arch/arm/boot/dts/msm8974-v1-pm.dtsi
index 288a703..7362b64 100644
--- a/arch/arm/boot/dts/msm8974-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1-pm.dtsi
@@ -129,64 +129,16 @@
50 02 32 50 0f];
};
- qcom,lpm-resources {
- compatible = "qcom,lpm-resources";
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,lpm-resources@0 {
- reg = <0x0>;
- qcom,name = "vdd-dig";
- qcom,type = <0x62706d73>; /* "smpb" */
- qcom,id = <0x02>;
- qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value = <6>; /* Super Turbo */
- };
-
- qcom,lpm-resources@1 {
- reg = <0x1>;
- qcom,name = "vdd-mem";
- qcom,type = <0x62706d73>; /* "smpb" */
- qcom,id = <0x01>;
- qcom,key = <0x7675>; /* "uv" */
- qcom,init-value = <1050000>; /* Super Turbo */
- };
-
- qcom,lpm-resources@2 {
- reg = <0x2>;
- qcom,name = "pxo";
- qcom,type = <0x306b6c63>; /* "clk0" */
- qcom,id = <0x00>;
- qcom,key = <0x62616e45>; /* "Enab" */
- qcom,init-value = "xo_on";
- };
-
- qcom,lpm-resources@3 {
- reg = <0x3>;
- qcom,name = "l2";
- qcom,local-resource-type;
- qcom,init-value = "l2_cache_retention";
- };
- };
-
qcom,lpm-levels {
compatible = "qcom,lpm-levels";
+ qcom,default-l2-state = "l2_cache_retention";
#address-cells = <1>;
#size-cells = <0>;
- qcom,use-qtimer;
-
qcom,lpm-level@0 {
reg = <0x0>;
qcom,mode = "wfi";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
qcom,latency-us = <1>;
qcom,ss-power = <784>;
qcom,energy-overhead = <190000>;
@@ -196,14 +148,7 @@
qcom,lpm-level@1 {
reg = <0x1>;
qcom,mode = "retention";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
qcom,latency-us = <75>;
qcom,ss-power = <735>;
qcom,energy-overhead = <77341>;
@@ -214,14 +159,7 @@
qcom,lpm-level@2 {
reg = <0x2>;
qcom,mode = "standalone_pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
qcom,latency-us = <95>;
qcom,ss-power = <725>;
qcom,energy-overhead = <99500>;
@@ -231,15 +169,8 @@
qcom,lpm-level@3 {
reg = <0x3>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_gdhs";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
- qcom,latency-us = <2000>;
+ qcom,latency-us = <20000>;
qcom,ss-power = <138>;
qcom,energy-overhead = <1208400>;
qcom,time-overhead = <3200>;
@@ -248,79 +179,12 @@
qcom,lpm-level@4 {
reg = <0x4>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
- qcom,latency-us = <3000>;
+ qcom,latency-us = <30000>;
qcom,ss-power = <110>;
qcom,energy-overhead = <1250300>;
qcom,time-overhead = <3500>;
};
-
- qcom,lpm-level@5 {
- reg = <0x5>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_gdhs";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,latency-us = <3000>;
- qcom,ss-power = <68>;
- qcom,energy-overhead = <1350200>;
- qcom,time-overhead = <4000>;
- };
-
- qcom,lpm-level@6 {
- reg = <0x6>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,latency-us = <10300>;
- qcom,ss-power = <63>;
- qcom,energy-overhead = <2128000>;
- qcom,time-overhead = <18200>;
- };
-
- qcom,lpm-level@7 {
- reg = <0x7>;
- qcom,mode= "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <950000>; /* NORMAL */
- qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,latency-us = <18000>;
- qcom,ss-power = <10>;
- qcom,energy-overhead = <3202600>;
- qcom,time-overhead = <27000>;
- };
-
- qcom,lpm-level@8 {
- reg = <0x8>;
- qcom,mode= "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <950000>; /* SVS SOC */
- qcom,vdd-mem-lower-bound = <675000>; /* RETENTION */
- qcom,vdd-dig-upper-bound = <3>; /* SVS SOC */
- qcom,vdd-dig-lower-bound = <1>; /* RETENTION */
- qcom,latency-us = <20000>;
- qcom,ss-power = <2>;
- qcom,energy-overhead = <4252000>;
- qcom,time-overhead = <32000>;
- };
};
qcom,pm-boot {
diff --git a/arch/arm/boot/dts/msm8974-v1.dtsi b/arch/arm/boot/dts/msm8974-v1.dtsi
index cfacac6..86a61cd 100644
--- a/arch/arm/boot/dts/msm8974-v1.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1.dtsi
@@ -65,6 +65,7 @@
/* CoreSight */
&tmc_etr {
qcom,reset-flush-race;
+ qcom,byte-cntr-absent;
};
&stm {
@@ -147,3 +148,20 @@
&usb_otg {
qcom,hsusb-otg-pnoc-errata-fix;
};
+
+&gdsc_venus {
+ qcom,skip-logic-collapse;
+ qcom,retain-periph;
+ qcom,retain-mem;
+};
+
+&gdsc_mdss {
+ qcom,skip-logic-collapse;
+ qcom,retain-periph;
+ qcom,retain-mem;
+};
+
+&gdsc_oxili_gx {
+ qcom,retain-mem;
+ qcom,retain-periph;
+};
diff --git a/arch/arm/boot/dts/msm8974-v2-iommu.dtsi b/arch/arm/boot/dts/msm8974-v2-iommu.dtsi
index 03f7e80..5efa17d 100644
--- a/arch/arm/boot/dts/msm8974-v2-iommu.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2-iommu.dtsi
@@ -42,14 +42,14 @@
0x2034
0x2038>;
- qcom,iommu-bfb-data = <0xFFFFFFFF
- 0xFFFFFFFF
- 0x00000004
- 0x00000008
- 0x00000000
- 0x00013205
- 0x00004000
- 0x00014020
+ qcom,iommu-bfb-data = <0xffffffff
+ 0xffffffff
+ 0x4
+ 0x8
+ 0x0
+ 0x13205
+ 0x4000
+ 0x14020
0x0
0x94
0x114
@@ -116,8 +116,8 @@
0x2010
0x2014>;
- qcom,iommu-bfb-data = <0x3FFF
- 0x00000000
+ qcom,iommu-bfb-data = <0xffff
+ 0x0
0x4
0x4
0x0
@@ -125,8 +125,8 @@
0x10
0x50
0x0
- 0x00002804
- 0x00009614
+ 0x2804
+ 0x9614
0x0
0x0
0x0
@@ -157,14 +157,14 @@
0x201c
0x2020>;
- qcom,iommu-bfb-data = <0xFFFFF
- 0x00000000
- 0x00000004
- 0x00000010
- 0x00000000
- 0x00006800
- 0x00006221
- 0x00016231
+ qcom,iommu-bfb-data = <0xffffffff
+ 0x0
+ 0x4
+ 0x10
+ 0x0
+ 0x6800
+ 0x6221
+ 0x16231
0x0
0x34
0x74
@@ -197,14 +197,14 @@
0x2414
0x2008>;
- qcom,iommu-bfb-data = <0x00000003
+ qcom,iommu-bfb-data = <0x3
0x0
- 0x00000004
- 0x00000010
- 0x00000000
- 0x00000000
- 0x00000000
- 0x00000020
+ 0x4
+ 0x10
+ 0x0
+ 0x0
+ 0x0
+ 0x20
0x0
0x1
0x81
@@ -236,7 +236,7 @@
0x2020>;
qcom,iommu-bfb-data = <0xffffffff
- 0x00000000
+ 0x0
0x4
0x8
0x0
@@ -244,8 +244,8 @@
0x20
0x78
0x0
- 0x00003c08
- 0x0000b41e
+ 0x3c08
+ 0xb41e
0x0
0x0
0x0
diff --git a/arch/arm/boot/dts/msm8974-v2-pm.dtsi b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
index a162bb7..1235c6e 100644
--- a/arch/arm/boot/dts/msm8974-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
@@ -125,64 +125,16 @@
50 02 32 50 0f];
};
- qcom,lpm-resources {
- compatible = "qcom,lpm-resources";
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,lpm-resources@0 {
- reg = <0x0>;
- qcom,name = "vdd-dig";
- qcom,type = <0x62706d73>; /* "smpb" */
- qcom,id = <0x02>;
- qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value = <6>; /* Super Turbo */
- };
-
- qcom,lpm-resources@1 {
- reg = <0x1>;
- qcom,name = "vdd-mem";
- qcom,type = <0x62706d73>; /* "smpb" */
- qcom,id = <0x01>;
- qcom,key = <0x7675>; /* "uv" */
- qcom,init-value = <1050000>; /* Super Turbo */
- };
-
- qcom,lpm-resources@2 {
- reg = <0x2>;
- qcom,name = "pxo";
- qcom,type = <0x306b6c63>; /* "clk0" */
- qcom,id = <0x00>;
- qcom,key = <0x62616e45>; /* "Enab" */
- qcom,init-value = "xo_on";
- };
-
- qcom,lpm-resources@3 {
- reg = <0x3>;
- qcom,name = "l2";
- qcom,local-resource-type;
- qcom,init-value = "l2_cache_retention";
- };
- };
-
qcom,lpm-levels {
compatible = "qcom,lpm-levels";
+ qcom,default-l2-state = "l2_cache_retention";
#address-cells = <1>;
#size-cells = <0>;
- qcom,use-qtimer;
-
qcom,lpm-level@0 {
reg = <0x0>;
qcom,mode = "wfi";
- qcom,xo = "xo_on";
- qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
+ qcom,l2 = "l2_cache_retention";
qcom,latency-us = <1>;
qcom,ss-power = <715>;
qcom,energy-overhead = <17700>;
@@ -192,14 +144,7 @@
qcom,lpm-level@1 {
reg = <0x1>;
qcom,mode = "retention";
- qcom,xo = "xo_on";
- qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
+ qcom,l2 = "l2_cache_retention";
qcom,latency-us = <35>;
qcom,ss-power = <542>;
qcom,energy-overhead = <34920>;
@@ -210,14 +155,7 @@
qcom,lpm-level@2 {
reg = <0x2>;
qcom,mode = "standalone_pc";
- qcom,xo = "xo_on";
- qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
+ qcom,l2 = "l2_cache_retention";
qcom,latency-us = <300>;
qcom,ss-power = <476>;
qcom,energy-overhead = <225300>;
@@ -227,32 +165,19 @@
qcom,lpm-level@3 {
reg = <0x3>;
qcom,mode = "standalone_pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_gdhs";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
- qcom,latency-us = <300>;
+ qcom,latency-us = <320>;
qcom,ss-power = <476>;
qcom,energy-overhead = <225300>;
- qcom,time-overhead = <350>;
+ qcom,time-overhead = <375>;
};
qcom,lpm-level@4 {
reg = <0x4>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_gdhs";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
qcom,gpio-detectable;
- qcom,latency-us = <2817>;
+ qcom,latency-us = <20000>;
qcom,ss-power = <163>;
qcom,energy-overhead = <1577736>;
qcom,time-overhead = <5067>;
@@ -261,64 +186,12 @@
qcom,lpm-level@5 {
reg = <0x5>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
- qcom,latency-us = <3922>;
+ qcom,latency-us = <30000>;
qcom,ss-power = <83>;
qcom,energy-overhead = <2274420>;
qcom,time-overhead = <6605>;
};
-
- qcom,lpm-level@6 {
- reg = <0x6>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,latency-us = <4922>;
- qcom,ss-power = <68>;
- qcom,energy-overhead = <2568180>;
- qcom,time-overhead = <8812>;
- };
-
- qcom,lpm-level@7 {
- reg = <0x7>;
- qcom,mode= "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <950000>; /* NORMAL */
- qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,latency-us = <5890>;
- qcom,ss-power = <60>;
- qcom,energy-overhead = <2675900>;
- qcom,time-overhead = <10140>;
- };
-
- qcom,lpm-level@8 {
- reg = <0x8>;
- qcom,mode= "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <950000>; /* SVS SOC */
- qcom,vdd-mem-lower-bound = <675000>; /* RETENTION */
- qcom,vdd-dig-upper-bound = <3>; /* SVS SOC */
- qcom,vdd-dig-lower-bound = <1>; /* RETENTION */
- qcom,latency-us = <8500>;
- qcom,ss-power = <18>;
- qcom,energy-overhead = <3286600>;
- qcom,time-overhead = <15760>;
- };
};
qcom,pm-boot {
diff --git a/arch/arm/boot/dts/msm8974-v2-cdp.dts b/arch/arm/boot/dts/msm8974-v2.0-1-cdp.dts
similarity index 90%
rename from arch/arm/boot/dts/msm8974-v2-cdp.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-cdp.dts
index f4014aa..875b3fc 100644
--- a/arch/arm/boot/dts/msm8974-v2-cdp.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-cdp.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
/include/ "msm8974-cdp.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 CDP";
+ model = "Qualcomm MSM 8974v2.0/1 CDP";
compatible = "qcom,msm8974-cdp", "qcom,msm8974", "qcom,cdp";
qcom,msm-id = <126 1 0x20000>,
<185 1 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2-fluid.dts b/arch/arm/boot/dts/msm8974-v2.0-1-fluid.dts
similarity index 90%
rename from arch/arm/boot/dts/msm8974-v2-fluid.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-fluid.dts
index 9c9e3c0..236593d 100644
--- a/arch/arm/boot/dts/msm8974-v2-fluid.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-fluid.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
/include/ "msm8974-fluid.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 FLUID";
+ model = "Qualcomm MSM 8974v2.0/1 FLUID";
compatible = "qcom,msm8974-fluid", "qcom,msm8974", "qcom,fluid";
qcom,msm-id = <126 3 0x20000>,
<185 3 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2-liquid.dts b/arch/arm/boot/dts/msm8974-v2.0-1-liquid.dts
similarity index 90%
rename from arch/arm/boot/dts/msm8974-v2-liquid.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-liquid.dts
index ddae6fe..23292f6 100644
--- a/arch/arm/boot/dts/msm8974-v2-liquid.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-liquid.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
/include/ "msm8974-liquid.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 LIQUID";
+ model = "Qualcomm MSM 8974v2.0/1 LIQUID";
compatible = "qcom,msm8974-liquid", "qcom,msm8974", "qcom,liquid";
qcom,msm-id = <126 9 0x20000>,
<185 9 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2-mtp.dts b/arch/arm/boot/dts/msm8974-v2.0-1-mtp.dts
similarity index 91%
rename from arch/arm/boot/dts/msm8974-v2-mtp.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-mtp.dts
index 021b626..de9e6a3 100644
--- a/arch/arm/boot/dts/msm8974-v2-mtp.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-mtp.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
/include/ "msm8974-mtp.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 MTP";
+ model = "Qualcomm MSM 8974v2.0/1 MTP";
compatible = "qcom,msm8974-mtp", "qcom,msm8974", "qcom,mtp";
qcom,msm-id = <126 8 0x20000>,
<185 8 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2.0-1.dtsi b/arch/arm/boot/dts/msm8974-v2.0-1.dtsi
new file mode 100644
index 0000000..1fad868
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-v2.0-1.dtsi
@@ -0,0 +1,36 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. However, device definitions should be placed inside the
+ * msm8974.dtsi file.
+ */
+
+/include/ "msm8974-v2.dtsi"
+
+&gdsc_venus {
+ qcom,skip-logic-collapse;
+ qcom,retain-periph;
+ qcom,retain-mem;
+};
+
+&gdsc_mdss {
+ qcom,skip-logic-collapse;
+ qcom,retain-periph;
+ qcom,retain-mem;
+};
+
+&gdsc_oxili_gx {
+ qcom,retain-mem;
+ qcom,retain-periph;
+};
diff --git a/arch/arm/boot/dts/msm8974-v2-2-cdp.dts b/arch/arm/boot/dts/msm8974-v2.2-cdp.dts
similarity index 91%
rename from arch/arm/boot/dts/msm8974-v2-2-cdp.dts
rename to arch/arm/boot/dts/msm8974-v2.2-cdp.dts
index cb8895f..c1f4a8b 100644
--- a/arch/arm/boot/dts/msm8974-v2-2-cdp.dts
+++ b/arch/arm/boot/dts/msm8974-v2.2-cdp.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2-2.dtsi"
+/include/ "msm8974-v2.2.dtsi"
/include/ "msm8974-cdp.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 CDP";
+ model = "Qualcomm MSM 8974v2.2 CDP";
compatible = "qcom,msm8974-cdp", "qcom,msm8974", "qcom,cdp";
qcom,msm-id = <126 1 0x20002>,
<185 1 0x20002>,
diff --git a/arch/arm/boot/dts/msm8974-v2-2-fluid.dts b/arch/arm/boot/dts/msm8974-v2.2-fluid.dts
similarity index 91%
rename from arch/arm/boot/dts/msm8974-v2-2-fluid.dts
rename to arch/arm/boot/dts/msm8974-v2.2-fluid.dts
index 8e04c18..207db37 100644
--- a/arch/arm/boot/dts/msm8974-v2-2-fluid.dts
+++ b/arch/arm/boot/dts/msm8974-v2.2-fluid.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2-2.dtsi"
+/include/ "msm8974-v2.2.dtsi"
/include/ "msm8974-fluid.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 FLUID";
+ model = "Qualcomm MSM 8974v2.2 FLUID";
compatible = "qcom,msm8974-fluid", "qcom,msm8974", "qcom,fluid";
qcom,msm-id = <126 3 0x20002>,
<185 3 0x20002>,
diff --git a/arch/arm/boot/dts/msm8974-v2-2-liquid.dts b/arch/arm/boot/dts/msm8974-v2.2-liquid.dts
similarity index 90%
rename from arch/arm/boot/dts/msm8974-v2-2-liquid.dts
rename to arch/arm/boot/dts/msm8974-v2.2-liquid.dts
index 7128abe..36e6e9c 100644
--- a/arch/arm/boot/dts/msm8974-v2-2-liquid.dts
+++ b/arch/arm/boot/dts/msm8974-v2.2-liquid.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2-2.dtsi"
+/include/ "msm8974-v2.2.dtsi"
/include/ "msm8974-liquid.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 LIQUID";
+ model = "Qualcomm MSM 8974v2.2 LIQUID";
compatible = "qcom,msm8974-liquid", "qcom,msm8974", "qcom,liquid";
qcom,msm-id = <126 9 0x20002>,
<185 9 0x20002>,
diff --git a/arch/arm/boot/dts/msm8974-v2-2-mtp.dts b/arch/arm/boot/dts/msm8974-v2.2-mtp.dts
similarity index 91%
rename from arch/arm/boot/dts/msm8974-v2-2-mtp.dts
rename to arch/arm/boot/dts/msm8974-v2.2-mtp.dts
index b7e35cf..0593f6e 100644
--- a/arch/arm/boot/dts/msm8974-v2-2-mtp.dts
+++ b/arch/arm/boot/dts/msm8974-v2.2-mtp.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2-2.dtsi"
+/include/ "msm8974-v2.2.dtsi"
/include/ "msm8974-mtp.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 MTP";
+ model = "Qualcomm MSM 8974v2.2 MTP";
compatible = "qcom,msm8974-mtp", "qcom,msm8974", "qcom,mtp";
qcom,msm-id = <126 8 0x20002>,
<185 8 0x20002>,
diff --git a/arch/arm/boot/dts/msm8974-v2-2.dtsi b/arch/arm/boot/dts/msm8974-v2.2.dtsi
similarity index 97%
rename from arch/arm/boot/dts/msm8974-v2-2.dtsi
rename to arch/arm/boot/dts/msm8974-v2.2.dtsi
index 09455b1..0ca021b 100644
--- a/arch/arm/boot/dts/msm8974-v2-2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.2.dtsi
@@ -103,3 +103,8 @@
};
};
};
+
+&gdsc_mdss {
+ qcom,retain-periph;
+ qcom,retain-mem;
+};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 288da99..4360fe0 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -227,7 +227,7 @@
status = "disabled";
};
- serial@f991e000 {
+ blsp1_uart1: serial@f991e000 {
compatible = "qcom,msm-lsuart-v14";
reg = <0xf991e000 0x1000>;
interrupts = <0 108 0>;
@@ -1651,17 +1651,11 @@
&gdsc_venus {
qcom,clock-names = "core_clk";
- qcom,skip-logic-collapse;
- qcom,retain-periph;
- qcom,retain-mem;
status = "ok";
};
&gdsc_mdss {
qcom,clock-names = "core_clk", "lut_clk";
- qcom,skip-logic-collapse;
- qcom,retain-periph;
- qcom,retain-mem;
status = "ok";
};
@@ -1678,8 +1672,6 @@
&gdsc_oxili_gx {
qcom,clock-names = "core_clk";
- qcom,retain-mem;
- qcom,retain-periph;
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm9625-coresight.dtsi b/arch/arm/boot/dts/msm9625-coresight.dtsi
index 60472c8..4a903b7 100644
--- a/arch/arm/boot/dts/msm9625-coresight.dtsi
+++ b/arch/arm/boot/dts/msm9625-coresight.dtsi
@@ -16,6 +16,8 @@
reg = <0xfc322000 0x1000>,
<0xfc37c000 0x3000>;
reg-names = "tmc-base", "bam-base";
+ interrupts = <0 166 0>;
+ interrupt-names = "byte-cntr-irq";
qcom,memory-reservation-type = "EBI1";
qcom,memory-reservation-size = <0x20000>; /* 128K EBI1 buffer */
diff --git a/arch/arm/boot/dts/msm9625-pm.dtsi b/arch/arm/boot/dts/msm9625-pm.dtsi
index e18e143..7989f2b 100644
--- a/arch/arm/boot/dts/msm9625-pm.dtsi
+++ b/arch/arm/boot/dts/msm9625-pm.dtsi
@@ -28,57 +28,16 @@
3e 0f];
};
- qcom,lpm-resources {
- compatible = "qcom,lpm-resources";
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,lpm-resources@0 {
- reg = <0x0>;
- qcom,name = "vdd-dig";
- qcom,type = <0x616F646C>; /* "ldoa" */
- qcom,id = <0x0A>;
- qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value = <5>; /* Super Turbo */
- };
-
- qcom,lpm-resources@1 {
- reg = <0x1>;
- qcom,name = "vdd-mem";
- qcom,type = <0x616F646C>; /* "ldoa" */
- qcom,id = <0x0C>;
- qcom,key = <0x7675>; /* "uv" */
- qcom,init-value = <1050000>; /* Super Turbo */
- };
-
- qcom,lpm-resources@2 {
- reg = <0x2>;
- qcom,name = "pxo";
- qcom,type = <0x306b6c63>; /* "clk0" */
- qcom,id = <0x00>;
- qcom,key = <0x62616e45>; /* "Enab" */
- qcom,init-value = "xo_on";
- };
- };
-
qcom,lpm-levels {
compatible = "qcom,lpm-levels";
+ qcom,no-l2-saw;
#address-cells = <1>;
#size-cells = <0>;
- qcom,use-qtimer;
-
qcom,lpm-level@0 {
reg = <0x0>;
qcom,mode = "wfi";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
qcom,latency-us = <100>;
qcom,ss-power = <8000>;
qcom,energy-overhead = <100000>;
@@ -88,14 +47,7 @@
qcom,lpm-level@1 {
reg = <0x1>;
qcom,mode = "standalone_pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_active";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
qcom,latency-us = <2000>;
qcom,ss-power = <5000>;
qcom,energy-overhead = <60100000>;
@@ -105,15 +57,8 @@
qcom,lpm-level@2 {
reg = <0x2>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_gdhs";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
- qcom,latency-us = <3500>;
+ qcom,latency-us = <20000>;
qcom,ss-power = <5000>;
qcom,energy-overhead = <60350000>;
qcom,time-overhead = <6300>;
@@ -122,66 +67,12 @@
qcom,lpm-level@3 {
reg = <0x3>;
qcom,mode = "pc";
- qcom,xo = "xo_on";
qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,irqs-detectable;
- qcom,gpio-detectable;
- qcom,latency-us = <4500>;
+ qcom,latency-us = <30000>;
qcom,ss-power = <5000>;
qcom,energy-overhead = <60350000>;
qcom,time-overhead = <7300>;
};
-
- qcom,lpm-level@4 {
- reg = <0x4>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom,latency-us = <6800>;
- qcom,ss-power = <2000>;
- qcom,energy-overhead = <71850000>;
- qcom,time-overhead = <13300>;
- };
-
- qcom,lpm-level@5 {
- reg = <0x5>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,irqs-detectable;
- qcom,latency-us = <8000>;
- qcom,ss-power = <1800>;
- qcom,energy-overhead = <71950000>;
- qcom,time-overhead = <15300>;
- };
-
- qcom,lpm-level@6 {
- reg = <0x6>;
- qcom,mode = "pc";
- qcom,xo = "xo_off";
- qcom,l2 = "l2_cache_pc";
- qcom,vdd-mem-upper-bound = <950000>; /* SVS SOC */
- qcom,vdd-mem-lower-bound = <675000>; /* RETENTION */
- qcom,vdd-dig-upper-bound = <3>; /* SVS SOC */
- qcom,vdd-dig-lower-bound = <1>; /* RETENTION */
- qcom,latency-us = <9800>;
- qcom,ss-power = <0>;
- qcom,energy-overhead = <76350000>;
- qcom,time-overhead = <28300>;
- };
};
qcom,pm-boot {
diff --git a/arch/arm/boot/dts/msm9625-regulator.dtsi b/arch/arm/boot/dts/msm9625-regulator.dtsi
index ee48b7f..eb56d1c 100644
--- a/arch/arm/boot/dts/msm9625-regulator.dtsi
+++ b/arch/arm/boot/dts/msm9625-regulator.dtsi
@@ -194,6 +194,15 @@
qcom,use-voltage-corner;
status = "okay";
};
+ pm8019_l10_floor_corner: regulator-l10-floor-corner {
+ compatible = "qcom,rpm-regulator-smd";
+ regulator-name = "8019_l10_floor_corner";
+ qcom,set = <3>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-floor-corner;
+ qcom,always-send-voltage;
+ };
};
rpm-regulator-ldoa11 {
diff --git a/arch/arm/boot/dts/msm9625-v1.dtsi b/arch/arm/boot/dts/msm9625-v1.dtsi
index b238ba5..daf774b 100644
--- a/arch/arm/boot/dts/msm9625-v1.dtsi
+++ b/arch/arm/boot/dts/msm9625-v1.dtsi
@@ -48,6 +48,7 @@
/* CoreSight */
&tmc_etr {
qcom,reset-flush-race;
+ qcom,byte-cntr-absent;
};
&stm {
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 5520401..59d7ba0 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -529,10 +529,10 @@
qcom,freq-control-mask = <0x0>;
qcom,vdd-restriction-temp = <5>;
qcom,vdd-restriction-temp-hysteresis = <10>;
- vdd_dig-supply = <&pm8019_l10_corner>;
+ vdd-dig-supply = <&pm8019_l10_floor_corner>;
qcom,vdd-dig-rstr{
- qcom,vdd-rstr-reg = "vdd_dig";
+ qcom,vdd-rstr-reg = "vdd-dig";
qcom,levels = <5 7 7>; /* Nominal, Super Turbo, Super Turbo */
qcom,min-level = <1>; /* No Request */
};
diff --git a/arch/arm/boot/dts/msmkrypton.dtsi b/arch/arm/boot/dts/msmkrypton.dtsi
index dd5b7a0..cdaf964 100644
--- a/arch/arm/boot/dts/msmkrypton.dtsi
+++ b/arch/arm/boot/dts/msmkrypton.dtsi
@@ -210,4 +210,11 @@
qcom,irq-no-suspend;
};
};
+
+ rpm_bus: qcom,rpm-smd {
+ compatible = "qcom,rpm-smd";
+ rpm-channel-name = "rpm_requests";
+ rpm-channel-type = <15>; /* SMD_APPS_RPM */
+ rpm-standalone = <1>;
+ };
};
diff --git a/arch/arm/boot/dts/msmsamarium.dtsi b/arch/arm/boot/dts/msmsamarium.dtsi
index fdf2680..251bef2 100644
--- a/arch/arm/boot/dts/msmsamarium.dtsi
+++ b/arch/arm/boot/dts/msmsamarium.dtsi
@@ -217,4 +217,21 @@
interrupts = <0 29 1>;
qcom,rx-ring-size = <64>;
};
+
+ spmi_bus: qcom,spmi@fc4c0000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xfc4cf000 0x1000>,
+ <0xfc4cb000 0x1000>,
+ <0xfc4ca000 0x1000>;
+ reg-names = "core", "intr", "cnfg";
+ interrupts = <0 190 0>;
+ qcom,pmic-arb-channel = <0>;
+ qcom,pmic-arb-ee = <0>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ qcom,not-wakeup; /* Needed until MPM is fully configured. */
+ };
};
diff --git a/arch/arm/common/cpaccess.c b/arch/arm/common/cpaccess.c
index 3572e5a..cca0b39 100644
--- a/arch/arm/common/cpaccess.c
+++ b/arch/arm/common/cpaccess.c
@@ -385,6 +385,12 @@
}
sema_init(&cp_sem, 1);
+
+ /*
+ * Make the target instruction writeable when built as a module
+ */
+ set_memory_rw((unsigned long)&cpaccess_dummy_inst & PAGE_MASK, 1);
+
return 0;
exit1:
diff --git a/arch/arm/configs/apq8084_defconfig b/arch/arm/configs/apq8084_defconfig
index 2965607..4315d3f 100644
--- a/arch/arm/configs/apq8084_defconfig
+++ b/arch/arm/configs/apq8084_defconfig
@@ -52,6 +52,8 @@
CONFIG_MSM_RPM_REGULATOR_SMD=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_SYSMON_COMM=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_VENUS=y
CONFIG_MSM_DIRECT_SCLK_ACCESS=y
CONFIG_MSM_WATCHDOG_V2=y
CONFIG_MSM_MEMORY_DUMP=y
@@ -404,3 +406,5 @@
CONFIG_CORESIGHT_FUNNEL=y
CONFIG_CORESIGHT_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_ETM=y
+CONFIG_USB_BAM=y
diff --git a/arch/arm/configs/msm8226-perf_defconfig b/arch/arm/configs/msm8226-perf_defconfig
index 7615eb2..31d133a 100644
--- a/arch/arm/configs/msm8226-perf_defconfig
+++ b/arch/arm/configs/msm8226-perf_defconfig
@@ -223,6 +223,7 @@
CONFIG_DM_CRYPT=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+CONFIG_KS8851=y
# CONFIG_MSM_RMNET is not set
CONFIG_MSM_RMNET_BAM=y
CONFIG_PPP=y
@@ -290,6 +291,7 @@
CONFIG_OV8825=y
CONFIG_OV12830=y
CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_EEPROM=y
CONFIG_MSM_CPP=y
CONFIG_MSM_CCI=y
CONFIG_MSM_CSI30_HEADER=y
diff --git a/arch/arm/configs/msm8226_defconfig b/arch/arm/configs/msm8226_defconfig
index 7b2d38b..24ac0d8 100644
--- a/arch/arm/configs/msm8226_defconfig
+++ b/arch/arm/configs/msm8226_defconfig
@@ -225,6 +225,7 @@
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_TUN=y
+CONFIG_KS8851=y
# CONFIG_MSM_RMNET is not set
CONFIG_MSM_RMNET_BAM=y
CONFIG_PPP=y
@@ -294,6 +295,7 @@
CONFIG_OV8825=y
CONFIG_OV12830=y
CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_EEPROM=y
CONFIG_MSM_CPP=y
CONFIG_MSM_CCI=y
CONFIG_MSM_CSI30_HEADER=y
@@ -458,6 +460,7 @@
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_DYNAMIC_DEBUG=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index 2a8bc91..a5f0704 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -71,7 +71,6 @@
CONFIG_MSM_RTB_SEPARATE_CPUS=y
CONFIG_MSM_ENABLE_WDOG_DEBUG_CONTROL=y
CONFIG_MSM_BOOT_STATS=y
-CONFIG_MSM_XPU_ERR_FATAL=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
@@ -422,6 +421,7 @@
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_DYNAMIC_DEBUG=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_KEYS=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index a26d8e6..f2f6558 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -81,7 +81,6 @@
CONFIG_MSM_L1_ERR_LOG=y
CONFIG_MSM_L2_ERP_2BIT_PANIC=y
CONFIG_MSM_ENABLE_WDOG_DEBUG_CONTROL=y
-CONFIG_MSM_UARTDM_Core_v14=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_STRICT_MEMORY_RWX=y
CONFIG_NO_HZ=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 8447dd55..71742a5 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -86,7 +86,6 @@
CONFIG_MSM_CACHE_DUMP=y
CONFIG_MSM_CACHE_DUMP_ON_PANIC=y
CONFIG_MSM_ENABLE_WDOG_DEBUG_CONTROL=y
-CONFIG_MSM_UARTDM_Core_v14=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_XPU_ERR_FATAL=y
CONFIG_STRICT_MEMORY_RWX=y
@@ -499,6 +498,7 @@
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_CPU_FREQ_SWITCH_PROFILER=y
CONFIG_DYNAMIC_DEBUG=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 10e580b..76bd74c 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -282,9 +282,9 @@
select QMI_ENCDEC
select DONT_MAP_HOLE_AFTER_MEMBANK0
select MSM_ULTRASOUND_B
- select MSM_LPM_TEST
select MSM_RPM_LOG
select ARCH_WANT_KMAP_ATOMIC_FLUSH
+ select KRAIT_REGULATOR
config ARCH_APQ8084
bool "APQ8084"
@@ -307,6 +307,10 @@
select MEMORY_HOLE_CARVEOUT
select DONT_MAP_HOLE_AFTER_MEMBANK0
select QMI_ENCDEC
+ select MSM_SPM_V2
+ select MSM_L2_SPM
+ select MSM_PM8X60 if PM
+ select MSM_RPM_SMD
config ARCH_MPQ8092
bool "MPQ8092"
@@ -430,6 +434,7 @@
select MAY_HAVE_SPARSE_IRQ
select SPARSE_IRQ
select MEMORY_HOLE_CARVEOUT
+ select QMI_ENCDEC
config ARCH_MSM8610
bool "MSM8610"
@@ -3056,4 +3061,11 @@
Support the wallclk directory in sysfs filesystem to enable the
wall clock simulation and read the current SFN.
+config KRAIT_REGULATOR
+ bool "Support Kraits powered via ganged regulators in the pmic"
+ help
+ Certain MSMs have the Krait CPUs powered via a single supply
+ line from the PMIC. This supply line is powered by multiple
+ regulators running in ganged mode inside the PMIC. Enable
+ this option to support such configurations.
endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 56fb625..cd71104 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -296,7 +296,7 @@
obj-$(CONFIG_ARCH_MSM8610) += gdsc.o
obj-$(CONFIG_ARCH_MPQ8092) += gdsc.o
obj-$(CONFIG_ARCH_APQ8084) += gdsc.o
-obj-$(CONFIG_ARCH_MSM8974) += krait-regulator.o
+obj-$(CONFIG_KRAIT_REGULATOR) += krait-regulator.o
obj-$(CONFIG_ARCH_MSMKRYPTON) += board-krypton.o board-krypton-gpiomux.o
obj-$(CONFIG_ARCH_MSMSAMARIUM) += board-samarium.o board-samarium-gpiomux.o
obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
@@ -331,7 +331,7 @@
obj-$(CONFIG_MSM_SDIO_SMEM) += sdio_smem.o
obj-$(CONFIG_MSM_RPM) += rpm.o rpm_resources.o
obj-$(CONFIG_MSM_LPM_TEST) += test-lpm.o
-obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o lpm_levels.o lpm_resources.o
+obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o lpm_levels.o
obj-$(CONFIG_MSM_MPM_OF) += mpm-of.o
obj-$(CONFIG_MSM_MPM) += mpm.o
obj-$(CONFIG_MSM_RPM_STATS_LOG) += rpm_stats.o rpm_master_stat.o
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 3505afe..72472f9 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -53,13 +53,13 @@
dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v1-mtp.dtb
dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v1-rumi.dtb
dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v1-sim.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-cdp.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-fluid.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-liquid.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-mtp.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2-cdp.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2-liquid.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2-dragonboard.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2.0-1-cdp.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2.0-1-fluid.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2.0-1-liquid.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2.0-1-mtp.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2.0-1-cdp.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2.0-1-liquid.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2.0-1-dragonboard.dtb
# APQ8084
zreladdr-$(CONFIG_ARCH_APQ8084) := 0x00008000
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index dd6c9ec..094765f 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,7 @@
#include <mach/msm_bus_board.h>
#include <mach/board.h>
#include <mach/gpiomux.h>
+#include <mach/socinfo.h>
#include "devices.h"
#include "board-8064.h"
#include "board-storage-common-a.h"
@@ -238,6 +239,10 @@
400000, 24000000, 48000000, 96000000
};
+static unsigned int sdc1_sup_clk_rates_all[] = {
+ 400000, 24000000, 48000000, 96000000, 192000000
+};
+
static struct mmc_platform_data sdc1_data = {
.ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
#ifdef CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT
@@ -331,8 +336,16 @@
void __init apq8064_init_mmc(void)
{
- if (apq8064_sdc1_pdata)
+ if (apq8064_sdc1_pdata) {
+ /* 8064 v2 supports up to 200MHz clock on SDC1 slot */
+ if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 2) {
+ apq8064_sdc1_pdata->sup_clk_table =
+ sdc1_sup_clk_rates_all;
+ apq8064_sdc1_pdata->sup_clk_cnt =
+ ARRAY_SIZE(sdc1_sup_clk_rates_all);
+ }
apq8064_add_sdcc(1, apq8064_sdc1_pdata);
+ }
if (apq8064_sdc2_pdata)
apq8064_add_sdcc(2, apq8064_sdc2_pdata);
diff --git a/arch/arm/mach-msm/board-8084.c b/arch/arm/mach-msm/board-8084.c
index 2a6bbb7..de6b50c 100644
--- a/arch/arm/mach-msm/board-8084.c
+++ b/arch/arm/mach-msm/board-8084.c
@@ -29,6 +29,9 @@
#include <mach/socinfo.h>
#include <mach/clk-provider.h>
#include <mach/msm_smem.h>
+#include <mach/rpm-smd.h>
+#include <mach/rpm-regulator-smd.h>
+#include "spm.h"
#include "board-dt.h"
#include "clock.h"
#include "devices.h"
@@ -88,6 +91,9 @@
msm_smem_init();
msm_init_modem_notifier_list();
msm_smd_init();
+ msm_rpm_driver_init();
+ rpm_regulator_smd_driver_init();
+ msm_spm_device_init();
msm_clock_init(&msm8084_clock_init_data);
tsens_tm_init_driver();
}
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 0d62b7a..aff2d75 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -51,7 +51,6 @@
#include "platsmp.h"
#include "spm.h"
#include "pm.h"
-#include "lpm_resources.h"
#include "modem_notifier.h"
static struct memtype_reserve msm8226_reserve_table[] __initdata = {
@@ -116,7 +115,6 @@
msm_init_modem_notifier_list();
msm_smd_init();
msm_rpm_driver_init();
- msm_lpmrs_module_init();
msm_spm_device_init();
msm_pm_sleep_status_init();
rpm_regulator_smd_driver_init();
diff --git a/arch/arm/mach-msm/board-8610-gpiomux.c b/arch/arm/mach-msm/board-8610-gpiomux.c
index f8ca143..01f1468 100644
--- a/arch/arm/mach-msm/board-8610-gpiomux.c
+++ b/arch/arm/mach-msm/board-8610-gpiomux.c
@@ -531,7 +531,8 @@
}
msm_gpiomux_install(wcnss_5wire_interface,
ARRAY_SIZE(wcnss_5wire_interface));
- msm_gpiomux_install(msm_lcd_configs, ARRAY_SIZE(msm_lcd_configs));
+ msm_gpiomux_install_nowrite(msm_lcd_configs,
+ ARRAY_SIZE(msm_lcd_configs));
msm_gpiomux_install(msm_keypad_configs,
ARRAY_SIZE(msm_keypad_configs));
msm_gpiomux_install(sd_card_det, ARRAY_SIZE(sd_card_det));
diff --git a/arch/arm/mach-msm/board-8610.c b/arch/arm/mach-msm/board-8610.c
index c6c9d14..d175bb4 100644
--- a/arch/arm/mach-msm/board-8610.c
+++ b/arch/arm/mach-msm/board-8610.c
@@ -52,7 +52,6 @@
#include "platsmp.h"
#include "spm.h"
#include "pm.h"
-#include "lpm_resources.h"
#include "modem_notifier.h"
static struct memtype_reserve msm8610_reserve_table[] __initdata = {
@@ -107,7 +106,6 @@
msm_init_modem_notifier_list();
msm_smd_init();
msm_rpm_driver_init();
- msm_lpmrs_module_init();
msm_spm_device_init();
msm_pm_sleep_status_init();
rpm_regulator_smd_driver_init();
diff --git a/arch/arm/mach-msm/board-8930-gpiomux.c b/arch/arm/mach-msm/board-8930-gpiomux.c
index 4298d96..62e8122 100644
--- a/arch/arm/mach-msm/board-8930-gpiomux.c
+++ b/arch/arm/mach-msm/board-8930-gpiomux.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -726,6 +726,96 @@
}
};
+#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
+static struct gpiomux_setting sdcc2_clk_actv_cfg = {
+ .func = GPIOMUX_FUNC_2,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting sdcc2_cmd_data_0_3_actv_cfg = {
+ .func = GPIOMUX_FUNC_2,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting sdcc2_suspend_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting sdcc2_data_1_suspend_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+/**
+ * DAT_0 to DAT_3 lines (gpio 89 - 92) are shared with ethernet
+ * CMD line (gpio 97) is shared with USB
+ * CLK line (gpio 98) is shared with battery alarm in
+ */
+static struct msm_gpiomux_config msm8960_sdcc2_configs[] __initdata = {
+ {
+ /* DATA_3 */
+ .gpio = 92,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &sdcc2_cmd_data_0_3_actv_cfg,
+ [GPIOMUX_SUSPENDED] = &sdcc2_suspend_cfg,
+ },
+ },
+ {
+ /* DATA_2 */
+ .gpio = 91,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &sdcc2_cmd_data_0_3_actv_cfg,
+ [GPIOMUX_SUSPENDED] = &sdcc2_suspend_cfg,
+ },
+ },
+ {
+ /* DATA_1 */
+ .gpio = 90,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &sdcc2_cmd_data_0_3_actv_cfg,
+ [GPIOMUX_SUSPENDED] = &sdcc2_data_1_suspend_cfg,
+ },
+ },
+ {
+ /* DATA_0 */
+ .gpio = 89,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &sdcc2_cmd_data_0_3_actv_cfg,
+ [GPIOMUX_SUSPENDED] = &sdcc2_suspend_cfg,
+ },
+ },
+ {
+ /* CMD */
+ .gpio = 97,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &sdcc2_cmd_data_0_3_actv_cfg,
+ [GPIOMUX_SUSPENDED] = &sdcc2_suspend_cfg,
+ },
+ },
+ {
+ /* CLK */
+ .gpio = 98,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &sdcc2_clk_actv_cfg,
+ [GPIOMUX_SUSPENDED] = &sdcc2_suspend_cfg,
+ },
+ },
+};
+
+static void msm_gpiomux_sdc2_install(void)
+{
+ msm_gpiomux_install(msm8960_sdcc2_configs,
+ ARRAY_SIZE(msm8960_sdcc2_configs));
+}
+#else
+static void msm_gpiomux_sdc2_install(void) {}
+#endif /* CONFIG_MMC_MSM_SDC2_SUPPORT */
+
int __init msm8930_init_gpiomux(void)
{
int rc = msm_gpiomux_init(NR_GPIO_IRQS);
@@ -802,5 +892,7 @@
msm_gpiomux_install(msm_sitar_config, ARRAY_SIZE(msm_sitar_config));
+ msm_gpiomux_sdc2_install();
+
return 0;
}
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8038.c b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
index 8ed93ea..8e04003 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8038.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
@@ -99,6 +99,7 @@
REGULATOR_SUPPLY("mhl_iovcc18", "0-0039"),
REGULATOR_SUPPLY("vdd-io", "spi0.0"),
REGULATOR_SUPPLY("vdd-phy", "spi0.0"),
+ REGULATOR_SUPPLY("sdc_vdd", "msm_sdcc.2"),
};
VREG_CONSUMERS(L12) = {
REGULATOR_SUPPLY("8038_l12", NULL),
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8917.c b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
index cdc419f..e63fbdd 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8917.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
@@ -81,6 +81,7 @@
REGULATOR_SUPPLY("cam_vana", "4-006c"),
REGULATOR_SUPPLY("cam_vana", "4-0048"),
REGULATOR_SUPPLY("cam_vana", "4-0020"),
+ REGULATOR_SUPPLY("sdc_vdd", "msm_sdcc.2"),
};
VREG_CONSUMERS(L12) = {
REGULATOR_SUPPLY("8917_l12", NULL),
diff --git a/arch/arm/mach-msm/board-8930-storage.c b/arch/arm/mach-msm/board-8930-storage.c
index d045040..fec87ae 100644
--- a/arch/arm/mach-msm/board-8930-storage.c
+++ b/arch/arm/mach-msm/board-8930-storage.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,6 +49,13 @@
.lpm_uA = 9000,
.hpm_uA = 200000, /* 200mA */
},
+ /* SDCC2 : SDIO slot connected */
+ [SDCC2] = {
+ .name = "sdc_vdd",
+ .high_vol_level = 1800000,
+ .low_vol_level = 1800000,
+ .hpm_uA = 200000, /* 200mA */
+ },
/* SDCC3 : External card slot connected */
[SDCC3] = {
.name = "sdc_vdd",
@@ -94,7 +101,7 @@
* during sleep.
*/
.lpm_uA = 2000,
- }
+ },
};
static struct msm_mmc_slot_reg_data mmc_slot_vreg_data[MAX_SDCC_CONTROLLER] = {
@@ -103,6 +110,10 @@
.vdd_data = &mmc_vdd_reg_data[SDCC1],
.vdd_io_data = &mmc_vdd_io_reg_data[SDCC1],
},
+ /* SDCC2 : SDIO card slot connected */
+ [SDCC2] = {
+ .vdd_data = &mmc_vdd_reg_data[SDCC2],
+ },
/* SDCC3 : External card slot connected */
[SDCC3] = {
.vdd_data = &mmc_vdd_reg_data[SDCC3],
@@ -170,6 +181,15 @@
{TLMM_PULL_SDC3_DATA, GPIO_CFG_PULL_UP}
};
+static struct msm_mmc_gpio sdc2_gpio[] = {
+ {92, "sdc2_dat_3"},
+ {91, "sdc2_dat_2"},
+ {90, "sdc2_dat_1"},
+ {89, "sdc2_dat_0"},
+ {97, "sdc2_cmd"},
+ {98, "sdc2_clk"}
+};
+
static struct msm_mmc_pad_pull_data mmc_pad_pull_data[MAX_SDCC_CONTROLLER] = {
[SDCC1] = {
.on = sdc1_pad_pull_on_cfg,
@@ -207,10 +227,21 @@
},
};
+static struct msm_mmc_gpio_data mmc_gpio_data[MAX_SDCC_CONTROLLER] = {
+ [SDCC2] = {
+ .gpio = sdc2_gpio,
+ .size = ARRAY_SIZE(sdc2_gpio),
+ },
+};
+
static struct msm_mmc_pin_data mmc_slot_pin_data[MAX_SDCC_CONTROLLER] = {
[SDCC1] = {
.pad_data = &mmc_pad_data[SDCC1],
},
+ [SDCC2] = {
+ .is_gpio = true,
+ .gpio_data = &mmc_gpio_data[SDCC2],
+ },
[SDCC3] = {
.pad_data = &mmc_pad_data[SDCC3],
},
@@ -248,6 +279,23 @@
};
#endif
+#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
+static unsigned int sdc2_sup_clk_rates[] = {
+ 400000, 24000000, 48000000
+};
+
+static struct mmc_platform_data msm8960_sdc2_data = {
+ .ocr_mask = MMC_VDD_165_195 | MMC_VDD_27_28 | MMC_VDD_28_29,
+ .mmc_bus_width = MMC_CAP_4_BIT_DATA,
+ .sup_clk_table = sdc2_sup_clk_rates,
+ .sup_clk_cnt = ARRAY_SIZE(sdc2_sup_clk_rates),
+ .vreg_data = &mmc_slot_vreg_data[SDCC2],
+ .pin_data = &mmc_slot_pin_data[SDCC2],
+ .sdiowakeup_irq = MSM_GPIO_TO_INT(90),
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
+};
+#endif
+
#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
static struct mmc_platform_data msm8960_sdc3_data = {
.ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
@@ -300,6 +348,10 @@
/* SDC1 : eMMC card connected */
msm_add_sdcc(1, &msm8960_sdc1_data);
#endif
+#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
+ /* SDC2: SDIO slot for WLAN */
+ msm_add_sdcc(2, &msm8960_sdc2_data);
+#endif
#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
/*
* All 8930 platform boards using the 1.2 SoC have been reworked so that
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 68af757..80a957f 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -45,8 +45,8 @@
#include "clock.h"
#include "devices.h"
#include "spm.h"
+#include "pm.h"
#include "modem_notifier.h"
-#include "lpm_resources.h"
#include "platsmp.h"
@@ -96,7 +96,6 @@
msm_init_modem_notifier_list();
msm_smd_init();
msm_rpm_driver_init();
- msm_lpmrs_module_init();
msm_pm_sleep_status_init();
rpm_regulator_smd_driver_init();
msm_spm_device_init();
diff --git a/arch/arm/mach-msm/board-9625.c b/arch/arm/mach-msm/board-9625.c
index 36ad755..058789a 100644
--- a/arch/arm/mach-msm/board-9625.c
+++ b/arch/arm/mach-msm/board-9625.c
@@ -42,7 +42,6 @@
#include <mach/msm_smem.h>
#include "clock.h"
#include "modem_notifier.h"
-#include "lpm_resources.h"
#include "spm.h"
#define MSM_KERNEL_EBI_SIZE 0x51000
@@ -240,7 +239,6 @@
msm_init_modem_notifier_list();
msm_smd_init();
msm_rpm_driver_init();
- msm_lpmrs_module_init();
rpm_regulator_smd_driver_init();
msm_spm_device_init();
msm_clock_init(&msm9625_clock_init_data);
diff --git a/arch/arm/mach-msm/board-fsm9900-gpiomux.c b/arch/arm/mach-msm/board-fsm9900-gpiomux.c
index dede706..990aefc 100644
--- a/arch/arm/mach-msm/board-fsm9900-gpiomux.c
+++ b/arch/arm/mach-msm/board-fsm9900-gpiomux.c
@@ -17,6 +17,370 @@
#include <mach/board.h>
#include <mach/gpiomux.h>
+static struct gpiomux_setting blsp_uart_no_pull_config = {
+ .func = GPIOMUX_FUNC_2,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting blsp_uart_pull_up_config = {
+ .func = GPIOMUX_FUNC_2,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting blsp_i2c_config = {
+ .func = GPIOMUX_FUNC_3,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_blsp_configs[] __initdata = {
+ {
+ .gpio = 0, /* BLSP UART1 TX */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_uart_no_pull_config,
+ },
+ },
+ {
+ .gpio = 1, /* BLSP UART1 RX */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_uart_pull_up_config,
+ },
+ },
+ {
+ .gpio = 2, /* BLSP I2C SDA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+ {
+ .gpio = 3, /* BLSP I2C SCL */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+ {
+ .gpio = 6, /* BLSP I2C SDA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+ {
+ .gpio = 7, /* BLSP I2C SCL */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+ {
+ .gpio = 36, /* BLSP UART10 TX */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_uart_no_pull_config,
+ },
+ },
+ {
+ .gpio = 37, /* BLSP UART10 RX */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_uart_pull_up_config,
+ },
+ },
+ {
+ .gpio = 38, /* BLSP I2C10 SDA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+ {
+ .gpio = 39, /* BLSP I2C10 SCL */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+
+};
+
+static struct gpiomux_setting geni_func4_config = {
+ .func = GPIOMUX_FUNC_4,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting geni_func5_config = {
+ .func = GPIOMUX_FUNC_5,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct msm_gpiomux_config fsm_geni_configs[] __initdata = {
+ {
+ .gpio = 8, /* GENI7 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+ {
+ .gpio = 9, /* GENI1 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+ {
+ .gpio = 10, /* GENI2 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+ {
+ .gpio = 11, /* GENI7 CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+ {
+ .gpio = 20, /* GENI3 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func5_config,
+ },
+ },
+ {
+ .gpio = 21, /* GENI4 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func5_config,
+ },
+ },
+ {
+ .gpio = 22, /* GENI6 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func5_config,
+ },
+ },
+ {
+ .gpio = 23, /* GENI6 CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func5_config,
+ },
+ },
+ {
+ .gpio = 30, /* GENI5 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+ {
+ .gpio = 31, /* GENI5 CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+
+};
+
+static struct gpiomux_setting dan_spi_func4_config = {
+ .func = GPIOMUX_FUNC_4,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting dan_spi_func1_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_dan_spi_configs[] __initdata = {
+ {
+ .gpio = 12, /* BLSP DAN0 SPI_MOSI */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 13, /* BLSP DAN0 SPI_MISO */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 14, /* BLSP DAN0 SPI_CS */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 15, /* BLSP DAN0 SPI_CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 16, /* BLSP DAN1 SPI_MOSI */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 17, /* BLSP DAN1 SPI_MISO */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 18, /* BLSP DAN1 SPI_CS */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 19, /* BLSP DAN1 SPI_CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 81, /* BLSP DAN1 SPI_CS0 */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func1_config,
+ },
+ },
+ {
+ .gpio = 82, /* BLSP DAN1 SPI_CS1 */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func1_config,
+ },
+ },
+};
+
+static struct gpiomux_setting uim_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_4MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_uim_configs[] __initdata = {
+ {
+ .gpio = 24, /* UIM_DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &uim_config,
+ },
+ },
+ {
+ .gpio = 25, /* UIM_CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &uim_config,
+ },
+ },
+ {
+ .gpio = 26, /* UIM_RESET */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &uim_config,
+ },
+ },
+ {
+ .gpio = 27, /* UIM_PRESENT */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &uim_config,
+ },
+ },
+};
+
+static struct gpiomux_setting pcie_config = {
+ .func = GPIOMUX_FUNC_4,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_pcie_configs[] __initdata = {
+ {
+ .gpio = 28, /* BLSP PCIE1_CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &pcie_config,
+ },
+ },
+ {
+ .gpio = 32, /* BLSP PCIE0_CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &pcie_config,
+ },
+ },
+};
+
+static struct gpiomux_setting pps_out_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_4MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting pps_in_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting gps_clk_in_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting gps_nav_tlmm_blank_config = {
+ .func = GPIOMUX_FUNC_2,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+static struct msm_gpiomux_config fsm_gps_configs[] __initdata = {
+ {
+ .gpio = 40, /* GPS_PPS_OUT */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &pps_out_config,
+ },
+ },
+ {
+ .gpio = 41, /* GPS_PPS_IN */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &pps_in_config,
+ },
+ },
+ {
+ .gpio = 43, /* GPS_CLK_IN */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &gps_clk_in_config,
+ },
+ },
+ {
+ .gpio = 120, /* GPS_NAV_TLMM_BLANK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &gps_nav_tlmm_blank_config,
+ },
+ },
+};
+
+static struct gpiomux_setting sd_detect_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting sd_wp_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_sd_configs[] __initdata = {
+ {
+ .gpio = 42, /* SD_CARD_DET */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &sd_detect_config,
+ },
+ },
+ {
+ .gpio = 122, /* BLSP SD WRITE PROTECT */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &sd_wp_config,
+ },
+ },
+};
+
void __init fsm9900_init_gpiomux(void)
{
int rc;
@@ -26,4 +390,13 @@
pr_err("%s failed %d\n", __func__, rc);
return;
}
+
+ msm_gpiomux_install(fsm_blsp_configs, ARRAY_SIZE(fsm_blsp_configs));
+ msm_gpiomux_install(fsm_geni_configs, ARRAY_SIZE(fsm_geni_configs));
+ msm_gpiomux_install(fsm_dan_spi_configs,
+ ARRAY_SIZE(fsm_dan_spi_configs));
+ msm_gpiomux_install(fsm_uim_configs, ARRAY_SIZE(fsm_uim_configs));
+ msm_gpiomux_install(fsm_pcie_configs, ARRAY_SIZE(fsm_pcie_configs));
+ msm_gpiomux_install(fsm_gps_configs, ARRAY_SIZE(fsm_gps_configs));
+ msm_gpiomux_install(fsm_sd_configs, ARRAY_SIZE(fsm_sd_configs));
}
diff --git a/arch/arm/mach-msm/board-krypton.c b/arch/arm/mach-msm/board-krypton.c
index 13d7e8b..8ceccf4 100644
--- a/arch/arm/mach-msm/board-krypton.c
+++ b/arch/arm/mach-msm/board-krypton.c
@@ -24,6 +24,7 @@
#include <mach/msm_iomap.h>
#include <mach/msm_memtypes.h>
#include <mach/msm_smd.h>
+#include <mach/rpm-smd.h>
#include <mach/restart.h>
#include <mach/socinfo.h>
#include <mach/clk-provider.h>
@@ -64,6 +65,7 @@
msm_smem_init();
msm_init_modem_notifier_list();
msm_smd_init();
+ msm_rpm_driver_init();
msm_clock_init(&msmkrypton_clock_init_data);
}
diff --git a/arch/arm/mach-msm/board-samarium.c b/arch/arm/mach-msm/board-samarium.c
index be09b54..6133983 100644
--- a/arch/arm/mach-msm/board-samarium.c
+++ b/arch/arm/mach-msm/board-samarium.c
@@ -44,6 +44,8 @@
CLK_DUMMY("core_clk", USB_HS_SYSTEM_CLK, "msm_otg", OFF),
CLK_DUMMY("iface_clk", USB_HS_AHB_CLK, "msm_otg", OFF),
CLK_DUMMY("xo", CXO_OTG_CLK, "msm_otg", OFF),
+ CLK_DUMMY("dfab_clk", DFAB_CLK, "msm_sps", OFF),
+ CLK_DUMMY("dma_bam_pclk", DMA_BAM_P_CLK, "msm_sps", OFF),
};
static struct clock_init_data msm_dummy_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/cache_erp.c b/arch/arm/mach-msm/cache_erp.c
index f52bc28..97b8448 100644
--- a/arch/arm/mach-msm/cache_erp.c
+++ b/arch/arm/mach-msm/cache_erp.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/cpu.h>
+#include <linux/seq_file.h>
#include <linux/io.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/msm_iomap.h>
@@ -159,25 +160,23 @@
return cesynr;
}
-static int proc_read_status(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int cache_erp_show(struct seq_file *m, void *v)
{
struct msm_l1_err_stats *l1_stats;
- char *p = page;
- int len, cpu, ret, bytes_left = PAGE_SIZE;
+ int cpu;
for_each_present_cpu(cpu) {
l1_stats = &per_cpu(msm_l1_erp_stats, cpu);
- ret = snprintf(p, bytes_left,
- "CPU %d:\n" \
- "\tD-cache tag parity errors:\t%u\n" \
- "\tD-cache data parity errors:\t%u\n" \
- "\tI-cache tag parity errors:\t%u\n" \
- "\tI-cache data parity errors:\t%u\n" \
- "\tD-cache timing errors:\t\t%u\n" \
- "\tI-cache timing errors:\t\t%u\n" \
- "\tTLB multi-hit errors:\t\t%u\n\n", \
+ seq_printf(m,
+ "CPU %d:\n"
+ "\tD-cache tag parity errors:\t%u\n"
+ "\tD-cache data parity errors:\t%u\n"
+ "\tI-cache tag parity errors:\t%u\n"
+ "\tI-cache data parity errors:\t%u\n"
+ "\tD-cache timing errors:\t\t%u\n"
+ "\tI-cache timing errors:\t\t%u\n"
+ "\tTLB multi-hit errors:\t\t%u\n\n",
cpu,
l1_stats->dctpe,
l1_stats->dcdpe,
@@ -186,18 +185,16 @@
l1_stats->dcte,
l1_stats->icte,
l1_stats->tlbmh);
- p += ret;
- bytes_left -= ret;
}
- p += snprintf(p, bytes_left,
- "L2 master port decode errors:\t\t%u\n" \
- "L2 master port slave errors:\t\t%u\n" \
- "L2 tag soft errors, single-bit:\t\t%u\n" \
- "L2 tag soft errors, double-bit:\t\t%u\n" \
- "L2 data soft errors, single-bit:\t%u\n" \
- "L2 data soft errors, double-bit:\t%u\n" \
- "L2 modified soft errors:\t\t%u\n" \
+ seq_printf(m,
+ "L2 master port decode errors:\t\t%u\n"
+ "L2 master port slave errors:\t\t%u\n"
+ "L2 tag soft errors, single-bit:\t\t%u\n"
+ "L2 tag soft errors, double-bit:\t\t%u\n"
+ "L2 data soft errors, single-bit:\t%u\n"
+ "L2 data soft errors, double-bit:\t%u\n"
+ "L2 modified soft errors:\t\t%u\n"
"L2 master port LDREX NOK errors:\t%u\n",
msm_l2_erp_stats.mpdcd,
msm_l2_erp_stats.mpslv,
@@ -208,16 +205,21 @@
msm_l2_erp_stats.mse,
msm_l2_erp_stats.mplxrexnok);
- len = (p - page) - off;
- if (len < 0)
- len = 0;
-
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
-
- return len;
+ return 0;
}
+static int cache_erp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cache_erp_show, NULL);
+}
+
+static const struct file_operations cache_erp_fops = {
+ .open = cache_erp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int msm_erp_dump_regions(void)
{
int i = 0;
@@ -235,26 +237,30 @@
}
#ifdef CONFIG_MSM_L1_ERR_LOG
-static int proc_read_log(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int cache_erp_log_show(struct seq_file *m, void *v)
{
- char *p = page;
- int len, log_value;
+ int log_value;
+
log_value = __raw_readl(MSM_IMEM_BASE + ERP_LOG_MAGIC_ADDR) ==
ERP_LOG_MAGIC ? 1 : 0;
- p += snprintf(p, PAGE_SIZE, "%d\n", log_value);
+ seq_printf(m, "%d\n", log_value);
- len = (p - page) - off;
- if (len < 0)
- len = 0;
-
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
-
- return len;
+ return 0;
}
+static int cache_erp_log_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cache_erp_log_show, NULL);
+}
+
+static const struct file_operations cache_erp_log_fops = {
+ .open = cache_erp_log_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void log_cpu_event(void)
{
__raw_writel(ERP_LOG_MAGIC, MSM_IMEM_BASE + ERP_LOG_MAGIC_ADDR);
@@ -263,11 +269,10 @@
static int procfs_event_log_init(void)
{
- procfs_log_entry = create_proc_entry("cpu/msm_erp_log", S_IRUGO, NULL);
-
+ procfs_log_entry = proc_create("cpu/msm_erp_log", S_IRUGO, NULL,
+ &cache_erp_log_fops);
if (!procfs_log_entry)
return -ENODEV;
- procfs_log_entry->read_proc = proc_read_log;
return 0;
}
@@ -561,7 +566,8 @@
goto fail_l1;
}
- procfs_entry = create_proc_entry("cpu/msm_cache_erp", S_IRUGO, NULL);
+ procfs_entry = proc_create("cpu/msm_cache_erp", S_IRUGO, NULL,
+ &cache_erp_fops);
if (!procfs_entry) {
pr_err("Failed to create procfs node for cache error reporting\n");
@@ -580,8 +586,6 @@
smp_call_function_single(cpu, enable_erp_irq_callback, NULL, 1);
put_online_cpus();
- procfs_entry->read_proc = proc_read_status;
-
ret = procfs_event_log_init();
if (ret)
pr_err("Failed to create procfs node for ERP log access\n");
diff --git a/arch/arm/mach-msm/clock-8084.c b/arch/arm/mach-msm/clock-8084.c
index 756ab4a..ffd33a8 100644
--- a/arch/arm/mach-msm/clock-8084.c
+++ b/arch/arm/mach-msm/clock-8084.c
@@ -168,7 +168,8 @@
CLK_DUMMY("", gcc_sdcc4_inactivity_timers_clk.c, "", OFF),
CLK_DUMMY("", gcc_spss_ahb_clk.c, "", OFF),
CLK_DUMMY("", gcc_sys_noc_ufs_axi_clk.c, "", OFF),
- CLK_DUMMY("", gcc_sys_noc_usb3_axi_clk.c, "", OFF),
+ CLK_DUMMY("mem_iface_clk", gcc_sys_noc_usb3_axi_clk.c,
+ "f9304000.qcom,usbbam", OFF),
CLK_DUMMY("", gcc_sys_noc_usb3_sec_axi_clk.c, "", OFF),
CLK_DUMMY("", gcc_tsif_ahb_clk.c, "", OFF),
CLK_DUMMY("", gcc_tsif_inactivity_timers_clk.c, "", OFF),
@@ -183,7 +184,8 @@
CLK_DUMMY("", gcc_ufs_tx_symbol_1_clk.c, "", OFF),
CLK_DUMMY("", gcc_usb2a_phy_sleep_clk.c, "", OFF),
CLK_DUMMY("", gcc_usb2b_phy_sleep_clk.c, "", OFF),
- CLK_DUMMY("", gcc_usb30_master_clk.c, "", OFF),
+ CLK_DUMMY("mem_clk", gcc_usb30_master_clk.c, "f9304000.qcom,usbbam",
+ OFF),
CLK_DUMMY("", gcc_usb30_mock_utmi_clk.c, "", OFF),
CLK_DUMMY("", gcc_usb30_sleep_clk.c, "", OFF),
CLK_DUMMY("", gcc_usb30_sec_master_clk.c, "", OFF),
@@ -395,6 +397,24 @@
CLK_DUMMY("core_clk", qdss_clk.c, "fc355000.funnel", OFF),
CLK_DUMMY("core_clk", qdss_clk.c, "fc36c000.funnel", OFF),
CLK_DUMMY("core_clk", qdss_clk.c, "fc302000.stm", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc34c000.etm", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc34d000.etm", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc34e000.etm", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc34f000.etm", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc310000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc311000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc312000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc313000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc314000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc315000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc316000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc317000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc318000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc340000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc341000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc342000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc343000.cti", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc344000.cti", OFF),
CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc326000.tmc", OFF),
CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc324000.replicator", OFF),
@@ -406,6 +426,24 @@
CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc355000.funnel", OFF),
CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc36c000.funnel", OFF),
CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc302000.stm", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc34c000.etm", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc34d000.etm", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc34e000.etm", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc34f000.etm", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc310000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc311000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc312000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc313000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc314000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc315000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc316000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc317000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc318000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc340000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc341000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc342000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc343000.cti", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc344000.cti", OFF),
};
struct clock_init_data msm8084_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/clock-8092.c b/arch/arm/mach-msm/clock-8092.c
index 2040dc4..ec7a4b0 100644
--- a/arch/arm/mach-msm/clock-8092.c
+++ b/arch/arm/mach-msm/clock-8092.c
@@ -44,6 +44,8 @@
CLK_DUMMY("iface_clk", SDC1_P_CLK, "msm_sdcc.1", OFF),
CLK_DUMMY("core_clk", SDC2_CLK, "msm_sdcc.2", OFF),
CLK_DUMMY("iface_clk", SDC2_P_CLK, "msm_sdcc.2", OFF),
+ CLK_DUMMY("dfab_clk", DFAB_CLK, "msm_sps", OFF),
+ CLK_DUMMY("dma_bam_pclk", DMA_BAM_P_CLK, "msm_sps", OFF),
CLK_DUMMY("", usb30_master_clk_src.c, "", OFF),
CLK_DUMMY("", tsif_ref_clk_src.c, "", OFF),
CLK_DUMMY("", ce1_clk_src.c, "", OFF),
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 65ad6c2..0cd5d55 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -3398,6 +3398,10 @@
CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "6a.qcom,camera"),
CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "6c.qcom,camera"),
+ /* eeprom clocks */
+ CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6c.qcom,eeprom"),
+ CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "6c.qcom,eeprom"),
+
/* CCI clocks */
CLK_LOOKUP("camss_top_ahb_clk", camss_top_ahb_clk.c,
"fda0c000.qcom,cci"),
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 1514bba..0677525 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -1547,7 +1547,7 @@
F_END
};
-static CLK_SDC(sdc1_clk, 1, 6, 52000000, 104000000);
+static CLK_SDC(sdc1_clk, 1, 6, 52000000, 208000000);
static CLK_SDC(sdc2_clk, 2, 5, 52000000, 104000000);
static CLK_SDC(sdc3_clk, 3, 4, 104000000, 208000000);
static CLK_SDC(sdc4_clk, 4, 3, 33000000, 67000000);
diff --git a/arch/arm/mach-msm/clock-debug.c b/arch/arm/mach-msm/clock-debug.c
index c91af54..35917c3 100644
--- a/arch/arm/mach-msm/clock-debug.c
+++ b/arch/arm/mach-msm/clock-debug.c
@@ -161,25 +161,62 @@
DEFINE_SIMPLE_ATTRIBUTE(clock_hwcg_fops, clock_debug_hwcg_get,
NULL, "%llu\n");
+static void clock_print_fmax_by_level(struct seq_file *m, int level)
+{
+ struct clk *clock = m->private;
+ struct clk_vdd_class *vdd_class = clock->vdd_class;
+ int off, i, vdd_level, nregs = vdd_class->num_regulators;
+
+ vdd_level = find_vdd_level(clock, clock->rate);
+
+ seq_printf(m, "%2s%10lu", vdd_level == level ? "[" : "",
+ clock->fmax[level]);
+ for (i = 0; i < nregs; i++) {
+ off = nregs*level + i;
+ if (vdd_class->vdd_uv)
+ seq_printf(m, "%10u", vdd_class->vdd_uv[off]);
+ if (vdd_class->vdd_ua)
+ seq_printf(m, "%10u", vdd_class->vdd_ua[off]);
+ }
+
+ if (vdd_level == level)
+ seq_puts(m, "]");
+ seq_puts(m, "\n");
+}
+
static int fmax_rates_show(struct seq_file *m, void *unused)
{
struct clk *clock = m->private;
- int level = 0;
+ struct clk_vdd_class *vdd_class = clock->vdd_class;
+ int level = 0, i, nregs = vdd_class->num_regulators;
+ char reg_name[10];
int vdd_level = find_vdd_level(clock, clock->rate);
if (vdd_level < 0) {
seq_printf(m, "could not find_vdd_level for %s, %ld\n",
- clock->dbg_name, clock->rate);
+ clock->dbg_name, clock->rate);
return 0;
}
- for (level = 0; level < clock->num_fmax; level++) {
- if (vdd_level == level)
- seq_printf(m, "[%lu] ", clock->fmax[level]);
- else
- seq_printf(m, "%lu ", clock->fmax[level]);
+
+ seq_printf(m, "%12s", "");
+ for (i = 0; i < nregs; i++) {
+ snprintf(reg_name, ARRAY_SIZE(reg_name), "reg %d", i);
+ seq_printf(m, "%10s", reg_name);
+ if (vdd_class->vdd_ua)
+ seq_printf(m, "%10s", "");
+ }
+
+ seq_printf(m, "\n%12s", "freq");
+ for (i = 0; i < nregs; i++) {
+ seq_printf(m, "%10s", "uV");
+ if (vdd_class->vdd_ua)
+ seq_printf(m, "%10s", "uA");
}
seq_printf(m, "\n");
+ for (level = 0; level < clock->num_fmax; level++)
+ clock_print_fmax_by_level(m, level);
+
return 0;
}
@@ -195,6 +232,83 @@
.release = seq_release,
};
+#define clock_debug_output(m, c, fmt, ...) \
+do { \
+ if (m) \
+ seq_printf(m, fmt, ##__VA_ARGS__); \
+ else if (c) \
+ pr_cont(fmt, ##__VA_ARGS__); \
+ else \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+static int clock_debug_print_clock(struct clk *c, struct seq_file *m)
+{
+ char *start = "";
+
+ if (!c || !c->prepare_count)
+ return 0;
+
+ clock_debug_output(m, 0, "\t");
+ do {
+ if (c->vdd_class)
+ clock_debug_output(m, 1, "%s%s:%u:%u [%ld, %lu]", start,
+ c->dbg_name, c->prepare_count, c->count,
+ c->rate, c->vdd_class->cur_level);
+ else
+ clock_debug_output(m, 1, "%s%s:%u:%u [%ld]", start,
+ c->dbg_name, c->prepare_count, c->count,
+ c->rate);
+ start = " -> ";
+ } while ((c = clk_get_parent(c)));
+
+ clock_debug_output(m, 1, "\n");
+
+ return 1;
+}
+
+/**
+ * clock_debug_print_enabled_clocks() - Print names of enabled clocks
+ *
+ */
+static void clock_debug_print_enabled_clocks(struct seq_file *m)
+{
+ struct clk_table *table;
+ unsigned long flags;
+ int i, cnt = 0;
+
+ clock_debug_output(m, 0, "Enabled clocks:\n");
+ spin_lock_irqsave(&clk_list_lock, flags);
+ list_for_each_entry(table, &clk_list, node) {
+ for (i = 0; i < table->num_clocks; i++)
+ cnt += clock_debug_print_clock(table->clocks[i].clk, m);
+ }
+ spin_unlock_irqrestore(&clk_list_lock, flags);
+
+ if (cnt)
+ clock_debug_output(m, 0, "Enabled clock count: %d\n", cnt);
+ else
+ clock_debug_output(m, 0, "No clocks enabled.\n");
+}
+
+static int enabled_clocks_show(struct seq_file *m, void *unused)
+{
+ clock_debug_print_enabled_clocks(m);
+ return 0;
+}
+
+static int enabled_clocks_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, enabled_clocks_show, inode->i_private);
+}
+
+static const struct file_operations enabled_clocks_fops = {
+ .open = enabled_clocks_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static int list_rates_show(struct seq_file *m, void *unused)
{
struct clk *clock = m->private;
@@ -378,6 +492,10 @@
return -ENOMEM;
}
+ if (!debugfs_create_file("enabled_clocks", S_IRUGO, debugfs_base, NULL,
+ &enabled_clocks_fops))
+ return -ENOMEM;
+
measure = clk_get_sys("debug", "measure");
if (IS_ERR(measure))
measure = NULL;
@@ -424,55 +542,13 @@
return ret;
}
-static int clock_debug_print_clock(struct clk *c)
-{
- char *start = "";
-
- if (!c || !c->prepare_count)
- return 0;
-
- pr_info("\t");
- do {
- if (c->vdd_class)
- pr_cont("%s%s:%u:%u [%ld, %lu]", start, c->dbg_name,
- c->prepare_count, c->count, c->rate,
- c->vdd_class->cur_level);
- else
- pr_cont("%s%s:%u:%u [%ld]", start, c->dbg_name,
- c->prepare_count, c->count, c->rate);
- start = " -> ";
- } while ((c = clk_get_parent(c)));
-
- pr_cont("\n");
-
- return 1;
-}
-
-/**
- * clock_debug_print_enabled() - Print names of enabled clocks for suspend debug
- *
+/*
* Print the names of enabled clocks and their parents if debug_suspend is set
*/
void clock_debug_print_enabled(void)
{
- struct clk_table *table;
- unsigned long flags;
- int i, cnt = 0;
-
if (likely(!debug_suspend))
return;
- pr_info("Enabled clocks:\n");
- spin_lock_irqsave(&clk_list_lock, flags);
- list_for_each_entry(table, &clk_list, node) {
- for (i = 0; i < table->num_clocks; i++)
- cnt += clock_debug_print_clock(table->clocks[i].clk);
- }
- spin_unlock_irqrestore(&clk_list_lock, flags);
-
- if (cnt)
- pr_info("Enabled clock count: %d\n", cnt);
- else
- pr_info("No clocks enabled.\n");
-
+ clock_debug_print_enabled_clocks(NULL);
}
diff --git a/arch/arm/mach-msm/clock-dummy.c b/arch/arm/mach-msm/clock-dummy.c
index 883a5c2..139c756 100644
--- a/arch/arm/mach-msm/clock-dummy.c
+++ b/arch/arm/mach-msm/clock-dummy.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011,2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
static int dummy_clk_set_rate(struct clk *clk, unsigned long rate)
{
+ clk->rate = rate;
return 0;
}
@@ -34,7 +35,7 @@
static unsigned long dummy_clk_get_rate(struct clk *clk)
{
- return 0;
+ return clk->rate;
}
static long dummy_clk_round_rate(struct clk *clk, unsigned long rate)
@@ -42,7 +43,7 @@
return rate;
}
-static struct clk_ops clk_ops_dummy = {
+struct clk_ops clk_ops_dummy = {
.reset = dummy_clk_reset,
.set_rate = dummy_clk_set_rate,
.set_max_rate = dummy_clk_set_max_rate,
diff --git a/arch/arm/mach-msm/clock-mdss-8974.c b/arch/arm/mach-msm/clock-mdss-8974.c
index aeb4e48..47332a4 100644
--- a/arch/arm/mach-msm/clock-mdss-8974.c
+++ b/arch/arm/mach-msm/clock-mdss-8974.c
@@ -1135,7 +1135,7 @@
return rc;
}
-static int vco_enable(struct clk *c)
+static int dsi_pll_enable(struct clk *c)
{
int i, rc = 0;
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
@@ -1163,7 +1163,7 @@
return rc;
}
-static void vco_disable(struct clk *c)
+static void dsi_pll_disable(struct clk *c)
{
int rc = 0;
@@ -1384,19 +1384,32 @@
static int vco_prepare(struct clk *c)
{
- return vco_set_rate(c, vco_cached_rate);
+ int rc = 0;
+
+ if (vco_cached_rate != 0) {
+ rc = vco_set_rate(c, vco_cached_rate);
+ if (rc) {
+ pr_err("%s: vco_set_rate failed. rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+ }
+
+ rc = dsi_pll_enable(c);
+
+error:
+ return rc;
}
static void vco_unprepare(struct clk *c)
{
vco_cached_rate = c->rate;
+ dsi_pll_disable(c);
}
/* Op structures */
static struct clk_ops clk_ops_dsi_vco = {
- .enable = vco_enable,
- .disable = vco_disable,
.set_rate = vco_set_rate,
.round_rate = vco_round_rate,
.handoff = vco_handoff,
diff --git a/arch/arm/mach-msm/cpr-regulator.c b/arch/arm/mach-msm/cpr-regulator.c
index a2936c2..60a62ec 100644
--- a/arch/arm/mach-msm/cpr-regulator.c
+++ b/arch/arm/mach-msm/cpr-regulator.c
@@ -125,6 +125,8 @@
#define CPR_FUSE_RO_SEL_BITS 3
#define CPR_FUSE_RO_SEL_BITS_MASK ((1<<CPR_FUSE_RO_SEL_BITS)-1)
+#define CPR_FUSE_MIN_QUOT_DIFF 100
+
#define BYTES_PER_FUSE_ROW 8
enum voltage_change_dir {
@@ -200,16 +202,26 @@
u32 vdd_apc_step_down_limit;
};
-static int cpr_debug_enable;
+#define CPR_DEBUG_MASK_IRQ BIT(0)
+#define CPR_DEBUG_MASK_API BIT(1)
+
+static int cpr_debug_enable = CPR_DEBUG_MASK_IRQ;
static int cpr_enable;
static struct cpr_regulator *the_cpr;
module_param_named(debug_enable, cpr_debug_enable, int, S_IRUGO | S_IWUSR);
#define cpr_debug(message, ...) \
do { \
- if (cpr_debug_enable) \
+ if (cpr_debug_enable & CPR_DEBUG_MASK_API) \
pr_info(message, ##__VA_ARGS__); \
} while (0)
+#define cpr_debug_irq(message, ...) \
+ do { \
+ if (cpr_debug_enable & CPR_DEBUG_MASK_IRQ) \
+ pr_info(message, ##__VA_ARGS__); \
+ else \
+ pr_debug(message, ##__VA_ARGS__); \
+ } while (0)
static bool cpr_is_allowed(struct cpr_regulator *cpr_vreg)
{
@@ -267,11 +279,12 @@
cpr_masked_write(cpr_vreg, REG_RBCPR_CTL, mask, value);
}
-static void cpr_ctl_enable(struct cpr_regulator *cpr_vreg)
+static void cpr_ctl_enable(struct cpr_regulator *cpr_vreg, int corner)
{
u32 val;
- if (cpr_is_allowed(cpr_vreg))
+ if (cpr_is_allowed(cpr_vreg) &&
+ (cpr_vreg->ceiling_volt[corner] > cpr_vreg->floor_volt[corner]))
val = RBCPR_CTL_LOOP_EN;
else
val = 0;
@@ -369,7 +382,7 @@
cpr_ctl_disable(the_cpr);
cpr_irq_clr(the_cpr);
cpr_corner_restore(the_cpr, the_cpr->corner);
- cpr_ctl_enable(the_cpr);
+ cpr_ctl_enable(the_cpr, the_cpr->corner);
} else {
cpr_ctl_disable(the_cpr);
cpr_irq_set(the_cpr, 0);
@@ -485,16 +498,16 @@
& RBCPR_RESULT0_ERROR_STEPS_MASK;
last_volt = cpr_vreg->last_volt[corner];
- cpr_debug("last_volt[corner:%d] = %d uV\n", corner, last_volt);
+ cpr_debug_irq("last_volt[corner:%d] = %d uV\n", corner, last_volt);
if (dir == UP) {
- cpr_debug("Up: cpr status = 0x%08x (error_steps=%d)\n",
- reg_val, error_steps);
+ cpr_debug_irq("Up: cpr status = 0x%08x (error_steps=%d)\n",
+ reg_val, error_steps);
if (last_volt >= cpr_vreg->ceiling_volt[corner]) {
- cpr_debug("[corn:%d] @ ceiling: %d >= %d: NACK\n",
- corner, last_volt,
- cpr_vreg->ceiling_volt[corner]);
+ cpr_debug_irq("[corn:%d] @ ceiling: %d >= %d: NACK\n",
+ corner, last_volt,
+ cpr_vreg->ceiling_volt[corner]);
cpr_irq_clr_nack(cpr_vreg);
/* Maximize the UP threshold */
@@ -506,17 +519,18 @@
}
if (error_steps > cpr_vreg->vdd_apc_step_up_limit) {
- cpr_debug("%d is over up-limit(%d): Clamp\n",
- error_steps,
- cpr_vreg->vdd_apc_step_up_limit);
+ cpr_debug_irq("%d is over up-limit(%d): Clamp\n",
+ error_steps,
+ cpr_vreg->vdd_apc_step_up_limit);
error_steps = cpr_vreg->vdd_apc_step_up_limit;
}
/* Calculate new voltage */
new_volt = last_volt + (error_steps * cpr_vreg->step_volt);
if (new_volt > cpr_vreg->ceiling_volt[corner]) {
- cpr_debug("new_volt(%d) >= ceiling_volt(%d): Clamp\n",
- new_volt, cpr_vreg->ceiling_volt[corner]);
+ cpr_debug_irq("new_volt(%d) >= ceiling(%d): Clamp\n",
+ new_volt,
+ cpr_vreg->ceiling_volt[corner]);
new_volt = cpr_vreg->ceiling_volt[corner];
}
@@ -542,15 +556,16 @@
/* Ack */
cpr_irq_clr_ack(cpr_vreg);
- cpr_debug("UP: -> new_volt = %d uV\n", new_volt);
+ cpr_debug_irq("UP: -> new_volt[corner:%d] = %d uV\n",
+ corner, new_volt);
} else if (dir == DOWN) {
- cpr_debug("Down: cpr status = 0x%08x (error_steps=%d)\n",
- reg_val, error_steps);
+ cpr_debug_irq("Down: cpr status = 0x%08x (error_steps=%d)\n",
+ reg_val, error_steps);
if (last_volt <= cpr_vreg->floor_volt[corner]) {
- cpr_debug("[corn:%d] @ floor: %d <= %d: NACK\n",
- corner, last_volt,
- cpr_vreg->floor_volt[corner]);
+ cpr_debug_irq("[corn:%d] @ floor: %d <= %d: NACK\n",
+ corner, last_volt,
+ cpr_vreg->floor_volt[corner]);
cpr_irq_clr_nack(cpr_vreg);
/* Maximize the DOWN threshold */
@@ -571,17 +586,18 @@
}
if (error_steps > cpr_vreg->vdd_apc_step_down_limit) {
- cpr_debug("%d is over down-limit(%d): Clamp\n",
- error_steps,
- cpr_vreg->vdd_apc_step_down_limit);
+ cpr_debug_irq("%d is over down-limit(%d): Clamp\n",
+ error_steps,
+ cpr_vreg->vdd_apc_step_down_limit);
error_steps = cpr_vreg->vdd_apc_step_down_limit;
}
/* Calculte new voltage */
new_volt = last_volt - (error_steps * cpr_vreg->step_volt);
if (new_volt < cpr_vreg->floor_volt[corner]) {
- cpr_debug("new_volt(%d) < floor_volt(%d): Clamp\n",
- new_volt, cpr_vreg->floor_volt[corner]);
+ cpr_debug_irq("new_volt(%d) < floor(%d): Clamp\n",
+ new_volt,
+ cpr_vreg->floor_volt[corner]);
new_volt = cpr_vreg->floor_volt[corner];
}
@@ -601,7 +617,8 @@
/* Ack */
cpr_irq_clr_ack(cpr_vreg);
- cpr_debug("DOWN: -> new_volt = %d uV\n", new_volt);
+ cpr_debug_irq("DOWN: -> new_volt[corner:%d] = %d uV\n",
+ corner, new_volt);
}
}
@@ -613,7 +630,7 @@
mutex_lock(&cpr_vreg->cpr_mutex);
reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
- cpr_debug("IRQ_STATUS = 0x%02X\n", reg_val);
+ cpr_debug_irq("IRQ_STATUS = 0x%02X\n", reg_val);
if (!cpr_is_allowed(cpr_vreg)) {
reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
@@ -632,7 +649,7 @@
cpr_irq_clr_nack(cpr_vreg);
} else if (reg_val & CPR_INT_MID) {
/* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
- cpr_debug("IRQ occured for Mid Flag\n");
+ cpr_debug_irq("IRQ occured for Mid Flag\n");
} else {
pr_err("IRQ occured for unknown flag (0x%08x)\n", reg_val);
}
@@ -678,7 +695,7 @@
if (cpr_is_allowed(cpr_vreg) && cpr_vreg->corner) {
cpr_irq_clr(cpr_vreg);
cpr_corner_switch(cpr_vreg, cpr_vreg->corner);
- cpr_ctl_enable(cpr_vreg);
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
}
mutex_unlock(&cpr_vreg->cpr_mutex);
@@ -744,7 +761,7 @@
if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled) {
cpr_irq_clr(cpr_vreg);
cpr_corner_switch(cpr_vreg, corner);
- cpr_ctl_enable(cpr_vreg);
+ cpr_ctl_enable(cpr_vreg, corner);
}
cpr_vreg->corner = corner;
@@ -793,7 +810,7 @@
cpr_irq_clr(cpr_vreg);
enable_irq(cpr_vreg->cpr_irq);
- cpr_ctl_enable(cpr_vreg);
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
return 0;
}
@@ -1194,6 +1211,29 @@
if (!cpr_vreg->cpr_fuse_bits) {
cpr_vreg->cpr_fuse_disable = 1;
pr_err("cpr_fuse_bits = 0: set cpr_fuse_disable = 1\n");
+ } else {
+ /* Check if the target quotients are too close together */
+ int *quot = cpr_vreg->cpr_fuse_target_quot;
+ bool valid_fuse = true;
+
+ if ((quot[CPR_CORNER_TURBO] > quot[CPR_CORNER_NORMAL]) &&
+ (quot[CPR_CORNER_NORMAL] > quot[CPR_CORNER_SVS])) {
+ if ((quot[CPR_CORNER_TURBO] -
+ quot[CPR_CORNER_NORMAL])
+ <= CPR_FUSE_MIN_QUOT_DIFF)
+ valid_fuse = false;
+ else if ((quot[CPR_CORNER_NORMAL] -
+ quot[CPR_CORNER_SVS])
+ <= CPR_FUSE_MIN_QUOT_DIFF)
+ valid_fuse = false;
+ } else {
+ valid_fuse = false;
+ }
+
+ if (!valid_fuse) {
+ cpr_vreg->cpr_fuse_disable = 1;
+ pr_err("invalid quotient values\n");
+ }
}
return 0;
diff --git a/arch/arm/mach-msm/include/mach/camera2.h b/arch/arm/mach-msm/include/mach/camera2.h
index 3e7e5fd..887c594 100644
--- a/arch/arm/mach-msm/include/mach/camera2.h
+++ b/arch/arm/mach-msm/include/mach/camera2.h
@@ -40,6 +40,7 @@
enum cci_i2c_master_t {
MASTER_0,
MASTER_1,
+ MASTER_MAX,
};
struct msm_camera_slave_info {
diff --git a/arch/arm/mach-msm/include/mach/clk-provider.h b/arch/arm/mach-msm/include/mach/clk-provider.h
index 72b5cc1..75dc240 100644
--- a/arch/arm/mach-msm/include/mach/clk-provider.h
+++ b/arch/arm/mach-msm/include/mach/clk-provider.h
@@ -168,6 +168,7 @@
int msm_clock_register(struct clk_lookup *table, size_t size);
extern struct clk dummy_clk;
+extern struct clk_ops clk_ops_dummy;
#define CLK_DUMMY(clk_name, clk_id, clk_dev, flags) { \
.con_id = clk_name, \
@@ -175,6 +176,16 @@
.clk = &dummy_clk, \
}
+#define DEFINE_CLK_DUMMY(name, _rate) \
+ static struct fixed_clk name = { \
+ .c = { \
+ .dbg_name = #name, \
+ .rate = _rate, \
+ .ops = &clk_ops_dummy, \
+ CLK_INIT(name.c), \
+ }, \
+ };
+
#define CLK_LOOKUP(con, c, dev) { .con_id = con, .clk = &c, .dev_id = dev }
#endif
diff --git a/arch/arm/mach-msm/include/mach/mpm.h b/arch/arm/mach-msm/include/mach/mpm.h
index b92c039..e76a6a9 100644
--- a/arch/arm/mach-msm/include/mach/mpm.h
+++ b/arch/arm/mach-msm/include/mach/mpm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -163,4 +163,23 @@
static inline void msm_mpm_exit_sleep(bool from_idle) {}
static inline void __init of_mpm_init(struct device_node *node) {}
#endif
+#ifdef CONFIG_MSM_MPM_OF
+/** msm_mpm_suspend_prepare() - Called at prepare_late() op during suspend
+ *
+ *
+ * When called, the MPM driver checks whether the wakeup interrupts can be
+ * monitored by the MPM hardware and programs them accordingly. If the wakeup
+ * interrupts cannot be monitored, it disallows system low power modes.
+ */
+void msm_mpm_suspend_prepare(void);
+/** msm_mpm_suspend_wake - Called during wake() op in suspend.
+ *
+ * When called, the MPM driver sets the vote for system low power modes
+ * depending on the active interrupts.
+ */
+void msm_mpm_suspend_wake(void);
+#else
+static inline void msm_mpm_suspend_prepare(void) {}
+static inline void msm_mpm_suspend_wake(void) {}
+#endif
#endif /* __ARCH_ARM_MACH_MSM_MPM_H */
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
index 62fada1..2653ae4 100644
--- a/arch/arm/mach-msm/include/mach/msm_smd.h
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -46,6 +46,7 @@
SMD_MODEM = SMEM_MODEM,
SMD_Q6 = SMEM_Q6,
SMD_DSPS = SMEM_DSPS,
+ SMD_TZ = SMEM_DSPS,
SMD_WCNSS = SMEM_WCNSS,
SMD_MODEM_Q6_FW = SMEM_MODEM_Q6_FW,
SMD_RPM = SMEM_RPM,
@@ -72,6 +73,7 @@
SMD_MODEM_RPM,
SMD_QDSP_RPM,
SMD_WCNSS_RPM,
+ SMD_TZ_RPM,
SMD_NUM_TYPE,
SMD_LOOPBACK_TYPE = 100,
diff --git a/arch/arm/mach-msm/include/mach/msm_smem.h b/arch/arm/mach-msm/include/mach/msm_smem.h
index 64ab6bf..a121791 100644
--- a/arch/arm/mach-msm/include/mach/msm_smem.h
+++ b/arch/arm/mach-msm/include/mach/msm_smem.h
@@ -136,6 +136,7 @@
SMEM_BAM_PIPE_MEMORY, /* 468 */
SMEM_IMAGE_VERSION_TABLE, /* 469 */
SMEM_LC_DEBUGGER, /* 470 */
+ SMEM_FLASH_NAND_DEV_INFO, /* 471 */
SMEM_NUM_ITEMS,
};
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index aa33f2c..180d277 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -16,19 +16,64 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
#include <linux/of.h>
#include <mach/mpm.h>
-#include "lpm_resources.h"
#include "pm.h"
#include "rpm-notifier.h"
-
+#include "spm.h"
+#include "idle.h"
enum {
MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};
-#define MAX_STR_LEN 30
+enum {
+ MSM_SCM_L2_ON = 0,
+ MSM_SCM_L2_OFF = 1,
+ MSM_SCM_L2_GDHS = 3,
+};
+
+struct msm_rpmrs_level {
+ enum msm_pm_sleep_mode sleep_mode;
+ uint32_t l2_cache;
+ bool available;
+ uint32_t latency_us;
+ uint32_t steady_state_power;
+ uint32_t energy_overhead;
+ uint32_t time_overhead_us;
+};
+
+struct lpm_lookup_table {
+ uint32_t modes;
+ const char *mode_name;
+};
+
+static void msm_lpm_level_update(void);
+
+static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
+ unsigned long action, void *hcpu);
+
+static struct notifier_block __refdata msm_lpm_cpu_nblk = {
+ .notifier_call = msm_lpm_cpu_callback,
+};
+
+static uint32_t allowed_l2_mode;
+static uint32_t sysfs_dbg_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
+static uint32_t default_l2_mode;
+
+static bool no_l2_saw;
+
+static ssize_t msm_lpm_levels_attr_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf);
+static ssize_t msm_lpm_levels_attr_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count);
+
+#define ADJUST_LATENCY(x) \
+ ((x == MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE) ?\
+ (num_online_cpus()) / 2 : 0)
static int msm_lpm_lvl_dbg_msk;
@@ -39,9 +84,54 @@
static struct msm_rpmrs_level *msm_lpm_levels;
static int msm_lpm_level_count;
-static DEFINE_PER_CPU(uint32_t , msm_lpm_sleep_time);
-static DEFINE_PER_CPU(int , lpm_permitted_level);
-static DEFINE_PER_CPU(struct atomic_notifier_head, lpm_notify_head);
+static struct kobj_attribute lpm_l2_kattr = __ATTR(l2, S_IRUGO|S_IWUSR,\
+ msm_lpm_levels_attr_show, msm_lpm_levels_attr_store);
+
+static struct attribute *lpm_levels_attr[] = {
+ &lpm_l2_kattr.attr,
+ NULL,
+};
+
+static struct attribute_group lpm_levels_attr_grp = {
+ .attrs = lpm_levels_attr,
+};
+
+/* SYSFS */
+static ssize_t msm_lpm_levels_attr_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct kernel_param kp;
+ int rc;
+
+ kp.arg = &sysfs_dbg_l2_mode;
+
+ rc = param_get_uint(buf, &kp);
+
+ if (rc > 0) {
+ strlcat(buf, "\n", PAGE_SIZE);
+ rc++;
+ }
+
+ return rc;
+}
+
+static ssize_t msm_lpm_levels_attr_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct kernel_param kp;
+ unsigned int temp;
+ int rc;
+
+ kp.arg = &temp;
+ rc = param_set_uint(buf, &kp);
+ if (rc)
+ return rc;
+
+ sysfs_dbg_l2_mode = temp;
+ msm_lpm_level_update();
+
+ return count;
+}
static int msm_pm_get_sleep_mode_value(struct device_node *node,
const char *key, uint32_t *sleep_mode_val)
@@ -74,8 +164,7 @@
if (!ret) {
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) {
- if (!strncmp(mode_name, pm_sm_lookup[i].mode_name,
- MAX_STR_LEN)) {
+ if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) {
*sleep_mode_val = pm_sm_lookup[i].modes;
ret = 0;
break;
@@ -85,16 +174,61 @@
return ret;
}
+static int msm_lpm_set_l2_mode(int sleep_mode)
+{
+ int lpm = sleep_mode;
+ int rc = 0;
+
+ if (no_l2_saw)
+ goto bail_set_l2_mode;
+
+ msm_pm_set_l2_flush_flag(MSM_SCM_L2_ON);
+
+ switch (sleep_mode) {
+ case MSM_SPM_L2_MODE_POWER_COLLAPSE:
+ msm_pm_set_l2_flush_flag(MSM_SCM_L2_OFF);
+ break;
+ case MSM_SPM_L2_MODE_GDHS:
+ msm_pm_set_l2_flush_flag(MSM_SCM_L2_GDHS);
+ break;
+ case MSM_SPM_L2_MODE_RETENTION:
+ case MSM_SPM_L2_MODE_DISABLED:
+ break;
+ default:
+ lpm = MSM_SPM_L2_MODE_DISABLED;
+ break;
+ }
+
+ rc = msm_spm_l2_set_low_power_mode(lpm, true);
+
+ if (rc) {
+ if (rc == -ENXIO)
+ WARN_ON_ONCE(1);
+ else
+ pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
+ __func__, lpm, rc);
+ }
+
+bail_set_l2_mode:
+ return rc;
+}
+
static void msm_lpm_level_update(void)
{
- unsigned int lpm_level;
+ int lpm_level;
struct msm_rpmrs_level *level = NULL;
+ uint32_t max_l2_mode;
+ static DEFINE_MUTEX(lpm_lock);
+
+ mutex_lock(&lpm_lock);
+
+ max_l2_mode = min(allowed_l2_mode, sysfs_dbg_l2_mode);
for (lpm_level = 0; lpm_level < msm_lpm_level_count; lpm_level++) {
level = &msm_lpm_levels[lpm_level];
- level->available =
- !msm_lpm_level_beyond_limit(&level->rs_limits);
+ level->available = !(level->l2_cache > max_l2_mode);
}
+ mutex_unlock(&lpm_lock);
}
int msm_lpm_enter_sleep(uint32_t sclk_count, void *limits,
@@ -102,13 +236,7 @@
{
int ret = 0;
int debug_mask;
- struct msm_rpmrs_limits *l = (struct msm_rpmrs_limits *)limits;
- struct msm_lpm_sleep_data sleep_data;
-
- sleep_data.limits = limits;
- sleep_data.kernel_sleep = __get_cpu_var(msm_lpm_sleep_time);
- atomic_notifier_call_chain(&__get_cpu_var(lpm_notify_head),
- MSM_LPM_STATE_ENTER, &sleep_data);
+ uint32_t l2 = *(uint32_t *)limits;
if (from_idle)
debug_mask = msm_lpm_lvl_dbg_msk &
@@ -118,19 +246,20 @@
MSM_LPM_LVL_DBG_SUSPEND_LIMITS;
if (debug_mask)
- pr_info("%s(): pxo:%d l2:%d mem:0x%x(0x%x) dig:0x%x(0x%x)\n",
- __func__, l->pxo, l->l2_cache,
- l->vdd_mem_lower_bound,
- l->vdd_mem_upper_bound,
- l->vdd_dig_lower_bound,
- l->vdd_dig_upper_bound);
+ pr_info("%s(): l2:%d", __func__, l2);
- ret = msm_lpmrs_enter_sleep(sclk_count, l, from_idle, notify_rpm);
+ ret = msm_lpm_set_l2_mode(l2);
+
if (ret) {
- pr_warn("%s() LPM resources failed to enter sleep\n",
- __func__);
- goto bail;
+ if (ret == -ENXIO)
+ ret = 0;
+ else {
+ pr_warn("%s(): Failed to set L2 SPM Mode %d",
+ __func__, l2);
+ goto bail;
+ }
}
+
if (notify_rpm) {
ret = msm_rpm_enter_sleep(debug_mask);
if (ret) {
@@ -138,6 +267,8 @@
__func__, ret);
goto bail;
}
+
+ msm_mpm_enter_sleep(sclk_count, from_idle);
}
bail:
return ret;
@@ -147,12 +278,12 @@
bool notify_rpm, bool collapsed)
{
- msm_lpmrs_exit_sleep((struct msm_rpmrs_limits *)limits,
- from_idle, notify_rpm, collapsed);
- if (notify_rpm)
+ msm_lpm_set_l2_mode(default_l2_mode);
+
+ if (notify_rpm) {
+ msm_mpm_exit_sleep(from_idle);
msm_rpm_exit_sleep();
- atomic_notifier_call_chain(&__get_cpu_var(lpm_notify_head),
- MSM_LPM_STATE_EXIT, NULL);
+ }
}
void msm_lpm_show_resources(void)
@@ -161,48 +292,6 @@
return;
}
-uint32_t msm_pm_get_pxo(struct msm_rpmrs_limits *limits)
-{
- return limits->pxo;
-}
-
-uint32_t msm_pm_get_l2_cache(struct msm_rpmrs_limits *limits)
-{
- return limits->l2_cache;
-}
-
-uint32_t msm_pm_get_vdd_mem(struct msm_rpmrs_limits *limits)
-{
- return limits->vdd_mem_upper_bound;
-}
-
-uint32_t msm_pm_get_vdd_dig(struct msm_rpmrs_limits *limits)
-{
- return limits->vdd_dig_upper_bound;
-}
-
-static bool lpm_level_permitted(int cur_level_count)
-{
- if (__get_cpu_var(lpm_permitted_level) == msm_lpm_level_count + 1)
- return true;
- return (__get_cpu_var(lpm_permitted_level) == cur_level_count);
-}
-
-int msm_lpm_register_notifier(int cpu, int level_iter,
- struct notifier_block *nb, bool is_latency_measure)
-{
- per_cpu(lpm_permitted_level, cpu) = level_iter;
- return atomic_notifier_chain_register(&per_cpu(lpm_notify_head,
- cpu), nb);
-}
-
-int msm_lpm_unregister_notifier(int cpu, struct notifier_block *nb)
-{
- per_cpu(lpm_permitted_level, cpu) = msm_lpm_level_count + 1;
- return atomic_notifier_chain_unregister(&per_cpu(lpm_notify_head, cpu),
- nb);
-}
-
s32 msm_cpuidle_get_deep_idle_latency(void)
{
int i;
@@ -225,17 +314,26 @@
}
return best->latency_us - 1;
}
-static bool msm_lpm_irqs_detectable(struct msm_rpmrs_limits *limits,
- bool irqs_detectable, bool gpio_detectable)
+
+static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
+ unsigned long action, void *hcpu)
{
- if (!limits->irqs_detectable)
- return irqs_detectable;
-
- if (!limits->gpio_detectable)
- return gpio_detectable;
-
- return true;
-
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ allowed_l2_mode = default_l2_mode;
+ msm_lpm_level_update();
+ break;
+ case CPU_DEAD_FROZEN:
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ if (num_online_cpus() == 1)
+ allowed_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
+ msm_lpm_level_update();
+ break;
+ }
+ return NOTIFY_OK;
}
static void *msm_lpm_lowest_limits(bool from_idle,
@@ -244,24 +342,18 @@
{
unsigned int cpu = smp_processor_id();
struct msm_rpmrs_level *best_level = NULL;
+ uint32_t best_level_pwr = 0;
uint32_t pwr;
int i;
- int best_level_iter = msm_lpm_level_count + 1;
- bool irqs_detect = false;
- bool gpio_detect = false;
bool modify_event_timer;
uint32_t next_wakeup_us = time_param->sleep_us;
+ uint32_t lvl_latency_us = 0;
+ uint32_t lvl_overhead_us = 0;
+ uint32_t lvl_overhead_energy = 0;
if (!msm_lpm_levels)
return NULL;
- msm_lpm_level_update();
-
- if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
- irqs_detect = msm_mpm_irqs_detectable(from_idle);
- gpio_detect = msm_mpm_gpio_irqs_detectable(from_idle);
- }
-
for (i = 0; i < msm_lpm_level_count; i++) {
struct msm_rpmrs_level *level = &msm_lpm_levels[i];
@@ -273,57 +365,61 @@
if (sleep_mode != level->sleep_mode)
continue;
- if (time_param->latency_us < level->latency_us)
+ lvl_latency_us =
+ level->latency_us + (level->latency_us *
+ ADJUST_LATENCY(sleep_mode));
+
+ lvl_overhead_us =
+ level->time_overhead_us + (level->time_overhead_us *
+ ADJUST_LATENCY(sleep_mode));
+
+ lvl_overhead_energy =
+ level->energy_overhead + level->energy_overhead *
+ ADJUST_LATENCY(sleep_mode);
+
+ if (time_param->latency_us < lvl_latency_us)
continue;
if (time_param->next_event_us &&
- time_param->next_event_us < level->latency_us)
+ time_param->next_event_us < lvl_latency_us)
continue;
if (time_param->next_event_us) {
if ((time_param->next_event_us < time_param->sleep_us)
- || ((time_param->next_event_us - level->latency_us) <
+ || ((time_param->next_event_us - lvl_latency_us) <
time_param->sleep_us)) {
modify_event_timer = true;
next_wakeup_us = time_param->next_event_us -
- level->latency_us;
+ lvl_latency_us;
}
}
- if (next_wakeup_us <= level->time_overhead_us)
+ if (next_wakeup_us <= lvl_overhead_us)
continue;
- if ((sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) &&
- !msm_lpm_irqs_detectable(&level->rs_limits,
- irqs_detect, gpio_detect))
- continue;
-
if ((MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE == sleep_mode)
|| (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == sleep_mode))
if (!cpu && msm_rpm_waiting_for_ack())
break;
if (next_wakeup_us <= 1) {
- pwr = level->energy_overhead;
- } else if (next_wakeup_us <= level->time_overhead_us) {
- pwr = level->energy_overhead / next_wakeup_us;
+ pwr = lvl_overhead_energy;
+ } else if (next_wakeup_us <= lvl_overhead_us) {
+ pwr = lvl_overhead_energy / next_wakeup_us;
} else if ((next_wakeup_us >> 10)
- > level->time_overhead_us) {
+ > lvl_overhead_us) {
pwr = level->steady_state_power;
} else {
pwr = level->steady_state_power;
- pwr -= (level->time_overhead_us *
+ pwr -= (lvl_overhead_us *
level->steady_state_power) /
next_wakeup_us;
- pwr += level->energy_overhead / next_wakeup_us;
+ pwr += lvl_overhead_energy / next_wakeup_us;
}
- if (!best_level || best_level->rs_limits.power[cpu] >= pwr) {
-
- level->rs_limits.latency_us[cpu] = level->latency_us;
- level->rs_limits.power[cpu] = pwr;
+ if (!best_level || (best_level_pwr >= pwr)) {
best_level = level;
- best_level_iter = i;
+ best_level_pwr = pwr;
if (power)
*power = pwr;
if (modify_event_timer &&
@@ -331,37 +427,83 @@
MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
time_param->modified_time_us =
time_param->next_event_us -
- best_level->latency_us;
+ lvl_latency_us;
else
time_param->modified_time_us = 0;
}
}
- if (best_level && !lpm_level_permitted(best_level_iter))
- best_level = NULL;
- else
- per_cpu(msm_lpm_sleep_time, cpu) =
- time_param->modified_time_us ?
- time_param->modified_time_us : time_param->sleep_us;
- return best_level ? &best_level->rs_limits : NULL;
+ return best_level ? &best_level->l2_cache : NULL;
}
-static struct lpm_test_platform_data lpm_test_pdata;
-
-static struct platform_device msm_lpm_test_device = {
- .name = "lpm_test",
- .id = -1,
- .dev = {
- .platform_data = &lpm_test_pdata,
- },
-};
-
static struct msm_pm_sleep_ops msm_lpm_ops = {
.lowest_limits = msm_lpm_lowest_limits,
.enter_sleep = msm_lpm_enter_sleep,
.exit_sleep = msm_lpm_exit_sleep,
};
+static int msm_lpm_get_l2_cache_value(struct device_node *node,
+ char *key, uint32_t *l2_val)
+{
+ int i;
+ struct lpm_lookup_table l2_mode_lookup[] = {
+ {MSM_SPM_L2_MODE_POWER_COLLAPSE, "l2_cache_pc"},
+ {MSM_SPM_L2_MODE_GDHS, "l2_cache_gdhs"},
+ {MSM_SPM_L2_MODE_RETENTION, "l2_cache_retention"},
+ {MSM_SPM_L2_MODE_DISABLED, "l2_cache_active"}
+ };
+ const char *l2_str;
+ int ret;
+
+ ret = of_property_read_string(node, key, &l2_str);
+ if (!ret) {
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(l2_mode_lookup); i++) {
+ if (!strcmp(l2_str, l2_mode_lookup[i].mode_name)) {
+ *l2_val = l2_mode_lookup[i].modes;
+ ret = 0;
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+static int __devinit msm_lpm_levels_sysfs_add(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *low_power_kobj = NULL;
+ int rc = 0;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("%s: cannot find kobject for module %s\n",
+ __func__, KBUILD_MODNAME);
+ rc = -ENOENT;
+ goto resource_sysfs_add_exit;
+ }
+
+ low_power_kobj = kobject_create_and_add(
+ "enable_low_power", module_kobj);
+ if (!low_power_kobj) {
+ pr_err("%s: cannot create kobject\n", __func__);
+ rc = -ENOMEM;
+ goto resource_sysfs_add_exit;
+ }
+
+ rc = sysfs_create_group(low_power_kobj, &lpm_levels_attr_grp);
+resource_sysfs_add_exit:
+ if (rc) {
+ if (low_power_kobj) {
+ sysfs_remove_group(low_power_kobj,
+ &lpm_levels_attr_grp);
+ kobject_del(low_power_kobj);
+ }
+ }
+
+ return rc;
+}
+
static int __devinit msm_lpm_levels_probe(struct platform_device *pdev)
{
struct msm_rpmrs_level *levels = NULL;
@@ -372,7 +514,6 @@
int ret = 0;
uint32_t num_levels = 0;
int idx = 0;
- unsigned int m_cpu = 0;
for_each_child_of_node(pdev->dev.of_node, node)
num_levels++;
@@ -392,49 +533,11 @@
goto fail;
level->sleep_mode = val;
- key = "qcom,xo";
- ret = msm_lpm_get_xo_value(node, key, &val);
- if (ret)
- goto fail;
- level->rs_limits.pxo = val;
-
key = "qcom,l2";
ret = msm_lpm_get_l2_cache_value(node, key, &val);
if (ret)
goto fail;
- level->rs_limits.l2_cache = val;
-
- key = "qcom,vdd-dig-upper-bound";
- ret = of_property_read_u32(node, key, &val);
- if (ret)
- goto fail;
- level->rs_limits.vdd_dig_upper_bound = val;
-
- key = "qcom,vdd-dig-lower-bound";
- ret = of_property_read_u32(node, key, &val);
- if (ret)
- goto fail;
- level->rs_limits.vdd_dig_lower_bound = val;
-
- key = "qcom,vdd-mem-upper-bound";
- ret = of_property_read_u32(node, key, &val);
- if (ret)
- goto fail;
- level->rs_limits.vdd_mem_upper_bound = val;
-
- key = "qcom,vdd-mem-lower-bound";
- ret = of_property_read_u32(node, key, &val);
- if (ret)
- goto fail;
- level->rs_limits.vdd_mem_lower_bound = val;
-
- key = "qcom,gpio-detectable";
- level->rs_limits.gpio_detectable =
- of_property_read_bool(node, key);
-
- key = "qcom,irqs-detectable";
- level->rs_limits.irqs_detectable =
- of_property_read_bool(node, key);
+ level->l2_cache = val;
key = "qcom,latency-us";
ret = of_property_read_u32(node, key, &val);
@@ -463,22 +566,33 @@
level->available = true;
}
+ node = pdev->dev.of_node;
+ key = "qcom,no-l2-saw";
+ no_l2_saw = of_property_read_bool(node, key);
+
msm_lpm_levels = levels;
msm_lpm_level_count = idx;
- lpm_test_pdata.msm_lpm_test_levels = msm_lpm_levels;
- lpm_test_pdata.msm_lpm_test_level_count = msm_lpm_level_count;
- key = "qcom,use-qtimer";
- lpm_test_pdata.use_qtimer =
- of_property_read_bool(pdev->dev.of_node, key);
+ if (num_online_cpus() == 1)
+ allowed_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
- for_each_possible_cpu(m_cpu)
- per_cpu(lpm_permitted_level, m_cpu) =
- msm_lpm_level_count + 1;
+ /* Do the following two steps only if L2 SAW is present */
+ if (!no_l2_saw) {
+ key = "qcom,default-l2-state";
+ if (msm_lpm_get_l2_cache_value(node, key, &default_l2_mode))
+ goto fail;
- platform_device_register(&msm_lpm_test_device);
+ if (msm_lpm_levels_sysfs_add())
+ goto fail;
+ register_hotcpu_notifier(&msm_lpm_cpu_nblk);
+ msm_pm_set_l2_flush_flag(0);
+ } else {
+ msm_pm_set_l2_flush_flag(1);
+ default_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
+ }
+
+ msm_lpm_level_update();
msm_pm_set_sleep_ops(&msm_lpm_ops);
-
return 0;
fail:
pr_err("%s: Error in name %s key %s\n", __func__, node->full_name, key);
diff --git a/arch/arm/mach-msm/lpm_resources.c b/arch/arm/mach-msm/lpm_resources.c
deleted file mode 100644
index 1d9c539..0000000
--- a/arch/arm/mach-msm/lpm_resources.c
+++ /dev/null
@@ -1,1009 +0,0 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-#include <mach/mpm.h>
-#include <mach/rpm-smd.h>
-#include <mach/trace_msm_low_power.h>
-#include "spm.h"
-#include "lpm_resources.h"
-#include "rpm-notifier.h"
-#include "idle.h"
-
-
-/*Debug Definitions*/
-enum {
- MSM_LPMRS_DEBUG_RPM = BIT(0),
- MSM_LPMRS_DEBUG_PXO = BIT(1),
- MSM_LPMRS_DEBUG_VDD_DIG = BIT(2),
- MSM_LPMRS_DEBUG_VDD_MEM = BIT(3),
- MSM_LPMRS_DEBUG_L2 = BIT(4),
- MSM_LPMRS_DEBUG_LVLS = BIT(5),
-};
-
-static int msm_lpm_debug_mask;
-module_param_named(
- debug_mask, msm_lpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
-);
-
-static bool msm_lpm_get_rpm_notif = true;
-
-/*Macros*/
-#define MAX_RS_NAME (16)
-#define MAX_RS_SIZE (4)
-#define IS_RPM_CTL(rs) \
- (!strncmp(rs->name, "rpm_ctl", MAX_RS_NAME))
-#define MAX_STR_LEN 30
-
-static bool msm_lpm_beyond_limits_vdd_dig(struct msm_rpmrs_limits *limits);
-static void msm_lpm_aggregate_vdd_dig(struct msm_rpmrs_limits *limits);
-static void msm_lpm_flush_vdd_dig(int notify_rpm);
-static void msm_lpm_notify_vdd_dig(struct msm_rpm_notifier_data
- *rpm_notifier_cb);
-static int msm_lpm_init_value_vdd_dig(struct device_node *node,
- char *key, uint32_t *default_value);
-
-static bool msm_lpm_beyond_limits_vdd_mem(struct msm_rpmrs_limits *limits);
-static void msm_lpm_aggregate_vdd_mem(struct msm_rpmrs_limits *limits);
-static void msm_lpm_flush_vdd_mem(int notify_rpm);
-static void msm_lpm_notify_vdd_mem(struct msm_rpm_notifier_data
- *rpm_notifier_cb);
-static int msm_lpm_init_value_vdd_mem(struct device_node *node,
- char *key, uint32_t *default_value);
-
-
-static bool msm_lpm_beyond_limits_pxo(struct msm_rpmrs_limits *limits);
-static void msm_lpm_aggregate_pxo(struct msm_rpmrs_limits *limits);
-static void msm_lpm_flush_pxo(int notify_rpm);
-static void msm_lpm_notify_pxo(struct msm_rpm_notifier_data
- *rpm_notifier_cb);
-static int msm_lpm_init_value_pxo(struct device_node *node,
- char *key, uint32_t *default_value);
-
-
-static bool msm_lpm_beyond_limits_l2(struct msm_rpmrs_limits *limits);
-static void msm_lpm_flush_l2(int notify_rpm);
-static void msm_lpm_aggregate_l2(struct msm_rpmrs_limits *limits);
-static int msm_lpm_init_value_l2(struct device_node *node,
- char *key, uint32_t *default_value);
-
-static void msm_lpm_flush_rpm_ctl(int notify_rpm);
-
-static int msm_lpm_rpm_callback(struct notifier_block *rpm_nb,
- unsigned long action, void *rpm_notif);
-
-static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
- unsigned long action, void *hcpu);
-
-static ssize_t msm_lpm_resource_attr_show(
- struct kobject *kobj, struct kobj_attribute *attr, char *buf);
-static ssize_t msm_lpm_resource_attr_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count);
-
-
-#define RPMRS_ATTR(_name) \
- __ATTR(_name, S_IRUGO|S_IWUSR, \
- msm_lpm_resource_attr_show, msm_lpm_resource_attr_store)
-
-/*Data structures*/
-struct msm_lpm_rs_data {
- uint32_t type;
- uint32_t id;
- uint32_t key;
- uint32_t value;
- uint32_t default_value;
- struct msm_rpm_request *handle;
-};
-
-enum {
- MSM_LPM_RPM_RS_TYPE = 0,
- MSM_LPM_LOCAL_RS_TYPE = 1,
-};
-
-enum {
- MSM_SCM_L2_ON = 0,
- MSM_SCM_L2_OFF = 1,
- MSM_SCM_L2_GDHS = 3,
-};
-
-struct msm_lpm_resource {
- struct msm_lpm_rs_data rs_data;
- uint32_t sleep_value;
- char name[MAX_RS_NAME];
-
- uint32_t enable_low_power;
- bool valid;
-
- bool (*beyond_limits)(struct msm_rpmrs_limits *limits);
- void (*aggregate)(struct msm_rpmrs_limits *limits);
- void (*flush)(int notify_rpm);
- void (*notify)(struct msm_rpm_notifier_data *rpm_notifier_cb);
- struct kobj_attribute ko_attr;
- int (*init_value)(struct device_node *node,
- char *key, uint32_t *default_value);
-};
-
-struct lpm_lookup_table {
- uint32_t modes;
- const char *mode_name;
-};
-
-static struct msm_lpm_resource msm_lpm_l2 = {
- .name = "l2",
- .beyond_limits = msm_lpm_beyond_limits_l2,
- .aggregate = msm_lpm_aggregate_l2,
- .flush = msm_lpm_flush_l2,
- .notify = NULL,
- .valid = false,
- .ko_attr = RPMRS_ATTR(l2),
- .init_value = msm_lpm_init_value_l2,
-};
-
-static struct msm_lpm_resource msm_lpm_vdd_dig = {
- .name = "vdd-dig",
- .beyond_limits = msm_lpm_beyond_limits_vdd_dig,
- .aggregate = msm_lpm_aggregate_vdd_dig,
- .flush = msm_lpm_flush_vdd_dig,
- .notify = msm_lpm_notify_vdd_dig,
- .valid = false,
- .ko_attr = RPMRS_ATTR(vdd_dig),
- .init_value = msm_lpm_init_value_vdd_dig,
-};
-
-static struct msm_lpm_resource msm_lpm_vdd_mem = {
- .name = "vdd-mem",
- .beyond_limits = msm_lpm_beyond_limits_vdd_mem,
- .aggregate = msm_lpm_aggregate_vdd_mem,
- .flush = msm_lpm_flush_vdd_mem,
- .notify = msm_lpm_notify_vdd_mem,
- .valid = false,
- .ko_attr = RPMRS_ATTR(vdd_mem),
- .init_value = msm_lpm_init_value_vdd_mem,
-};
-
-static struct msm_lpm_resource msm_lpm_pxo = {
- .name = "pxo",
- .beyond_limits = msm_lpm_beyond_limits_pxo,
- .aggregate = msm_lpm_aggregate_pxo,
- .flush = msm_lpm_flush_pxo,
- .notify = msm_lpm_notify_pxo,
- .valid = false,
- .ko_attr = RPMRS_ATTR(pxo),
- .init_value = msm_lpm_init_value_pxo,
-};
-
-static struct msm_lpm_resource *msm_lpm_resources[] = {
- &msm_lpm_vdd_dig,
- &msm_lpm_vdd_mem,
- &msm_lpm_pxo,
- &msm_lpm_l2,
-};
-
-static struct msm_lpm_resource msm_lpm_rpm_ctl = {
- .name = "rpm_ctl",
- .beyond_limits = NULL,
- .aggregate = NULL,
- .flush = msm_lpm_flush_rpm_ctl,
- .valid = true,
- .ko_attr = RPMRS_ATTR(rpm_ctl),
-};
-
-static struct notifier_block msm_lpm_rpm_nblk = {
- .notifier_call = msm_lpm_rpm_callback,
-};
-
-static struct notifier_block __refdata msm_lpm_cpu_nblk = {
- .notifier_call = msm_lpm_cpu_callback,
-};
-
-static DEFINE_SPINLOCK(msm_lpm_sysfs_lock);
-
-/* Attribute Definitions */
-static struct attribute *msm_lpm_attributes[] = {
- &msm_lpm_vdd_dig.ko_attr.attr,
- &msm_lpm_vdd_mem.ko_attr.attr,
- &msm_lpm_pxo.ko_attr.attr,
- &msm_lpm_l2.ko_attr.attr,
- NULL,
-};
-
-static struct attribute_group msm_lpm_attribute_group = {
- .attrs = msm_lpm_attributes,
-};
-
-static struct attribute *msm_lpm_rpm_ctl_attribute[] = {
- &msm_lpm_rpm_ctl.ko_attr.attr,
- NULL,
-};
-
-static struct attribute_group msm_lpm_rpm_ctl_attr_group = {
- .attrs = msm_lpm_rpm_ctl_attribute,
-};
-
-#define GET_RS_FROM_ATTR(attr) \
- (container_of(attr, struct msm_lpm_resource, ko_attr))
-
-/* RPM */
-static struct msm_rpm_request *msm_lpm_create_rpm_request
- (uint32_t rsc_type, uint32_t rsc_id)
-{
- struct msm_rpm_request *handle = NULL;
-
- handle = msm_rpm_create_request(MSM_RPM_CTX_SLEEP_SET,
- rsc_type,
- rsc_id, 1);
- return handle;
-}
-
-static int msm_lpm_send_sleep_data(struct msm_rpm_request *handle,
- uint32_t key, uint8_t *value)
-{
- int ret = 0;
- int msg_id;
-
- if (!handle)
- return ret;
-
- ret = msm_rpm_add_kvp_data_noirq(handle, key, value, MAX_RS_SIZE);
-
- if (ret < 0) {
- pr_err("%s: Error adding kvp data key %u, size %d\n",
- __func__, key, MAX_RS_SIZE);
- return ret;
- }
-
- msg_id = msm_rpm_send_request_noirq(handle);
- if (!msg_id) {
- pr_err("%s: Error sending RPM request key %u, handle 0x%x\n",
- __func__, key, (unsigned int)handle);
- ret = -EIO;
- return ret;
- }
-
- ret = msm_rpm_wait_for_ack_noirq(msg_id);
- if (ret < 0) {
- pr_err("%s: Couldn't get ACK from RPM for Msg %d Error %d",
- __func__, msg_id, ret);
- return ret;
- }
- if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_RPM)
- pr_info("Rs key %u, value %u, size %d\n", key,
- *(unsigned int *)value, MAX_RS_SIZE);
- return ret;
-}
-
-/* RPM Notifier */
-static int msm_lpm_rpm_callback(struct notifier_block *rpm_nb,
- unsigned long action,
- void *rpm_notif)
-{
- int i;
- struct msm_lpm_resource *rs = NULL;
- struct msm_rpm_notifier_data *rpm_notifier_cb =
- (struct msm_rpm_notifier_data *)rpm_notif;
-
- if (!msm_lpm_get_rpm_notif)
- return NOTIFY_DONE;
-
- if (!(rpm_nb && rpm_notif))
- return NOTIFY_BAD;
-
- for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
- rs = msm_lpm_resources[i];
- if (rs && rs->valid && rs->notify)
- rs->notify(rpm_notifier_cb);
- }
-
- return NOTIFY_OK;
-}
-
-/* SYSFS */
-static ssize_t msm_lpm_resource_attr_show(
- struct kobject *kobj, struct kobj_attribute *attr, char *buf)
-{
- struct kernel_param kp;
- unsigned long flags;
- unsigned int temp;
- int rc;
-
- spin_lock_irqsave(&msm_lpm_sysfs_lock, flags);
- temp = GET_RS_FROM_ATTR(attr)->enable_low_power;
- spin_unlock_irqrestore(&msm_lpm_sysfs_lock, flags);
-
- kp.arg = &temp;
- rc = param_get_uint(buf, &kp);
-
- if (rc > 0) {
- strlcat(buf, "\n", PAGE_SIZE);
- rc++;
- }
-
- return rc;
-}
-
-static ssize_t msm_lpm_resource_attr_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
-{
- struct kernel_param kp;
- unsigned long flags;
- unsigned int temp;
- int rc;
-
- kp.arg = &temp;
- rc = param_set_uint(buf, &kp);
- if (rc)
- return rc;
-
- spin_lock_irqsave(&msm_lpm_sysfs_lock, flags);
- GET_RS_FROM_ATTR(attr)->enable_low_power = temp;
-
- if (IS_RPM_CTL(GET_RS_FROM_ATTR(attr))) {
- struct msm_lpm_resource *rs = GET_RS_FROM_ATTR(attr);
- rs->flush(false);
- }
-
- spin_unlock_irqrestore(&msm_lpm_sysfs_lock, flags);
-
- return count;
-}
-
-/* lpm resource handling functions */
-/* Common */
-static void msm_lpm_notify_common(struct msm_rpm_notifier_data *cb,
- struct msm_lpm_resource *rs)
-{
- if ((cb->rsc_type == rs->rs_data.type) &&
- (cb->rsc_id == rs->rs_data.id) &&
- (cb->key == rs->rs_data.key)) {
-
- BUG_ON(cb->size > MAX_RS_SIZE);
-
- if (rs->valid) {
- if (cb->value) {
- memcpy(&rs->rs_data.value, cb->value, cb->size);
- msm_rpm_add_kvp_data_noirq(rs->rs_data.handle,
- cb->key, cb->value, cb->size);
- }
- else
- rs->rs_data.value = rs->rs_data.default_value;
-
- if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_RPM)
- pr_info("Notification received Rs %s value %u\n",
- rs->name, rs->rs_data.value);
- }
- }
-}
-
-/* L2 */
-static bool msm_lpm_beyond_limits_l2(struct msm_rpmrs_limits *limits)
-{
- uint32_t l2;
- bool ret = false;
- struct msm_lpm_resource *rs = &msm_lpm_l2;
-
- if (rs->valid) {
- uint32_t l2_buf = rs->rs_data.value;
-
- if (rs->enable_low_power == 1)
- l2 = MSM_LPM_L2_CACHE_GDHS;
- else if (rs->enable_low_power == 2)
- l2 = MSM_LPM_L2_CACHE_HSFS_OPEN;
- else
- l2 = MSM_LPM_L2_CACHE_ACTIVE ;
-
- if (l2_buf > l2)
- l2 = l2_buf;
- ret = (l2 > limits->l2_cache);
-
- if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_L2)
- pr_info("%s: l2 buf %u, l2 %u, limits %u\n",
- __func__, l2_buf, l2, limits->l2_cache);
- }
- return ret;
-}
-
-static void msm_lpm_aggregate_l2(struct msm_rpmrs_limits *limits)
-{
- struct msm_lpm_resource *rs = &msm_lpm_l2;
-
- if (rs->valid)
- rs->sleep_value = limits->l2_cache;
- trace_lpm_resources(rs->sleep_value, rs->name);
-}
-
-static void msm_lpm_set_l2_mode(int sleep_mode)
-{
- int lpm, rc;
-
- msm_pm_set_l2_flush_flag(MSM_SCM_L2_ON);
-
- switch (sleep_mode) {
- case MSM_LPM_L2_CACHE_HSFS_OPEN:
- lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
- msm_pm_set_l2_flush_flag(MSM_SCM_L2_OFF);
- break;
- case MSM_LPM_L2_CACHE_GDHS:
- lpm = MSM_SPM_L2_MODE_GDHS;
- msm_pm_set_l2_flush_flag(MSM_SCM_L2_GDHS);
- break;
- case MSM_LPM_L2_CACHE_RETENTION:
- lpm = MSM_SPM_L2_MODE_RETENTION;
- break;
- default:
- case MSM_LPM_L2_CACHE_ACTIVE:
- lpm = MSM_SPM_L2_MODE_DISABLED;
- break;
- }
-
- rc = msm_spm_l2_set_low_power_mode(lpm, true);
-
- if (rc < 0)
- pr_err("%s: Failed to set L2 low power mode %d",
- __func__, lpm);
-
- if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_L2)
- pr_info("%s: Requesting low power mode %d\n",
- __func__, lpm);
-}
-
-static int msm_lpm_init_value_l2(struct device_node *node,
- char *key, uint32_t *default_value)
-{
- return msm_lpm_get_l2_cache_value(node, key, default_value);
-}
-
-static void msm_lpm_flush_l2(int notify_rpm)
-{
- struct msm_lpm_resource *rs = &msm_lpm_l2;
-
- msm_lpm_set_l2_mode(rs->sleep_value);
-}
-
-int msm_lpm_get_l2_cache_value(struct device_node *node,
- char *key, uint32_t *l2_val)
-{
- int i;
- struct lpm_lookup_table l2_mode_lookup[] = {
- {MSM_LPM_L2_CACHE_HSFS_OPEN, "l2_cache_pc"},
- {MSM_LPM_L2_CACHE_GDHS, "l2_cache_gdhs"},
- {MSM_LPM_L2_CACHE_RETENTION, "l2_cache_retention"},
- {MSM_LPM_L2_CACHE_ACTIVE, "l2_cache_active"}
- };
- const char *l2_str;
- int ret;
-
- ret = of_property_read_string(node, key, &l2_str);
- if (!ret) {
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(l2_mode_lookup); i++) {
- if (!strncmp(l2_str, l2_mode_lookup[i].mode_name,
- MAX_STR_LEN)) {
- *l2_val = l2_mode_lookup[i].modes;
- ret = 0;
- break;
- }
- }
- }
- return ret;
-}
-
-/* RPM CTL */
-static void msm_lpm_flush_rpm_ctl(int notify_rpm)
-{
- struct msm_lpm_resource *rs = &msm_lpm_rpm_ctl;
- msm_lpm_send_sleep_data(rs->rs_data.handle,
- rs->rs_data.key,
- (uint8_t *)&rs->sleep_value);
-}
-
-/*VDD Dig*/
-static bool msm_lpm_beyond_limits_vdd_dig(struct msm_rpmrs_limits *limits)
-{
- bool ret = true;
- struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;
-
- if (rs->valid) {
- uint32_t vdd_buf = rs->rs_data.value;
- uint32_t vdd_dig = rs->enable_low_power ? rs->enable_low_power :
- rs->rs_data.default_value;
-
- if (vdd_buf > vdd_dig)
- vdd_dig = vdd_buf;
-
- ret = (vdd_dig > limits->vdd_dig_upper_bound);
-
- if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_VDD_DIG)
- pr_info("%s:buf %d vdd dig %d limits%d\n",
- __func__, vdd_buf, vdd_dig,
- limits->vdd_dig_upper_bound);
- }
- return ret;
-}
-
-static int msm_lpm_init_value_vdd_dig(struct device_node *node,
- char *key, uint32_t *default_value)
-{
- return of_property_read_u32(node, key, default_value);
-}
-
-static void msm_lpm_aggregate_vdd_dig(struct msm_rpmrs_limits *limits)
-{
- struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;
-
- if (rs->valid) {
- uint32_t vdd_buf = rs->rs_data.value;
- if (limits->vdd_dig_lower_bound > vdd_buf)
- rs->sleep_value = limits->vdd_dig_lower_bound;
- else
- rs->sleep_value = vdd_buf;
- }
- trace_lpm_resources(rs->sleep_value, rs->name);
-}
-
-static void msm_lpm_flush_vdd_dig(int notify_rpm)
-{
- if (notify_rpm) {
- struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;
- msm_lpm_send_sleep_data(rs->rs_data.handle,
- rs->rs_data.key,
- (uint8_t *)&rs->sleep_value);
- }
-}
-
-static void msm_lpm_notify_vdd_dig(struct msm_rpm_notifier_data
- *rpm_notifier_cb)
-{
- struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;
- msm_lpm_notify_common(rpm_notifier_cb, rs);
-}
-
-/*VDD Mem*/
-static bool msm_lpm_beyond_limits_vdd_mem(struct msm_rpmrs_limits *limits)
-{
- bool ret = true;
- struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;
-
- if (rs->valid) {
- uint32_t vdd_buf = rs->rs_data.value;
- uint32_t vdd_mem = rs->enable_low_power ? rs->enable_low_power :
- rs->rs_data.default_value;
-
- if (vdd_buf > vdd_mem)
- vdd_mem = vdd_buf;
-
- ret = (vdd_mem > limits->vdd_mem_upper_bound);
-
- if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_VDD_MEM)
- pr_info("%s:buf %d vdd mem %d limits%d\n",
- __func__, vdd_buf, vdd_mem,
- limits->vdd_mem_upper_bound);
- }
- return ret;
-}
-
-static void msm_lpm_aggregate_vdd_mem(struct msm_rpmrs_limits *limits)
-{
- struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;
-
- if (rs->valid) {
- uint32_t vdd_buf = rs->rs_data.value;
- if (limits->vdd_mem_lower_bound > vdd_buf)
- rs->sleep_value = limits->vdd_mem_lower_bound;
- else
- rs->sleep_value = vdd_buf;
- }
- trace_lpm_resources(rs->sleep_value, rs->name);
-}
-
-static void msm_lpm_flush_vdd_mem(int notify_rpm)
-{
- if (notify_rpm) {
- struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;
- msm_lpm_send_sleep_data(rs->rs_data.handle,
- rs->rs_data.key,
- (uint8_t *)&rs->sleep_value);
- }
-}
-
-static void msm_lpm_notify_vdd_mem(struct msm_rpm_notifier_data
- *rpm_notifier_cb)
-{
- struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;
- msm_lpm_notify_common(rpm_notifier_cb, rs);
-}
-
-static int msm_lpm_init_value_vdd_mem(struct device_node *node,
- char *key, uint32_t *default_value)
-{
- return of_property_read_u32(node, key, default_value);
-}
-
-/*PXO*/
-static bool msm_lpm_beyond_limits_pxo(struct msm_rpmrs_limits *limits)
-{
- bool ret = true;
- struct msm_lpm_resource *rs = &msm_lpm_pxo;
-
- if (rs->valid) {
- uint32_t pxo_buf = rs->rs_data.value;
- uint32_t pxo = rs->enable_low_power ? MSM_LPM_PXO_OFF :
- rs->rs_data.default_value;
-
- if (pxo_buf > pxo)
- pxo = pxo_buf;
-
- ret = (pxo > limits->pxo);
-
- if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_PXO)
- pr_info("%s:pxo buf %d pxo %d limits pxo %d\n",
- __func__, pxo_buf, pxo, limits->pxo);
- }
- return ret;
-}
-
-static void msm_lpm_aggregate_pxo(struct msm_rpmrs_limits *limits)
-{
- struct msm_lpm_resource *rs = &msm_lpm_pxo;
-
- if (rs->valid) {
- uint32_t pxo_buf = rs->rs_data.value;
- if (limits->pxo > pxo_buf)
- rs->sleep_value = limits->pxo;
- else
- rs->sleep_value = pxo_buf;
-
- if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_PXO)
- pr_info("%s: pxo buf %d sleep value %d\n",
- __func__, pxo_buf, rs->sleep_value);
- }
- trace_lpm_resources(rs->sleep_value, rs->name);
-}
-
-static void msm_lpm_flush_pxo(int notify_rpm)
-{
- if (notify_rpm) {
- struct msm_lpm_resource *rs = &msm_lpm_pxo;
- msm_lpm_send_sleep_data(rs->rs_data.handle,
- rs->rs_data.key,
- (uint8_t *)&rs->sleep_value);
- }
-}
-
-static void msm_lpm_notify_pxo(struct msm_rpm_notifier_data
- *rpm_notifier_cb)
-{
- struct msm_lpm_resource *rs = &msm_lpm_pxo;
- msm_lpm_notify_common(rpm_notifier_cb, rs);
-}
-
-static int msm_lpm_init_value_pxo(struct device_node *node,
- char *key, uint32_t *default_value)
-{
- return msm_lpm_get_xo_value(node, key, default_value);
-}
-
-static inline bool msm_lpm_use_mpm(struct msm_rpmrs_limits *limits)
-{
- return (limits->pxo == MSM_LPM_PXO_OFF);
-}
-
-int msm_lpm_get_xo_value(struct device_node *node,
- char *key, uint32_t *xo_val)
-{
- int i;
- struct lpm_lookup_table pxo_mode_lookup[] = {
- {MSM_LPM_PXO_OFF, "xo_off"},
- {MSM_LPM_PXO_ON, "xo_on"}
- };
- const char *xo_str;
- int ret;
-
- ret = of_property_read_string(node, key, &xo_str);
- if (!ret) {
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(pxo_mode_lookup); i++) {
- if (!strncmp(xo_str, pxo_mode_lookup[i].mode_name,
- MAX_STR_LEN)) {
- *xo_val = pxo_mode_lookup[i].modes;
- ret = 0;
- break;
- }
- }
- }
- return ret;
-}
-
-/* LPM levels interface */
-bool msm_lpm_level_beyond_limit(struct msm_rpmrs_limits *limits)
-{
- int i;
- struct msm_lpm_resource *rs;
- bool beyond_limit = false;
-
- for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
- rs = msm_lpm_resources[i];
- if (rs->beyond_limits && rs->beyond_limits(limits)) {
- beyond_limit = true;
- if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_LVLS)
- pr_info("%s: %s beyond limit", __func__,
- rs->name);
- break;
- }
- }
-
- return beyond_limit;
-}
-
-int msm_lpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
- bool from_idle, bool notify_rpm)
-{
- int ret = 0;
- int i;
- struct msm_lpm_resource *rs = NULL;
-
- for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
- rs = msm_lpm_resources[i];
- if (rs->aggregate)
- rs->aggregate(limits);
- }
-
- msm_lpm_get_rpm_notif = false;
- for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
- rs = msm_lpm_resources[i];
- if (rs->valid && rs->flush)
- rs->flush(notify_rpm);
- }
- msm_lpm_get_rpm_notif = true;
-
- if (notify_rpm)
- msm_mpm_enter_sleep(sclk_count, from_idle);
-
- return ret;
-}
-
-void msm_lpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
- bool from_idle, bool notify_rpm, bool collapsed)
-{
- if (msm_lpm_use_mpm(limits))
- msm_mpm_exit_sleep(from_idle);
-
- if (msm_lpm_l2.valid)
- msm_lpm_set_l2_mode(msm_lpm_l2.rs_data.default_value);
-}
-
-static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
- unsigned long action, void *hcpu)
-{
- struct msm_lpm_resource *rs = &msm_lpm_l2;
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- rs->rs_data.value = rs->rs_data.default_value;
- break;
- case CPU_ONLINE_FROZEN:
- case CPU_ONLINE:
- if (num_online_cpus() > 1)
- rs->rs_data.value = rs->rs_data.default_value;
- break;
- case CPU_DEAD_FROZEN:
- case CPU_DEAD:
- if (num_online_cpus() == 1)
- rs->rs_data.value = MSM_LPM_L2_CACHE_HSFS_OPEN;
- break;
- }
- return NOTIFY_OK;
-}
-
-/* RPM CTL */
-static int __devinit msm_lpm_init_rpm_ctl(void)
-{
- struct msm_lpm_resource *rs = &msm_lpm_rpm_ctl;
-
- rs->rs_data.handle = msm_rpm_create_request(
- MSM_RPM_CTX_ACTIVE_SET,
- rs->rs_data.type,
- rs->rs_data.id, 1);
- if (!rs->rs_data.handle)
- return -EIO;
-
- rs->valid = true;
- return 0;
-}
-
-static int __devinit msm_lpm_resource_sysfs_add(void)
-{
- struct kobject *module_kobj = NULL;
- struct kobject *low_power_kobj = NULL;
- struct kobject *mode_kobj = NULL;
- int rc = 0;
-
- module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
- if (!module_kobj) {
- pr_err("%s: cannot find kobject for module %s\n",
- __func__, KBUILD_MODNAME);
- rc = -ENOENT;
- goto resource_sysfs_add_exit;
- }
-
- low_power_kobj = kobject_create_and_add(
- "enable_low_power", module_kobj);
- if (!low_power_kobj) {
- pr_err("%s: cannot create kobject\n", __func__);
- rc = -ENOMEM;
- goto resource_sysfs_add_exit;
- }
-
- mode_kobj = kobject_create_and_add(
- "mode", module_kobj);
- if (!mode_kobj) {
- pr_err("%s: cannot create kobject\n", __func__);
- rc = -ENOMEM;
- goto resource_sysfs_add_exit;
- }
-
- rc = sysfs_create_group(low_power_kobj, &msm_lpm_attribute_group);
- if (rc) {
- pr_err("%s: cannot create kobject attribute group\n", __func__);
- goto resource_sysfs_add_exit;
- }
-
- rc = sysfs_create_group(mode_kobj, &msm_lpm_rpm_ctl_attr_group);
- if (rc) {
- pr_err("%s: cannot create kobject attribute group\n", __func__);
- goto resource_sysfs_add_exit;
- }
-
-resource_sysfs_add_exit:
- if (rc) {
- if (low_power_kobj)
- sysfs_remove_group(low_power_kobj,
- &msm_lpm_attribute_group);
- kobject_del(low_power_kobj);
- kobject_del(mode_kobj);
- }
-
- return rc;
-}
-
-late_initcall(msm_lpm_resource_sysfs_add);
-
-static int __devinit msm_lpmrs_probe(struct platform_device *pdev)
-{
- struct device_node *node = NULL;
- char *key = NULL;
- int ret = 0;
-
- for_each_child_of_node(pdev->dev.of_node, node) {
- struct msm_lpm_resource *rs = NULL;
- const char *val;
- int i;
- bool local_resource;
-
- key = "qcom,name";
- ret = of_property_read_string(node, key, &val);
- if (ret) {
- pr_err("Cannot read string\n");
- goto fail;
- }
-
- for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
- char *lpmrs_name = msm_lpm_resources[i]->name;
- if (!msm_lpm_resources[i]->valid &&
- !strncmp(val, lpmrs_name, strnlen(lpmrs_name,
- MAX_RS_NAME))) {
- rs = msm_lpm_resources[i];
- break;
- }
- }
-
- if (!rs) {
- pr_err("LPM resource not found\n");
- continue;
- }
-
- key = "qcom,init-value";
- ret = rs->init_value(node, key, &rs->rs_data.default_value);
- if (ret) {
- pr_err("%s():Failed to read %s\n", __func__, key);
- goto fail;
- }
-
- rs->rs_data.value = rs->rs_data.default_value;
-
- key = "qcom,local-resource-type";
- local_resource = of_property_read_bool(node, key);
-
- if (!local_resource) {
- key = "qcom,type";
- ret = of_property_read_u32(node, key,
- &rs->rs_data.type);
- if (ret) {
- pr_err("Failed to read type\n");
- goto fail;
- }
-
- key = "qcom,id";
- ret = of_property_read_u32(node, key, &rs->rs_data.id);
- if (ret) {
- pr_err("Failed to read id\n");
- goto fail;
- }
-
- key = "qcom,key";
- ret = of_property_read_u32(node, key, &rs->rs_data.key);
- if (ret) {
- pr_err("Failed to read key\n");
- goto fail;
- }
-
- rs->rs_data.handle = msm_lpm_create_rpm_request(
- rs->rs_data.type,
- rs->rs_data.id);
-
- if (!rs->rs_data.handle) {
- pr_err("%s: Failed to allocate handle for %s\n",
- __func__, rs->name);
- ret = -1;
- goto fail;
- }
- /* fall through */
- }
-
- rs->valid = true;
- }
- msm_rpm_register_notifier(&msm_lpm_rpm_nblk);
- msm_lpm_init_rpm_ctl();
-
- if (msm_lpm_l2.valid) {
- register_hotcpu_notifier(&msm_lpm_cpu_nblk);
- /* For UP mode, set the default to HSFS OPEN*/
- if (num_possible_cpus() == 1) {
- msm_lpm_l2.rs_data.default_value =
- MSM_LPM_L2_CACHE_HSFS_OPEN;
- msm_lpm_l2.rs_data.value = MSM_LPM_L2_CACHE_HSFS_OPEN;
- }
- msm_pm_set_l2_flush_flag(0);
- } else
- msm_pm_set_l2_flush_flag(1);
-
-fail:
- return ret;
-}
-
-static struct of_device_id msm_lpmrs_match_table[] = {
- {.compatible = "qcom,lpm-resources"},
- {},
-};
-
-static struct platform_driver msm_lpmrs_driver = {
- .probe = msm_lpmrs_probe,
- .driver = {
- .name = "lpm-resources",
- .owner = THIS_MODULE,
- .of_match_table = msm_lpmrs_match_table,
- },
-};
-
-int __init msm_lpmrs_module_init(void)
-{
- return platform_driver_register(&msm_lpmrs_driver);
-}
diff --git a/arch/arm/mach-msm/lpm_resources.h b/arch/arm/mach-msm/lpm_resources.h
deleted file mode 100644
index 105cfe6..0000000
--- a/arch/arm/mach-msm/lpm_resources.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ARCH_ARM_MACH_MSM_LPM_RESOURCES_H
-#define __ARCH_ARM_MACH_MSM_LPM_RESOURCES_H
-
-#include "pm.h"
-#include "test-lpm.h"
-
-enum {
- MSM_LPM_PXO_OFF,
- MSM_LPM_PXO_ON
-};
-
-enum {
- MSM_LPM_L2_CACHE_HSFS_OPEN,
- MSM_LPM_L2_CACHE_GDHS,
- MSM_LPM_L2_CACHE_RETENTION,
- MSM_LPM_L2_CACHE_ACTIVE,
-};
-
-struct msm_rpmrs_limits {
- uint32_t pxo;
- uint32_t l2_cache;
- uint32_t vdd_mem_upper_bound;
- uint32_t vdd_mem_lower_bound;
- uint32_t vdd_dig_upper_bound;
- uint32_t vdd_dig_lower_bound;
- bool irqs_detectable;
- bool gpio_detectable;
-
- uint32_t latency_us[NR_CPUS];
- uint32_t power[NR_CPUS];
-};
-
-struct msm_rpmrs_level {
- enum msm_pm_sleep_mode sleep_mode;
- struct msm_rpmrs_limits rs_limits;
- bool available;
- uint32_t latency_us;
- uint32_t steady_state_power;
- uint32_t energy_overhead;
- uint32_t time_overhead_us;
-};
-
-enum {
- MSM_LPM_STATE_ENTER = 0,
- MSM_LPM_STATE_EXIT = 1,
-};
-
-#define MSM_PM(field) MSM_LPM_##field
-
-/**
- * msm_pm_get_pxo() - get the limits for pxo
- * @limits: pointer to the msm_rpmrs_limits structure
- *
- * This function gets the limits to the resource pxo on
- * 8974
- */
-
-uint32_t msm_pm_get_pxo(struct msm_rpmrs_limits *limits);
-
-/**
- * msm_pm_get_l2_cache() - get the limits for l2 cache
- * @limits: pointer to the msm_rpmrs_limits structure
- *
- * This function gets the limits to the resource l2 cache
- * on 8974
- */
-
-uint32_t msm_pm_get_l2_cache(struct msm_rpmrs_limits *limits);
-
-/**
- * msm_pm_get_vdd_mem() - get the limits for pxo
- * @limits: pointer to the msm_rpmrs_limits structure
- *
- * This function gets the limits to the resource vdd mem
- * on 8974
- */
-
-uint32_t msm_pm_get_vdd_mem(struct msm_rpmrs_limits *limits);
-
-/**
- * msm_pm_get_vdd_dig() - get the limits for vdd dig
- * @limits: pointer to the msm_rpmrs_limits structure
- *
- * This function gets the limits to the resource on 8974
- */
-
-uint32_t msm_pm_get_vdd_dig(struct msm_rpmrs_limits *limits);
-
-/**
- * msm_lpm_get_xo_value() - get the enum value for xo
- * @node pointer to the device node
- * @key pxo property key
- * @xo_val xo enum value
- */
-int msm_lpm_get_xo_value(struct device_node *node,
- char *key, uint32_t *xo_val);
-
-/**
- * msm_lpm_get_l2_cache_value() - get the enum value for l2 cache
- * @node pointer to the device node
- * @key l2 cache property key
- * @l2_val l2 mode enum value
- */
-int msm_lpm_get_l2_cache_value(struct device_node *node,
- char *key, uint32_t *l2_val);
-
-/**
- * struct msm_lpm_sleep_data - abstraction to get sleep data
- * @limits: pointer to the msm_rpmrs_limits structure
- * @kernel_sleep: kernel sleep time as decided by the power calculation
- * algorithm
- *
- * This structure is an abstraction to get the limits and kernel sleep time
- * during enter sleep.
- */
-
-struct msm_lpm_sleep_data {
- struct msm_rpmrs_limits *limits;
- uint32_t kernel_sleep;
-};
-
-/**
- * msm_lpm_register_notifier() - register for notifications
- * @cpu: cpu to debug
- * @level_iter: low power level index to debug
- * @nb: notifier block to callback on notifications
- * @is_latency_measure: is it latency measure
- *
- * This function sets the permitted level to the index of the
- * level under test and registers notifier for callback.
- */
-
-int msm_lpm_register_notifier(int cpu, int level_iter,
- struct notifier_block *nb, bool is_latency_measure);
-
-/**
- * msm_lpm_unregister_notifier() - unregister from notifications
- * @cpu: cpu to debug
- * @nb: notifier block to callback on notifications
- *
- * This function sets the permitted level to a value one more than
- * available levels count which indicates that all levels are
- * permitted and it also unregisters notifier for callback.
- */
-
-int msm_lpm_unregister_notifier(int cpu, struct notifier_block *nb);
-
-#ifdef CONFIG_MSM_RPM_SMD
-
-/**
- * msm_lpm_level_beyond_limit() - Check if the resources in a low power level
- * is beyond the limits of the driver votes received for those resources.This
- * function is used by lpm_levels to eliminate any low power level that cannot
- * be entered.
- *
- * @limits: pointer to the resource limits of a low power level.
- *
- * returns true if the resource limits are beyond driver resource votes.
- * false otherwise.
- */
-bool msm_lpm_level_beyond_limit(struct msm_rpmrs_limits *limits);
-
-/**
- * msm_lpmrs_enter_sleep() - Enter sleep flushes the sleep votes of low power
- * resources to the RPM driver, also configure the MPM if needed depending
- * on the low power mode being entered. L2 low power mode is also set in
- * this function.
-
- * @sclk_count: wakeup counter for RPM.
- * @limits: pointer to the resource limits of the low power mode being entered.
- * @from_idle: bool to determine if this call being made as a part of
- * idle power collapse.
- * @notify_rpm: bool that informs if this is an RPM notified power collapse.
- *
- * returns 0 on success.
- */
-int msm_lpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
- bool from_idle, bool notify_rpm);
-
-/**
- * msm_lpmrs_exit_sleep() - Exit sleep, reset the MPM and L2 mode.
- * @ limits: pointer to resource limits of the most recent low power mode.
- * @from_idle: bool to determine if this call being made as a part of
- * idle power collapse.
- * @notify_rpm: bool that informs if this is an RPM notified power collapse.
- * @collapsed: bool that informs if the Krait was power collapsed.
- */
-void msm_lpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
- bool from_idle, bool notify_rpm, bool collapsed);
-/**
- * msm_lpmrs_module_init() - Init function that parses the device tree to
- * get the low power resource attributes and registers with RPM driver for
- * callback notification.
- *
- * returns 0 on success.
- */
-int __init msm_lpmrs_module_init(void);
-
-#else
-static inline bool msm_lpm_level_beyond_limit(struct msm_rpmrs_limits *limits)
-{
- return true;
-}
-
-static inline int msm_lpmrs_enter_sleep(uint32_t sclk_count,
- struct msm_rpmrs_limits *limits, bool from_idle, bool notify_rpm)
-{
- return 0;
-}
-
-static inline void msm_lpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
- bool from_idle, bool notify_rpm, bool collapsed)
-{
- return;
-}
-
-static inline int __init msm_lpmrs_module_init(void)
-{
- return 0;
-}
-#endif /* CONFIG_MSM_RPM_SMD */
-
-#endif
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 2ce4fa0..a974018 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -308,6 +308,8 @@
unsigned long memory_reserve_prop_length;
unsigned int memory_size;
unsigned int memory_start;
+ unsigned int num_holes = 0;
+ int i;
int ret;
memory_name_prop = of_get_flat_dt_prop(node,
@@ -358,21 +360,27 @@
mem_remove:
if (memory_remove_prop) {
- if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
+ if (!memory_remove_prop_length || (memory_remove_prop_length %
+ (2 * sizeof(unsigned int)) != 0)) {
WARN(1, "Memory remove malformed\n");
goto mem_reserve;
}
- memory_start = be32_to_cpu(memory_remove_prop[0]);
- memory_size = be32_to_cpu(memory_remove_prop[1]);
+ num_holes = memory_remove_prop_length /
+ (2 * sizeof(unsigned int));
- ret = memblock_remove(memory_start, memory_size);
- if (ret)
- WARN(1, "Failed to remove memory %x-%x\n",
+ for (i = 0; i < (num_holes * 2); i += 2) {
+ memory_start = be32_to_cpu(memory_remove_prop[i]);
+ memory_size = be32_to_cpu(memory_remove_prop[i+1]);
+
+ ret = memblock_remove(memory_start, memory_size);
+ if (ret)
+ WARN(1, "Failed to remove memory %x-%x\n",
memory_start, memory_start+memory_size);
- else
- pr_info("Node %s removed memory %x-%x\n", uname,
+ else
+ pr_info("Node %s removed memory %x-%x\n", uname,
memory_start, memory_start+memory_size);
+ }
}
mem_reserve:
@@ -428,6 +436,8 @@
unsigned long memory_remove_prop_length;
unsigned long hole_start;
unsigned long hole_size;
+ unsigned int num_holes = 0;
+ int i = 0;
memory_remove_prop = of_get_flat_dt_prop(node,
"qcom,memblock-remove",
@@ -441,15 +451,21 @@
}
if (memory_remove_prop) {
- if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
+ if (!memory_remove_prop_length || (memory_remove_prop_length %
+ (2 * sizeof(unsigned int)) != 0)) {
WARN(1, "Memory remove malformed\n");
goto out;
}
- hole_start = be32_to_cpu(memory_remove_prop[0]);
- hole_size = be32_to_cpu(memory_remove_prop[1]);
+ num_holes = memory_remove_prop_length /
+ (2 * sizeof(unsigned int));
- adjust_meminfo(hole_start, hole_size);
+ for (i = 0; i < (num_holes * 2); i += 2) {
+ hole_start = be32_to_cpu(memory_remove_prop[i]);
+ hole_size = be32_to_cpu(memory_remove_prop[i+1]);
+
+ adjust_meminfo(hole_start, hole_size);
+ }
}
out:
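With this change a single qcom,memblock-remove property may describe several holes as consecutive <start size> cell pairs, for example qcom,memblock-remove = <0x0fa00000 0x100000 0x1f000000 0x200000>; for two holes (the addresses here are illustrative only, not taken from any target).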
diff --git a/arch/arm/mach-msm/mpm-of.c b/arch/arm/mach-msm/mpm-of.c
index dbd5d67..a0746f9 100644
--- a/arch/arm/mach-msm/mpm-of.c
+++ b/arch/arm/mach-msm/mpm-of.c
@@ -27,10 +27,17 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
#include <asm/hardware/gic.h>
#include <asm/arch_timer.h>
#include <mach/gpio.h>
#include <mach/mpm.h>
+#include <mach/clk.h>
+#include <mach/rpm-regulator-smd.h>
enum {
MSM_MPM_GIC_IRQ_DOMAIN,
@@ -75,6 +82,12 @@
#define ARCH_TIMER_HZ (19200000)
static struct msm_mpm_device_data msm_mpm_dev_data;
+static struct clk *xo_clk;
+static bool xo_enabled;
+static struct workqueue_struct *msm_mpm_wq;
+static struct work_struct msm_mpm_work;
+static struct completion wake_wq;
+
enum mpm_reg_offsets {
MSM_MPM_REG_WAKEUP,
MSM_MPM_REG_ENABLE,
@@ -257,6 +270,8 @@
else
__clear_bit(d->hwirq, irq_apps);
+ if (!wakeset && (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED))
+ complete(&wake_wq);
}
return 0;
@@ -543,6 +558,54 @@
}
}
}
+static void msm_mpm_sys_low_power_modes(bool allow)
+{
+ if (allow) {
+ if (xo_enabled) {
+ clk_disable_unprepare(xo_clk);
+ xo_enabled = false;
+ }
+ } else {
+ if (!xo_enabled) {
+ /* If we cannot enable the XO clock, flag it right away
+ * rather than having to deal later with not being able
+ * to wake up from a non-monitorable interrupt.
+ */
+ BUG_ON(clk_prepare_enable(xo_clk));
+ xo_enabled = true;
+ }
+ }
+}
+
+void msm_mpm_suspend_prepare(void)
+{
+ bool allow = msm_mpm_irqs_detectable(false) &&
+ msm_mpm_gpio_irqs_detectable(false);
+ msm_mpm_sys_low_power_modes(allow);
+}
+EXPORT_SYMBOL(msm_mpm_suspend_prepare);
+
+void msm_mpm_suspend_wake(void)
+{
+ bool allow = msm_mpm_irqs_detectable(true) &&
+ msm_mpm_gpio_irqs_detectable(true);
+ msm_mpm_sys_low_power_modes(allow);
+}
+EXPORT_SYMBOL(msm_mpm_suspend_wake);
+
+static void msm_mpm_work_fn(struct work_struct *work)
+{
+ unsigned long flags;
+ while (1) {
+ bool allow;
+ wait_for_completion(&wake_wq);
+ spin_lock_irqsave(&msm_mpm_lock, flags);
+ allow = msm_mpm_irqs_detectable(true) &&
+ msm_mpm_gpio_irqs_detectable(true);
+ spin_unlock_irqrestore(&msm_mpm_lock, flags);
+ msm_mpm_sys_low_power_modes(allow);
+ }
+}
static int __devinit msm_mpm_dev_probe(struct platform_device *pdev)
{
@@ -555,6 +618,13 @@
return 0;
}
+ xo_clk = devm_clk_get(&pdev->dev, "xo");
+
+ if (IS_ERR(xo_clk)) {
+ pr_err("%s(): Cannot get clk resource for XO\n", __func__);
+ return PTR_ERR(xo_clk);
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vmpm");
if (!res) {
pr_err("%s(): Missing RPM memory resource\n", __func__);
@@ -610,10 +680,27 @@
return ret;
}
+
+ init_completion(&wake_wq);
+
+ INIT_WORK(&msm_mpm_work, msm_mpm_work_fn);
+ msm_mpm_wq = create_singlethread_workqueue("mpm");
+
+ if (msm_mpm_wq)
+ queue_work(msm_mpm_wq, &msm_mpm_work);
+ else {
+ pr_warn("%s(): Failed to create wq. So voting against XO off\n",
+ __func__);
+ /* Throw a BUG. Otherwise, it is possible that the system
+ * allows XO shutdown while non-monitored interrupts are
+ * pending, causing errors at a later point in time.
+ */
+ BUG_ON(clk_prepare_enable(xo_clk));
+ xo_enabled = true;
+ }
+
msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;
-
return 0;
-
}
static inline int __init mpm_irq_domain_linear_size(struct irq_domain *d)
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c
index 3a996eb..4538c4f 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -524,6 +524,7 @@
.mode = NOC_QOS_MODE_FIXED,
.qport = qports_ipa,
.mas_hw_id = MAS_IPA,
+ .hw_sel = MSM_BUS_NOC,
},
{
.id = MSM_BUS_MASTER_QDSS_ETR,
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index c09b759..7695b2d 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -484,22 +484,19 @@
static void msm_pcie_adjust_tlp_size(struct msm_pcie_dev_t *dev)
{
/*
- * Apply this fix only for device such as APQ8064 version 1.
* Set the Max TLP size to 2K, instead of using default of 4K
* to avoid a RAM problem in PCIE20 core of that version.
*/
- if (readl_relaxed(dev->elbi + PCIE20_ELBI_VERSION) == 0x01002107) {
- /*
- * CFG_REMOTE_RD_REQ_BRIDGE_SIZE:
- * 5=4KB/4=2KB/3=1KB/2=512B/1=256B/0=128B
- */
- writel_relaxed(4, dev->pcie20 +
- PCIE20_PLR_AXI_MSTR_RESP_COMP_CTRL0);
+ /*
+ * CFG_REMOTE_RD_REQ_BRIDGE_SIZE:
+ * 5=4KB/4=2KB/3=1KB/2=512B/1=256B/0=128B
+ */
+ writel_relaxed(4, dev->pcie20 +
+ PCIE20_PLR_AXI_MSTR_RESP_COMP_CTRL0);
- writel_relaxed(1, dev->pcie20 +
- PCIE20_PLR_AXI_MSTR_RESP_COMP_CTRL1);
- }
+ writel_relaxed(1, dev->pcie20 +
+ PCIE20_PLR_AXI_MSTR_RESP_COMP_CTRL1);
};
static int __init msm_pcie_setup(int nr, struct pci_sys_data *sys)
diff --git a/arch/arm/mach-msm/perf_debug.c b/arch/arm/mach-msm/perf_debug.c
index 0a799aa..28d8e42 100644
--- a/arch/arm/mach-msm/perf_debug.c
+++ b/arch/arm/mach-msm/perf_debug.c
@@ -34,6 +34,7 @@
"9 ARM: dts: msm: add perf-events support for msm8226\n"
"10 Perf: Fix counts across power collapse\n"
"11 ARM: dts: msm: add perf-events support for msm8x10, msm8x12\n"
+ "12 Perf: Make per-process counters configurable\n"
;
static ssize_t desc_read(struct file *fp, char __user *buf,
diff --git a/arch/arm/mach-msm/perf_trace_counters.c b/arch/arm/mach-msm/perf_trace_counters.c
index d961994..65b0d28 100644
--- a/arch/arm/mach-msm/perf_trace_counters.c
+++ b/arch/arm/mach-msm/perf_trace_counters.c
@@ -10,9 +10,13 @@
* GNU General Public License for more details.
*/
#include <asm/thread_notify.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
#define CREATE_TRACE_POINTS
#include "perf_trace_counters.h"
+static unsigned int tp_pid_state;
+
static int tracectr_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
@@ -34,9 +38,80 @@
.notifier_call = tracectr_notifier,
};
+static void enable_tp_pid(void)
+{
+ if (tp_pid_state == 0) {
+ tp_pid_state = 1;
+ thread_register_notifier(&tracectr_notifier_block);
+ }
+}
+
+static void disable_tp_pid(void)
+{
+ if (tp_pid_state == 1) {
+ tp_pid_state = 0;
+ thread_unregister_notifier(&tracectr_notifier_block);
+ }
+}
+
+static ssize_t read_enabled_perftp_file_bool(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[2];
+ buf[1] = '\n';
+ if (tp_pid_state == 0)
+ buf[0] = '0';
+ else
+ buf[0] = '1';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_enabled_perftp_file_bool(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[32];
+ size_t buf_size;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ switch (buf[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ enable_tp_pid();
+ break;
+ case 'n':
+ case 'N':
+ case '0':
+ disable_tp_pid();
+ break;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_perftp = {
+ .read = read_enabled_perftp_file_bool,
+ .write = write_enabled_perftp_file_bool,
+ .llseek = default_llseek,
+};
+
int __init init_tracecounters(void)
{
- thread_register_notifier(&tracectr_notifier_block);
+ struct dentry *dir;
+ struct dentry *file;
+ unsigned int value = 1;
+
+ dir = debugfs_create_dir("perf_debug_tp", NULL);
+ if (!dir)
+ return -ENOMEM;
+ file = debugfs_create_file("enabled", 0777, dir,
+ &value, &fops_perftp);
+ if (!file) {
+ debugfs_remove(dir);
+ return -ENOMEM;
+ }
return 0;
}
late_initcall(init_tracecounters);
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index 0503d24..8a3ecb1 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -252,6 +252,7 @@
struct pil_desc *desc = dev_id;
struct pil_priv *priv = desc->priv;
+ pil_info(desc, "Power/Clock ready interrupt received\n");
if (!desc->priv->unvoted_flag) {
desc->priv->unvoted_flag = 1;
__pil_proxy_unvote(priv);
@@ -284,6 +285,12 @@
return ERR_PTR(-EPERM);
}
+ if (phdr->p_filesz > phdr->p_memsz) {
+ pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
+ num, phdr->p_filesz, phdr->p_memsz);
+ return ERR_PTR(-EINVAL);
+ }
+
seg = kmalloc(sizeof(*seg), GFP_KERNEL);
if (!seg)
return ERR_PTR(-ENOMEM);
@@ -512,51 +519,29 @@
int ret = 0, count;
phys_addr_t paddr;
char fw_name[30];
- const struct firmware *fw = NULL;
- const u8 *data;
int num = seg->num;
if (seg->filesz) {
snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
desc->name, num);
- ret = request_firmware(&fw, fw_name, desc->dev);
- if (ret) {
- pil_err(desc, "Failed to locate blob %s\n", fw_name);
+ ret = request_firmware_direct(fw_name, desc->dev, seg->paddr,
+ seg->filesz);
+ if (ret < 0) {
+ pil_err(desc, "Failed to locate blob %s or blob is too big.\n",
+ fw_name);
return ret;
}
- if (fw->size != seg->filesz) {
+ if (ret != seg->filesz) {
pil_err(desc, "Blob size %u doesn't match %lu\n",
- fw->size, seg->filesz);
- ret = -EPERM;
- goto release_fw;
+ ret, seg->filesz);
+ return -EPERM;
}
- }
-
- /* Load the segment into memory */
- count = seg->filesz;
- paddr = seg->paddr;
- data = fw ? fw->data : NULL;
- while (count > 0) {
- int size;
- u8 __iomem *buf;
-
- size = min_t(size_t, IOMAP_SIZE, count);
- buf = ioremap(paddr, size);
- if (!buf) {
- pil_err(desc, "Failed to map memory\n");
- ret = -ENOMEM;
- goto release_fw;
- }
- memcpy(buf, data, size);
- iounmap(buf);
-
- count -= size;
- paddr += size;
- data += size;
+ ret = 0;
}
/* Zero out trailing memory */
+ paddr = seg->paddr + seg->filesz;
count = seg->sz - seg->filesz;
while (count > 0) {
int size;
@@ -566,8 +551,7 @@
buf = ioremap(paddr, size);
if (!buf) {
pil_err(desc, "Failed to map memory\n");
- ret = -ENOMEM;
- goto release_fw;
+ return -ENOMEM;
}
memset(buf, 0, size);
iounmap(buf);
@@ -582,8 +566,6 @@
pil_err(desc, "Blob%u failed verification\n", num);
}
-release_fw:
- release_firmware(fw);
return ret;
}
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index 56047ff..c267541 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -299,8 +299,8 @@
int ret;
mba = devm_kzalloc(&pdev->dev, sizeof(*mba), GFP_KERNEL);
- if (IS_ERR(mba))
- return PTR_ERR(mba);
+ if (!mba)
+ return -ENOMEM;
drv->mba = mba;
q6 = pil_q6v5_init(pdev);
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index 8bceb42..da5e67a 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -40,6 +40,7 @@
#include <mach/trace_msm_low_power.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/msm_bus.h>
+#include <mach/mpm.h>
#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/pgtable.h>
@@ -1132,9 +1133,22 @@
pm_sleep_ops = *ops;
}
+static int msm_suspend_prepare(void)
+{
+ msm_mpm_suspend_prepare();
+ return 0;
+}
+
+static void msm_suspend_wake(void)
+{
+ msm_mpm_suspend_wake();
+}
+
static const struct platform_suspend_ops msm_pm_ops = {
.enter = msm_pm_enter,
.valid = suspend_valid_only_mem,
+ .prepare_late = msm_suspend_prepare,
+ .wake = msm_suspend_wake,
};
static int __devinit msm_pm_snoc_client_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/pm-stats.c b/arch/arm/mach-msm/pm-stats.c
index 1bd9b46..ac4ed25 100644
--- a/arch/arm/mach-msm/pm-stats.c
+++ b/arch/arm/mach-msm/pm-stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include "pm.h"
@@ -83,46 +84,19 @@
}
/*
- * Helper function of snprintf where buf is auto-incremented, size is auto-
- * decremented, and there is no return value.
- *
- * NOTE: buf and size must be l-values (e.g. variables)
- */
-#define SNPRINTF(buf, size, format, ...) \
- do { \
- if (size > 0) { \
- int ret; \
- ret = snprintf(buf, size, format, ## __VA_ARGS__); \
- if (ret > size) { \
- buf += size; \
- size = 0; \
- } else { \
- buf += ret; \
- size -= ret; \
- } \
- } \
- } while (0)
-
-/*
* Write out the power management statistics.
*/
-static int msm_pm_read_proc
- (char *page, char **start, off_t off, int count, int *eof, void *data)
+
+static int msm_pm_stats_show(struct seq_file *m, void *v)
{
- unsigned int cpu = off / MSM_PM_STAT_COUNT;
- int id = off % MSM_PM_STAT_COUNT;
- char *p = page;
+ int cpu;
+ int bucket_count = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+ int bucket_shift = CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
- if (count < 1024) {
- *start = (char *) 0;
- *eof = 0;
- return 0;
- }
-
- if (cpu < num_possible_cpus()) {
+ for_each_possible_cpu(cpu) {
unsigned long flags;
struct msm_pm_time_stats *stats;
- int i;
+ int i, id;
int64_t bucket_time;
int64_t s;
uint32_t ns;
@@ -130,59 +104,52 @@
spin_lock_irqsave(&msm_pm_stats_lock, flags);
stats = per_cpu(msm_pm_stats, cpu).stats;
- /* Skip the disabled ones */
- if (!stats[id].enabled) {
- *p = '\0';
- p++;
- goto again;
- }
+ for (id = 0; id < MSM_PM_STAT_COUNT; id++) {
+ /* Skip the disabled ones */
+ if (!stats[id].enabled)
+ continue;
- s = stats[id].total_time;
- ns = do_div(s, NSEC_PER_SEC);
- SNPRINTF(p, count,
- "[cpu %u] %s:\n"
- " count: %7d\n"
- " total_time: %lld.%09u\n",
- cpu, stats[id].name,
- stats[id].count,
- s, ns);
-
- bucket_time = stats[id].first_bucket_time;
- for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
- s = bucket_time;
+ s = stats[id].total_time;
ns = do_div(s, NSEC_PER_SEC);
- SNPRINTF(p, count,
- " <%6lld.%09u: %7d (%lld-%lld)\n",
+ seq_printf(m,
+ "[cpu %u] %s:\n"
+ " count: %7d\n"
+ " total_time: %lld.%09u\n",
+ cpu, stats[id].name,
+ stats[id].count,
+ s, ns);
+
+ bucket_time = stats[id].first_bucket_time;
+ for (i = 0; i < bucket_count; i++) {
+ s = bucket_time;
+ ns = do_div(s, NSEC_PER_SEC);
+ seq_printf(m,
+ " <%6lld.%09u: %7d (%lld-%lld)\n",
+ s, ns, stats[id].bucket[i],
+ stats[id].min_time[i],
+ stats[id].max_time[i]);
+
+ bucket_time <<= bucket_shift;
+ }
+
+ seq_printf(m, " >=%6lld.%09u: %7d (%lld-%lld)\n",
s, ns, stats[id].bucket[i],
stats[id].min_time[i],
stats[id].max_time[i]);
-
- bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
}
- SNPRINTF(p, count, " >=%6lld.%09u: %7d (%lld-%lld)\n",
- s, ns, stats[id].bucket[i],
- stats[id].min_time[i],
- stats[id].max_time[i]);
-
-again:
- *start = (char *) 1;
- *eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());
-
spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
}
- return p - page;
+ return 0;
}
-#undef SNPRINTF
#define MSM_PM_STATS_RESET "reset"
-
/*
* Reset the power management statistics values.
*/
-static int msm_pm_write_proc(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t msm_pm_write_proc(struct file *file, const char __user *buffer,
+ size_t count, loff_t *off)
{
char buf[sizeof(MSM_PM_STATS_RESET)];
int ret;
@@ -231,6 +198,19 @@
}
#undef MSM_PM_STATS_RESET
+static int msm_pm_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, msm_pm_stats_show, NULL);
+}
+
+static const struct file_operations msm_pm_stats_fops = {
+ .open = msm_pm_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = msm_pm_write_proc,
+};
+
void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size)
{
unsigned int cpu;
@@ -296,11 +276,6 @@
}
- d_entry = create_proc_entry("msm_pm_stats",
- S_IRUGO | S_IWUSR | S_IWGRP, NULL);
- if (d_entry) {
- d_entry->read_proc = msm_pm_read_proc;
- d_entry->write_proc = msm_pm_write_proc;
- d_entry->data = NULL;
- }
+ d_entry = proc_create_data("msm_pm_stats", S_IRUGO | S_IWUSR | S_IWGRP,
+ NULL, &msm_pm_stats_fops, NULL);
}
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
index 64ee880..8baac01 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
@@ -250,7 +250,11 @@
return;
spin_lock_irqsave(&audio->dsp_lock, flags);
- BUG_ON(list_empty(&audio->out_queue));
+ if (list_empty(&audio->out_queue)) {
+ pr_warning("%s: ingore unexpected event from dsp\n", __func__);
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
+ return;
+ }
used_buf = list_first_entry(&audio->out_queue,
struct audio_aio_buffer_node, list);
if (token == used_buf->token) {
diff --git a/arch/arm/mach-msm/restart.c b/arch/arm/mach-msm/restart.c
index e8c7619..c85f7a1 100644
--- a/arch/arm/mach-msm/restart.c
+++ b/arch/arm/mach-msm/restart.c
@@ -47,6 +47,10 @@
#define RESTART_REASON_ADDR 0x65C
#define DLOAD_MODE_ADDR 0x0
+#define EMERGENCY_DLOAD_MODE_ADDR 0xFE0
+#define EMERGENCY_DLOAD_MAGIC1 0x322A4F99
+#define EMERGENCY_DLOAD_MAGIC2 0xC67E4350
+#define EMERGENCY_DLOAD_MAGIC3 0x77777777
#define SCM_IO_DISABLE_PMIC_ARBITER 1
@@ -66,13 +70,13 @@
static int in_panic;
static void *dload_mode_addr;
static bool dload_mode_enabled;
+static void *emergency_dload_mode_addr;
/* Download mode master kill-switch */
static int dload_set(const char *val, struct kernel_param *kp);
static int download_mode = 1;
module_param_call(download_mode, dload_set, param_get_int,
&download_mode, 0644);
-
static int panic_prep_restart(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -100,6 +104,21 @@
return dload_mode_enabled;
}
+static void enable_emergency_dload_mode(void)
+{
+ if (emergency_dload_mode_addr) {
+ __raw_writel(EMERGENCY_DLOAD_MAGIC1,
+ emergency_dload_mode_addr);
+ __raw_writel(EMERGENCY_DLOAD_MAGIC2,
+ emergency_dload_mode_addr +
+ sizeof(unsigned int));
+ __raw_writel(EMERGENCY_DLOAD_MAGIC3,
+ emergency_dload_mode_addr +
+ (2 * sizeof(unsigned int)));
+ mb();
+ }
+}
+
static int dload_set(const char *val, struct kernel_param *kp)
{
int ret;
@@ -123,6 +142,11 @@
#else
#define set_dload_mode(x) do {} while (0)
+static void enable_emergency_dload_mode(void)
+{
+ printk(KERN_ERR "dload mode is not enabled on target\n");
+}
+
static bool get_dload_mode(void)
{
return false;
@@ -239,6 +263,8 @@
unsigned long code;
code = simple_strtoul(cmd + 4, NULL, 16) & 0xff;
__raw_writel(0x6f656d00 | code, restart_reason);
+ } else if (!strncmp(cmd, "edl", 3)) {
+ enable_emergency_dload_mode();
} else {
__raw_writel(0x77665501, restart_reason);
}
@@ -306,6 +332,8 @@
#ifdef CONFIG_MSM_DLOAD_MODE
atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
dload_mode_addr = MSM_IMEM_BASE + DLOAD_MODE_ADDR;
+ emergency_dload_mode_addr = MSM_IMEM_BASE +
+ EMERGENCY_DLOAD_MODE_ADDR;
set_dload_mode(download_mode);
#endif
msm_tmr0_base = msm_timer_get_timer0_base();
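The new "edl" branch above is reached when userspace reboots with a command string delivered through the RESTART2 reboot path; a minimal, illustrative trigger (not part of this patch) could look like:

/*
 * Illustrative userspace trigger for the "edl" restart command added
 * above; the string reaches the MSM restart handler as its cmd argument
 * via LINUX_REBOOT_CMD_RESTART2. Not part of this patch.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

static int request_edl_reboot(void)
{
	sync();	/* flush filesystems before dropping to the boot ROM loader */
	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		       LINUX_REBOOT_CMD_RESTART2, "edl");
}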
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index a6e3497..e148868 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -724,6 +724,7 @@
[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
+ [SMD_TZ_RPM] = {SMD_TZ, SMD_RPM},
};
struct restart_notifier_block {
diff --git a/arch/arm/mach-msm/smd_private.c b/arch/arm/mach-msm/smd_private.c
index 94192d3..a7ef87f 100644
--- a/arch/arm/mach-msm/smd_private.c
+++ b/arch/arm/mach-msm/smd_private.c
@@ -267,7 +267,8 @@
int is_word_access_ch(unsigned ch_type)
{
if (ch_type == SMD_APPS_RPM || ch_type == SMD_MODEM_RPM ||
- ch_type == SMD_QDSP_RPM || ch_type == SMD_WCNSS_RPM)
+ ch_type == SMD_QDSP_RPM || ch_type == SMD_WCNSS_RPM ||
+ ch_type == SMD_TZ_RPM)
return 1;
else
return 0;
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index e62af21..1d12b07 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/user.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
@@ -656,23 +657,23 @@
}
#ifdef CONFIG_PROC_FS
-static int proc_read_status(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int vfp_bounce_show(struct seq_file *m, void *v)
{
- char *p = page;
- int len;
-
- p += snprintf(p, PAGE_SIZE, "%llu\n", atomic64_read(&vfp_bounce_count));
-
- len = (p - page) - off;
- if (len < 0)
- len = 0;
-
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
-
- return len;
+ seq_printf(m, "%llu\n", atomic64_read(&vfp_bounce_count));
+ return 0;
}
+
+static int vfp_bounce_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vfp_bounce_show, NULL);
+}
+
+static const struct file_operations vfp_bounce_fops = {
+ .open = vfp_bounce_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
/*
@@ -755,11 +756,9 @@
}
#ifdef CONFIG_PROC_FS
- procfs_entry = create_proc_entry("cpu/vfp_bounce", S_IRUGO, NULL);
-
- if (procfs_entry)
- procfs_entry->read_proc = proc_read_status;
- else
+ procfs_entry = proc_create("cpu/vfp_bounce", S_IRUGO, NULL,
+ &vfp_bounce_fops);
+ if (!procfs_entry)
pr_err("Failed to create procfs node for VFP bounce reporting\n");
#endif
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 5401814..7f159f0 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -21,6 +21,7 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/io.h>
#define to_dev(obj) container_of(obj, struct device, kobj)
@@ -98,6 +99,8 @@
struct page **pages;
int nr_pages;
int page_array_size;
+ phys_addr_t dest_addr;
+ size_t dest_size;
struct timer_list timeout;
struct device dev;
bool nowait;
@@ -239,6 +242,10 @@
switch (loading) {
case 1:
+ if (fw_priv->dest_addr) {
+ set_bit(FW_STATUS_LOADING, &fw_priv->status);
+ break;
+ }
firmware_free_data(fw_priv->fw);
memset(fw_priv->fw, 0, sizeof(struct firmware));
/* If the pages are not owned by 'struct firmware' */
@@ -252,6 +259,11 @@
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
+ if (fw_priv->dest_addr) {
+ complete(&fw_priv->completion);
+ clear_bit(FW_STATUS_LOADING, &fw_priv->status);
+ break;
+ }
vunmap(fw_priv->fw->data);
fw_priv->fw->data = vmap(fw_priv->pages,
fw_priv->nr_pages,
@@ -286,6 +298,67 @@
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
+static int __firmware_data_rw(struct firmware_priv *fw_priv, char *buffer,
+ loff_t *offset, size_t count, int read)
+{
+ u8 __iomem *fw_buf;
+ int retval = count;
+
+ if ((*offset + count) > fw_priv->dest_size) {
+ pr_debug("%s: Failed size check.\n", __func__);
+ retval = -EINVAL;
+ goto out;
+ }
+
+ fw_buf = ioremap(fw_priv->dest_addr + *offset, count);
+ if (!fw_buf) {
+ pr_debug("%s: Failed ioremap.\n", __func__);
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ if (read)
+ memcpy(buffer, fw_buf, count);
+ else
+ memcpy(fw_buf, buffer, count);
+
+ *offset += count;
+ iounmap(fw_buf);
+
+out:
+ return retval;
+}
+
+static ssize_t firmware_direct_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = to_dev(kobj);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ struct firmware *fw;
+ ssize_t ret_count;
+
+ mutex_lock(&fw_lock);
+ fw = fw_priv->fw;
+
+ if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
+ ret_count = -ENODEV;
+ goto out;
+ }
+
+ if (offset > fw->size) {
+ ret_count = 0;
+ goto out;
+ }
+ if (count > fw->size - offset)
+ count = fw->size - offset;
+
+ ret_count = __firmware_data_rw(fw_priv, buffer, &offset, count, 1);
+out:
+ mutex_unlock(&fw_lock);
+ return ret_count;
+}
+
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
@@ -368,6 +441,35 @@
return 0;
}
+static ssize_t firmware_direct_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = to_dev(kobj);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ struct firmware *fw;
+ ssize_t retval;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ mutex_lock(&fw_lock);
+ fw = fw_priv->fw;
+ if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ retval = __firmware_data_rw(fw_priv, buffer, &offset, count, 0);
+ if (retval < 0)
+ goto out;
+
+ fw->size = max_t(size_t, offset, fw->size);
+out:
+ mutex_unlock(&fw_lock);
+ return retval;
+}
+
/**
* firmware_data_write - write method for firmware
* @filp: open sysfs file
@@ -433,6 +535,13 @@
.write = firmware_data_write,
};
+static struct bin_attribute firmware_direct_attr_data = {
+ .attr = { .name = "data", .mode = 0644 },
+ .size = 0,
+ .read = firmware_direct_read,
+ .write = firmware_direct_write,
+};
+
static void firmware_class_timeout(u_long data)
{
struct firmware_priv *fw_priv = (struct firmware_priv *) data;
@@ -511,6 +620,8 @@
{
int retval = 0;
struct device *f_dev = &fw_priv->dev;
+ struct bin_attribute *fw_attr_data = fw_priv->dest_addr ?
+ &firmware_direct_attr_data : &firmware_attr_data;
dev_set_uevent_suppress(f_dev, true);
@@ -523,7 +634,7 @@
goto err_put_dev;
}
- retval = device_create_bin_file(f_dev, &firmware_attr_data);
+ retval = device_create_bin_file(f_dev, fw_attr_data);
if (retval) {
dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
goto err_del_dev;
@@ -558,7 +669,7 @@
device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
- device_remove_bin_file(f_dev, &firmware_attr_data);
+ device_remove_bin_file(f_dev, fw_attr_data);
err_del_dev:
device_del(f_dev);
err_put_dev:
@@ -566,6 +677,35 @@
return retval;
}
+static int
+__request_firmware(const struct firmware **firmware_p, const char *name,
+ struct device *device, phys_addr_t dest_addr, size_t size)
+{
+ struct firmware_priv *fw_priv;
+ int ret;
+
+ fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
+ false);
+ if (IS_ERR_OR_NULL(fw_priv))
+ return PTR_RET(fw_priv);
+
+ fw_priv->dest_addr = dest_addr;
+ fw_priv->dest_size = size;
+
+ ret = usermodehelper_read_trylock();
+ if (WARN_ON(ret)) {
+ dev_err(device, "firmware: %s will not be loaded\n", name);
+ } else {
+ ret = _request_firmware_load(fw_priv, true,
+ firmware_loading_timeout());
+ usermodehelper_read_unlock();
+ }
+ if (ret)
+ _request_firmware_cleanup(firmware_p);
+
+ return ret;
+}
+
/**
* request_firmware: - send firmware request and wait for it
* @firmware_p: pointer to firmware image
@@ -583,27 +723,33 @@
**/
int
request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device)
+ struct device *device)
{
- struct firmware_priv *fw_priv;
+ return __request_firmware(firmware_p, name, device, 0, 0);
+}
+
+/**
+ * request_firmware_direct: - request firmware and load it directly to a physical address
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ * @dest_addr: Destination address for the firmware
+ * @dest_size: Size of the memory region at @dest_addr
+ *
+ * Similar to request_firmware, except takes in a buffer address and
+ * copies firmware data directly to that buffer. Returns the size of
+ * the firmware that was loaded at dest_addr.
+*/
+int request_firmware_direct(const char *name, struct device *device,
+ phys_addr_t dest_addr, size_t dest_size)
+{
+ const struct firmware *fp = NULL;
int ret;
- fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
- false);
- if (IS_ERR_OR_NULL(fw_priv))
- return PTR_RET(fw_priv);
-
- ret = usermodehelper_read_trylock();
- if (WARN_ON(ret)) {
- dev_err(device, "firmware: %s will not be loaded\n", name);
- } else {
- ret = _request_firmware_load(fw_priv, true,
- firmware_loading_timeout());
- usermodehelper_read_unlock();
- }
+ ret = __request_firmware(&fp, name, device, dest_addr, dest_size);
if (ret)
- _request_firmware_cleanup(firmware_p);
-
+ return ret;
+ ret = fp->size;
+ release_firmware(fp);
return ret;
}
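A minimal sketch of how a loader might consume the request_firmware_direct() interface added above; the firmware name, carveout address, and size are illustrative, and only the function signature follows this patch:

/*
 * Illustrative caller of request_firmware_direct(); loads the image
 * straight into a physically contiguous region instead of vmalloc'd
 * pages. Not part of this patch.
 */
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/sizes.h>

static int example_load_to_carveout(struct device *dev)
{
	int fw_size;

	/* Returns the number of bytes loaded at the carveout, or -errno */
	fw_size = request_firmware_direct("example.fw", dev,
					  (phys_addr_t)0x0d000000, SZ_1M);
	if (fw_size < 0)
		return fw_size;

	/* fw_size bytes now reside in the carveout; there is no struct
	 * firmware to release, the helper has already dropped it.
	 */
	return 0;
}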
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 0c67ed8..a779b24 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -54,7 +54,7 @@
struct diag_dci_data_info *dci_data_smd;
struct mutex dci_stat_mutex;
-void diag_dci_smd_record_info(int read_bytes)
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type)
{
static int curr_dci_data_smd;
static unsigned long iteration;
@@ -67,13 +67,14 @@
temp_data += curr_dci_data_smd;
temp_data->iteration = iteration + 1;
temp_data->data_size = read_bytes;
+ temp_data->ch_type = ch_type;
diag_get_timestamp(temp_data->time_stamp);
curr_dci_data_smd++;
iteration++;
mutex_unlock(&dci_stat_mutex);
}
#else
-void diag_dci_smd_record_info(int read_bytes) { }
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type) { }
#endif
/* Process the data read from the smd dci channel */
@@ -83,7 +84,7 @@
int read_bytes, dci_pkt_len, i;
uint8_t recv_pkt_cmd_code;
- diag_dci_smd_record_info(recd_bytes);
+ diag_dci_smd_record_info(recd_bytes, (uint8_t)smd_info->type);
/* Each SMD read can have multiple DCI packets */
read_bytes = 0;
while (read_bytes < recd_bytes) {
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 520995b..e2c4158 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -101,6 +101,7 @@
unsigned long iteration;
int data_size;
char time_stamp[DIAG_TS_SIZE];
+ uint8_t ch_type;
};
extern struct diag_dci_data_info *dci_data_smd;
@@ -135,7 +136,7 @@
void create_dci_event_mask_tbl(unsigned char *tbl_buf);
int diag_dci_clear_event_mask(void);
int diag_dci_query_event_mask(uint16_t event_id);
-void diag_dci_smd_record_info(int read_bytes);
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type);
uint8_t diag_dci_get_cumulative_real_time(void);
int diag_dci_set_real_time(int client_id, uint8_t real_time);
/* Functions related to DCI wakeup sources */
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 3d1a6cd..a24fc54 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -81,6 +81,11 @@
"LPASS STM requested state: %d\n"
"RIVA STM requested state: %d\n"
"APPS STM requested state: %d\n"
+ "supports apps hdlc encoding: %d\n"
+ "Modem hdlc encoding: %d\n"
+ "Lpass hdlc encoding: %d\n"
+ "RIVA hdlc encoding: %d\n"
+ "Modem CMD hdlc encoding: %d\n"
"logging_mode: %d\n"
"real_time_mode: %d\n",
(unsigned int)driver->smd_data[MODEM_DATA].ch,
@@ -123,6 +128,11 @@
driver->stm_state_requested[LPASS_DATA],
driver->stm_state_requested[WCNSS_DATA],
driver->stm_state_requested[APPS_DATA],
+ driver->supports_apps_hdlc_encoding,
+ driver->smd_data[MODEM_DATA].encode_hdlc,
+ driver->smd_data[LPASS_DATA].encode_hdlc,
+ driver->smd_data[WCNSS_DATA].encode_hdlc,
+ driver->smd_cmd[MODEM_DATA].encode_hdlc,
driver->logging_mode,
driver->real_time_mode);
@@ -202,11 +212,13 @@
if (temp_data->iteration != 0) {
bytes_written = scnprintf(
buf + bytes_in_buf, bytes_remaining,
- "i %-20ld\t"
- "s %-20d\t"
- "t %-20s\n",
+ "i %-10ld\t"
+ "s %-10d\t"
+ "c %-10d\t"
+ "t %-15s\n",
temp_data->iteration,
temp_data->data_size,
+ temp_data->ch_type,
temp_data->time_stamp);
bytes_in_buf += bytes_written;
bytes_remaining -= bytes_written;
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index aa1d847..c91095e 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -491,6 +491,8 @@
feature_bytes[0] |= F_DIAG_LOG_ON_DEMAND_RSP_ON_MASTER;
feature_bytes[0] |= driver->supports_separate_cmdrsp ?
F_DIAG_REQ_RSP_CHANNEL : 0;
+ feature_bytes[0] |= driver->supports_apps_hdlc_encoding ?
+ F_DIAG_HDLC_ENCODE_IN_APPS_MASK : 0;
feature_bytes[1] |= F_DIAG_OVER_STM;
memcpy(buf+header_size, &feature_bytes, FEATURE_MASK_LEN_BYTES);
total_len = header_size + FEATURE_MASK_LEN_BYTES;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 3ae56c5..7ef1d80 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -27,7 +27,7 @@
/* Size of the USB buffers used for read and write*/
#define USB_MAX_OUT_BUF 4096
-#define APPS_BUF_SIZE 2000
+#define APPS_BUF_SIZE 4096
#define IN_BUF_SIZE 16384
#define MAX_IN_BUF_SIZE 32768
#define MAX_SYNC_OBJ_NAME_SIZE 32
@@ -217,6 +217,7 @@
int peripheral; /* The peripheral this smd channel communicates with */
int type; /* The type of smd channel (data, control, dci) */
uint16_t peripheral_mask;
+ int encode_hdlc; /* Whether data is raw and needs to be hdlc encoded */
smd_channel_t *ch;
smd_channel_t *ch_save;
@@ -229,11 +230,16 @@
unsigned char *buf_in_1;
unsigned char *buf_in_2;
+ unsigned char *buf_in_1_raw;
+ unsigned char *buf_in_2_raw;
+
struct diag_request *write_ptr_1;
struct diag_request *write_ptr_2;
struct diag_nrt_wake_lock nrt_lock;
+ struct workqueue_struct *wq;
+
struct work_struct diag_read_smd_work;
struct work_struct diag_notify_update_smd_work;
int notify_context;
@@ -270,6 +276,7 @@
unsigned int buf_tbl_size;
int use_device_tree;
int supports_separate_cmdrsp;
+ int supports_apps_hdlc_encoding;
/* The state requested in the STM command */
int stm_state_requested[NUM_STM_PROCESSORS];
/* The current STM state */
@@ -301,7 +308,7 @@
mempool_t *diag_hdlc_pool;
mempool_t *diag_user_pool;
mempool_t *diag_write_struct_pool;
- struct mutex diagmem_mutex;
+ spinlock_t diag_mem_lock;
int count;
int count_hdlc_pool;
int count_user_pool;
@@ -384,7 +391,6 @@
struct diag_request *write_ptr_mdm;
#endif
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
- spinlock_t hsic_ready_spinlock;
/* common for all bridges */
struct work_struct diag_connect_work;
struct work_struct diag_disconnect_work;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 6e70062..24d7fac 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -131,12 +131,8 @@
mutex_lock(&driver->diagchar_mutex);
if (buf_hdlc) {
err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
- if (err) {
- /*Free the buffer right away if write failed */
+ if (err)
diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
- diagmem_free(driver, (unsigned char *)driver->
- write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
- }
buf_hdlc = NULL;
#ifdef DIAG_DEBUG
pr_debug("diag: Number of bytes written "
@@ -543,58 +539,10 @@
return exit_stat;
}
-static void diag_update_data_ready(int index)
-{
- int clear_bit = 1;
- unsigned long hsic_lock_flags;
- unsigned long ready_lock_flags;
- int i;
-
- /*
- * Determine whether the data_ready USER_SPACE_DATA_TYPE bit
- * should be updated/cleared or not. There is a race condition that
- * can occur when in MEMORY_DEVICE_MODE with the hsic data.
- * When new hsic data arrives we prepare the data so it can
- * later be copied to userspace. We set the USER_SPACE_DATA_TYPE
- * bit in data ready at that time. We later copy the hsic data
- * to userspace and clear the USER_SPACE_DATA_TYPE bit in
- * data ready. The race condition occurs if new data arrives (bit set)
- * while we are processing the current data and sending
- * it to userspace (bit clear). The clearing of the bit can
- * overwrite the setting of the bit.
- */
-
- spin_lock_irqsave(&driver->hsic_ready_spinlock, ready_lock_flags);
- for (i = 0; i < MAX_HSIC_CH; i++) {
- if (diag_hsic[i].hsic_inited) {
- spin_lock_irqsave(&diag_hsic[i].hsic_spinlock,
- hsic_lock_flags);
- if ((diag_hsic[i].num_hsic_buf_tbl_entries > 0) &&
- diag_hsic[i].hsic_device_enabled &&
- diag_hsic[i].hsic_ch) {
- /* New data do not clear the bit */
- clear_bit = 0;
- }
- spin_unlock_irqrestore(&diag_hsic[i].hsic_spinlock,
- hsic_lock_flags);
- if (!clear_bit)
- break;
- }
- }
-
- if (clear_bit)
- driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
-
- spin_unlock_irqrestore(&driver->hsic_ready_spinlock, ready_lock_flags);
-}
#else
inline uint16_t diag_get_remote_device_mask(void) { return 0; }
inline int diag_copy_remote(char __user *buf, size_t count, int *pret,
int *pnum_data) { return 0; }
-static void diag_update_data_ready(int index)
-{
- driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
-}
#endif
int diag_command_reg(unsigned long ioarg)
@@ -1216,8 +1164,8 @@
return -EINVAL;
}
- wait_event_interruptible(driver->wait_q,
- driver->data_ready[index]);
+ wait_event_interruptible(driver->wait_q, driver->data_ready[index]);
+
mutex_lock(&driver->diagchar_mutex);
clear_read_wakelock = 0;
@@ -1227,6 +1175,7 @@
pr_debug("diag: process woken up\n");
/*Copy the type of data being passed*/
data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
+ driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
/* place holder for number of data field */
ret += 4;
@@ -1362,10 +1311,9 @@
/* copy number of data fields */
COPY_USER_SPACE_OR_EXIT(buf+4, num_data, 4);
ret -= 4;
- diag_update_data_ready(index);
for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) {
if (driver->smd_data[i].ch)
- queue_work(driver->diag_wq,
+ queue_work(driver->smd_data[i].wq,
&(driver->smd_data[i].diag_read_smd_work));
}
#ifdef CONFIG_DIAG_SDIO_PIPE
@@ -1866,10 +1814,6 @@
if (HDLC_OUT_BUF_SIZE - driver->used <= (2*payload_size) + 3) {
err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
if (err) {
- /*Free the buffer right away if write failed */
- if (driver->logging_mode == USB_MODE)
- diagmem_free(driver, (unsigned char *)driver->
- write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
ret = -EIO;
goto fail_free_hdlc;
}
@@ -1894,10 +1838,6 @@
(unsigned int)(buf_hdlc + HDLC_OUT_BUF_SIZE)) {
err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
if (err) {
- /*Free the buffer right away if write failed */
- if (driver->logging_mode == USB_MODE)
- diagmem_free(driver, (unsigned char *)driver->
- write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
ret = -EIO;
goto fail_free_hdlc;
}
@@ -1919,10 +1859,6 @@
if (pkt_type == DATA_TYPE_RESPONSE) {
err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
if (err) {
- /*Free the buffer right away if write failed */
- if (driver->logging_mode == USB_MODE)
- diagmem_free(driver, (unsigned char *)driver->
- write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
ret = -EIO;
goto fail_free_hdlc;
}
@@ -2176,7 +2112,6 @@
diag_masks_init();
diagfwd_init();
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
- spin_lock_init(&driver->hsic_ready_spinlock);
diagfwd_bridge_init(HSIC);
diagfwd_bridge_init(HSIC_2);
/* register HSIC device */
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index de4433b..a1f6b2c 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -61,14 +61,21 @@
/* Number of entries in table of buffers */
static unsigned int buf_tbl_size = 10;
struct diag_master_table entry;
-struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
-struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
int wrap_enabled;
uint16_t wrap_count;
void encode_rsp_and_send(int buf_length)
{
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
struct diag_smd_info *data = &(driver->smd_data[MODEM_DATA]);
+
+ if (buf_length > APPS_BUF_SIZE) {
+ pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+ __func__, buf_length, APPS_BUF_SIZE);
+ return;
+ }
+
send.state = DIAG_STATE_START;
send.pkt = driver->apps_rsp_buf;
send.last = (void *)(driver->apps_rsp_buf + buf_length);
@@ -237,6 +244,124 @@
}
}
}
+int diag_add_hdlc_encoding(struct diag_smd_info *smd_info, void *buf,
+ int total_recd, uint8_t *encode_buf,
+ int *encoded_length)
+{
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+ struct data_header {
+ uint8_t control_char;
+ uint8_t version;
+ uint16_t length;
+ };
+ struct data_header *header;
+ int header_size = sizeof(struct data_header);
+ uint8_t *end_control_char;
+ uint8_t *payload;
+ uint8_t *temp_buf;
+ uint8_t *temp_encode_buf;
+ int src_pkt_len;
+ int encoded_pkt_length;
+ int max_size;
+ int total_processed = 0;
+ int bytes_remaining;
+ int success = 1;
+
+ temp_buf = buf;
+ temp_encode_buf = encode_buf;
+ bytes_remaining = *encoded_length;
+ while (total_processed < total_recd) {
+ header = (struct data_header *)temp_buf;
+ /* Perform initial error checking */
+ if (header->control_char != CONTROL_CHAR ||
+ header->version != 1) {
+ success = 0;
+ break;
+ }
+ payload = temp_buf + header_size;
+ end_control_char = payload + header->length;
+ if (*end_control_char != CONTROL_CHAR) {
+ success = 0;
+ break;
+ }
+
+ max_size = 2 * header->length + 3;
+ if (bytes_remaining < max_size) {
+ pr_err("diag: In %s, Not enough room to encode remaining data for peripheral: %d, bytes available: %d, max_size: %d\n",
+ __func__, smd_info->peripheral,
+ bytes_remaining, max_size);
+ success = 0;
+ break;
+ }
+
+ /* Prepare for encoding the data */
+ send.state = DIAG_STATE_START;
+ send.pkt = payload;
+ send.last = (void *)(payload + header->length - 1);
+ send.terminate = 1;
+
+ enc.dest = temp_encode_buf;
+ enc.dest_last = (void *)(temp_encode_buf + max_size);
+ enc.crc = 0;
+ diag_hdlc_encode(&send, &enc);
+
+ /* Prepare for next packet */
+ src_pkt_len = (header_size + header->length + 1);
+ total_processed += src_pkt_len;
+ temp_buf += src_pkt_len;
+
+ encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf;
+ bytes_remaining -= encoded_pkt_length;
+ temp_encode_buf = enc.dest;
+ }
+
+ *encoded_length = (int)(temp_encode_buf - encode_buf);
+
+ return success;
+}
+
+static int check_bufsize_for_encoding(struct diag_smd_info *smd_info, void *buf,
+ int total_recd)
+{
+ int buf_size = IN_BUF_SIZE;
+ int max_size = 2 * total_recd + 3;
+ unsigned char *temp_buf;
+
+ if (max_size > IN_BUF_SIZE) {
+ if (max_size < MAX_IN_BUF_SIZE) {
+ pr_err("diag: In %s, SMD sending packet of %d bytes that may expand to %d bytes, peripheral: %d\n",
+ __func__, total_recd, max_size,
+ smd_info->peripheral);
+ if (buf == smd_info->buf_in_1_raw) {
+ temp_buf = krealloc(smd_info->buf_in_1,
+ max_size, GFP_KERNEL);
+ if (temp_buf) {
+ smd_info->buf_in_1 = temp_buf;
+ buf_size = max_size;
+ } else {
+ buf_size = 0;
+ }
+ } else {
+ temp_buf = krealloc(smd_info->buf_in_2,
+ max_size, GFP_KERNEL);
+ if (temp_buf) {
+ smd_info->buf_in_2 = temp_buf;
+ buf_size = max_size;
+ } else {
+ buf_size = 0;
+ }
+ }
+ } else {
+			pr_err("diag: In %s, SMD sending packet of size %d. HDLC encoding can expand to more than %d bytes, peripheral: %d. Discarding.\n",
+ __func__, max_size, MAX_IN_BUF_SIZE,
+ smd_info->peripheral);
+ buf_size = 0;
+ }
+ }
+
+ return buf_size;
+}
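
The raw packets consumed by diag_add_hdlc_encoding() above are framed with a four-byte header (control character, version, 16-bit length) and a trailing control character, and the apps-side encoder budgets at most 2 * length + 3 bytes of HDLC output per packet, which is exactly what check_bufsize_for_encoding() sizes against. A minimal standalone sketch of that layout and sizing; the 0x7E control-character value is an assumption, the driver only compares against its own CONTROL_CHAR define:

#include <stdint.h>

/* Mirrors the on-the-wire layout checked by diag_add_hdlc_encoding() */
struct raw_pkt_header {
	uint8_t  control_char;	/* must match CONTROL_CHAR (assumed 0x7E) */
	uint8_t  version;	/* must be 1 */
	uint16_t length;	/* payload bytes between header and trailer */
};

/*
 * Worst case for one encoded packet: every payload byte may be escaped
 * (2x), plus the trailing CRC/control bytes (+3). This is the same
 * "2 * length + 3" budget used by check_bufsize_for_encoding().
 */
static inline int hdlc_worst_case(const struct raw_pkt_header *hdr)
{
	return 2 * hdr->length + 3;
}
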
void process_lock_enabling(struct diag_nrt_wake_lock *lock, int real_time)
{
@@ -327,7 +452,7 @@
/* Process the data read from the smd data channel */
int diag_process_smd_read_data(struct diag_smd_info *smd_info, void *buf,
- int total_recd)
+ int total_recd)
{
struct diag_request *write_ptr_modem = NULL;
int *in_busy_ptr = 0;
@@ -345,26 +470,74 @@
return 0;
}
- if (smd_info->buf_in_1 == buf) {
- write_ptr_modem = smd_info->write_ptr_1;
- in_busy_ptr = &smd_info->in_busy_1;
- } else if (smd_info->buf_in_2 == buf) {
- write_ptr_modem = smd_info->write_ptr_2;
- in_busy_ptr = &smd_info->in_busy_2;
- } else {
- pr_err("diag: In %s, no match for in_busy_1\n", __func__);
- }
+ /* If the data is already hdlc encoded */
+ if (!smd_info->encode_hdlc) {
+ if (smd_info->buf_in_1 == buf) {
+ write_ptr_modem = smd_info->write_ptr_1;
+ in_busy_ptr = &smd_info->in_busy_1;
+ } else if (smd_info->buf_in_2 == buf) {
+ write_ptr_modem = smd_info->write_ptr_2;
+ in_busy_ptr = &smd_info->in_busy_2;
+ } else {
+ pr_err("diag: In %s, no match for in_busy_1, peripheral: %d\n",
+ __func__, smd_info->peripheral);
+ }
- if (write_ptr_modem) {
- write_ptr_modem->length = total_recd;
- *in_busy_ptr = 1;
- err = diag_device_write(buf, smd_info->peripheral,
- write_ptr_modem);
- if (err) {
- /* Free up the buffer for future use */
- *in_busy_ptr = 0;
- pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n",
- __func__, err);
+ if (write_ptr_modem) {
+ write_ptr_modem->length = total_recd;
+ *in_busy_ptr = 1;
+ err = diag_device_write(buf, smd_info->peripheral,
+ write_ptr_modem);
+ if (err) {
+ /* Free up the buffer for future use */
+ *in_busy_ptr = 0;
+ pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n",
+ __func__, err);
+ }
+ }
+ } else {
+ /* The data is raw and needs to be hdlc encoded */
+ if (smd_info->buf_in_1_raw == buf) {
+ write_ptr_modem = smd_info->write_ptr_1;
+ in_busy_ptr = &smd_info->in_busy_1;
+ } else if (smd_info->buf_in_2_raw == buf) {
+ write_ptr_modem = smd_info->write_ptr_2;
+ in_busy_ptr = &smd_info->in_busy_2;
+ } else {
+ pr_err("diag: In %s, no match for in_busy_1, peripheral: %d\n",
+ __func__, smd_info->peripheral);
+ }
+
+ if (write_ptr_modem) {
+ int success = 0;
+ int write_length = 0;
+ unsigned char *write_buf = NULL;
+
+ write_length = check_bufsize_for_encoding(smd_info, buf,
+ total_recd);
+ if (write_length) {
+ write_buf = (buf == smd_info->buf_in_1_raw) ?
+ smd_info->buf_in_1 : smd_info->buf_in_2;
+ success = diag_add_hdlc_encoding(smd_info, buf,
+ total_recd, write_buf,
+ &write_length);
+ if (success) {
+ write_ptr_modem->length = write_length;
+ *in_busy_ptr = 1;
+ err = diag_device_write(write_buf,
+ smd_info->peripheral,
+ write_ptr_modem);
+ if (err) {
+ /*
+ * Free up the buffer for
+ * future use
+ */
+ *in_busy_ptr = 0;
+ pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n",
+ __func__, err);
+ }
+ }
+ }
}
}
@@ -384,11 +557,32 @@
return;
}
- if (!smd_info->in_busy_1)
+ /* Determine the buffer to read the data into. */
+ if (smd_info->type == SMD_DATA_TYPE) {
+ /* If the data is raw and not hdlc encoded */
+ if (smd_info->encode_hdlc) {
+ if (!smd_info->in_busy_1)
+ buf = smd_info->buf_in_1_raw;
+ else if (!smd_info->in_busy_2)
+ buf = smd_info->buf_in_2_raw;
+ } else {
+ if (!smd_info->in_busy_1)
+ buf = smd_info->buf_in_1;
+ else if (!smd_info->in_busy_2)
+ buf = smd_info->buf_in_2;
+ }
+ } else if (smd_info->type == SMD_CMD_TYPE) {
+ /* If the data is raw and not hdlc encoded */
+ if (smd_info->encode_hdlc) {
+ if (!smd_info->in_busy_1)
+ buf = smd_info->buf_in_1_raw;
+ } else {
+ if (!smd_info->in_busy_1)
+ buf = smd_info->buf_in_1;
+ }
+ } else if (!smd_info->in_busy_1) {
buf = smd_info->buf_in_1;
- else if (!smd_info->in_busy_2 &&
- (smd_info->type == SMD_DATA_TYPE))
- buf = smd_info->buf_in_2;
+ }
if (!buf && (smd_info->type == SMD_DCI_TYPE ||
smd_info->type == SMD_DCI_CMD_TYPE))
@@ -487,32 +681,12 @@
diag_smd_send_req(smd_info);
}
-#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
-static void diag_mem_dev_mode_ready_update(int index, int hsic_updated)
-{
- if (hsic_updated) {
- unsigned long flags;
- spin_lock_irqsave(&driver->hsic_ready_spinlock, flags);
- driver->data_ready[index] |= USER_SPACE_DATA_TYPE;
- spin_unlock_irqrestore(&driver->hsic_ready_spinlock, flags);
- } else {
- driver->data_ready[index] |= USER_SPACE_DATA_TYPE;
- }
-}
-#else
-static void diag_mem_dev_mode_ready_update(int index, int hsic_updated)
-{
- (void) hsic_updated;
- driver->data_ready[index] |= USER_SPACE_DATA_TYPE;
-}
-#endif
int diag_device_write(void *buf, int data_type, struct diag_request *write_ptr)
{
int i, err = 0, index;
index = 0;
if (driver->logging_mode == MEMORY_DEVICE_MODE) {
- int hsic_updated = 0;
if (data_type == APPS_DATA) {
for (i = 0; i < driver->buf_tbl_size; i++)
if (driver->buf_tbl[i].length == 0) {
@@ -533,7 +707,6 @@
else if (data_type == HSIC_DATA || data_type == HSIC_2_DATA) {
unsigned long flags;
int foundIndex = -1;
- hsic_updated = 1;
index = data_type - HSIC_DATA;
spin_lock_irqsave(&diag_hsic[index].hsic_spinlock,
flags);
@@ -566,8 +739,8 @@
driver->logging_process_id)
break;
if (i < driver->num_clients) {
- diag_mem_dev_mode_ready_update(i, hsic_updated);
pr_debug("diag: wake up logging process\n");
+ driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
wake_up_interruptible(&driver->wait_q);
} else
return -EINVAL;
@@ -575,7 +748,7 @@
if ((data_type >= MODEM_DATA) && (data_type <= WCNSS_DATA)) {
driver->smd_data[data_type].in_busy_1 = 0;
driver->smd_data[data_type].in_busy_2 = 0;
- queue_work(driver->diag_wq,
+ queue_work(driver->smd_data[data_type].wq,
&(driver->smd_data[data_type].
diag_read_smd_work));
if (data_type == MODEM_DATA &&
@@ -616,8 +789,16 @@
driver->write_ptr_svc->buf = buf;
err = usb_diag_write(driver->legacy_ch,
driver->write_ptr_svc);
- } else
- err = -1;
+ /* Free the buffer if write failed */
+ if (err) {
+ diagmem_free(driver,
+ (unsigned char *)driver->
+ write_ptr_svc,
+ POOL_TYPE_WRITE_STRUCT);
+ }
+ } else {
+ err = -ENOMEM;
+ }
} else if ((data_type >= MODEM_DATA) &&
(data_type <= WCNSS_DATA)) {
write_ptr->buf = buf;
@@ -1315,10 +1496,12 @@
{
int i;
- if (index > 490) {
- pr_err("diag: error response too huge, aborting\n");
+	/* -1 to accommodate the first byte 0x13 */
+ if (index > APPS_BUF_SIZE-1) {
+ pr_err("diag: cannot send err rsp, huge length: %d\n", index);
return;
}
+
driver->apps_rsp_buf[0] = 0x13; /* error code 13 */
for (i = 0; i < index; i++)
driver->apps_rsp_buf[i+1] = *(driver->hdlc_buf+i);
@@ -1430,7 +1613,7 @@
driver->smd_data[i].in_busy_2 = 0;
if (queue)
/* Poll SMD data channels to check for data */
- queue_work(driver->diag_wq,
+ queue_work(driver->smd_data[i].wq,
&(driver->smd_data[i].diag_read_smd_work));
}
@@ -1540,19 +1723,24 @@
for (i = 0; i < num_channels; i++) {
if (buf == (void *)data[i].buf_in_1) {
data[i].in_busy_1 = 0;
- queue_work(driver->diag_wq,
- &(data[i].diag_read_smd_work));
found_it = 1;
break;
} else if (buf == (void *)data[i].buf_in_2) {
data[i].in_busy_2 = 0;
- queue_work(driver->diag_wq,
- &(data[i].diag_read_smd_work));
found_it = 1;
break;
}
}
+ if (found_it) {
+ if (data[i].type == SMD_DATA_TYPE)
+ queue_work(data[i].wq,
+ &(data[i].diag_read_smd_work));
+ else
+ queue_work(driver->diag_wq,
+ &(data[i].diag_read_smd_work));
+ }
+
return found_it;
}
@@ -1582,6 +1770,9 @@
}
#endif
if (!found_it) {
+ if (driver->logging_mode != USB_MODE)
+ pr_debug("diag: freeing buffer when not in usb mode\n");
+
diagmem_free(driver, (unsigned char *)buf,
POOL_TYPE_HDLC);
diagmem_free(driver, (unsigned char *)diag_write_ptr,
@@ -1725,8 +1916,12 @@
diag_dci_try_activate_wakeup_source(smd_info->ch);
queue_work(driver->diag_dci_wq,
&(smd_info->diag_read_smd_work));
- } else
+ } else if (smd_info->type == SMD_DATA_TYPE) {
+ queue_work(smd_info->wq,
+ &(smd_info->diag_read_smd_work));
+ } else {
queue_work(driver->diag_wq, &(smd_info->diag_read_smd_work));
+ }
}
static int diag_smd_probe(struct platform_device *pdev)
@@ -1854,8 +2049,10 @@
void diag_smd_destructor(struct diag_smd_info *smd_info)
{
- if (smd_info->type == SMD_DATA_TYPE)
+ if (smd_info->type == SMD_DATA_TYPE) {
wake_lock_destroy(&smd_info->nrt_lock.read_lock);
+ destroy_workqueue(smd_info->wq);
+ }
if (smd_info->ch)
smd_close(smd_info->ch);
@@ -1866,6 +2063,8 @@
kfree(smd_info->buf_in_2);
kfree(smd_info->write_ptr_1);
kfree(smd_info->write_ptr_2);
+ kfree(smd_info->buf_in_1_raw);
+ kfree(smd_info->buf_in_2_raw);
}
int diag_smd_constructor(struct diag_smd_info *smd_info, int peripheral,
@@ -1873,6 +2072,7 @@
{
smd_info->peripheral = peripheral;
smd_info->type = type;
+ smd_info->encode_hdlc = 0;
mutex_init(&smd_info->smd_ch_mutex);
switch (peripheral) {
@@ -1925,6 +2125,58 @@
goto err;
kmemleak_not_leak(smd_info->write_ptr_2);
}
+ if (driver->supports_apps_hdlc_encoding) {
+ /* In support of hdlc encoding */
+ if (smd_info->buf_in_1_raw == NULL) {
+ smd_info->buf_in_1_raw = kzalloc(IN_BUF_SIZE,
+ GFP_KERNEL);
+ if (smd_info->buf_in_1_raw == NULL)
+ goto err;
+ kmemleak_not_leak(smd_info->buf_in_1_raw);
+ }
+ if (smd_info->buf_in_2_raw == NULL) {
+ smd_info->buf_in_2_raw = kzalloc(IN_BUF_SIZE,
+ GFP_KERNEL);
+ if (smd_info->buf_in_2_raw == NULL)
+ goto err;
+ kmemleak_not_leak(smd_info->buf_in_2_raw);
+ }
+ }
+ }
+
+ if (smd_info->type == SMD_CMD_TYPE &&
+ driver->supports_apps_hdlc_encoding) {
+ /* In support of hdlc encoding */
+ if (smd_info->buf_in_1_raw == NULL) {
+ smd_info->buf_in_1_raw = kzalloc(IN_BUF_SIZE,
+ GFP_KERNEL);
+ if (smd_info->buf_in_1_raw == NULL)
+ goto err;
+ kmemleak_not_leak(smd_info->buf_in_1_raw);
+ }
+ }
+
+ /* The smd data type needs separate work queues for reads */
+ if (type == SMD_DATA_TYPE) {
+ switch (peripheral) {
+ case MODEM_DATA:
+ smd_info->wq = create_singlethread_workqueue(
+ "diag_modem_data_read_wq");
+ break;
+ case LPASS_DATA:
+ smd_info->wq = create_singlethread_workqueue(
+ "diag_lpass_data_read_wq");
+ break;
+ case WCNSS_DATA:
+ smd_info->wq = create_singlethread_workqueue(
+ "diag_wcnss_data_read_wq");
+ break;
+ default:
+ smd_info->wq = NULL;
+ break;
+ }
+ } else {
+ smd_info->wq = NULL;
}
INIT_WORK(&(smd_info->diag_read_smd_work), diag_read_smd_work_fn);
@@ -2017,6 +2269,8 @@
kfree(smd_info->buf_in_2);
kfree(smd_info->write_ptr_1);
kfree(smd_info->write_ptr_2);
+ kfree(smd_info->buf_in_1_raw);
+ kfree(smd_info->buf_in_2_raw);
return 0;
}
@@ -2039,6 +2293,7 @@
driver->buf_tbl_size = (buf_tbl_size < driver->poolsize_hdlc) ?
driver->poolsize_hdlc : buf_tbl_size;
driver->supports_separate_cmdrsp = device_supports_separate_cmdrsp();
+ driver->supports_apps_hdlc_encoding = 0;
mutex_init(&driver->diag_hdlc_mutex);
mutex_init(&driver->diag_cntl_mutex);
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index a832cb3..e0deef3 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -89,6 +89,31 @@
}
}
+static void process_hdlc_encoding_feature(struct diag_smd_info *smd_info,
+ uint8_t feature_mask)
+{
+ /*
+ * Check if apps supports hdlc encoding and the
+ * peripheral supports apps hdlc encoding
+ */
+ if (driver->supports_apps_hdlc_encoding &&
+ (feature_mask & F_DIAG_HDLC_ENCODE_IN_APPS_MASK)) {
+ driver->smd_data[smd_info->peripheral].encode_hdlc =
+ ENABLE_APPS_HDLC_ENCODING;
+ if (driver->separate_cmdrsp[smd_info->peripheral] &&
+ smd_info->peripheral < NUM_SMD_CMD_CHANNELS)
+ driver->smd_cmd[smd_info->peripheral].encode_hdlc =
+ ENABLE_APPS_HDLC_ENCODING;
+ } else {
+ driver->smd_data[smd_info->peripheral].encode_hdlc =
+ DISABLE_APPS_HDLC_ENCODING;
+ if (driver->separate_cmdrsp[smd_info->peripheral] &&
+ smd_info->peripheral < NUM_SMD_CMD_CHANNELS)
+ driver->smd_cmd[smd_info->peripheral].encode_hdlc =
+ DISABLE_APPS_HDLC_ENCODING;
+ }
+}
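
The helper above reduces the negotiation to one bitwise test per peripheral: encoding moves to the apps processor only when both sides support it. A hedged standalone sketch of that decision; the helper name and parameters are hypothetical, while the 0x40 bit value matches F_DIAG_HDLC_ENCODE_IN_APPS_MASK added to diagfwd_cntl.h below:

#include <stdint.h>

#ifndef F_DIAG_HDLC_ENCODE_IN_APPS_MASK
#define F_DIAG_HDLC_ENCODE_IN_APPS_MASK	0x40	/* as in diagfwd_cntl.h */
#endif

/*
 * Hypothetical helper mirroring process_hdlc_encoding_feature(): the
 * apps processor encodes for a peripheral only when it supports apps
 * encoding itself and the peripheral advertises the 0x40 feature bit.
 */
static int peripheral_needs_apps_encoding(uint8_t feature_mask,
					  int apps_supports_encoding)
{
	return apps_supports_encoding &&
	       (feature_mask & F_DIAG_HDLC_ENCODE_IN_APPS_MASK);
}
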
+
/* Process the data read from the smd control channel */
int diag_process_smd_cntl_read_data(struct diag_smd_info *smd_info, void *buf,
int total_recd)
@@ -187,6 +212,12 @@
else
driver->separate_cmdrsp[periph] =
DISABLE_SEPARATE_CMDRSP;
+ /*
+ * Check if apps supports hdlc encoding and the
+ * peripheral supports apps hdlc encoding
+ */
+ process_hdlc_encoding_feature(smd_info,
+ feature_mask);
if (feature_mask_len > 1) {
feature_mask = *(uint8_t *)(buf+13);
process_stm_feature(smd_info,
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index c90c132..d79195c 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -48,6 +48,9 @@
/* Denotes we support diag over stm */
#define F_DIAG_OVER_STM 0x02
+ /* Perform hdlc encoding of data coming from smd channel */
+#define F_DIAG_HDLC_ENCODE_IN_APPS_MASK 0x40
+
#define ENABLE_SEPARATE_CMDRSP 1
#define DISABLE_SEPARATE_CMDRSP 0
@@ -57,6 +60,9 @@
#define UPDATE_PERIPHERAL_STM_STATE 1
#define CLEAR_PERIPHERAL_STM_STATE 2
+#define ENABLE_APPS_HDLC_ENCODING 1
+#define DISABLE_APPS_HDLC_ENCODING 0
+
struct cmd_code_range {
uint16_t cmd_code_lo;
uint16_t cmd_code_hi;
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index a6ef3ca..4ceca4f 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -25,22 +25,24 @@
void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
{
void *buf = NULL;
+ unsigned long flags;
int index;
+ spin_lock_irqsave(&driver->diag_mem_lock, flags);
index = 0;
if (pool_type == POOL_TYPE_COPY) {
if (driver->diagpool) {
- mutex_lock(&driver->diagmem_mutex);
- if (driver->count < driver->poolsize) {
+ if ((driver->count < driver->poolsize) &&
+ (size <= driver->itemsize)) {
atomic_add(1, (atomic_t *)&driver->count);
buf = mempool_alloc(driver->diagpool,
GFP_ATOMIC);
}
- mutex_unlock(&driver->diagmem_mutex);
}
} else if (pool_type == POOL_TYPE_HDLC) {
if (driver->diag_hdlc_pool) {
- if (driver->count_hdlc_pool < driver->poolsize_hdlc) {
+ if ((driver->count_hdlc_pool < driver->poolsize_hdlc) &&
+ (size <= driver->itemsize_hdlc)) {
atomic_add(1,
(atomic_t *)&driver->count_hdlc_pool);
buf = mempool_alloc(driver->diag_hdlc_pool,
@@ -49,7 +51,8 @@
}
} else if (pool_type == POOL_TYPE_USER) {
if (driver->diag_user_pool) {
- if (driver->count_user_pool < driver->poolsize_user) {
+ if ((driver->count_user_pool < driver->poolsize_user) &&
+ (size <= driver->itemsize_user)) {
atomic_add(1,
(atomic_t *)&driver->count_user_pool);
buf = mempool_alloc(driver->diag_user_pool,
@@ -58,8 +61,9 @@
}
} else if (pool_type == POOL_TYPE_WRITE_STRUCT) {
if (driver->diag_write_struct_pool) {
- if (driver->count_write_struct_pool <
- driver->poolsize_write_struct) {
+ if ((driver->count_write_struct_pool <
+ driver->poolsize_write_struct) &&
+ (size <= driver->itemsize_write_struct)) {
atomic_add(1,
(atomic_t *)&driver->count_write_struct_pool);
buf = mempool_alloc(
@@ -71,8 +75,9 @@
pool_type == POOL_TYPE_HSIC_2) {
index = pool_type - POOL_TYPE_HSIC;
if (diag_hsic[index].diag_hsic_pool) {
- if (diag_hsic[index].count_hsic_pool <
- diag_hsic[index].poolsize_hsic) {
+ if ((diag_hsic[index].count_hsic_pool <
+ diag_hsic[index].poolsize_hsic) &&
+ (size <= diag_hsic[index].itemsize_hsic)) {
atomic_add(1, (atomic_t *)
&diag_hsic[index].count_hsic_pool);
buf = mempool_alloc(
@@ -85,7 +90,8 @@
index = pool_type - POOL_TYPE_HSIC_WRITE;
if (diag_hsic[index].diag_hsic_write_pool) {
if (diag_hsic[index].count_hsic_write_pool <
- diag_hsic[index].poolsize_hsic_write) {
+ diag_hsic[index].poolsize_hsic_write &&
+ (size <= diag_hsic[index].itemsize_hsic_write)) {
atomic_add(1, (atomic_t *)
&diag_hsic[index].
count_hsic_write_pool);
@@ -96,14 +102,17 @@
}
#endif
}
+ spin_unlock_irqrestore(&driver->diag_mem_lock, flags);
return buf;
}
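
Two things change in diagmem_alloc() above: the mutex becomes an IRQ-safe spinlock so the pools can be used from atomic context, and every branch now rejects requests larger than the pool's fixed item size, since mempool items are fixed-size and an oversized request would otherwise hand back a buffer that is too small. A hedged in-kernel sketch of that pattern with hypothetical structure and field names:

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>

/* Hypothetical fixed-item pool wrapper illustrating the new rule. */
struct fixed_pool {
	spinlock_t lock;
	mempool_t *pool;
	int count;
	int poolsize;
	int itemsize;
};

static void *fixed_pool_alloc(struct fixed_pool *p, int size)
{
	unsigned long flags;
	void *buf = NULL;

	spin_lock_irqsave(&p->lock, flags);
	if (p->count < p->poolsize && size <= p->itemsize) {
		/* every item in the pool is exactly itemsize bytes */
		buf = mempool_alloc(p->pool, GFP_ATOMIC);
		if (buf)
			p->count++;
	}
	spin_unlock_irqrestore(&p->lock, flags);
	return buf;
}
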
void diagmem_exit(struct diagchar_dev *driver, int pool_type)
{
int index;
+ unsigned long flags;
index = 0;
+ spin_lock_irqsave(&driver->diag_mem_lock, flags);
if (driver->diagpool) {
if (driver->count == 0 && driver->ref_count == 0) {
mempool_destroy(driver->diagpool);
@@ -176,12 +185,18 @@
}
}
#endif
+ spin_unlock_irqrestore(&driver->diag_mem_lock, flags);
}
void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
{
int index;
+ unsigned long flags;
+ if (!buf)
+ return;
+
+ spin_lock_irqsave(&driver->diag_mem_lock, flags);
index = 0;
if (pool_type == POOL_TYPE_COPY) {
if (driver->diagpool != NULL && driver->count > 0) {
@@ -246,13 +261,13 @@
__func__, pool_type);
}
-
+ spin_unlock_irqrestore(&driver->diag_mem_lock, flags);
diagmem_exit(driver, pool_type);
}
void diagmem_init(struct diagchar_dev *driver)
{
- mutex_init(&driver->diagmem_mutex);
+ spin_lock_init(&driver->diag_mem_lock);
if (driver->count == 0) {
driver->diagpool = mempool_create_kmalloc_pool(
diff --git a/drivers/coresight/coresight-csr.c b/drivers/coresight/coresight-csr.c
index 132df90..6efab5b 100644
--- a/drivers/coresight/coresight-csr.c
+++ b/drivers/coresight/coresight-csr.c
@@ -65,6 +65,7 @@
#define CSR_QDSSPWRREQIGNORE (0x060)
#define CSR_QDSSSPARE (0x064)
#define CSR_IPCAT (0x068)
+#define CSR_BYTECNTVAL (0x06C)
#define BLKSIZE_256 0
#define BLKSIZE_512 1
@@ -159,6 +160,19 @@
}
EXPORT_SYMBOL(coresight_csr_hwctrl_set);
+void coresight_csr_set_byte_cntr(uint32_t count)
+{
+ struct csr_drvdata *drvdata = csrdrvdata;
+
+ CSR_UNLOCK(drvdata);
+
+ csr_writel(drvdata, count, CSR_BYTECNTVAL);
+ mb();
+
+ CSR_LOCK(drvdata);
+}
+EXPORT_SYMBOL(coresight_csr_set_byte_cntr);
+
static int __devinit csr_probe(struct platform_device *pdev)
{
int ret;
diff --git a/drivers/coresight/coresight-cti.c b/drivers/coresight/coresight-cti.c
index d0900d1..d139583 100644
--- a/drivers/coresight/coresight-cti.c
+++ b/drivers/coresight/coresight-cti.c
@@ -107,22 +107,37 @@
return 0;
}
-static void __cti_map_trigin(struct cti_drvdata *drvdata, int trig, int ch)
+static int __cti_map_trigin(struct cti_drvdata *drvdata, int trig, int ch)
{
uint32_t ctien;
+ int ret;
+
+ if (drvdata->refcnt == 0) {
+ ret = cti_enable(drvdata);
+ if (ret)
+ return ret;
+ }
CTI_UNLOCK(drvdata);
ctien = cti_readl(drvdata, CTIINEN(trig));
+ if (ctien & (0x1 << ch))
+ goto out;
cti_writel(drvdata, (ctien | 0x1 << ch), CTIINEN(trig));
CTI_LOCK(drvdata);
+
+ drvdata->refcnt++;
+ return 0;
+out:
+ CTI_LOCK(drvdata);
+ return 0;
}
int coresight_cti_map_trigin(struct coresight_cti *cti, int trig, int ch)
{
struct cti_drvdata *drvdata;
- int ret = 0;
+ int ret;
if (IS_ERR_OR_NULL(cti))
return -EINVAL;
@@ -134,36 +149,43 @@
drvdata = to_cti_drvdata(cti);
mutex_lock(&drvdata->mutex);
- if (drvdata->refcnt == 0) {
- ret = cti_enable(drvdata);
- if (ret)
- goto err;
- }
- drvdata->refcnt++;
-
- __cti_map_trigin(drvdata, trig, ch);
-err:
+ ret = __cti_map_trigin(drvdata, trig, ch);
mutex_unlock(&drvdata->mutex);
return ret;
}
EXPORT_SYMBOL(coresight_cti_map_trigin);
-static void __cti_map_trigout(struct cti_drvdata *drvdata, int trig, int ch)
+static int __cti_map_trigout(struct cti_drvdata *drvdata, int trig, int ch)
{
uint32_t ctien;
+ int ret;
+
+ if (drvdata->refcnt == 0) {
+ ret = cti_enable(drvdata);
+ if (ret)
+ return ret;
+ }
CTI_UNLOCK(drvdata);
ctien = cti_readl(drvdata, CTIOUTEN(trig));
+ if (ctien & (0x1 << ch))
+ goto out;
cti_writel(drvdata, (ctien | 0x1 << ch), CTIOUTEN(trig));
CTI_LOCK(drvdata);
+
+ drvdata->refcnt++;
+ return 0;
+out:
+ CTI_LOCK(drvdata);
+ return 0;
}
int coresight_cti_map_trigout(struct coresight_cti *cti, int trig, int ch)
{
struct cti_drvdata *drvdata;
- int ret = 0;
+ int ret;
if (IS_ERR_OR_NULL(cti))
return -EINVAL;
@@ -175,15 +197,7 @@
drvdata = to_cti_drvdata(cti);
mutex_lock(&drvdata->mutex);
- if (drvdata->refcnt == 0) {
- ret = cti_enable(drvdata);
- if (ret)
- goto err;
- }
- drvdata->refcnt++;
-
- __cti_map_trigout(drvdata, trig, ch);
-err:
+ ret = __cti_map_trigout(drvdata, trig, ch);
mutex_unlock(&drvdata->mutex);
return ret;
}
@@ -193,9 +207,11 @@
{
CTI_UNLOCK(drvdata);
- cti_writel(drvdata, 0x1, CTICONTROL);
+ cti_writel(drvdata, 0x0, CTICONTROL);
CTI_LOCK(drvdata);
+
+ clk_disable_unprepare(drvdata->clk);
}
static void __cti_unmap_trigin(struct cti_drvdata *drvdata, int trig, int ch)
@@ -205,9 +221,19 @@
CTI_UNLOCK(drvdata);
ctien = cti_readl(drvdata, CTIINEN(trig));
+ if (!(ctien & (0x1 << ch)))
+ goto out;
cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIINEN(trig));
CTI_LOCK(drvdata);
+
+ if (drvdata->refcnt == 1)
+ cti_disable(drvdata);
+ drvdata->refcnt--;
+ return;
+out:
+ CTI_LOCK(drvdata);
+ return;
}
void coresight_cti_unmap_trigin(struct coresight_cti *cti, int trig, int ch)
@@ -224,13 +250,8 @@
mutex_lock(&drvdata->mutex);
__cti_unmap_trigin(drvdata, trig, ch);
-
- if (drvdata->refcnt == 1)
- cti_disable(drvdata);
- drvdata->refcnt--;
mutex_unlock(&drvdata->mutex);
- clk_disable_unprepare(drvdata->clk);
}
EXPORT_SYMBOL(coresight_cti_unmap_trigin);
@@ -241,9 +262,19 @@
CTI_UNLOCK(drvdata);
ctien = cti_readl(drvdata, CTIOUTEN(trig));
+ if (!(ctien & (0x1 << ch)))
+ goto out;
cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIOUTEN(trig));
CTI_LOCK(drvdata);
+
+ if (drvdata->refcnt == 1)
+ cti_disable(drvdata);
+ drvdata->refcnt--;
+ return;
+out:
+ CTI_LOCK(drvdata);
+ return;
}
void coresight_cti_unmap_trigout(struct coresight_cti *cti, int trig, int ch)
@@ -260,13 +291,7 @@
mutex_lock(&drvdata->mutex);
__cti_unmap_trigout(drvdata, trig, ch);
-
- if (drvdata->refcnt == 1)
- cti_disable(drvdata);
- drvdata->refcnt--;
mutex_unlock(&drvdata->mutex);
-
- clk_disable_unprepare(drvdata->clk);
}
EXPORT_SYMBOL(coresight_cti_unmap_trigout);
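
The net effect of this restructuring is that the CTI block is enabled when the first trigger/channel mapping is created, re-mapping an already-set channel no longer inflates the refcount, and the clock and hardware are released when the last mapping goes away. A condensed, standalone sketch of that pattern; the struct and helper names are illustrative, not the driver's API:

#include <stdio.h>

#define BIT(n)	(1U << (n))

/* Stand-ins for the driver state and cti_enable()/cti_disable(). */
struct cti_like {
	int refcnt;
	unsigned int enable_mask;
};

static int hw_enable(struct cti_like *c)   { (void)c; return 0; }
static void hw_disable(struct cti_like *c) { (void)c; }

static int map_resource(struct cti_like *c, unsigned int bit)
{
	int ret;

	if (c->refcnt == 0) {
		ret = hw_enable(c);	/* cti_enable() in the driver */
		if (ret)
			return ret;
	}
	if (c->enable_mask & BIT(bit))
		return 0;		/* already mapped: no refcount bump */
	c->enable_mask |= BIT(bit);
	c->refcnt++;
	return 0;
}

static void unmap_resource(struct cti_like *c, unsigned int bit)
{
	if (!(c->enable_mask & BIT(bit)))
		return;			/* never mapped: nothing to drop */
	c->enable_mask &= ~BIT(bit);
	if (c->refcnt == 1)
		hw_disable(c);		/* cti_disable() in the driver */
	c->refcnt--;
}

int main(void)
{
	struct cti_like c = { 0, 0 };

	map_resource(&c, 2);
	map_resource(&c, 2);	/* duplicate: refcnt stays at 1 */
	unmap_resource(&c, 2);	/* last user: hardware disabled again */
	printf("refcnt=%d mask=%#x\n", c.refcnt, c.enable_mask);
	return 0;
}
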
diff --git a/drivers/coresight/coresight-priv.h b/drivers/coresight/coresight-priv.h
index b570252..3ad1f34 100644
--- a/drivers/coresight/coresight-priv.h
+++ b/drivers/coresight/coresight-priv.h
@@ -48,12 +48,14 @@
extern void msm_qdss_csr_disable_bam_to_usb(void);
extern void msm_qdss_csr_disable_flush(void);
extern int coresight_csr_hwctrl_set(phys_addr_t addr, uint32_t val);
+extern void coresight_csr_set_byte_cntr(uint32_t);
#else
static inline void msm_qdss_csr_enable_bam_to_usb(void) {}
static inline void msm_qdss_csr_disable_bam_to_usb(void) {}
static inline void msm_qdss_csr_disable_flush(void) {}
static inline int coresight_csr_hwctrl_set(phys_addr_t addr,
uint32_t val) { return -ENOSYS; }
+static inline void coresight_csr_set_byte_cntr(uint32_t val) {}
#endif
#ifdef CONFIG_CORESIGHT_ETM
extern unsigned int etm_readl_cp14(uint32_t off);
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
index 4186abe..c501700 100644
--- a/drivers/coresight/coresight-tmc.c
+++ b/drivers/coresight/coresight-tmc.c
@@ -30,6 +30,10 @@
#include <linux/of_coresight.h>
#include <linux/coresight.h>
#include <linux/coresight-cti.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
#include <linux/usb/usb_qdss.h>
#include <mach/memory.h>
#include <mach/sps.h>
@@ -135,6 +139,8 @@
struct device *dev;
struct coresight_device *csdev;
struct miscdevice miscdev;
+ struct cdev byte_cntr_dev;
+ struct class *byte_cntr_class;
struct clk *clk;
spinlock_t spinlock;
bool reset_flush_race;
@@ -157,6 +163,19 @@
bool enable;
enum tmc_config_type config_type;
uint32_t trigger_cntr;
+ int byte_cntr_irq;
+ atomic_t byte_cntr_irq_cnt;
+ uint32_t byte_cntr_value;
+ struct mutex byte_cntr_read_lock;
+ struct mutex byte_cntr_lock;
+ uint32_t byte_cntr_block_size;
+ bool byte_cntr_overflow;
+ bool byte_cntr_present;
+ bool byte_cntr_enable;
+ uint32_t byte_cntr_overflow_cnt;
+ bool byte_cntr_read_active;
+ wait_queue_head_t wq;
+ char *byte_cntr_node;
};
static void tmc_wait_for_flush(struct tmc_drvdata *drvdata)
@@ -368,6 +387,52 @@
mutex_unlock(&drvdata->usb_lock);
}
+static uint32_t tmc_etr_get_write_ptr(struct tmc_drvdata *drvdata)
+{
+ uint32_t rwp = 0;
+
+ TMC_UNLOCK(drvdata);
+
+ rwp = tmc_readl(drvdata, TMC_RWP);
+
+ TMC_LOCK(drvdata);
+
+ return rwp;
+}
+
+static void tmc_etr_byte_cntr_start(struct tmc_drvdata *drvdata)
+{
+ if (!drvdata->byte_cntr_present)
+ return;
+
+ mutex_lock(&drvdata->byte_cntr_lock);
+ atomic_set(&drvdata->byte_cntr_irq_cnt, 0);
+ drvdata->byte_cntr_overflow = false;
+ drvdata->byte_cntr_read_active = false;
+ drvdata->byte_cntr_enable = true;
+ if (drvdata->byte_cntr_value != 0)
+ drvdata->byte_cntr_overflow_cnt = drvdata->size /
+ (drvdata->byte_cntr_value * 8);
+ else
+ drvdata->byte_cntr_overflow_cnt = 0;
+ coresight_csr_set_byte_cntr(drvdata->byte_cntr_value);
+ mutex_unlock(&drvdata->byte_cntr_lock);
+}
+
+static void tmc_etr_byte_cntr_stop(struct tmc_drvdata *drvdata)
+{
+ if (!drvdata->byte_cntr_present)
+ return;
+
+ mutex_lock(&drvdata->byte_cntr_lock);
+ coresight_csr_set_byte_cntr(0);
+ drvdata->byte_cntr_value = 0;
+ drvdata->byte_cntr_enable = false;
+ mutex_unlock(&drvdata->byte_cntr_lock);
+
+ wake_up(&drvdata->wq);
+}
+
static void __tmc_etb_enable(struct tmc_drvdata *drvdata)
{
/* Zero out the memory to help with debug */
@@ -438,10 +503,14 @@
coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM &&
- !drvdata->reset_flush_race) {
- coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
- coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+ tmc_etr_byte_cntr_start(drvdata);
+ if (!drvdata->reset_flush_race) {
+ coresight_cti_map_trigout(drvdata->cti_flush,
+ 3, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset,
+ 2, 0);
+ }
} else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
drvdata->usbch = usb_qdss_open("qdss", drvdata,
usb_notifier);
@@ -674,10 +743,14 @@
coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0);
coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM &&
- !drvdata->reset_flush_race) {
- coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
- coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+ tmc_etr_byte_cntr_stop(drvdata);
+ if (!drvdata->reset_flush_race) {
+ coresight_cti_unmap_trigin(drvdata->cti_reset,
+ 2, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush,
+ 3, 0);
+ }
} else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
tmc_etr_bam_disable(drvdata);
usb_qdss_close(drvdata->usbch);
@@ -929,6 +1002,161 @@
.llseek = no_llseek,
};
+static int tmc_etr_byte_cntr_open(struct inode *inode, struct file *file)
+{
+ struct tmc_drvdata *drvdata = container_of(inode->i_cdev,
+ struct tmc_drvdata,
+ byte_cntr_dev);
+
+ if (drvdata->out_mode != TMC_ETR_OUT_MODE_MEM ||
+ !drvdata->byte_cntr_enable)
+ return -EPERM;
+
+ if (!mutex_trylock(&drvdata->byte_cntr_read_lock))
+ return -EPERM;
+
+ file->private_data = drvdata;
+ nonseekable_open(inode, file);
+ drvdata->byte_cntr_block_size = drvdata->byte_cntr_value * 8;
+ drvdata->byte_cntr_read_active = true;
+ dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
+ return 0;
+}
+
+static void tmc_etr_read_bytes(struct tmc_drvdata *drvdata, loff_t *ppos,
+ size_t bytes, size_t *len)
+{
+ if (*len >= bytes) {
+ atomic_dec(&drvdata->byte_cntr_irq_cnt);
+ *len = bytes;
+ } else {
+ if (((uint32_t)*ppos % bytes) + *len > bytes)
+ *len = bytes - ((uint32_t)*ppos % bytes);
+ if ((*len + (uint32_t)*ppos) % bytes == 0)
+ atomic_dec(&drvdata->byte_cntr_irq_cnt);
+ }
+}
+
+static size_t tmc_etr_flush_bytes(struct tmc_drvdata *drvdata, loff_t *ppos,
+ size_t bytes)
+{
+ uint32_t rwp = 0;
+ size_t len = bytes;
+
+ rwp = tmc_etr_get_write_ptr(drvdata);
+ if (rwp >= (drvdata->paddr + *ppos)) {
+ if (len > (rwp - drvdata->paddr - *ppos))
+ len = rwp - drvdata->paddr - *ppos;
+ }
+ return len;
+}
+
+static ssize_t tmc_etr_byte_cntr_read(struct file *file, char __user *data,
+ size_t len, loff_t *ppos)
+{
+ struct tmc_drvdata *drvdata = file->private_data;
+ char *bufp = drvdata->vaddr + *ppos;
+ size_t bytes = drvdata->byte_cntr_block_size;
+ int ret = 0;
+
+ if (!data)
+ return -EINVAL;
+ if (drvdata->byte_cntr_overflow)
+ return -EIO;
+
+ mutex_lock(&drvdata->byte_cntr_lock);
+	/* In case the byte counter is enabled and disabled multiple times,
+	 * prevent unexpected data from being given to the user.
+ */
+ if (!drvdata->byte_cntr_read_active)
+ goto read_err0;
+
+ if (!drvdata->byte_cntr_enable) {
+ if (!atomic_read(&drvdata->byte_cntr_irq_cnt)) {
+			/* Read the last 'block' of data, which might need
+			 * to be read only partially. If already read, return 0
+ */
+ len = tmc_etr_flush_bytes(drvdata, ppos, bytes);
+ if (!len)
+ goto read_err0;
+ } else {
+ /* Keep reading until you reach the last block of data
+ */
+ tmc_etr_read_bytes(drvdata, ppos, bytes, &len);
+ }
+ } else {
+ if (!atomic_read(&drvdata->byte_cntr_irq_cnt)) {
+ mutex_unlock(&drvdata->byte_cntr_lock);
+ if (wait_event_interruptible(drvdata->wq,
+ (atomic_read(&drvdata->byte_cntr_irq_cnt) > 0) ||
+ !drvdata->byte_cntr_enable)) {
+ ret = -ERESTARTSYS;
+ goto read_err1;
+ }
+ mutex_lock(&drvdata->byte_cntr_lock);
+ if (!drvdata->byte_cntr_read_active) {
+ ret = 0;
+ goto read_err0;
+ }
+ }
+ if (drvdata->byte_cntr_overflow) {
+ ret = -EIO;
+ goto read_err0;
+ }
+ if (!drvdata->byte_cntr_enable &&
+ !atomic_read(&drvdata->byte_cntr_irq_cnt)) {
+ len = tmc_etr_flush_bytes(drvdata, ppos, bytes);
+ if (!len) {
+ ret = 0;
+ goto read_err0;
+ }
+ } else {
+ tmc_etr_read_bytes(drvdata, ppos, bytes, &len);
+ }
+ }
+ if (copy_to_user(data, bufp, len)) {
+ mutex_unlock(&drvdata->byte_cntr_lock);
+ dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ ret = -EFAULT;
+ goto read_err1;
+ }
+ mutex_unlock(&drvdata->byte_cntr_lock);
+
+ if (*ppos + len >= drvdata->size)
+ *ppos = 0;
+ else
+ *ppos += len;
+
+ dev_dbg(drvdata->dev, "%s: %d bytes copied, %d bytes left\n",
+ __func__, len, (int) (drvdata->size - *ppos));
+ return len;
+
+read_err0:
+ mutex_unlock(&drvdata->byte_cntr_lock);
+read_err1:
+ return ret;
+}
+
+static int tmc_etr_byte_cntr_release(struct inode *inode, struct file *file)
+{
+ struct tmc_drvdata *drvdata = file->private_data;
+
+ mutex_lock(&drvdata->byte_cntr_lock);
+ drvdata->byte_cntr_read_active = false;
+ mutex_unlock(&drvdata->byte_cntr_lock);
+ mutex_unlock(&drvdata->byte_cntr_read_lock);
+ dev_dbg(drvdata->dev, "%s: released\n", __func__);
+ return 0;
+}
+
+static const struct file_operations byte_cntr_fops = {
+ .owner = THIS_MODULE,
+ .open = tmc_etr_byte_cntr_open,
+ .read = tmc_etr_byte_cntr_read,
+ .release = tmc_etr_byte_cntr_release,
+ .llseek = no_llseek,
+};
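
Together these file operations expose the ETR buffer as a character device that userspace drains in fixed blocks of byte_cntr_value * 8 bytes, blocking until the interrupt handler reports that another block has been captured; the block size is configured beforehand through the byte_cntr_value sysfs attribute added further down. A rough userspace reader might look like the sketch below; the device path is an assumption, since tmc_etr_byte_cntr_init() names the node "<coresight-device-name>-stream":

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical node path; the driver creates "<name>-stream" */
	int fd = open("/dev/coresight-tmc-etr-stream", O_RDONLY);
	char buf[4096];	/* should be at least byte_cntr_value * 8 bytes */
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * Each read() blocks until one block of trace is available and
	 * returns 0 once tracing has stopped and the tail has been
	 * flushed; a byte-counter overflow surfaces as EIO.
	 */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(fd);
	return 0;
}
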
+
static ssize_t tmc_show_trigger_cntr(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1044,6 +1272,41 @@
static DEVICE_ATTR(out_mode, S_IRUGO | S_IWUSR, tmc_etr_show_out_mode,
tmc_etr_store_out_mode);
+static ssize_t tmc_etr_show_byte_cntr_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = drvdata->byte_cntr_value;
+
+ if (!drvdata->byte_cntr_present)
+ return -EPERM;
+
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tmc_etr_store_byte_cntr_value(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if (!drvdata->byte_cntr_present || drvdata->byte_cntr_enable)
+ return -EPERM;
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+	if (val == 0 || (drvdata->size / 8) < val)
+		return -EINVAL;
+	if (drvdata->size % (val * 8) != 0)
+		return -EINVAL;
+
+ drvdata->byte_cntr_value = val;
+ return size;
+}
+static DEVICE_ATTR(byte_cntr_value, S_IRUGO | S_IWUSR,
+ tmc_etr_show_byte_cntr_value, tmc_etr_store_byte_cntr_value);
+
static struct attribute *tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
NULL,
@@ -1055,6 +1318,7 @@
static struct attribute *tmc_etr_attrs[] = {
&dev_attr_out_mode.attr,
+ &dev_attr_byte_cntr_value.attr,
NULL,
};
@@ -1118,6 +1382,113 @@
sps_deregister_bam_device(bamdata->handle);
}
+static irqreturn_t tmc_etr_byte_cntr_irq(int irq, void *data)
+{
+ struct tmc_drvdata *drvdata = data;
+
+ atomic_inc(&drvdata->byte_cntr_irq_cnt);
+ if (atomic_read(&drvdata->byte_cntr_irq_cnt) >
+ drvdata->byte_cntr_overflow_cnt) {
+ dev_err(drvdata->dev, "Byte counter overflow\n");
+ drvdata->byte_cntr_overflow = true;
+ }
+ wake_up(&drvdata->wq);
+ return IRQ_HANDLED;
+}
+
+static int tmc_etr_byte_cntr_dev_register(struct tmc_drvdata *drvdata)
+{
+ int ret;
+ struct device *device;
+ dev_t dev;
+
+ ret = alloc_chrdev_region(&dev, 0, 1, drvdata->byte_cntr_node);
+ if (ret)
+ goto dev_err0;
+ cdev_init(&drvdata->byte_cntr_dev, &byte_cntr_fops);
+ drvdata->byte_cntr_dev.owner = THIS_MODULE;
+ drvdata->byte_cntr_dev.ops = &byte_cntr_fops;
+ ret = cdev_add(&drvdata->byte_cntr_dev, dev, 1);
+ if (ret)
+ goto dev_err1;
+	drvdata->byte_cntr_class = class_create(THIS_MODULE,
+					drvdata->byte_cntr_node);
+	if (IS_ERR(drvdata->byte_cntr_class)) {
+		ret = PTR_ERR(drvdata->byte_cntr_class);
+		goto dev_err2;
+	}
+ device = device_create(drvdata->byte_cntr_class, NULL,
+ drvdata->byte_cntr_dev.dev, drvdata,
+ drvdata->byte_cntr_node);
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(device);
+ goto dev_err3;
+ }
+ return 0;
+dev_err3:
+ class_destroy(drvdata->byte_cntr_class);
+dev_err2:
+ cdev_del(&drvdata->byte_cntr_dev);
+dev_err1:
+ unregister_chrdev_region(drvdata->byte_cntr_dev.dev, 1);
+dev_err0:
+ return ret;
+}
+
+static void tmc_etr_byte_cntr_dev_deregister(struct tmc_drvdata *drvdata)
+{
+ device_destroy(drvdata->byte_cntr_class, drvdata->byte_cntr_dev.dev);
+ class_destroy(drvdata->byte_cntr_class);
+ cdev_del(&drvdata->byte_cntr_dev);
+ unregister_chrdev_region(drvdata->byte_cntr_dev.dev, 1);
+}
+
+static int tmc_etr_byte_cntr_init(struct platform_device *pdev,
+ struct tmc_drvdata *drvdata)
+{
+ int ret = 0;
+ size_t node_size = strlen("-stream") + 1;
+ char *node_name = (char *)((struct coresight_platform_data *)
+ (pdev->dev.platform_data))->name;
+
+ if (!drvdata->byte_cntr_present) {
+ dev_info(&pdev->dev, "Byte Counter feature absent\n");
+ return 0;
+ }
+
+ drvdata->byte_cntr_irq = platform_get_irq_byname(pdev,
+ "byte-cntr-irq");
+ if (drvdata->byte_cntr_irq < 0) {
+ dev_err(&pdev->dev, "Byte-cntr-irq not specified\n");
+ return 0;
+ }
+ ret = devm_request_irq(&pdev->dev, drvdata->byte_cntr_irq,
+ tmc_etr_byte_cntr_irq,
+ IRQF_TRIGGER_RISING | IRQF_SHARED,
+ node_name, drvdata);
+ if (ret) {
+ dev_err(&pdev->dev, "Request irq failed\n");
+ return ret;
+ }
+ init_waitqueue_head(&drvdata->wq);
+ node_size += strlen(node_name);
+	drvdata->byte_cntr_node = devm_kzalloc(&pdev->dev,
+					       node_size, GFP_KERNEL);
+	if (!drvdata->byte_cntr_node)
+		return -ENOMEM;
+	strlcpy(drvdata->byte_cntr_node, node_name, node_size);
+ strlcat(drvdata->byte_cntr_node, "-stream", node_size);
+ ret = tmc_etr_byte_cntr_dev_register(drvdata);
+ if (ret) {
+ dev_err(&pdev->dev, "Byte cntr node not registered\n");
+ return ret;
+ }
+ dev_info(&pdev->dev, "Byte Counter feature enabled\n");
+ return 0;
+}
+
+static void tmc_etr_byte_cntr_exit(struct tmc_drvdata *drvdata)
+{
+ if (drvdata->byte_cntr_present)
+ tmc_etr_byte_cntr_dev_deregister(drvdata);
+}
+
static int __devinit tmc_probe(struct platform_device *pdev)
{
int ret;
@@ -1162,6 +1533,9 @@
spin_lock_init(&drvdata->spinlock);
mutex_init(&drvdata->read_lock);
mutex_init(&drvdata->usb_lock);
+ mutex_init(&drvdata->byte_cntr_lock);
+ mutex_init(&drvdata->byte_cntr_read_lock);
+ atomic_set(&drvdata->byte_cntr_irq_cnt, 0);
drvdata->clk = devm_clk_get(dev, "core_clk");
if (IS_ERR(drvdata->clk))
@@ -1207,10 +1581,16 @@
memset(drvdata->vaddr, 0, drvdata->size);
drvdata->buf = drvdata->vaddr;
drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
-
- ret = tmc_etr_bam_init(pdev, drvdata);
+ if (pdev->dev.of_node)
+ drvdata->byte_cntr_present = !of_property_read_bool
+ (pdev->dev.of_node,
+ "qcom,byte-cntr-absent");
+ ret = tmc_etr_byte_cntr_init(pdev, drvdata);
if (ret)
goto err0;
+ ret = tmc_etr_bam_init(pdev, drvdata);
+ if (ret)
+ goto err1;
} else {
baddr = devm_kzalloc(dev, PAGE_SIZE + drvdata->size,
GFP_KERNEL);
@@ -1277,7 +1657,7 @@
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc) {
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
desc->type = CORESIGHT_DEV_TYPE_SINK;
@@ -1290,7 +1670,7 @@
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
- goto err1;
+ goto err2;
}
} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
desc->type = CORESIGHT_DEV_TYPE_SINK;
@@ -1303,7 +1683,7 @@
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
- goto err1;
+ goto err2;
}
} else {
desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
@@ -1317,7 +1697,7 @@
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
- goto err1;
+ goto err2;
}
}
@@ -1327,14 +1707,16 @@
drvdata->miscdev.fops = &tmc_fops;
ret = misc_register(&drvdata->miscdev);
if (ret)
- goto err2;
+ goto err3;
dev_info(dev, "TMC initialized\n");
return 0;
-err2:
+err3:
coresight_unregister(drvdata->csdev);
-err1:
+err2:
tmc_etr_bam_exit(drvdata);
+err1:
+ tmc_etr_byte_cntr_exit(drvdata);
err0:
free_contiguous_memory_by_paddr(drvdata->paddr);
return ret;
@@ -1344,6 +1726,7 @@
{
struct tmc_drvdata *drvdata = platform_get_drvdata(pdev);
+ tmc_etr_byte_cntr_exit(drvdata);
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
tmc_etr_bam_exit(drvdata);
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 76c6350..4c05978 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -188,12 +188,7 @@
min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
- if ((maj_rev != 0x05) || (min_rev > 0x02) || (step_rev > 0x02)) {
- pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
- pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
- return -EIO;
- };
- if ((min_rev > 0) && (step_rev != 0)) {
+ if (maj_rev != 0x05) {
pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
return -EIO;
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index a80b0c6..1ea3cd2 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -31,6 +31,8 @@
struct ion_iommu_heap {
struct ion_heap heap;
+ struct ion_page_pool **cached_pools;
+ struct ion_page_pool **uncached_pools;
};
/*
@@ -48,9 +50,14 @@
};
#define MAX_VMAP_RETRIES 10
+#define BAD_ORDER -1
-static const unsigned int orders[] = {8, 4, 0};
+static const unsigned int orders[] = {9, 8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
+static unsigned int low_gfp_flags = __GFP_HIGHMEM | GFP_KERNEL | __GFP_ZERO;
+static unsigned int high_gfp_flags = (__GFP_HIGHMEM | __GFP_NORETRY
+ | __GFP_NO_KSWAPD | __GFP_NOWARN |
+ __GFP_IO | __GFP_FS | __GFP_ZERO);
struct page_info {
struct page *page;
@@ -58,13 +65,25 @@
struct list_head list;
};
+static int order_to_index(unsigned int order)
+{
+ int i;
+ for (i = 0; i < num_orders; i++)
+ if (order == orders[i])
+ return i;
+ BUG();
+ return BAD_ORDER;
+}
+
static unsigned int order_to_size(int order)
{
return PAGE_SIZE << order;
}
-static struct page_info *alloc_largest_available(unsigned long size,
- unsigned int max_order)
+static struct page_info *alloc_largest_available(struct ion_iommu_heap *heap,
+ unsigned long size,
+ unsigned int max_order,
+ unsigned long flags)
{
struct page *page;
struct page_info *info;
@@ -72,21 +91,35 @@
for (i = 0; i < num_orders; i++) {
gfp_t gfp;
+ int idx = order_to_index(orders[i]);
+ struct ion_page_pool *pool;
+
+ if (idx == BAD_ORDER)
+ continue;
+
+ if (ION_IS_CACHED(flags)) {
+ pool = heap->cached_pools[idx];
+ BUG_ON(!pool);
+ } else {
+ pool = heap->uncached_pools[idx];
+ BUG_ON(!pool);
+ }
+
if (size < order_to_size(orders[i]))
continue;
if (max_order < orders[i])
continue;
- gfp = __GFP_HIGHMEM;
-
if (orders[i]) {
- gfp |= __GFP_COMP | __GFP_NORETRY |
- __GFP_NO_KSWAPD | __GFP_NOWARN;
+ gfp = high_gfp_flags;
} else {
- gfp |= GFP_KERNEL;
+ gfp = low_gfp_flags;
}
trace_alloc_pages_iommu_start(gfp, orders[i]);
- page = alloc_pages(gfp, orders[i]);
+ if (flags & ION_FLAG_POOL_FORCE_ALLOC)
+ page = alloc_pages(gfp, orders[i]);
+ else
+ page = ion_page_pool_alloc(pool);
trace_alloc_pages_iommu_end(gfp, orders[i]);
if (!page) {
trace_alloc_pages_iommu_fail(gfp, orders[i]);
@@ -103,6 +136,47 @@
return NULL;
}
+static int ion_iommu_buffer_zero(struct ion_iommu_priv_data *data)
+{
+ int i, j;
+ unsigned int npages_to_vmap;
+ unsigned int total_pages;
+ void *ptr = NULL;
+
+ /*
+ * As an optimization, we manually zero out all of the
+ * pages in one fell swoop here. To safeguard against
+ * insufficient vmalloc space, we only vmap
+ * `npages_to_vmap' at a time, starting with a
+ * conservative estimate of 1/8 of the total number of
+ * vmalloc pages available. Note that the `pages'
+ * array is composed of all 4K pages, irrespective of
+ * the size of the pages on the sg list.
+ */
+ npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
+ >> PAGE_SHIFT;
+ total_pages = data->nrpages;
+ for (i = 0; i < total_pages; i += npages_to_vmap) {
+ npages_to_vmap = min(npages_to_vmap, total_pages - i);
+ for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
+ ++j) {
+ ptr = vmap(&data->pages[i], npages_to_vmap,
+ VM_IOREMAP, pgprot_kernel);
+ if (ptr)
+ break;
+ else
+ npages_to_vmap >>= 1;
+ }
+ if (!ptr)
+ return -ENOMEM;
+
+ memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
+ vunmap(ptr);
+ }
+
+ return 0;
+}
+
static int ion_iommu_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
@@ -112,13 +186,14 @@
struct list_head pages_list;
struct page_info *info, *tmp_info;
struct ion_iommu_priv_data *data = NULL;
+ struct ion_iommu_heap *iommu_heap =
+ container_of(heap, struct ion_iommu_heap, heap);
if (msm_use_iommu()) {
struct scatterlist *sg;
struct sg_table *table;
int j;
- void *ptr = NULL;
- unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
+ unsigned int num_large_pages = 0;
unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
unsigned int page_tbl_size;
@@ -129,8 +204,10 @@
INIT_LIST_HEAD(&pages_list);
while (size_remaining > 0) {
- info = alloc_largest_available(size_remaining,
- max_order);
+ info = alloc_largest_available(iommu_heap,
+ size_remaining,
+ max_order,
+ flags);
if (!info) {
ret = -ENOMEM;
goto err_free_data;
@@ -190,44 +267,21 @@
kfree(info);
}
- /*
- * As an optimization, we omit __GFP_ZERO from
- * alloc_page above and manually zero out all of the
- * pages in one fell swoop here. To safeguard against
- * insufficient vmalloc space, we only vmap
- * `npages_to_vmap' at a time, starting with a
- * conservative estimate of 1/8 of the total number of
- * vmalloc pages available. Note that the `pages'
- * array is composed of all 4K pages, irrespective of
- * the size of the pages on the sg list.
- */
- npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
- >> PAGE_SHIFT;
- total_pages = data->nrpages;
- for (i = 0; i < total_pages; i += npages_to_vmap) {
- npages_to_vmap = min(npages_to_vmap, total_pages - i);
- for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
- ++j) {
- ptr = vmap(&data->pages[i], npages_to_vmap,
- VM_IOREMAP, pgprot_kernel);
- if (ptr)
- break;
- else
- npages_to_vmap >>= 1;
- }
- if (!ptr) {
+
+ if (flags & ION_FLAG_POOL_FORCE_ALLOC) {
+ ret = ion_iommu_buffer_zero(data);
+ if (ret) {
pr_err("Couldn't vmap the pages for zeroing\n");
- ret = -ENOMEM;
goto err3;
}
- memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
- vunmap(ptr);
- }
- if (!ION_IS_CACHED(flags))
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+
+ if (!ION_IS_CACHED(flags))
+ dma_sync_sg_for_device(NULL, table->sgl,
+ table->nents,
DMA_BIDIRECTIONAL);
+ }
buffer->priv_virt = data;
return 0;
@@ -264,14 +318,38 @@
struct scatterlist *sg;
struct sg_table *table = buffer->sg_table;
struct ion_iommu_priv_data *data = buffer->priv_virt;
+ bool cached = ion_buffer_cached(buffer);
+ struct ion_iommu_heap *iommu_heap =
+ container_of(buffer->heap, struct ion_iommu_heap, heap);
if (!table)
return;
if (!data)
return;
- for_each_sg(table->sgl, sg, table->nents, i)
- __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
+ if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC))
+ ion_iommu_buffer_zero(data);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int order = get_order(sg_dma_len(sg));
+ int idx = order_to_index(order);
+ struct ion_page_pool *pool;
+
+ if (idx == BAD_ORDER) {
+ WARN_ON(1);
+ continue;
+ }
+
+ if (cached)
+ pool = iommu_heap->cached_pools[idx];
+ else
+ pool = iommu_heap->uncached_pools[idx];
+
+ if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
+ __free_pages(sg_page(sg), order);
+ else
+ ion_page_pool_free(pool, sg_page(sg));
+ }
sg_free_table(table);
kfree(table);
@@ -356,6 +434,47 @@
{
}
+static int ion_iommu_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+ void *unused)
+{
+
+ struct ion_iommu_heap *iommu_heap = container_of(heap,
+ struct ion_iommu_heap,
+ heap);
+ int i;
+ unsigned long total = 0;
+
+ seq_printf(s, "Cached Pools:\n");
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = iommu_heap->cached_pools[i];
+ seq_printf(s, "%d order %u highmem pages in pool = %lx total\n",
+ pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages in pool = %lx total\n",
+ pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
+
+ total += (1 << pool->order) * PAGE_SIZE *
+ (pool->low_count + pool->high_count);
+ }
+
+ seq_printf(s, "Uncached Pools:\n");
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = iommu_heap->uncached_pools[i];
+ seq_printf(s, "%d order %u highmem pages in pool = %lx total\n",
+ pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages in pool = %lx total\n",
+ pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
+
+ total += (1 << pool->order) * PAGE_SIZE *
+ (pool->low_count + pool->high_count);
+ }
+ seq_printf(s, "Total bytes in pool: %lx\n", total);
+ return 0;
+}
+
static struct ion_heap_ops iommu_heap_ops = {
.allocate = ion_iommu_heap_allocate,
.free = ion_iommu_heap_free,
@@ -369,6 +488,7 @@
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_iommu_heap *iommu_heap;
+ int i;
iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
if (!iommu_heap)
@@ -376,8 +496,66 @@
iommu_heap->heap.ops = &iommu_heap_ops;
iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
+ iommu_heap->uncached_pools = kzalloc(
+ sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+ if (!iommu_heap->uncached_pools)
+ goto err_alloc_uncached_pools;
+ iommu_heap->cached_pools = kzalloc(
+ sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+
+ if (!iommu_heap->cached_pools)
+ goto err_alloc_cached_pools;
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags;
+
+ if (orders[i])
+ gfp_flags = high_gfp_flags | __GFP_ZERO;
+ else
+ gfp_flags = low_gfp_flags | __GFP_ZERO;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_cached_pool;
+ iommu_heap->cached_pools[i] = pool;
+ }
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags;
+
+ if (orders[i])
+ gfp_flags = high_gfp_flags | __GFP_ZERO;
+ else
+ gfp_flags = low_gfp_flags | __GFP_ZERO;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_uncached_pool;
+ iommu_heap->uncached_pools[i] = pool;
+ }
+ iommu_heap->heap.debug_show = ion_iommu_heap_debug_show;
return &iommu_heap->heap;
+
+err_create_uncached_pool:
+	for (i = 0; i < num_orders; i++)
+		if (iommu_heap->uncached_pools[i])
+			ion_page_pool_destroy(iommu_heap->uncached_pools[i]);
+
+err_create_cached_pool:
+	for (i = 0; i < num_orders; i++)
+		if (iommu_heap->cached_pools[i])
+			ion_page_pool_destroy(iommu_heap->cached_pools[i]);
+
+ kfree(iommu_heap->cached_pools);
+err_alloc_cached_pools:
+ kfree(iommu_heap->uncached_pools);
+err_alloc_uncached_pools:
+ kfree(iommu_heap);
+ return ERR_PTR(-ENOMEM);
}
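
The heap now keeps one ion_page_pool per entry in orders[] (9, 8, 4, 0), duplicated for cached and uncached buffers, and alloc_largest_available() satisfies a request largest-order-first from whichever pool matches the caching flags (with the extra rule, visible above, that cached allocations start at max_order 0). A small standalone sketch of the sizing walk for a hypothetical 2.5 MB request:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static const unsigned int orders[] = {9, 8, 4, 0};
static const int num_orders = sizeof(orders) / sizeof(orders[0]);

static unsigned long order_to_size(unsigned int order)
{
	return PAGE_SIZE << order;
}

int main(void)
{
	/* e.g. a 2.5 MB request: one order-9 chunk (2 MB), then order-8
	 * is too large for the 0.5 MB remainder, so it falls back to
	 * eight order-4 chunks (64 KB each).
	 */
	unsigned long remaining = 2621440;	/* hypothetical 2.5 MB */
	int i;

	while (remaining > 0) {
		for (i = 0; i < num_orders; i++) {
			if (order_to_size(orders[i]) <= remaining) {
				printf("take order %u (%lu bytes)\n",
				       orders[i], order_to_size(orders[i]));
				remaining -= order_to_size(orders[i]);
				break;
			}
		}
	}
	return 0;
}
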
void ion_iommu_heap_destroy(struct ion_heap *heap)
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index fc66328..aac183b 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -17,15 +17,16 @@
msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o
msm_kgsl_core-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += kgsl_pwrscale_idlestats.o
-msm_kgsl_core-$(CONFIG_MSM_DCVS) += kgsl_pwrscale_msm.o
msm_kgsl_core-$(CONFIG_SYNC) += kgsl_sync.o
msm_adreno-y += \
adreno_ringbuffer.o \
adreno_drawctxt.o \
+ adreno_dispatch.o \
adreno_postmortem.o \
adreno_snapshot.o \
adreno_coresight.o \
+ adreno_trace.o \
adreno_a2xx.o \
adreno_a2xx_trace.o \
adreno_a2xx_snapshot.o \
@@ -34,7 +35,7 @@
adreno_a3xx_snapshot.o \
adreno.o
-msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
+msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o adreno_profile.o
msm_z180-y += \
z180.o \
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index baf335f..184dd982 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -23,8 +23,6 @@
#include <mach/socinfo.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_bus.h>
-#include <mach/msm_dcvs.h>
-#include <mach/msm_dcvs_scm.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
@@ -34,6 +32,7 @@
#include "adreno.h"
#include "adreno_pm4types.h"
+#include "adreno_trace.h"
#include "a2xx_reg.h"
#include "a3xx_reg.h"
@@ -215,7 +214,7 @@
512, 0, 2, SZ_128K, 0x3FF037, 0x3FF016 },
};
-static unsigned int adreno_isidle(struct kgsl_device *device);
+static bool adreno_isidle(struct kgsl_device *device);
/**
* adreno_perfcounter_init: Reserve kernel performance counters
@@ -276,7 +275,7 @@
}
/**
- * adreno_perfcounter_read_group: Determine which countables are in counters
+ * adreno_perfcounter_read_group() - Determine which countables are in counters
* @adreno_dev: Adreno device to configure
* @reads: List of kgsl_perfcounter_read_groups
* @count: Length of list
@@ -353,6 +352,61 @@
}
/**
+ * adreno_perfcounter_get_groupid() - Get the performance counter ID
+ * @adreno_dev: Adreno device
+ * @name: Performance counter group name string
+ *
+ * Get the groupid based on the name and return this ID
+ */
+
+int adreno_perfcounter_get_groupid(struct adreno_device *adreno_dev,
+ const char *name)
+{
+
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+ int i;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ /* perfcounter get/put/query not allowed on a2xx */
+ if (adreno_is_a2xx(adreno_dev))
+ return -EINVAL;
+
+ for (i = 0; i < counters->group_count; ++i) {
+ group = &(counters->groups[i]);
+ if (!strcmp(group->name, name))
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * adreno_perfcounter_get_name() - Get the group name
+ * @adreno_dev: Adreno device
+ * @groupid: Desired performance counter groupid
+ *
+ * Get the name based on the groupid and return it
+ */
+
+const char *adreno_perfcounter_get_name(struct adreno_device *adreno_dev,
+ unsigned int groupid)
+{
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+
+ /* perfcounter get/put/query not allowed on a2xx */
+ if (adreno_is_a2xx(adreno_dev))
+ return NULL;
+
+ if (groupid >= counters->group_count)
+ return NULL;
+
+ return counters->groups[groupid].name;
+}
+
+/**
* adreno_perfcounter_query_group: Determine which countables are in counters
* @adreno_dev: Adreno device to configure
* @groupid: Desired performance counter group
@@ -445,8 +499,11 @@
for (i = 0; i < group->reg_count; i++) {
if (group->regs[i].countable == countable) {
/* Countable already associated with counter */
- group->regs[i].refcount++;
- group->regs[i].flags |= flags;
+ if (flags & PERFCOUNTER_FLAG_KERNEL)
+ group->regs[i].kernelcount++;
+ else
+ group->regs[i].usercount++;
+
if (offset)
*offset = group->regs[i].offset;
return 0;
@@ -463,14 +520,20 @@
/* initialize the new counter */
group->regs[empty].countable = countable;
- group->regs[empty].refcount = 1;
+
+ /* set initial kernel and user count */
+ if (flags & PERFCOUNTER_FLAG_KERNEL) {
+ group->regs[empty].kernelcount = 1;
+ group->regs[empty].usercount = 0;
+ } else {
+ group->regs[empty].kernelcount = 0;
+ group->regs[empty].usercount = 1;
+ }
/* enable the new counter */
adreno_dev->gpudev->perfcounter_enable(adreno_dev, groupid, empty,
countable);
- group->regs[empty].flags = flags;
-
if (offset)
*offset = group->regs[empty].offset;
@@ -483,12 +546,13 @@
* @adreno_dev: Adreno device to configure
* @groupid: Desired performance counter group
* @countable: Countable desired to be freed from a counter
+ * @flags: Flag to determine if kernel or user space request
*
* Put a performance counter/countable pair that was previously received. If
* no one else is using the countable, free up the counter for others.
*/
int adreno_perfcounter_put(struct adreno_device *adreno_dev,
- unsigned int groupid, unsigned int countable)
+ unsigned int groupid, unsigned int countable, unsigned int flags)
{
struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
struct adreno_perfcount_group *group;
@@ -504,24 +568,27 @@
group = &(counters->groups[groupid]);
+ /*
+ * Find if the counter/countable pair is used currently.
+ * Start cycling through registers in the bank.
+ */
for (i = 0; i < group->reg_count; i++) {
+ /* check if countable assigned is what we are looking for */
if (group->regs[i].countable == countable) {
- if (group->regs[i].refcount > 0) {
- group->regs[i].refcount--;
+ /* found the pair; adjust the count based on request type */
+ if (flags & PERFCOUNTER_FLAG_KERNEL &&
+ group->regs[i].kernelcount > 0)
+ group->regs[i].kernelcount--;
+ else if (group->regs[i].usercount > 0)
+ group->regs[i].usercount--;
+ else
+ break;
- /*
- * book keeping to ensure we never free a
- * perf counter used by kernel
- */
- if (group->regs[i].flags &&
- group->regs[i].refcount == 0)
- group->regs[i].refcount++;
-
- /* make available if not used */
- if (group->regs[i].refcount == 0)
- group->regs[i].countable =
- KGSL_PERFCOUNTER_NOT_USED;
- }
+ /* mark available if not used anymore */
+ if (group->regs[i].kernelcount == 0 &&
+ group->regs[i].usercount == 0)
+ group->regs[i].countable =
+ KGSL_PERFCOUNTER_NOT_USED;
return 0;
}
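The get/put changes above replace the single refcount with separate kernel and user counts, so a userspace put can never release a counter the kernel still holds. A minimal sketch of that bookkeeping, using a hypothetical demo_counter structure rather than the driver's real register bookkeeping:

/*
 * Sketch, not driver code: a slot is released only when both the
 * kernel and the user owner counts have dropped to zero.
 */
struct demo_counter {
	unsigned int countable;
	unsigned int kernelcount;
	unsigned int usercount;
};

static void demo_put(struct demo_counter *c, bool from_kernel)
{
	if (from_kernel && c->kernelcount > 0)
		c->kernelcount--;
	else if (!from_kernel && c->usercount > 0)
		c->usercount--;

	/* mark the slot unused once nobody references it */
	if (c->kernelcount == 0 && c->usercount == 0)
		c->countable = ~0u;
}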
@@ -532,23 +599,9 @@
static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
{
- irqreturn_t result;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- result = adreno_dev->gpudev->irq_handler(adreno_dev);
-
- device->pwrctrl.irq_last = 1;
- if (device->requested_state == KGSL_STATE_NONE) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
- queue_work(device->work_queue, &device->idle_check_ws);
- }
-
- /* Reset the time-out in our idle timer */
- mod_timer_pending(&device->idle_timer,
- jiffies + device->pwrctrl.interval_timeout);
- mod_timer_pending(&device->hang_timer,
- (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
- return result;
+ return adreno_dev->gpudev->irq_handler(adreno_dev);
}
static void adreno_cleanup_pt(struct kgsl_device *device,
@@ -563,6 +616,8 @@
kgsl_mmu_unmap(pagetable, &device->memstore);
+ kgsl_mmu_unmap(pagetable, &adreno_dev->profile.shared_buffer);
+
kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
}
@@ -585,6 +640,11 @@
if (result)
goto unmap_memptrs_desc;
+ result = kgsl_mmu_map_global(pagetable,
+ &adreno_dev->profile.shared_buffer);
+ if (result)
+ goto unmap_profile_shared;
+
result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory);
if (result)
goto unmap_memstore_desc;
@@ -598,6 +658,9 @@
device->mmu.setstate_memory.size;
return result;
+unmap_profile_shared:
+ kgsl_mmu_unmap(pagetable, &adreno_dev->profile.shared_buffer);
+
unmap_memstore_desc:
kgsl_mmu_unmap(pagetable, &device->memstore);
@@ -845,7 +908,7 @@
adreno_dev->dev.cff_dump_enable);
}
-static void adreno_iommu_setstate(struct kgsl_device *device,
+static int adreno_iommu_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
@@ -858,22 +921,24 @@
struct kgsl_context *context;
struct adreno_context *adreno_ctx = NULL;
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned int result;
if (adreno_use_default_setstate(adreno_dev)) {
kgsl_mmu_device_setstate(&device->mmu, flags);
- return;
+ return 0;
}
num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
context = kgsl_context_get(device, context_id);
if (context == NULL)
- return;
+ return -EINVAL;
adreno_ctx = ADRENO_CONTEXT(context);
- if (kgsl_mmu_enable_clk(&device->mmu,
- KGSL_IOMMU_CONTEXT_USER))
- return;
+ result = kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
+
+ if (result)
+ goto done;
pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
device->mmu.hwpagetable);
@@ -907,14 +972,24 @@
* This returns the per context timestamp but we need to
* use the global timestamp for iommu clock disablement
*/
- adreno_ringbuffer_issuecmds(device, adreno_ctx, KGSL_CMD_FLAGS_PMODE,
- &link[0], sizedwords);
+ result = adreno_ringbuffer_issuecmds(device, adreno_ctx,
+ KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
- kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts, true);
+ /*
+ * On error disable the IOMMU clock right away otherwise turn it off
+ * after the command has been retired
+ */
+ if (result)
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+ else
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts, true);
+
+done:
kgsl_context_put(context);
+ return result;
}
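The setstate change above mostly affects when the IOMMU clock vote is dropped: immediately if the submission never reached the ringbuffer, otherwise once the submitted timestamp retires. A compact restatement of that decision, assuming the kgsl_mmu_disable_clk_on_ts() semantics shown in this hunk; demo_release_iommu_clk() is a placeholder:

static void demo_release_iommu_clk(struct kgsl_device *device,
				unsigned int submit_ts, int submit_err)
{
	if (submit_err)
		/* nothing will retire, drop the vote right away */
		kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
	else
		/* let the vote drop when submit_ts retires */
		kgsl_mmu_disable_clk_on_ts(&device->mmu, submit_ts, true);
}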
-static void adreno_gpummu_setstate(struct kgsl_device *device,
+static int adreno_gpummu_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
@@ -925,6 +1000,7 @@
unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
struct kgsl_context *context;
struct adreno_context *adreno_ctx = NULL;
+ int ret = 0;
/*
* Fix target freeze issue by adding TLB flush for each submit
@@ -941,7 +1017,8 @@
if (!adreno_use_default_setstate(adreno_dev)) {
context = kgsl_context_get(device, context_id);
if (context == NULL)
- return;
+ return -EINVAL;
+
adreno_ctx = ADRENO_CONTEXT(context);
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
@@ -1016,7 +1093,7 @@
sizedwords += 2;
}
- adreno_ringbuffer_issuecmds(device, adreno_ctx,
+ ret = adreno_ringbuffer_issuecmds(device, adreno_ctx,
KGSL_CMD_FLAGS_PMODE,
&link[0], sizedwords);
@@ -1024,9 +1101,11 @@
} else {
kgsl_mmu_device_setstate(&device->mmu, flags);
}
+
+ return ret;
}
-static void adreno_setstate(struct kgsl_device *device,
+static int adreno_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
@@ -1035,6 +1114,8 @@
return adreno_gpummu_setstate(device, context_id, flags);
else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
return adreno_iommu_setstate(device, context_id, flags);
+
+ return 0;
}
static unsigned int
@@ -1268,172 +1349,6 @@
}
-static struct msm_dcvs_core_info *adreno_of_get_dcvs(struct device_node *parent)
-{
- struct device_node *node, *child;
- struct msm_dcvs_core_info *info = NULL;
- int count = 0;
- int ret = -EINVAL;
-
- node = adreno_of_find_subnode(parent, "qcom,dcvs-core-info");
- if (node == NULL)
- return ERR_PTR(-EINVAL);
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
-
- if (info == NULL) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*info));
- ret = -ENOMEM;
- goto err;
- }
-
- for_each_child_of_node(node, child)
- count++;
-
- info->power_param.num_freq = count;
-
- info->freq_tbl = kzalloc(info->power_param.num_freq *
- sizeof(struct msm_dcvs_freq_entry),
- GFP_KERNEL);
-
- if (info->freq_tbl == NULL) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n",
- info->power_param.num_freq *
- sizeof(struct msm_dcvs_freq_entry));
- ret = -ENOMEM;
- goto err;
- }
-
- for_each_child_of_node(node, child) {
- unsigned int index;
-
- if (adreno_of_read_property(child, "reg", &index))
- goto err;
-
- if (index >= info->power_param.num_freq) {
- KGSL_CORE_ERR("DCVS freq entry %d is out of range\n",
- index);
- continue;
- }
-
- if (adreno_of_read_property(child, "qcom,freq",
- &info->freq_tbl[index].freq))
- goto err;
-
- if (adreno_of_read_property(child, "qcom,voltage",
- &info->freq_tbl[index].voltage))
- info->freq_tbl[index].voltage = 0;
-
- if (adreno_of_read_property(child, "qcom,is_trans_level",
- &info->freq_tbl[index].is_trans_level))
- info->freq_tbl[index].is_trans_level = 0;
-
- if (adreno_of_read_property(child, "qcom,active-energy-offset",
- &info->freq_tbl[index].active_energy_offset))
- info->freq_tbl[index].active_energy_offset = 0;
-
- if (adreno_of_read_property(child, "qcom,leakage-energy-offset",
- &info->freq_tbl[index].leakage_energy_offset))
- info->freq_tbl[index].leakage_energy_offset = 0;
- }
-
- if (adreno_of_read_property(node, "qcom,num-cores", &info->num_cores))
- goto err;
-
- info->sensors = kzalloc(info->num_cores *
- sizeof(int),
- GFP_KERNEL);
-
- for (count = 0; count < info->num_cores; count++) {
- if (adreno_of_read_property(node, "qcom,sensors",
- &(info->sensors[count])))
- goto err;
- }
-
- if (adreno_of_read_property(node, "qcom,core-core-type",
- &info->core_param.core_type))
- goto err;
-
- if (adreno_of_read_property(node, "qcom,algo-disable-pc-threshold",
- &info->algo_param.disable_pc_threshold))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-em-win-size-min-us",
- &info->algo_param.em_win_size_min_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-em-win-size-max-us",
- &info->algo_param.em_win_size_max_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-em-max-util-pct",
- &info->algo_param.em_max_util_pct))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-group-id",
- &info->algo_param.group_id))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-max-freq-chg-time-us",
- &info->algo_param.max_freq_chg_time_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-slack-mode-dynamic",
- &info->algo_param.slack_mode_dynamic))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-slack-weight-thresh-pct",
- &info->algo_param.slack_weight_thresh_pct))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-slack-time-min-us",
- &info->algo_param.slack_time_min_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-slack-time-max-us",
- &info->algo_param.slack_time_max_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-ss-win-size-min-us",
- &info->algo_param.ss_win_size_min_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-ss-win-size-max-us",
- &info->algo_param.ss_win_size_max_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-ss-util-pct",
- &info->algo_param.ss_util_pct))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-ss-no-corr-below-freq",
- &info->algo_param.ss_no_corr_below_freq))
- goto err;
-
- if (adreno_of_read_property(node, "qcom,energy-active-coeff-a",
- &info->energy_coeffs.active_coeff_a))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-active-coeff-b",
- &info->energy_coeffs.active_coeff_b))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-active-coeff-c",
- &info->energy_coeffs.active_coeff_c))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-a",
- &info->energy_coeffs.leakage_coeff_a))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-b",
- &info->energy_coeffs.leakage_coeff_b))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-c",
- &info->energy_coeffs.leakage_coeff_c))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-d",
- &info->energy_coeffs.leakage_coeff_d))
- goto err;
-
- if (adreno_of_read_property(node, "qcom,power-current-temp",
- &info->power_param.current_temp))
- goto err;
-
- return info;
-
-err:
- if (info)
- kfree(info->freq_tbl);
-
- kfree(info);
-
- return ERR_PTR(ret);
-}
-
static int adreno_of_get_iommu(struct device_node *parent,
struct kgsl_device_platform_data *pdata)
{
@@ -1575,12 +1490,6 @@
goto err;
}
- pdata->core_info = adreno_of_get_dcvs(pdev->dev.of_node);
- if (IS_ERR_OR_NULL(pdata->core_info)) {
- ret = PTR_ERR(pdata->core_info);
- goto err;
- }
-
ret = adreno_of_get_iommu(pdev->dev.of_node, pdata);
if (ret)
goto err;
@@ -1593,10 +1502,6 @@
err:
if (pdata) {
- if (pdata->core_info)
- kfree(pdata->core_info->freq_tbl);
- kfree(pdata->core_info);
-
if (pdata->iommu_data)
kfree(pdata->iommu_data->iommu_ctxs);
@@ -1686,7 +1591,12 @@
if (status)
goto error_close_rb;
+ status = adreno_dispatcher_init(adreno_dev);
+ if (status)
+ goto error_close_device;
+
adreno_debugfs_init(device);
+ adreno_profile_init(device);
adreno_ft_init_sysfs(device);
@@ -1700,6 +1610,8 @@
return 0;
+error_close_device:
+ kgsl_device_platform_remove(device);
error_close_rb:
adreno_ringbuffer_close(&adreno_dev->ringbuffer);
error:
@@ -1717,10 +1629,12 @@
adreno_dev = ADRENO_DEVICE(device);
adreno_coresight_remove(pdev);
+ adreno_profile_close(device);
kgsl_pwrscale_detach_policy(device);
kgsl_pwrscale_close(device);
+ adreno_dispatcher_close(adreno_dev);
adreno_ringbuffer_close(&adreno_dev->ringbuffer);
kgsl_device_platform_remove(device);
@@ -1732,8 +1646,7 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int i;
- if (KGSL_STATE_DUMP_AND_FT != device->state)
- kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
/* Power up the device */
kgsl_pwrctrl_enable(device);
@@ -1803,8 +1716,7 @@
kgsl_cffdump_open(device);
- if (KGSL_STATE_DUMP_AND_FT != device->state)
- kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
regulator_left_on = (regulator_is_enabled(device->pwrctrl.gpu_reg) ||
(device->pwrctrl.gpu_cx &&
@@ -1855,11 +1767,11 @@
if (status)
goto error_irq_off;
- mod_timer(&device->hang_timer,
- (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
-
adreno_perfcounter_start(adreno_dev);
+ /* Start the dispatcher */
+ adreno_dispatcher_start(adreno_dev);
+
device->reset_counter++;
return 0;
@@ -1889,6 +1801,7 @@
adreno_dev->drawctxt_active = NULL;
+ adreno_dispatcher_stop(adreno_dev);
adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
kgsl_mmu_stop(&device->mmu);
@@ -1896,7 +1809,6 @@
device->ftbl->irqctrl(device, 0);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
del_timer_sync(&device->idle_timer);
- del_timer_sync(&device->hang_timer);
adreno_ocmem_gmem_free(adreno_dev);
@@ -1908,917 +1820,41 @@
return 0;
}
-/*
- * Set the reset status of all contexts to
- * INNOCENT_CONTEXT_RESET_EXT except for the bad context
- * since thats the guilty party, if fault tolerance failed then
- * mark all as guilty
- */
-
-static int _mark_context_status(int id, void *ptr, void *data)
-{
- unsigned int ft_status = *((unsigned int *) data);
- struct kgsl_context *context = ptr;
- struct adreno_context *adreno_context = ADRENO_CONTEXT(context);
-
- if (ft_status) {
- context->reset_status =
- KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
- adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
- } else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
- context->reset_status) {
- if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
- CTXT_FLAGS_GPU_HANG_FT))
- context->reset_status =
- KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
- else
- context->reset_status =
- KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
- }
-
- return 0;
-}
-
-static void adreno_mark_context_status(struct kgsl_device *device,
- int ft_status)
-{
- /* Mark the status for all the contexts in the device */
-
- read_lock(&device->context_lock);
- idr_for_each(&device->context_idr, _mark_context_status, &ft_status);
- read_unlock(&device->context_lock);
-}
-
-/*
- * For hung contexts set the current memstore value to the most recent issued
- * timestamp - this resets the status and lets the system continue on
- */
-
-static int _set_max_ts(int id, void *ptr, void *data)
-{
- struct kgsl_device *device = data;
- struct kgsl_context *context = ptr;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
-
- if (drawctxt && drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(context->id,
- soptimestamp), drawctxt->timestamp);
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(context->id,
- eoptimestamp), drawctxt->timestamp);
- }
-
- return 0;
-}
-
-static void adreno_set_max_ts_for_bad_ctxs(struct kgsl_device *device)
-{
- read_lock(&device->context_lock);
- idr_for_each(&device->context_idr, _set_max_ts, device);
- read_unlock(&device->context_lock);
-}
-
-static void adreno_destroy_ft_data(struct adreno_ft_data *ft_data)
-{
- vfree(ft_data->rb_buffer);
- vfree(ft_data->bad_rb_buffer);
- vfree(ft_data->good_rb_buffer);
-}
-
-static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
- unsigned int *ptr,
- bool inc)
-{
- int status = -EINVAL;
- unsigned int val1;
- unsigned int size = rb->buffer_desc.size;
- unsigned int start_ptr = *ptr;
-
- while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
- if (inc)
- start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
- size);
- else
- start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
- size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
- /* Ensure above read is finished before next read */
- rmb();
- if (KGSL_CMD_IDENTIFIER == val1) {
- if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
- start_ptr = adreno_ringbuffer_dec_wrapped(
- start_ptr, size);
- *ptr = start_ptr;
- status = 0;
- break;
- }
- }
- return status;
-}
-
-static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
- unsigned int *rb_rptr,
- unsigned int global_eop,
- bool inc)
-{
- int status = -EINVAL;
- unsigned int temp_rb_rptr = *rb_rptr;
- unsigned int size = rb->buffer_desc.size;
- unsigned int val[3];
- int i = 0;
- bool check = false;
-
- if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
- return status;
-
- do {
- /*
- * when decrementing we need to decrement first and
- * then read make sure we cover all the data
- */
- if (!inc)
- temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
- temp_rb_rptr, size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
- temp_rb_rptr);
- /* Ensure above read is finished before next read */
- rmb();
-
- if (check && ((inc && val[i] == global_eop) ||
- (!inc && (val[i] ==
- cp_type3_packet(CP_MEM_WRITE, 2) ||
- val[i] == CACHE_FLUSH_TS)))) {
- /* decrement i, i.e i = (i - 1 + 3) % 3 if
- * we are going forward, else increment i */
- i = (i + 2) % 3;
- if (val[i] == rb->device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp)) {
- int j = ((i + 2) % 3);
- if ((inc && (val[j] == CACHE_FLUSH_TS ||
- val[j] == cp_type3_packet(
- CP_MEM_WRITE, 2))) ||
- (!inc && val[j] == global_eop)) {
- /* Found the global eop */
- status = 0;
- break;
- }
- }
- /* if no match found then increment i again
- * since we decremented before matching */
- i = (i + 1) % 3;
- }
- if (inc)
- temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
- temp_rb_rptr, size);
-
- i = (i + 1) % 3;
- if (2 == i)
- check = true;
- } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
- /* temp_rb_rptr points to the command stream after global eop,
- * move backward till the start of command sequence */
- if (!status) {
- status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
- if (!status) {
- *rb_rptr = temp_rb_rptr;
- KGSL_FT_INFO(rb->device,
- "Offset of cmd sequence after eop timestamp: 0x%x\n",
- temp_rb_rptr / sizeof(unsigned int));
- }
- }
- if (status)
- KGSL_FT_ERR(rb->device,
- "Failed to find the command sequence after eop timestamp %x\n",
- global_eop);
- return status;
-}
-
-static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
- unsigned int *rb_rptr,
- unsigned int ib1)
-{
- int status = -EINVAL;
- unsigned int temp_rb_rptr = *rb_rptr;
- unsigned int size = rb->buffer_desc.size;
- unsigned int val[2];
- int i = 0;
- bool check = false;
- bool ctx_switch = false;
-
- while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
- /* Ensure above read is finished before next read */
- rmb();
-
- if (check && val[i] == ib1) {
- /* decrement i, i.e i = (i - 1 + 2) % 2 */
- i = (i + 1) % 2;
- if (adreno_cmd_is_ib(val[i])) {
- /* go till start of command sequence */
- status = _find_start_of_cmd_seq(rb,
- &temp_rb_rptr, false);
-
- KGSL_FT_INFO(rb->device,
- "Found the hanging IB at offset 0x%x\n",
- temp_rb_rptr / sizeof(unsigned int));
- break;
- }
- /* if no match the increment i since we decremented
- * before checking */
- i = (i + 1) % 2;
- }
- /* Make sure you do not encounter a context switch twice, we can
- * encounter it once for the bad context as the start of search
- * can point to the context switch */
- if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
- if (ctx_switch) {
- KGSL_FT_ERR(rb->device,
- "Context switch encountered before bad "
- "IB found\n");
- break;
- }
- ctx_switch = true;
- }
- i = (i + 1) % 2;
- if (1 == i)
- check = true;
- temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
- size);
- }
- if (!status)
- *rb_rptr = temp_rb_rptr;
- return status;
-}
-
-static void adreno_setup_ft_data(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
- int ret = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- struct kgsl_context *context;
- struct adreno_context *adreno_context;
- unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
-
- memset(ft_data, 0, sizeof(*ft_data));
- ft_data->start_of_replay_cmds = 0xFFFFFFFF;
- ft_data->replay_for_snapshot = 0xFFFFFFFF;
-
- adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ft_data->ib1);
-
- kgsl_sharedmem_readl(&device->memstore, &ft_data->context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
-
- kgsl_sharedmem_readl(&device->memstore,
- &ft_data->global_eop,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp));
-
- /* Ensure context id and global eop ts read complete */
- rmb();
-
- ft_data->rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!ft_data->rb_buffer) {
- KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
- rb->buffer_desc.size);
- return;
- }
-
- ft_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!ft_data->bad_rb_buffer) {
- KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
- rb->buffer_desc.size);
- return;
- }
-
- ft_data->good_rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!ft_data->good_rb_buffer) {
- KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
- rb->buffer_desc.size);
- return;
- }
- ft_data->status = 0;
-
- /* find the start of bad command sequence in rb */
- context = kgsl_context_get(device, ft_data->context_id);
-
- ft_data->ft_policy = adreno_dev->ft_policy;
-
- if (!ft_data->ft_policy)
- ft_data->ft_policy = KGSL_FT_DEFAULT_POLICY;
-
- /* Look for the command stream that is right after the global eop */
- ret = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
- ft_data->global_eop + 1, false);
- if (ret) {
- ft_data->ft_policy |= KGSL_FT_TEMP_DISABLE;
- goto done;
- } else {
- ft_data->start_of_replay_cmds = rb_rptr;
- ft_data->ft_policy &= ~KGSL_FT_TEMP_DISABLE;
- }
-
- if (context) {
- adreno_context = ADRENO_CONTEXT(context);
- if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
- if (ft_data->ib1) {
- ret = _find_hanging_ib_sequence(rb,
- &rb_rptr, ft_data->ib1);
- if (ret) {
- KGSL_FT_ERR(device,
- "Start not found for replay IB seq\n");
- goto done;
- }
- ft_data->start_of_replay_cmds = rb_rptr;
- ft_data->replay_for_snapshot = rb_rptr;
- }
- }
- }
-
-done:
- kgsl_context_put(context);
-}
-
-static int
-_adreno_check_long_ib(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int curr_global_ts = 0;
-
- /* check if the global ts is still the same */
- kgsl_sharedmem_readl(&device->memstore,
- &curr_global_ts,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp));
- /* Ensure above read is finished before long ib check */
- rmb();
-
- /* Mark long ib as handled */
- adreno_dev->long_ib = 0;
-
- if (curr_global_ts == adreno_dev->long_ib_ts) {
- KGSL_FT_ERR(device,
- "IB ran too long, invalidate ctxt\n");
- return 1;
- } else {
- /* Do nothing GPU has gone ahead */
- KGSL_FT_INFO(device, "false long ib detection return\n");
- return 0;
- }
-}
-
/**
- * adreno_soft_reset() - Do a soft reset of the GPU hardware
- * @device: KGSL device to soft reset
+ * adreno_reset() - Helper function to reset the GPU
+ * @device: Pointer to the KGSL device structure for the GPU
*
- * "soft reset" the GPU hardware - this is a fast path GPU reset
- * The GPU hardware is reset but we never pull power so we can skip
- * a lot of the standard adreno_stop/adreno_start sequence
+ * Try to reset the GPU to recover from a fault. First, try to do a low latency
+ * soft reset. If the soft reset fails for some reason, then bring out the big
+ * guns and toggle the footswitch.
*/
-int adreno_soft_reset(struct kgsl_device *device)
+int adreno_reset(struct kgsl_device *device)
{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int ret;
- /* If the jump table index is 0 soft reset is not supported */
- if ((!adreno_dev->pm4_jt_idx) || (!adreno_dev->gpudev->soft_reset)) {
- dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
- return -EINVAL;
- }
+ /* Try soft reset first */
+ if (adreno_soft_reset(device) == 0)
+ return 0;
- if (adreno_dev->drawctxt_active)
- kgsl_context_put(&adreno_dev->drawctxt_active->base);
-
- adreno_dev->drawctxt_active = NULL;
-
- /* Stop the ringbuffer */
- adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
-
- /* Delete the idle timer */
- del_timer_sync(&device->idle_timer);
-
- /* Make sure we are totally awake */
- kgsl_pwrctrl_enable(device);
-
- /* Reset the GPU */
- adreno_dev->gpudev->soft_reset(adreno_dev);
-
- /* Reinitialize the GPU */
- adreno_dev->gpudev->start(adreno_dev);
-
- /* Enable IRQ */
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
- device->ftbl->irqctrl(device, 1);
-
- /*
- * Restart the ringbuffer - we can go down the warm start path because
- * power was never yanked
- */
- ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
+ /* If it failed, then pull the power */
+ ret = adreno_stop(device);
if (ret)
return ret;
- device->reset_counter++;
+ ret = adreno_start(device);
- return 0;
-}
-
-static int
-_adreno_ft_restart_device(struct kgsl_device *device,
- struct kgsl_context *context)
-{
- /* If device soft reset fails try hard reset */
- if (adreno_soft_reset(device))
- KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
- else
- /* Soft reset is successful */
- goto reset_done;
-
- /* restart device */
- if (adreno_stop(device)) {
- KGSL_FT_ERR(device, "Device stop failed\n");
- return 1;
- }
-
- if (adreno_init(device)) {
- KGSL_FT_ERR(device, "Device init failed\n");
- return 1;
- }
-
- if (adreno_start(device)) {
- KGSL_FT_ERR(device, "Device start failed\n");
- return 1;
- }
-
-reset_done:
- if (context)
- kgsl_mmu_setstate(&device->mmu, context->pagetable,
- KGSL_MEMSTORE_GLOBAL);
-
- /* If iommu is used then we need to make sure that the iommu clocks
- * are on since there could be commands in pipeline that touch iommu */
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
- if (kgsl_mmu_enable_clk(&device->mmu,
- KGSL_IOMMU_CONTEXT_USER))
- return 1;
- }
-
- return 0;
-}
-
-static inline void
-_adreno_debug_ft_info(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
-
- /*
- * Dumping rb is a very useful tool to debug FT.
- * It will tell us if we are extracting the rb correctly
- * NOP'ing the right IB, skipping the EOF correctly etc.
- */
- if (device->ft_log >= 7) {
-
- /* Print fault tolerance data here */
- KGSL_FT_INFO(device, "Temp RB buffer size 0x%X\n",
- ft_data->rb_size);
- adreno_dump_rb(device, ft_data->rb_buffer,
- ft_data->rb_size<<2, 0, ft_data->rb_size);
-
- KGSL_FT_INFO(device, "Bad RB buffer size 0x%X\n",
- ft_data->bad_rb_size);
- adreno_dump_rb(device, ft_data->bad_rb_buffer,
- ft_data->bad_rb_size<<2, 0, ft_data->bad_rb_size);
-
- KGSL_FT_INFO(device, "Good RB buffer size 0x%X\n",
- ft_data->good_rb_size);
- adreno_dump_rb(device, ft_data->good_rb_buffer,
- ft_data->good_rb_size<<2, 0, ft_data->good_rb_size);
-
- }
-}
-
-static int
-_adreno_ft_resubmit_rb(struct kgsl_device *device,
- struct adreno_ringbuffer *rb,
- struct kgsl_context *context,
- struct adreno_ft_data *ft_data,
- unsigned int *buff, unsigned int size)
-{
- unsigned int ret = 0;
- unsigned int retry_num = 0;
-
- _adreno_debug_ft_info(device, ft_data);
-
- do {
- ret = _adreno_ft_restart_device(device, context);
- if (ret == 0)
- break;
+ if (ret == 0) {
/*
- * If device restart fails sleep for 20ms before
- * attempting restart. This allows GPU HW to settle
- * and improve the chances of next restart to be
- * successful.
+ * If active_cnt is non-zero then the system was active before
+ * going into a reset - put it back in that state
*/
- msleep(20);
- KGSL_FT_ERR(device, "Retry device restart %d\n", retry_num);
- retry_num++;
- } while (retry_num < 4);
- if (ret) {
- KGSL_FT_ERR(device, "Device restart failed\n");
- BUG_ON(1);
- goto done;
- }
-
- if (size) {
-
- /* submit commands and wait for them to pass */
- adreno_ringbuffer_restore(rb, buff, size);
-
- ret = adreno_idle(device);
- }
-
-done:
- return ret;
-}
-
-
-static int
-_adreno_ft(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
- int ret = 0, i;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- struct kgsl_context *context;
- struct adreno_context *adreno_context = NULL;
- struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
- unsigned int long_ib = 0;
- static int no_context_ft;
- struct kgsl_mmu *mmu = &device->mmu;
-
- context = kgsl_context_get(device, ft_data->context_id);
-
- if (context == NULL) {
- KGSL_FT_ERR(device, "Last context unknown id:%d\n",
- ft_data->context_id);
- if (no_context_ft) {
- /*
- * If 2 consecutive no context ft occurred then
- * just reset GPU
- */
- no_context_ft = 0;
- goto play_good_cmds;
- }
- } else {
- no_context_ft = 0;
- adreno_context = ADRENO_CONTEXT(context);
- adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
- /*
- * set the invalid ts flag to 0 for this context since we have
- * detected a hang for it
- */
- context->wait_on_invalid_ts = false;
-
- if (!(adreno_context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
- ft_data->status = 1;
- KGSL_FT_ERR(device, "Fault tolerance not supported\n");
- goto play_good_cmds;
- }
-
- /*
- * This flag will be set by userspace for contexts
- * that do not want to be fault tolerant (ex: OPENCL)
- */
- if (adreno_context->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE) {
- ft_data->status = 1;
- KGSL_FT_ERR(device,
- "No FT set for this context play good cmds\n");
- goto play_good_cmds;
- }
-
- }
-
- /* Check if we detected a long running IB, if false return */
- if ((adreno_context) && (adreno_dev->long_ib)) {
- long_ib = _adreno_check_long_ib(device);
- if (!long_ib) {
- adreno_context->flags &= ~CTXT_FLAGS_GPU_HANG;
- return 0;
- }
- }
-
- /*
- * Extract valid contents from rb which can still be executed after
- * hang
- */
- adreno_ringbuffer_extract(rb, ft_data);
-
- /* If long IB detected do not attempt replay of bad cmds */
- if (long_ib) {
- ft_data->status = 1;
- _adreno_debug_ft_info(device, ft_data);
- goto play_good_cmds;
- }
-
- if ((ft_data->ft_policy & KGSL_FT_DISABLE) ||
- (ft_data->ft_policy & KGSL_FT_TEMP_DISABLE)) {
- KGSL_FT_ERR(device, "NO FT policy play only good cmds\n");
- ft_data->status = 1;
- goto play_good_cmds;
- }
-
- /* Do not try to replay if hang is due to a pagefault */
- if (context && test_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv)) {
- /* Resume MMU */
- mmu->mmu_ops->mmu_pagefault_resume(mmu);
- if ((ft_data->context_id == context->id) &&
- (ft_data->global_eop == context->pagefault_ts)) {
- ft_data->ft_policy &= ~KGSL_FT_REPLAY;
- KGSL_FT_ERR(device, "MMU fault skipping replay\n");
- }
- clear_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv);
- }
-
- if (ft_data->ft_policy & KGSL_FT_REPLAY) {
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
- if (ret) {
- KGSL_FT_ERR(device, "Replay status: 1\n");
- ft_data->status = 1;
- } else
- goto play_good_cmds;
- }
-
- if (ft_data->ft_policy & KGSL_FT_SKIPIB) {
- for (i = 0; i < ft_data->bad_rb_size; i++) {
- if ((ft_data->bad_rb_buffer[i] ==
- CP_HDR_INDIRECT_BUFFER_PFD) &&
- (ft_data->bad_rb_buffer[i+1] == ft_data->ib1)) {
-
- ft_data->bad_rb_buffer[i] = cp_nop_packet(2);
- ft_data->bad_rb_buffer[i+1] =
- KGSL_NOP_IB_IDENTIFIER;
- ft_data->bad_rb_buffer[i+2] =
- KGSL_NOP_IB_IDENTIFIER;
- break;
- }
- }
-
- if ((i == (ft_data->bad_rb_size)) || (!ft_data->ib1)) {
- KGSL_FT_ERR(device, "Bad IB to NOP not found\n");
- ft_data->status = 1;
- goto play_good_cmds;
- }
-
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
- if (ret) {
- KGSL_FT_ERR(device, "NOP faulty IB status: 1\n");
- ft_data->status = 1;
- } else {
- ft_data->status = 0;
- goto play_good_cmds;
- }
- }
-
- if (ft_data->ft_policy & KGSL_FT_SKIPFRAME) {
- for (i = 0; i < ft_data->bad_rb_size; i++) {
- if (ft_data->bad_rb_buffer[i] ==
- KGSL_END_OF_FRAME_IDENTIFIER) {
- ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
- break;
- }
- }
-
- /* EOF not found in RB, discard till EOF in
- next IB submission */
- if (adreno_context && (i == ft_data->bad_rb_size)) {
- adreno_context->flags |= CTXT_FLAGS_SKIP_EOF;
- KGSL_FT_INFO(device,
- "EOF not found in RB, skip next issueib till EOF\n");
- ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
- }
-
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
- if (ret) {
- KGSL_FT_ERR(device, "Skip EOF status: 1\n");
- ft_data->status = 1;
- } else {
- ft_data->status = 0;
- goto play_good_cmds;
- }
- }
-
-play_good_cmds:
-
- if (ft_data->status)
- KGSL_FT_ERR(device, "Bad context commands failed\n");
- else {
- KGSL_FT_INFO(device, "Bad context commands success\n");
-
- if (adreno_context) {
- adreno_context->flags = (adreno_context->flags &
- ~CTXT_FLAGS_GPU_HANG) | CTXT_FLAGS_GPU_HANG_FT;
- }
-
- if (last_active_ctx)
- _kgsl_context_get(&last_active_ctx->base);
-
- adreno_dev->drawctxt_active = last_active_ctx;
- }
-
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->good_rb_buffer, ft_data->good_rb_size);
-
- if (ret) {
- /*
- * If we fail here we can try to invalidate another
- * context and try fault tolerance again, although
- * we will only try ft with no context once to avoid
- * going into continuous loop of trying ft with no context
- */
- if (!context)
- no_context_ft = 1;
- ret = -EAGAIN;
- KGSL_FT_ERR(device, "Playing good commands unsuccessful\n");
- goto done;
- } else
- KGSL_FT_INFO(device, "Playing good commands successful\n");
-
- /* ringbuffer now has data from the last valid context id,
- * so restore the active_ctx to the last valid context */
- if (ft_data->last_valid_ctx_id) {
- struct kgsl_context *last_ctx = kgsl_context_get(device,
- ft_data->last_valid_ctx_id);
-
- adreno_dev->drawctxt_active = ADRENO_CONTEXT(last_ctx);
- }
-
-done:
- /* Turn off iommu clocks */
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
- kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
-
- kgsl_context_put(context);
- return ret;
-}
-
-static int
-adreno_ft(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
- int ret = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-
- /*
- * If GPU FT is turned off do not run FT.
- * If GPU stall detection is suspected to be false,
- * we can use this option to confirm stall detection.
- */
- if (ft_data->ft_policy & KGSL_FT_OFF) {
- KGSL_FT_ERR(device, "GPU FT turned off\n");
- return 0;
- }
-
- KGSL_FT_INFO(device,
- "Start Parameters: IB1: 0x%X, "
- "Bad context_id: %u, global_eop: 0x%x\n",
- ft_data->ib1, ft_data->context_id, ft_data->global_eop);
-
- KGSL_FT_INFO(device, "Last issued global timestamp: %x\n",
- rb->global_ts);
-
- /* We may need to replay commands multiple times based on whether
- * multiple contexts hang the GPU */
- while (true) {
-
- ret = _adreno_ft(device, ft_data);
-
- if (-EAGAIN == ret) {
- /* setup new fault tolerance parameters and retry, this
- * means more than 1 contexts are causing hang */
- adreno_destroy_ft_data(ft_data);
- adreno_setup_ft_data(device, ft_data);
- KGSL_FT_INFO(device,
- "Retry. Parameters: "
- "IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
- ft_data->ib1, ft_data->context_id,
- ft_data->global_eop);
- } else {
- break;
- }
- }
-
- if (ret)
- goto done;
-
- /* Restore correct states after fault tolerance */
- if (adreno_dev->drawctxt_active)
- device->mmu.hwpagetable =
- adreno_dev->drawctxt_active->base.pagetable;
- else
- device->mmu.hwpagetable = device->mmu.defaultpagetable;
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp), rb->global_ts);
-
- /* switch to NULL ctxt */
- if (adreno_dev->drawctxt_active != NULL)
- adreno_drawctxt_switch(adreno_dev, NULL, 0);
-
-done:
- adreno_set_max_ts_for_bad_ctxs(device);
- adreno_mark_context_status(device, ret);
- KGSL_FT_ERR(device, "policy 0x%X status 0x%x\n",
- ft_data->ft_policy, ret);
- return ret;
-}
-
-int
-adreno_dump_and_exec_ft(struct kgsl_device *device)
-{
- int result = -ETIMEDOUT;
- struct adreno_ft_data ft_data;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int curr_pwrlevel;
-
- if (device->state == KGSL_STATE_HUNG)
- goto done;
- if (device->state == KGSL_STATE_DUMP_AND_FT) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->ft_gate);
- mutex_lock(&device->mutex);
- if (device->state != KGSL_STATE_HUNG)
- result = 0;
- } else {
- /*
- * While fault tolerance is happening we do not want the
- * idle_timer to fire and attempt to change any device state
- */
- del_timer_sync(&device->idle_timer);
-
- kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_FT);
- INIT_COMPLETION(device->ft_gate);
- /* Detected a hang */
-
- kgsl_cffdump_hang(device);
- /* Run fault tolerance at max power level */
- curr_pwrlevel = pwr->active_pwrlevel;
- kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
-
- /* Get the fault tolerance data as soon as hang is detected */
- adreno_setup_ft_data(device, &ft_data);
-
- /*
- * If long ib is detected, do not attempt postmortem or
- * snapshot, if GPU is still executing commands
- * we will get errors
- */
- if (!adreno_dev->long_ib) {
- /*
- * Trigger an automatic dump of the state to
- * the console
- */
- kgsl_postmortem_dump(device, 0);
-
- /*
- * Make a GPU snapshot. For now, do it after the
- * PM dump so we can at least be sure the PM dump
- * will work as it always has
- */
- kgsl_device_snapshot(device, 1);
- }
-
- result = adreno_ft(device, &ft_data);
- adreno_destroy_ft_data(&ft_data);
-
- /* restore power level */
- kgsl_pwrctrl_pwrlevel_change(device, curr_pwrlevel);
-
- if (result) {
- kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
- } else {
+ if (atomic_read(&device->active_cnt))
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
- mod_timer(&device->hang_timer,
- (jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_PART)));
- }
- complete_all(&device->ft_gate);
}
-done:
- return result;
+
+ return ret;
}
-EXPORT_SYMBOL(adreno_dump_and_exec_ft);
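Assuming callers hold the device mutex around GPU state changes, as elsewhere in this driver, a hypothetical fault-handler sketch showing how adreno_reset() might be used (demo_recover_from_fault() is a placeholder, not part of this patch):

static int demo_recover_from_fault(struct kgsl_device *device)
{
	int ret;

	mutex_lock(&device->mutex);
	/* soft reset first, full power cycle only if that fails */
	ret = adreno_reset(device);
	mutex_unlock(&device->mutex);

	if (ret)
		KGSL_DRV_ERR(device, "GPU reset failed: %d\n", ret);

	return ret;
}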
/**
* _ft_sysfs_store() - Common routine to write to FT sysfs files
@@ -3217,140 +2253,166 @@
return status;
}
-static int adreno_ringbuffer_drain(struct kgsl_device *device,
- unsigned int *regs)
+/**
+ * adreno_hw_isidle() - Check if the GPU core is idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Return true if the RBBM status register for the GPU type indicates that the
+ * hardware is idle
+ */
+static bool adreno_hw_isidle(struct kgsl_device *device)
+{
+ unsigned int reg_rbbm_status;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Don't consider ourselves idle if there is an IRQ pending */
+ if (adreno_dev->gpudev->irq_pending(adreno_dev))
+ return false;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
+ &reg_rbbm_status);
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ if (reg_rbbm_status == 0x110)
+ return true;
+ } else if (adreno_is_a3xx(adreno_dev)) {
+ if (!(reg_rbbm_status & 0x80000000))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * adreno_soft_reset() - Do a soft reset of the GPU hardware
+ * @device: KGSL device to soft reset
+ *
+ * "soft reset" the GPU hardware - this is a fast path GPU reset
+ * The GPU hardware is reset but we never pull power so we can skip
+ * a lot of the standard adreno_stop/adreno_start sequence
+ */
+int adreno_soft_reset(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- unsigned long wait = jiffies;
- unsigned long timeout = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- unsigned int rptr;
+ int ret;
- do {
- /*
- * Wait is "jiffies" first time in the loop to start
- * GPU stall detection immediately.
- */
- if (time_after(jiffies, wait)) {
- /* Check to see if the core is hung */
- if (adreno_ft_detect(device, regs))
- return -ETIMEDOUT;
+ /* If the jump table index is 0 soft reset is not supported */
+ if ((!adreno_dev->pm4_jt_idx) || (!adreno_dev->gpudev->soft_reset)) {
+ dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
+ return -EINVAL;
+ }
- wait = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
- }
- rptr = adreno_get_rptr(rb);
- if (time_after(jiffies, timeout)) {
- KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n",
- rptr, rb->wptr);
- return -ETIMEDOUT;
- }
- } while (rptr != rb->wptr);
+ if (adreno_dev->drawctxt_active)
+ kgsl_context_put(&adreno_dev->drawctxt_active->base);
+
+ adreno_dev->drawctxt_active = NULL;
+
+ /* Stop the ringbuffer */
+ adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
+
+ /* Delete the idle timer */
+ del_timer_sync(&device->idle_timer);
+
+ /* Make sure we are totally awake */
+ kgsl_pwrctrl_enable(device);
+
+ /* Reset the GPU */
+ adreno_dev->gpudev->soft_reset(adreno_dev);
+
+ /* Reinitialize the GPU */
+ adreno_dev->gpudev->start(adreno_dev);
+
+ /* Enable IRQ */
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+ device->ftbl->irqctrl(device, 1);
+
+ /*
+ * Restart the ringbuffer - we can go down the warm start path because
+ * power was never yanked
+ */
+ ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
+ if (ret)
+ return ret;
+
+ device->reset_counter++;
return 0;
}
-/* Caller must hold the device mutex. */
+/**
+ * adreno_isidle() - return true if the GPU hardware is idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Return true if the GPU hardware is idle and there are no commands pending in
+ * the ringbuffer
+ */
+static bool adreno_isidle(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int rptr;
+
+ if (!kgsl_pwrctrl_isenabled(device))
+ return true;
+
+ rptr = adreno_get_rptr(&adreno_dev->ringbuffer);
+
+ if (rptr == adreno_dev->ringbuffer.wptr)
+ return adreno_hw_isidle(device);
+
+ return false;
+}
+
+/**
+ * adreno_idle() - wait for the GPU hardware to go idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
+ */
+
int adreno_idle(struct kgsl_device *device)
{
- unsigned long wait_time;
- unsigned long wait_time_part;
- unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned long wait = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- memset(prev_reg_val, 0, sizeof(prev_reg_val));
+ /*
+ * Make sure the device mutex is held so the dispatcher can't send any
+ * more commands to the hardware
+ */
- kgsl_cffdump_regpoll(device,
- adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
- 0x00000000, 0x80000000);
+ BUG_ON(!mutex_is_locked(&device->mutex));
-retry:
- /* First, wait for the ringbuffer to drain */
- if (adreno_ringbuffer_drain(device, prev_reg_val))
- goto err;
+ if (adreno_is_a3xx(adreno_dev))
+ kgsl_cffdump_regpoll(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+ 0x00000000, 0x80000000);
+ else
+ kgsl_cffdump_regpoll(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+ 0x110, 0x110);
- /* now, wait for the GPU to finish its operations */
- wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
-
- while (time_before(jiffies, wait_time)) {
+ while (time_before(jiffies, wait)) {
if (adreno_isidle(device))
return 0;
-
- /* Dont wait for timeout, detect hang faster. */
- if (time_after(jiffies, wait_time_part)) {
- wait_time_part = jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_PART);
- if ((adreno_ft_detect(device, prev_reg_val)))
- goto err;
- }
-
}
-err:
- KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
- if (KGSL_STATE_DUMP_AND_FT != device->state &&
- !adreno_dump_and_exec_ft(device)) {
- wait_time = jiffies + ADRENO_IDLE_TIMEOUT;
- goto retry;
- }
+ kgsl_postmortem_dump(device, 0);
+
return -ETIMEDOUT;
}
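The rewritten adreno_idle() reduces to a bounded poll against a jiffies deadline. A generic sketch of that wait pattern, with demo_wait_until() and the condition callback as placeholders:

static int demo_wait_until(struct kgsl_device *device,
			bool (*cond)(struct kgsl_device *),
			unsigned int timeout_ms)
{
	unsigned long expires = jiffies + msecs_to_jiffies(timeout_ms);

	while (time_before(jiffies, expires)) {
		if (cond(device))
			return 0;
		cpu_relax();	/* busy-wait politely between checks */
	}
	return -ETIMEDOUT;
}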
/**
- * is_adreno_rbbm_status_idle - Check if GPU core is idle by probing
- * rbbm_status register
- * @device - Pointer to the GPU device whose idle status is to be
- * checked
- * @returns - Returns whether the core is idle (based on rbbm_status)
- * false if the core is active, true if the core is idle
+ * adreno_drain() - Drain the dispatch queue
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Tell the dispatcher to pause - this has the effect of draining the inflight
+ * command batches
*/
-static bool is_adreno_rbbm_status_idle(struct kgsl_device *device)
+static int adreno_drain(struct kgsl_device *device)
{
- unsigned int reg_rbbm_status;
- bool status = false;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- /* Is the core idle? */
- adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
- &reg_rbbm_status);
-
- if (adreno_is_a2xx(adreno_dev)) {
- if (reg_rbbm_status == 0x110)
- status = true;
- } else {
- if (!(reg_rbbm_status & 0x80000000))
- status = true;
- }
- return status;
-}
-
-static unsigned int adreno_isidle(struct kgsl_device *device)
-{
- int status = false;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-
- /* If the device isn't active, don't force it on. */
- if (kgsl_pwrctrl_isenabled(device)) {
- /* Is the ring buffer is empty? */
- unsigned int rptr = adreno_get_rptr(rb);
- if (rptr == rb->wptr) {
- /*
- * Are there interrupts pending? If so then pretend we
- * are not idle - this avoids the possiblity that we go
- * to a lower power state without handling interrupts
- * first.
- */
-
- if (!adreno_dev->gpudev->irq_pending(adreno_dev)) {
- /* Is the core idle? */
- status = is_adreno_rbbm_status_idle(device);
- }
- }
- } else {
- status = true;
- }
- return status;
+ adreno_dispatcher_pause(adreno_dev);
+ return 0;
}
/* Caller must hold the device mutex. */
@@ -3359,6 +2421,9 @@
int status = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ /* process any profiling results that are available */
+ adreno_profile_process_results(device);
+
/* switch to NULL ctxt */
if (adreno_dev->drawctxt_active != NULL) {
adreno_drawctxt_switch(adreno_dev, NULL, 0);
@@ -3518,342 +2583,6 @@
__raw_writel(value, reg);
}
-static unsigned int _get_context_id(struct kgsl_context *k_ctxt)
-{
- unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
-
- if (k_ctxt != NULL) {
- struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
- if (kgsl_context_detached(k_ctxt))
- context_id = KGSL_CONTEXT_INVALID;
- else if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
- context_id = k_ctxt->id;
- }
-
- return context_id;
-}
-
-static unsigned int adreno_check_hw_ts(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int timestamp)
-{
- int status = 0;
- unsigned int ref_ts, enableflag;
- unsigned int context_id = _get_context_id(context);
-
- /*
- * If the context ID is invalid, we are in a race with
- * the context being destroyed by userspace so bail.
- */
- if (context_id == KGSL_CONTEXT_INVALID) {
- KGSL_DRV_WARN(device, "context was detached");
- return -EINVAL;
- }
-
- status = kgsl_check_timestamp(device, context, timestamp);
- if (status)
- return status;
-
- kgsl_sharedmem_readl(&device->memstore, &enableflag,
- KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable));
- /*
- * Barrier is needed here to make sure the read from memstore
- * has posted
- */
-
- mb();
-
- if (enableflag) {
- kgsl_sharedmem_readl(&device->memstore, &ref_ts,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts));
-
- /* Make sure the memstore read has posted */
- mb();
- if (timestamp_cmp(ref_ts, timestamp) >= 0) {
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts), timestamp);
- /* Make sure the memstore write is posted */
- wmb();
- }
- } else {
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts), timestamp);
- enableflag = 1;
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ts_cmp_enable), enableflag);
-
- /* Make sure the memstore write gets posted */
- wmb();
-
- /*
- * submit a dummy packet so that even if all
- * commands upto timestamp get executed we will still
- * get an interrupt
- */
-
- if (context && device->state != KGSL_STATE_SLUMBER) {
- adreno_ringbuffer_issuecmds(device,
- ADRENO_CONTEXT(context),
- KGSL_CMD_FLAGS_GET_INT, NULL, 0);
- }
- }
-
- return 0;
-}
-
-/* Return 1 if the event timestmp has already passed, 0 if it was marked */
-static int adreno_next_event(struct kgsl_device *device,
- struct kgsl_event *event)
-{
- return adreno_check_hw_ts(device, event->context, event->timestamp);
-}
-
-static int adreno_check_interrupt_timestamp(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int timestamp)
-{
- int status;
-
- mutex_lock(&device->mutex);
- status = adreno_check_hw_ts(device, context, timestamp);
- mutex_unlock(&device->mutex);
-
- return status;
-}
-
-/*
- wait_event_interruptible_timeout checks for the exit condition before
- placing a process in wait q. For conditional interrupts we expect the
- process to already be in its wait q when its exit condition checking
- function is called.
-*/
-#define kgsl_wait_event_interruptible_timeout(wq, condition, timeout, io)\
-({ \
- long __ret = timeout; \
- if (io) \
- __wait_io_event_interruptible_timeout(wq, condition, __ret);\
- else \
- __wait_event_interruptible_timeout(wq, condition, __ret);\
- __ret; \
-})
-
-
-
-unsigned int adreno_ft_detect(struct kgsl_device *device,
- unsigned int *prev_reg_val)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- unsigned int curr_reg_val[FT_DETECT_REGS_COUNT];
- unsigned int fast_hang_detected = 1;
- unsigned int long_ib_detected = 1;
- unsigned int i;
- static unsigned long next_hang_detect_time;
- static unsigned int prev_global_ts;
- unsigned int curr_global_ts = 0;
- unsigned int curr_context_id = 0;
- static struct adreno_context *curr_context;
- static struct kgsl_context *context;
- static char pid_name[TASK_COMM_LEN] = "unknown";
-
- if (!adreno_dev->fast_hang_detect)
- fast_hang_detected = 0;
-
- if (!adreno_dev->long_ib_detect)
- long_ib_detected = 0;
-
- if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED))
- return 0;
-
- if (is_adreno_rbbm_status_idle(device) &&
- (kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED)
- == rb->global_ts)) {
-
- /*
- * On A2XX if the RPTR != WPTR and the device is idle, then
- * the last write to WPTR probably failed to latch so write it
- * again
- */
-
- if (adreno_is_a2xx(adreno_dev)) {
- unsigned int rptr;
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &rptr);
- if (rptr != adreno_dev->ringbuffer.wptr)
- adreno_writereg(adreno_dev,
- ADRENO_REG_CP_RB_WPTR,
- adreno_dev->ringbuffer.wptr);
- }
-
- return 0;
- }
-
- /*
- * Time interval between hang detection should be KGSL_TIMEOUT_PART
- * or more, if next hang detection is requested < KGSL_TIMEOUT_PART
- * from the last time do nothing.
- */
- if ((next_hang_detect_time) &&
- (time_before(jiffies, next_hang_detect_time)))
- return 0;
- else
- next_hang_detect_time = (jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_PART-1));
-
- /* Read the current Hang detect reg values here */
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
- if (ft_detect_regs[i] == 0)
- continue;
- kgsl_regread(device, ft_detect_regs[i],
- &curr_reg_val[i]);
- }
-
- /* Read the current global timestamp here */
- kgsl_sharedmem_readl(&device->memstore,
- &curr_global_ts,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp));
- /* Make sure the memstore read has posted */
- mb();
-
- if (curr_global_ts == prev_global_ts) {
-
- /* If we don't already have a good context, get it. */
- if (kgsl_context_detached(context)) {
- kgsl_context_put(context);
- context = NULL;
- curr_context = NULL;
- strlcpy(pid_name, "unknown", sizeof(pid_name));
-
- kgsl_sharedmem_readl(&device->memstore,
- &curr_context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
- /* Make sure the memstore read has posted */
- mb();
-
- context = kgsl_context_get(device, curr_context_id);
- if (context != NULL) {
- struct task_struct *task;
- curr_context = ADRENO_CONTEXT(context);
- curr_context->ib_gpu_time_used = 0;
- task = find_task_by_vpid(context->pid);
- if (task)
- get_task_comm(pid_name, task);
- } else {
- KGSL_DRV_ERR(device,
- "Fault tolerance no context found\n");
- }
- }
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
- if (curr_reg_val[i] != prev_reg_val[i]) {
- fast_hang_detected = 0;
-
- /* Check for long IB here */
- if ((i >=
- LONG_IB_DETECT_REG_INDEX_START)
- &&
- (i <=
- LONG_IB_DETECT_REG_INDEX_END))
- long_ib_detected = 0;
- }
- }
-
- if (fast_hang_detected) {
- KGSL_FT_ERR(device,
- "Proc %s, ctxt_id %d ts %d triggered fault tolerance"
- " on global ts %d\n",
- pid_name, context ? context->id : 0,
- (kgsl_readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED) + 1),
- curr_global_ts + 1);
- return 1;
- }
-
- if (curr_context != NULL) {
-
- curr_context->ib_gpu_time_used += KGSL_TIMEOUT_PART;
- KGSL_FT_INFO(device,
- "Proc %s used GPU Time %d ms on timestamp 0x%X\n",
- pid_name, curr_context->ib_gpu_time_used,
- curr_global_ts+1);
-
- if ((long_ib_detected) &&
- (!(curr_context->flags &
- CTXT_FLAGS_NO_FAULT_TOLERANCE))) {
- curr_context->ib_gpu_time_used +=
- KGSL_TIMEOUT_PART;
- if (curr_context->ib_gpu_time_used >
- KGSL_TIMEOUT_LONG_IB_DETECTION) {
- if (adreno_dev->long_ib_ts !=
- curr_global_ts) {
- KGSL_FT_ERR(device,
- "Proc %s, ctxt_id %d ts %d"
- "used GPU for %d ms long ib "
- "detected on global ts %d\n",
- pid_name, context->id,
- (kgsl_readtimestamp(device,
- context,
- KGSL_TIMESTAMP_RETIRED)+1),
- curr_context->ib_gpu_time_used,
- curr_global_ts+1);
- adreno_dev->long_ib = 1;
- adreno_dev->long_ib_ts =
- curr_global_ts;
- curr_context->ib_gpu_time_used =
- 0;
- return 1;
- }
- }
- }
- }
- } else {
- /* GPU is moving forward */
- prev_global_ts = curr_global_ts;
- kgsl_context_put(context);
- context = NULL;
- curr_context = NULL;
- strlcpy(pid_name, "unknown", sizeof(pid_name));
- adreno_dev->long_ib = 0;
- adreno_dev->long_ib_ts = 0;
- }
-
-
- /* If hangs are not detected copy the current reg values
- * to previous values and return no hang */
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++)
- prev_reg_val[i] = curr_reg_val[i];
- return 0;
-}
-
-static int _check_pending_timestamp(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int timestamp)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int context_id = _get_context_id(context);
- unsigned int ts_issued;
-
- if (context_id == KGSL_CONTEXT_INVALID)
- return -EINVAL;
-
- ts_issued = adreno_context_timestamp(context, &adreno_dev->ringbuffer);
-
- if (timestamp_cmp(timestamp, ts_issued) <= 0)
- return 0;
-
- if (context && !context->wait_on_invalid_ts) {
- KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, last issued ts <%d:0x%x>\n",
- context_id, timestamp, context_id, ts_issued);
-
- /* Only print this message once */
- context->wait_on_invalid_ts = true;
- }
-
- return -EINVAL;
-}
-
/**
* adreno_waittimestamp - sleep while waiting for the specified timestamp
* @device - pointer to a KGSL device structure
@@ -3861,147 +2590,35 @@
* @timestamp - GPU timestamp to wait for
* @msecs - amount of time to wait (in milliseconds)
*
- * Wait 'msecs' milliseconds for the specified timestamp to expire. Wake up
- * every KGSL_TIMEOUT_PART milliseconds to check for a device hang and process
- * one if it happened. Otherwise, spend most of our time in an interruptible
- * wait for the timestamp interrupt to be processed. This function must be
- * called with the mutex already held.
+ * Wait up to 'msecs' milliseconds for the specified timestamp to expire.
*/
static int adreno_waittimestamp(struct kgsl_device *device,
- struct kgsl_context *context,
- unsigned int timestamp,
- unsigned int msecs)
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs)
{
- static unsigned int io_cnt;
- struct adreno_context *adreno_ctx = context ? ADRENO_CONTEXT(context) :
- NULL;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int context_id = _get_context_id(context);
- unsigned int time_elapsed = 0;
- unsigned int wait;
- int ts_compare = 1;
- int io, ret = -ETIMEDOUT;
+ int ret;
+ struct adreno_context *drawctxt;
- if (context_id == KGSL_CONTEXT_INVALID) {
- KGSL_DRV_WARN(device, "context was detached");
+ if (context == NULL) {
+ /* If they are using the deprecated global wait, complain once */
+ dev_WARN_ONCE(device->dev, 1,
+ "IOCTL_KGSL_DEVICE_WAITTIMESTAMP is deprecated\n");
return -EINVAL;
}
- /*
- * Check to see if the requested timestamp is "newer" then the last
- * timestamp issued. If it is complain once and return error. Only
- * print the message once per context so that badly behaving
- * applications don't spam the logs
- */
+ /* Return -EINVAL if the context has been detached */
+ if (kgsl_context_detached(context))
+ return -EINVAL;
- if (adreno_ctx && !(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS)) {
- if (_check_pending_timestamp(device, context, timestamp))
- return -EINVAL;
+ ret = adreno_drawctxt_wait(ADRENO_DEVICE(device), context,
+ timestamp, msecs_to_jiffies(msecs));
- /* Reset the invalid timestamp flag on a valid wait */
- context->wait_on_invalid_ts = false;
- }
+ /* If the context got invalidated then return a specific error */
+ drawctxt = ADRENO_CONTEXT(context);
- /*
- * On the first time through the loop only wait 100ms.
- * this gives enough time for the engine to start moving and oddly
- * provides better hang detection results than just going the full
- * KGSL_TIMEOUT_PART right off the bat. The exception to this rule
- * is if msecs happens to be < 100ms then just use 20ms or the msecs,
- * whichever is larger because anything less than 20 is unreliable
- */
-
- if (msecs == 0 || msecs >= 100)
- wait = 100;
- else
- wait = (msecs > 20) ? msecs : 20;
-
- do {
- long status;
-
- /*
- * if the timestamp happens while we're not
- * waiting, there's a chance that an interrupt
- * will not be generated and thus the timestamp
- * work needs to be queued.
- */
-
- if (kgsl_check_timestamp(device, context, timestamp)) {
- queue_work(device->work_queue, &device->ts_expired_ws);
- ret = 0;
- break;
- }
-
- /*
- * For proper power accounting sometimes we need to call
- * io_wait_interruptible_timeout and sometimes we need to call
- * plain old wait_interruptible_timeout. We call the regular
- * timeout N times out of 100, where N is a number specified by
- * the current power level
- */
-
- io_cnt = (io_cnt + 1) % 100;
- io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
- ? 0 : 1;
-
- mutex_unlock(&device->mutex);
-
- /* Wait for a timestamp event */
- status = kgsl_wait_event_interruptible_timeout(
- device->wait_queue,
- adreno_check_interrupt_timestamp(device, context,
- timestamp), msecs_to_jiffies(wait), io);
-
- mutex_lock(&device->mutex);
-
- /*
- * If status is non zero then either the condition was satisfied
- * or there was an error. In either event, this is the end of
- * the line for us
- */
-
- if (status != 0) {
- ret = (status > 0) ? 0 : (int) status;
- break;
- }
- time_elapsed += wait;
-
- /* If user specified timestamps are being used, wait at least
- * KGSL_SYNCOBJ_SERVER_TIMEOUT msecs for the user driver to
- * issue a IB for a timestamp before checking to see if the
- * current timestamp we are waiting for is valid or not
- */
-
- if (ts_compare && (adreno_ctx &&
- (adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS))) {
- if (time_elapsed > KGSL_SYNCOBJ_SERVER_TIMEOUT) {
- ret = _check_pending_timestamp(device, context,
- timestamp);
- if (ret)
- break;
-
- /* Don't do this check again */
- ts_compare = 0;
-
- /*
- * Reset the invalid timestamp flag on a valid
- * wait
- */
- context->wait_on_invalid_ts = false;
- }
- }
-
- /*
- * We want to wait the floor of KGSL_TIMEOUT_PART
- * and (msecs - time_elapsed).
- */
-
- if (KGSL_TIMEOUT_PART < (msecs - time_elapsed))
- wait = KGSL_TIMEOUT_PART;
- else
- wait = (msecs - time_elapsed);
-
- } while (!msecs || time_elapsed < msecs);
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ ret = -EDEADLK;
return ret;
}
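
For readers of this hunk: the reworked wait path now has three distinct outcomes. The sketch below is illustrative C only, not part of the patch; example_wait() is an invented name, and the interpretation of the fall-through return is an assumption about what adreno_drawctxt_wait() propagates.

/* Illustration only - not part of the patch. example_wait() is a made-up
 * caller showing how the return codes of the reworked adreno_waittimestamp()
 * can be interpreted.
 */
static int example_wait(struct kgsl_device *device,
		struct kgsl_context *context, unsigned int ts,
		unsigned int msecs)
{
	int ret = adreno_waittimestamp(device, context, ts, msecs);

	if (ret == -EINVAL)	/* NULL (deprecated global wait) or detached context */
		return ret;
	if (ret == -EDEADLK)	/* context was invalidated after a GPU fault */
		return ret;
	/* 0 on success; otherwise whatever adreno_drawctxt_wait() returned
	 * (assumed to be a timeout or signal result)
	 */
	return ret;
}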
@@ -4010,13 +2627,13 @@
struct kgsl_context *context, enum kgsl_timestamp_type type)
{
unsigned int timestamp = 0;
- unsigned int context_id = _get_context_id(context);
+ unsigned int id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
/*
- * If the context ID is invalid, we are in a race with
+ * If the context is detached we are in a race with
* the context being destroyed by userspace so bail.
*/
- if (context_id == KGSL_CONTEXT_INVALID) {
+ if (context && kgsl_context_detached(context)) {
KGSL_DRV_WARN(device, "context was detached");
return timestamp;
}
@@ -4030,11 +2647,11 @@
}
case KGSL_TIMESTAMP_CONSUMED:
kgsl_sharedmem_readl(&device->memstore, ×tamp,
- KGSL_MEMSTORE_OFFSET(context_id, soptimestamp));
+ KGSL_MEMSTORE_OFFSET(id, soptimestamp));
break;
case KGSL_TIMESTAMP_RETIRED:
kgsl_sharedmem_readl(&device->memstore, ×tamp,
- KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp));
+ KGSL_MEMSTORE_OFFSET(id, eoptimestamp));
break;
}
@@ -4082,7 +2699,7 @@
case IOCTL_KGSL_PERFCOUNTER_PUT: {
struct kgsl_perfcounter_put *put = data;
result = adreno_perfcounter_put(adreno_dev, put->groupid,
- put->countable);
+ put->countable, PERFCOUNTER_FLAG_NONE);
break;
}
case IOCTL_KGSL_PERFCOUNTER_QUERY: {
@@ -4194,6 +2811,7 @@
.gpuid = adreno_gpuid,
.snapshot = adreno_snapshot,
.irq_handler = adreno_irq_handler,
+ .drain = adreno_drain,
/* Optional functions */
.setstate = adreno_setstate,
.drawctxt_create = adreno_drawctxt_create,
@@ -4201,7 +2819,6 @@
.drawctxt_destroy = adreno_drawctxt_destroy,
.setproperty = adreno_setproperty,
.postmortem_dump = adreno_dump,
- .next_event = adreno_next_event,
};
static struct platform_driver adreno_platform_driver = {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index cb75b34..32e43b2 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -16,6 +16,7 @@
#include "kgsl_device.h"
#include "adreno_drawctxt.h"
#include "adreno_ringbuffer.h"
+#include "adreno_profile.h"
#include "kgsl_iommu.h"
#include <mach/ocmem.h>
@@ -38,6 +39,7 @@
#define KGSL_CMD_FLAGS_PMODE 0x00000001
#define KGSL_CMD_FLAGS_INTERNAL_ISSUE 0x00000002
#define KGSL_CMD_FLAGS_GET_INT 0x00000004
+#define KGSL_CMD_FLAGS_PROFILE 0x00000008
#define KGSL_CMD_FLAGS_EOF 0x00000100
/* Command identifiers */
@@ -48,6 +50,8 @@
#define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD
#define KGSL_END_OF_FRAME_IDENTIFIER 0x2E0F2E0F
#define KGSL_NOP_IB_IDENTIFIER 0x20F20F20
+#define KGSL_START_OF_PROFILE_IDENTIFIER 0x2DEFADE1
+#define KGSL_END_OF_PROFILE_IDENTIFIER 0x2DEFADE2
#ifdef CONFIG_MSM_SCM
#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz)
@@ -92,6 +96,46 @@
TRACE_BUS_CTL,
};
+/*
+ * Maximum size of the dispatcher ringbuffer - the actual inflight size will be
+ * smaller than this, but this size allows for a larger range of inflight
+ * sizes that can be chosen at runtime
+ */
+
+#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
+
+/**
+ * struct adreno_dispatcher - container for the adreno GPU dispatcher
+ * @mutex: Mutex to protect the structure
+ * @state: Current state of the dispatcher (active or paused)
+ * @timer: Timer to monitor the progress of the command batches
+ * @inflight: Number of command batch operations pending in the ringbuffer
+ * @fault: True if a HW fault was detected
+ * @pending: Priority list of contexts waiting to submit command batches
+ * @plist_lock: Spin lock to protect the pending queue
+ * @cmdqueue: Queue of command batches currently in flight
+ * @head: pointer to the head of the cmdqueue. This is the oldest pending
+ * operation
+ * @tail: pointer to the tail of the cmdqueue. This is the most recently
+ * submitted operation
+ * @work: work_struct to put the dispatcher in a work queue
+ * @kobj: kobject for the dispatcher directory in the device sysfs node
+ */
+struct adreno_dispatcher {
+ struct mutex mutex;
+ unsigned int state;
+ struct timer_list timer;
+ unsigned int inflight;
+ int fault;
+ struct plist_head pending;
+ spinlock_t plist_lock;
+ struct kgsl_cmdbatch *cmdqueue[ADRENO_DISPATCH_CMDQUEUE_SIZE];
+ unsigned int head;
+ unsigned int tail;
+ struct work_struct work;
+ struct kobject kobj;
+};
+
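
To make the head/tail bookkeeping in this struct easier to follow, here is a minimal sketch (illustration only, not patch content; example_* names are invented) of the circular-buffer arithmetic the dispatcher uses. It mirrors the CMDQUEUE_NEXT() macro added in adreno_dispatch.c later in this series.

/* Illustration only - not part of the patch */
static inline unsigned int example_cmdqueue_next(unsigned int i)
{
	/* advance one slot, wrapping at the end of the fixed-size ring */
	return (i + 1) % ADRENO_DISPATCH_CMDQUEUE_SIZE;
}

static inline int example_cmdqueue_empty(struct adreno_dispatcher *dispatcher)
{
	/* head == tail means no command batches are in flight */
	return dispatcher->head == dispatcher->tail;
}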
struct adreno_gpudev;
struct adreno_device {
@@ -131,6 +175,8 @@
struct ocmem_buf *ocmem_hdl;
unsigned int ocmem_base;
unsigned int gpu_cycles;
+ struct adreno_profile profile;
+ struct adreno_dispatcher dispatcher;
};
#define PERFCOUNTER_FLAG_NONE 0x0
@@ -141,24 +187,27 @@
/**
* struct adreno_perfcount_register: register state
* @countable: countable the register holds
- * @refcount: number of users of the register
+ * @kernelcount: number of kernel users of the register
+ * @usercount: number of user space users of the register
* @offset: register hardware offset
*/
struct adreno_perfcount_register {
unsigned int countable;
- unsigned int refcount;
+ unsigned int kernelcount;
+ unsigned int usercount;
unsigned int offset;
- unsigned int flags;
};
/**
* struct adreno_perfcount_group: registers for a hardware group
* @regs: available registers for this group
* @reg_count: total registers for this group
+ * @name: name of the counter group
*/
struct adreno_perfcount_group {
struct adreno_perfcount_register *regs;
unsigned int reg_count;
+ const char *name;
};
/**
@@ -258,9 +307,9 @@
/* GPU specific function hooks */
int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
- void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
- void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
- void (*ctxt_draw_workaround)(struct adreno_device *,
+ int (*ctxt_save)(struct adreno_device *, struct adreno_context *);
+ int (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
+ int (*ctxt_draw_workaround)(struct adreno_device *,
struct adreno_context *);
irqreturn_t (*irq_handler)(struct adreno_device *);
void (*irq_control)(struct adreno_device *, int);
@@ -283,46 +332,6 @@
void (*postmortem_dump)(struct adreno_device *adreno_dev);
};
-/*
- * struct adreno_ft_data - Structure that contains all information to
- * perform gpu fault tolerance
- * @ib1 - IB1 that the GPU was executing when hang happened
- * @context_id - Context which caused the hang
- * @global_eop - eoptimestamp at time of hang
- * @rb_buffer - Buffer that holds the commands from good contexts
- * @rb_size - Number of valid dwords in rb_buffer
- * @bad_rb_buffer - Buffer that holds commands from the hanging context
- * bad_rb_size - Number of valid dwords in bad_rb_buffer
- * @good_rb_buffer - Buffer that holds commands from good contexts
- * good_rb_size - Number of valid dwords in good_rb_buffer
- * @last_valid_ctx_id - The last context from which commands were placed in
- * ringbuffer before the GPU hung
- * @step - Current fault tolerance step being executed
- * @err_code - Fault tolerance error code
- * @fault - Indicates whether the hang was caused due to a pagefault
- * @start_of_replay_cmds - Offset in ringbuffer from where commands can be
- * replayed during fault tolerance
- * @replay_for_snapshot - Offset in ringbuffer where IB's can be saved for
- * replaying with snapshot
- */
-struct adreno_ft_data {
- unsigned int ib1;
- unsigned int context_id;
- unsigned int global_eop;
- unsigned int *rb_buffer;
- unsigned int rb_size;
- unsigned int *bad_rb_buffer;
- unsigned int bad_rb_size;
- unsigned int *good_rb_buffer;
- unsigned int good_rb_size;
- unsigned int last_valid_ctx_id;
- unsigned int status;
- unsigned int ft_policy;
- unsigned int err_code;
- unsigned int start_of_replay_cmds;
- unsigned int replay_for_snapshot;
-};
-
#define FT_DETECT_REGS_COUNT 12
struct log_field {
@@ -402,23 +411,37 @@
void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
int hang);
-int adreno_dump_and_exec_ft(struct kgsl_device *device);
+void adreno_dispatcher_start(struct adreno_device *adreno_dev);
+int adreno_dispatcher_init(struct adreno_device *adreno_dev);
+void adreno_dispatcher_close(struct adreno_device *adreno_dev);
+int adreno_dispatcher_idle(struct adreno_device *adreno_dev,
+ unsigned int timeout);
+void adreno_dispatcher_irq_fault(struct kgsl_device *device);
+void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
-void adreno_dump_rb(struct kgsl_device *device, const void *buf,
- size_t len, int start, int size);
+int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp);
-unsigned int adreno_ft_detect(struct kgsl_device *device,
- unsigned int *prev_reg_val);
+void adreno_dispatcher_schedule(struct kgsl_device *device);
+void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
+int adreno_reset(struct kgsl_device *device);
int adreno_ft_init_sysfs(struct kgsl_device *device);
void adreno_ft_uninit_sysfs(struct kgsl_device *device);
+int adreno_perfcounter_get_groupid(struct adreno_device *adreno_dev,
+ const char *name);
+
+const char *adreno_perfcounter_get_name(struct adreno_device
+ *adreno_dev, unsigned int groupid);
+
int adreno_perfcounter_get(struct adreno_device *adreno_dev,
unsigned int groupid, unsigned int countable, unsigned int *offset,
unsigned int flags);
int adreno_perfcounter_put(struct adreno_device *adreno_dev,
- unsigned int groupid, unsigned int countable);
+ unsigned int groupid, unsigned int countable, unsigned int flags);
int adreno_soft_reset(struct kgsl_device *device);
@@ -519,9 +542,7 @@
{
if (k_ctxt) {
struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
-
- if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
- return a_ctxt->timestamp;
+ return a_ctxt->timestamp;
}
return rb->global_ts;
}
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 3d72c5c..cce4f91 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1451,7 +1451,7 @@
return ret;
}
-static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
+static int a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
@@ -1468,7 +1468,7 @@
ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW)
adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
else
- return;
+ return 0;
/*
* Issue an empty draw call to avoid possible hangs due to
* repeated idles without intervening draw calls.
@@ -1499,41 +1499,46 @@
| adreno_dev->pix_shader_start;
}
- adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
- &cmd[0], cmds - cmd);
+ return adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE, &cmd[0], cmds - cmd);
}
-static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
+static int a2xx_drawctxt_save(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
+ int ret;
if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
- return;
+ return 0;
- if (context->flags & CTXT_FLAGS_GPU_HANG)
- KGSL_CTXT_WARN(device,
- "Current active context has caused gpu hang\n");
+ if (context->state == ADRENO_CONTEXT_STATE_INVALID)
+ return 0;
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
context->reg_save[1],
context->reg_save[2] << 2, true);
/* save registers and constants. */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->reg_save, 3);
+ if (ret)
+ return ret;
+
if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
kgsl_cffdump_syncmem(context->base.device,
&context->gpustate,
context->shader_save[1],
context->shader_save[2] << 2, true);
/* save shader partitioning and instructions. */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->shader_save, 3);
+ if (ret)
+ return ret;
kgsl_cffdump_syncmem(context->base.device,
&context->gpustate,
context->shader_fixup[1],
@@ -1542,10 +1547,13 @@
* fixup shader partitioning parameter for
* SET_SHADER_BASES.
*/
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->shader_fixup, 3);
+ if (ret)
+ return ret;
+
context->flags |= CTXT_FLAGS_SHADER_RESTORE;
}
}
@@ -1558,32 +1566,41 @@
/* save gmem.
* (note: changes shader. shader must already be saved.)
*/
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.gmem_save, 3);
+ if (ret)
+ return ret;
+
kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
context->chicken_restore[1],
context->chicken_restore[2] << 2, true);
/* Restore TP0_CHICKEN */
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->chicken_restore, 3);
+
+ if (ret)
+ return ret;
}
adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
context->flags |= CTXT_FLAGS_GMEM_RESTORE;
} else if (adreno_is_a2xx(adreno_dev))
- a2xx_drawctxt_draw_workaround(adreno_dev, context);
+ return a2xx_drawctxt_draw_workaround(adreno_dev, context);
+
+ return 0;
}
-static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
+static int a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
unsigned int cmds[5];
+ int ret = 0;
if (context == NULL) {
/* No context - set the default pagetable and thats it */
@@ -1598,7 +1615,7 @@
: KGSL_CONTEXT_INVALID;
kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
id);
- return;
+ return 0;
}
cmds[0] = cp_nop_packet(1);
@@ -1607,8 +1624,11 @@
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->base.id;
- adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
cmds, 5);
+ if (ret)
+ return ret;
+
kgsl_mmu_setstate(&device->mmu, context->base.pagetable,
context->base.id);
@@ -1621,9 +1641,11 @@
context->context_gmem_shadow.gmem_restore[2] << 2,
true);
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.gmem_restore, 3);
+ if (ret)
+ return ret;
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
kgsl_cffdump_syncmem(context->base.device,
@@ -1632,9 +1654,11 @@
context->chicken_restore[2] << 2, true);
/* Restore TP0_CHICKEN */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->chicken_restore, 3);
+ if (ret)
+ return ret;
}
context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
@@ -1646,8 +1670,10 @@
context->reg_restore[2] << 2, true);
/* restore registers and constants. */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
+ if (ret)
+ return ret;
/* restore shader instructions & partitioning. */
if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
@@ -1656,18 +1682,22 @@
context->shader_restore[1],
context->shader_restore[2] << 2, true);
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->shader_restore, 3);
+ if (ret)
+ return ret;
}
}
if (adreno_is_a20x(adreno_dev)) {
cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
cmds[1] = context->bin_base_offset;
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, cmds, 2);
}
+
+ return ret;
}
/*
@@ -1734,13 +1764,14 @@
if (!status) {
if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
- /* This indicates that we could not read CP_INT_STAT.
- * As a precaution just wake up processes so
- * they can check their timestamps. Since, we
- * did not ack any interrupts this interrupt will
- * be generated again */
+ /*
+ * This indicates that we could not read CP_INT_STAT.
+ * As a precaution schedule the dispatcher to check
+ * things out. Since we did not ack any interrupts this
+ * interrupt will be generated again
+ */
KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
- wake_up_interruptible_all(&device->wait_queue);
+ adreno_dispatcher_schedule(device);
} else
KGSL_DRV_WARN(device, "Spurious interrput detected\n");
return;
@@ -1766,7 +1797,7 @@
if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
queue_work(device->work_queue, &device->ts_expired_ws);
- wake_up_interruptible_all(&device->wait_queue);
+ adreno_dispatcher_schedule(device);
}
}
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index b1b27f5..8b75c4e 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2382,32 +2382,38 @@
return ret;
}
-static void a3xx_drawctxt_save(struct adreno_device *adreno_dev,
+static int a3xx_drawctxt_save(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
+ int ret;
if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
- return;
+ return 0;
- if (context->flags & CTXT_FLAGS_GPU_HANG)
- KGSL_CTXT_WARN(device,
- "Current active context has caused gpu hang\n");
+ if (context->state == ADRENO_CONTEXT_STATE_INVALID)
+ return 0;
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
/* Fixup self modifying IBs for save operations */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
+ if (ret)
+ return ret;
/* save registers and constants. */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->regconstant_save, 3);
+ if (ret)
+ return ret;
if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
/* Save shader instructions */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
+ if (ret)
+ return ret;
context->flags |= CTXT_FLAGS_SHADER_RESTORE;
}
@@ -2425,19 +2431,25 @@
context->context_gmem_shadow.gmem_save[1],
context->context_gmem_shadow.gmem_save[2] << 2, true);
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.
gmem_save, 3);
+ if (ret)
+ return ret;
+
context->flags |= CTXT_FLAGS_GMEM_RESTORE;
}
+
+ return 0;
}
-static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
+static int a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
unsigned int cmds[5];
+ int ret = 0;
if (context == NULL) {
/* No context - set the default pagetable and thats it */
@@ -2452,7 +2464,7 @@
: KGSL_CONTEXT_INVALID;
kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
id);
- return;
+ return 0;
}
cmds[0] = cp_nop_packet(1);
@@ -2461,8 +2473,11 @@
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->base.id;
- adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
cmds, 5);
+ if (ret)
+ return ret;
+
kgsl_mmu_setstate(&device->mmu, context->base.pagetable,
context->base.id);
@@ -2478,36 +2493,47 @@
context->context_gmem_shadow.gmem_restore[2] << 2,
true);
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.
gmem_restore, 3);
+ if (ret)
+ return ret;
context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
}
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
+ if (ret)
+ return ret;
/* Fixup self modifying IBs for restore operations */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->restore_fixup, 3);
+ if (ret)
+ return ret;
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->constant_restore, 3);
+ if (ret)
+ return ret;
if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->shader_restore, 3);
-
+ if (ret)
+ return ret;
/* Restore HLSQ_CONTROL_0 register */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->hlsqcontrol_restore, 3);
}
+
+ return ret;
}
static int a3xx_rb_init(struct adreno_device *adreno_dev,
@@ -2621,11 +2647,8 @@
{
struct kgsl_device *device = &adreno_dev->dev;
- /* Wake up everybody waiting for the interrupt */
- wake_up_interruptible_all(&device->wait_queue);
-
- /* Schedule work to free mem and issue ibs */
queue_work(device->work_queue, &device->ts_expired_ws);
+ adreno_dispatcher_schedule(device);
}
/**
@@ -3152,115 +3175,118 @@
*/
static struct adreno_perfcount_register a3xx_perfcounters_cp[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_CP_0_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_CP_0_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_rbbm[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_pc[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_1_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_2_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_3_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_1_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_2_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_3_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_vfd[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VFD_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VFD_1_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_1_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_hlsq[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_vpc[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VPC_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VPC_1_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_1_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_tse[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TSE_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TSE_1_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_1_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_ras[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RAS_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RAS_1_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_1_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_uche[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_tp[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_1_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_2_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_3_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_4_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_5_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_1_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_2_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_3_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_4_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_5_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_sp[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_1_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_2_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_3_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_4_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_5_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_6_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_7_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_1_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_2_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_3_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_4_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_5_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_6_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_7_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_rb[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RB_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RB_1_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_1_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_pwr[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_1_LO, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_1_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_vbif[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_CNT0_LO },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_CNT1_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT1_LO },
};
static struct adreno_perfcount_register a3xx_perfcounters_vbif_pwr[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT0_LO },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT1_LO },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT2_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT0_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT1_LO },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT2_LO },
};
+#define A3XX_PERFCOUNTER_GROUP(name) { a3xx_perfcounters_##name, \
+ ARRAY_SIZE(a3xx_perfcounters_##name), __stringify(name) }
+
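For reference, a single expansion of the new helper (shown here as an illustration only, not additional patch content) makes it clear where the group name string comes from:

/* A3XX_PERFCOUNTER_GROUP(cp) expands to: */
{ a3xx_perfcounters_cp, ARRAY_SIZE(a3xx_perfcounters_cp), "cp" },
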
static struct adreno_perfcount_group a3xx_perfcounter_groups[] = {
- { a3xx_perfcounters_cp, ARRAY_SIZE(a3xx_perfcounters_cp) },
- { a3xx_perfcounters_rbbm, ARRAY_SIZE(a3xx_perfcounters_rbbm) },
- { a3xx_perfcounters_pc, ARRAY_SIZE(a3xx_perfcounters_pc) },
- { a3xx_perfcounters_vfd, ARRAY_SIZE(a3xx_perfcounters_vfd) },
- { a3xx_perfcounters_hlsq, ARRAY_SIZE(a3xx_perfcounters_hlsq) },
- { a3xx_perfcounters_vpc, ARRAY_SIZE(a3xx_perfcounters_vpc) },
- { a3xx_perfcounters_tse, ARRAY_SIZE(a3xx_perfcounters_tse) },
- { a3xx_perfcounters_ras, ARRAY_SIZE(a3xx_perfcounters_ras) },
- { a3xx_perfcounters_uche, ARRAY_SIZE(a3xx_perfcounters_uche) },
- { a3xx_perfcounters_tp, ARRAY_SIZE(a3xx_perfcounters_tp) },
- { a3xx_perfcounters_sp, ARRAY_SIZE(a3xx_perfcounters_sp) },
- { a3xx_perfcounters_rb, ARRAY_SIZE(a3xx_perfcounters_rb) },
- { a3xx_perfcounters_pwr, ARRAY_SIZE(a3xx_perfcounters_pwr) },
- { a3xx_perfcounters_vbif, ARRAY_SIZE(a3xx_perfcounters_vbif) },
- { a3xx_perfcounters_vbif_pwr, ARRAY_SIZE(a3xx_perfcounters_vbif_pwr) },
+ A3XX_PERFCOUNTER_GROUP(cp),
+ A3XX_PERFCOUNTER_GROUP(rbbm),
+ A3XX_PERFCOUNTER_GROUP(pc),
+ A3XX_PERFCOUNTER_GROUP(vfd),
+ A3XX_PERFCOUNTER_GROUP(hlsq),
+ A3XX_PERFCOUNTER_GROUP(vpc),
+ A3XX_PERFCOUNTER_GROUP(tse),
+ A3XX_PERFCOUNTER_GROUP(ras),
+ A3XX_PERFCOUNTER_GROUP(uche),
+ A3XX_PERFCOUNTER_GROUP(tp),
+ A3XX_PERFCOUNTER_GROUP(sp),
+ A3XX_PERFCOUNTER_GROUP(rb),
+ A3XX_PERFCOUNTER_GROUP(pwr),
+ A3XX_PERFCOUNTER_GROUP(vbif),
+ A3XX_PERFCOUNTER_GROUP(vbif_pwr),
};
static struct adreno_perfcounters a3xx_perfcounters = {
@@ -3304,6 +3330,9 @@
/* Reserve and start countable 1 in the PWR perfcounter group */
adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
NULL, PERFCOUNTER_FLAG_KERNEL);
+
+ /* Default performance counter profiling to false */
+ adreno_dev->profile.enabled = false;
}
/**
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
new file mode 100644
index 0000000..e429934
--- /dev/null
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -0,0 +1,1038 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "adreno_ringbuffer.h"
+#include "adreno_trace.h"
+
+#define ADRENO_DISPATCHER_ACTIVE 0
+#define ADRENO_DISPATCHER_PAUSE 1
+
+#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+
+/* Number of commands that can be queued in a context before it sleeps */
+static unsigned int _context_cmdqueue_size = 50;
+
+/* Number of milliseconds to wait for the context queue to clear */
+static unsigned int _context_queue_wait = 10000;
+
+/* Number of command batches sent at a time from a single context */
+static unsigned int _context_cmdbatch_burst = 5;
+
+/* Number of command batches inflight in the ringbuffer at any time */
+static unsigned int _dispatcher_inflight = 15;
+
+/* Command batch timeout (in milliseconds) */
+static unsigned int _cmdbatch_timeout = 2000;
+
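Nothing in this file enforces the ordering these tunables implicitly rely on; the sketch below is a hypothetical helper, not part of the patch (the name dispatcher_tunables_sane() is invented), spelling out the constraints implied by the defaults and by the requeue logic further down.

/* Hypothetical helper - not part of the patch */
static int dispatcher_tunables_sane(void)
{
	/* the defaults keep the per-context burst below the inflight limit */
	if (_context_cmdbatch_burst > _dispatcher_inflight)
		return 0;
	/* the inflight limit must stay below the dispatcher ring size */
	if (_dispatcher_inflight >= ADRENO_DISPATCH_CMDQUEUE_SIZE)
		return 0;
	/* the context queue limit must leave room for a requeued cmdbatch */
	if (_context_cmdqueue_size >= ADRENO_CONTEXT_CMDQUEUE_SIZE)
		return 0;
	return 1;
}
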
+/**
+ * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
+ * @drawctxt: Pointer to the adreno draw context
+ *
+ * Dequeue a new command batch from the context list
+ */
+static inline struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
+ struct adreno_context *drawctxt)
+{
+ struct kgsl_cmdbatch *cmdbatch = NULL;
+
+ mutex_lock(&drawctxt->mutex);
+ if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+ cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ drawctxt->cmdqueue_head =
+ CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
+ ADRENO_CONTEXT_CMDQUEUE_SIZE);
+ drawctxt->queued--;
+ }
+
+ mutex_unlock(&drawctxt->mutex);
+
+ return cmdbatch;
+}
+
+/**
+ * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context
+ * queue
+ * @drawctxt: Pointer to the adreno draw context
+ * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
+ *
+ * Failure to submit a command to the ringbuffer isn't the fault of the command
+ * being submitted, so if a failure happens push it back on the head of the
+ * context queue to be reconsidered later
+ */
+static inline void adreno_dispatcher_requeue_cmdbatch(
+ struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
+{
+ unsigned int prev;
+ mutex_lock(&drawctxt->mutex);
+
+ if (kgsl_context_detached(&drawctxt->base) ||
+ drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+ mutex_unlock(&drawctxt->mutex);
+ return;
+ }
+
+ prev = drawctxt->cmdqueue_head - 1;
+
+ /* cmdqueue_head is unsigned, so detect wrap-around explicitly */
+ if (drawctxt->cmdqueue_head == 0)
+ prev = ADRENO_CONTEXT_CMDQUEUE_SIZE - 1;
+
+ /*
+ * The maximum queue size always needs to be one less than the size of
+ * the ringbuffer queue so there is "room" to put the cmdbatch back in
+ */
+
+ BUG_ON(prev == drawctxt->cmdqueue_tail);
+
+ drawctxt->cmdqueue[prev] = cmdbatch;
+ drawctxt->queued++;
+
+ /* Reset the command queue head to reflect the newly requeued change */
+ drawctxt->cmdqueue_head = prev;
+ mutex_unlock(&drawctxt->mutex);
+}
+
+/**
+ * dispatcher_queue_context() - Queue a context in the dispatcher pending list
+ * @adreno_dev: Pointer to the adreno device struct
+ * @drawctxt: Pointer to the adreno draw context
+ *
+ * Add a context to the dispatcher pending list.
+ */
+static void dispatcher_queue_context(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ spin_lock(&dispatcher->plist_lock);
+
+ if (plist_node_empty(&drawctxt->pending)) {
+ /* Get a reference to the context while it sits on the list */
+ _kgsl_context_get(&drawctxt->base);
+ trace_dispatch_queue_context(drawctxt);
+ plist_add(&drawctxt->pending, &dispatcher->pending);
+ }
+
+ spin_unlock(&dispatcher->plist_lock);
+}
+
+/**
+ * sendcmd() - Send a command batch to the GPU hardware
+ * @adreno_dev: Pointer to the adreno device struct
+ * @cmdbatch: Pointer to the KGSL cmdbatch being sent
+ *
+ * Send a KGSL command batch to the GPU hardware
+ */
+static int sendcmd(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int ret;
+
+ dispatcher->inflight++;
+
+ mutex_lock(&device->mutex);
+
+ if (dispatcher->inflight == 1) {
+ /* Time to make the donuts. Turn on the GPU */
+ ret = kgsl_active_count_get(device);
+ if (ret) {
+ dispatcher->inflight--;
+ mutex_unlock(&device->mutex);
+ return ret;
+ }
+ }
+
+ ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch);
+
+ /* Turn the GPU back off on failure. Sad face. */
+ if (ret && dispatcher->inflight == 1)
+ kgsl_active_count_put(device);
+
+ mutex_unlock(&device->mutex);
+
+ if (ret) {
+ dispatcher->inflight--;
+ KGSL_DRV_ERR(device,
+ "Unable to submit command to the ringbuffer\n");
+ return ret;
+ }
+
+ trace_adreno_cmdbatch_submitted(cmdbatch, dispatcher->inflight);
+
+ dispatcher->cmdqueue[dispatcher->tail] = cmdbatch;
+ dispatcher->tail = (dispatcher->tail + 1) %
+ ADRENO_DISPATCH_CMDQUEUE_SIZE;
+
+ /*
+ * If this is the first command in the pipe then the GPU will
+ * immediately start executing it so we can start the expiry timeout on
+ * the command batch here. Subsequent command batches will have their
+ * timer started when the previous command batch is retired
+ */
+ if (dispatcher->inflight == 1) {
+ cmdbatch->expires = jiffies +
+ msecs_to_jiffies(_cmdbatch_timeout);
+ mod_timer(&dispatcher->timer, cmdbatch->expires);
+ }
+
+ return 0;
+}
+
+/**
+ * dispatcher_context_sendcmds() - Send commands from a context to the GPU
+ * @adreno_dev: Pointer to the adreno device struct
+ * @drawctxt: Pointer to the adreno context to dispatch commands from
+ *
+ * Dequeue and send a burst of commands from the specified context to the GPU
+ */
+static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int count = 0;
+
+ /*
+ * Each context can send a specific number of command batches per cycle
+ */
+ for ( ; count < _context_cmdbatch_burst &&
+ dispatcher->inflight < _dispatcher_inflight; count++) {
+ int ret;
+ struct kgsl_cmdbatch *cmdbatch =
+ adreno_dispatcher_get_cmdbatch(drawctxt);
+
+ if (cmdbatch == NULL)
+ break;
+
+ ret = sendcmd(adreno_dev, cmdbatch);
+
+ /*
+ * There are various reasons why we can't submit a command (no
+ * memory for the commands, full ringbuffer, etc) but none of
+ * these are actually the current command's fault. Requeue it
+ * back on the context and let it come back around again if
+ * conditions improve
+ */
+ if (ret) {
+ adreno_dispatcher_requeue_cmdbatch(drawctxt, cmdbatch);
+ break;
+ }
+ }
+
+ /*
+ * If the context successfully submitted commands, then
+ * unconditionally put it back on the queue to be considered the
+ * next time around. This might seem a little wasteful but it is
+ * reasonable to think that a busy context will stay busy.
+ */
+
+ if (count) {
+ dispatcher_queue_context(adreno_dev, drawctxt);
+
+ /*
+ * If we submitted something there will be room in the
+ * context queue so ping the context wait queue on the
+ * chance that the context is snoozing
+ */
+
+ wake_up_interruptible_all(&drawctxt->wq);
+ }
+
+ return count;
+}
+
+/**
+ * _adreno_dispatcher_issuecmds() - Issue commands from pending contexts
+ * @adreno_dev: Pointer to the adreno device struct
+ *
+ * Issue as many commands as possible (up to inflight) from the pending contexts
+ * This function assumes the dispatcher mutex has been locked.
+ */
+static int _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ /* Don't do anything if the dispatcher is paused */
+ if (dispatcher->state != ADRENO_DISPATCHER_ACTIVE)
+ return 0;
+
+ while (dispatcher->inflight < _dispatcher_inflight) {
+ struct adreno_context *drawctxt = NULL;
+
+ spin_lock(&dispatcher->plist_lock);
+
+ if (!plist_head_empty(&dispatcher->pending)) {
+ drawctxt = plist_first_entry(&dispatcher->pending,
+ struct adreno_context, pending);
+
+ plist_del(&drawctxt->pending, &dispatcher->pending);
+ }
+
+ spin_unlock(&dispatcher->plist_lock);
+
+ if (drawctxt == NULL)
+ break;
+
+ if (kgsl_context_detached(&drawctxt->base) ||
+ drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+ kgsl_context_put(&drawctxt->base);
+ continue;
+ }
+
+ dispatcher_context_sendcmds(adreno_dev, drawctxt);
+ kgsl_context_put(&drawctxt->base);
+ }
+
+ return 0;
+}
+
+/**
+ * adreno_dispatcher_issuecmds() - Issue commands from pending contexts
+ * @adreno_dev: Pointer to the adreno device struct
+ *
+ * Lock the dispatcher and call _adreno_dispatcher_issuecmds()
+ */
+int adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int ret;
+
+ mutex_lock(&dispatcher->mutex);
+ ret = _adreno_dispatcher_issuecmds(adreno_dev);
+ mutex_unlock(&dispatcher->mutex);
+
+ return ret;
+}
+
+static int _check_context_queue(struct adreno_context *drawctxt)
+{
+ int ret;
+
+ mutex_lock(&drawctxt->mutex);
+
+ /*
+ * Wake up if there is room in the context queue or if the whole thing got
+ * invalidated while we were asleep
+ */
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ ret = 1;
+ else
+ ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
+
+ mutex_unlock(&drawctxt->mutex);
+
+ return ret;
+}
+
+/**
+ * adreno_dispatcher_replay() - Replay commands from the dispatcher queue
+ * @adreno_dev: Pointer to the adreno device struct
+ *
+ * Replay the commands from the dispatcher inflight queue. This is called after
+ * a power down/up to recover from a fault
+ */
+int adreno_dispatcher_replay(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ struct kgsl_cmdbatch **replay;
+ int i, ptr, count = 0;
+
+ BUG_ON(!mutex_is_locked(&dispatcher->mutex));
+
+ replay = kzalloc(sizeof(*replay) * dispatcher->inflight, GFP_KERNEL);
+
+ /*
+ * If we can't allocate enough memory for the replay commands then we
+ * are in a bad way. Invalidate everything, reset the GPU and see ya
+ * later alligator
+ */
+
+ if (replay == NULL) {
+
+ ptr = dispatcher->head;
+
+ while (ptr != dispatcher->tail) {
+ struct kgsl_context *context =
+ dispatcher->cmdqueue[ptr]->context;
+
+ adreno_drawctxt_invalidate(device, context);
+ ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ }
+
+ /* Reset the dispatcher queue */
+ dispatcher->inflight = 0;
+ dispatcher->head = dispatcher->tail = 0;
+
+ /* Reset the hardware */
+ mutex_lock(&device->mutex);
+
+ /*
+ * If adreno_reset fails then the GPU is not alive and there
+ * isn't anything we can do to recover at this point
+ */
+
+ BUG_ON(adreno_reset(device));
+ mutex_unlock(&device->mutex);
+
+ return 0;
+ }
+
+ ptr = dispatcher->head;
+
+ while (ptr != dispatcher->tail) {
+ struct kgsl_cmdbatch *cmdbatch = dispatcher->cmdqueue[ptr];
+ struct adreno_context *drawctxt =
+ ADRENO_CONTEXT(cmdbatch->context);
+
+ if (cmdbatch->invalid)
+ adreno_drawctxt_invalidate(device, cmdbatch->context);
+
+ if (!kgsl_context_detached(cmdbatch->context) &&
+ drawctxt->state == ADRENO_CONTEXT_STATE_ACTIVE) {
+ /*
+ * The context for the command batch is still valid -
+ * add it to the replay list
+ */
+ replay[count++] = dispatcher->cmdqueue[ptr];
+ } else {
+ /*
+ * Skip over invalidated or detached contexts - cancel
+ * any pending events for the timestamp and destroy the
+ * command batch
+ */
+ mutex_lock(&device->mutex);
+ kgsl_cancel_events_timestamp(device, cmdbatch->context,
+ cmdbatch->timestamp);
+ mutex_unlock(&device->mutex);
+
+ kgsl_cmdbatch_destroy(cmdbatch);
+ }
+
+ ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ }
+
+ /* Reset the dispatcher queue */
+ dispatcher->inflight = 0;
+ dispatcher->head = dispatcher->tail = 0;
+
+ mutex_lock(&device->mutex);
+ BUG_ON(adreno_reset(device));
+ mutex_unlock(&device->mutex);
+
+ /* Replay the pending command buffers */
+ for (i = 0; i < count; i++) {
+ int ret = sendcmd(adreno_dev, replay[i]);
+
+ /*
+ * I'm afraid that if we get an error during replay we
+ * are not going to space today
+ */
+
+ BUG_ON(ret);
+ }
+
+ /*
+ * active_count will be set when we come into this function because
+ * there were inflight commands. By virtue of setting ->inflight back
+ * to 0 sendcmd() will increase the active count again on the first
+ * submission. This active_count_put is needed to put the universe back
+ * in balance and as a bonus it ensures that the hardware stays up for
+ * the entire reset process
+ */
+ mutex_lock(&device->mutex);
+ kgsl_active_count_put(device);
+ mutex_unlock(&device->mutex);
+
+ kfree(replay);
+ return 0;
+}
+
+/**
+ * adreno_dispatcher_queue_cmd() - Queue a new command in the context
+ * @adreno_dev: Pointer to the adreno device struct
+ * @drawctxt: Pointer to the adreno draw context
+ * @cmdbatch: Pointer to the command batch being submitted
+ * @timestamp: Pointer to the requested timestamp
+ *
+ * Queue a command in the context - if there isn't any room in the queue, then
+ * block until there is
+ */
+int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp)
+{
+ int ret;
+
+ mutex_lock(&drawctxt->mutex);
+
+ if (drawctxt->flags & CTXT_FLAGS_BEING_DESTROYED) {
+ mutex_unlock(&drawctxt->mutex);
+ return -EINVAL;
+ }
+
+ /* Wait for room in the context queue */
+
+ while (drawctxt->queued >= _context_cmdqueue_size) {
+ trace_adreno_drawctxt_sleep(drawctxt);
+ mutex_unlock(&drawctxt->mutex);
+
+ ret = wait_event_interruptible_timeout(drawctxt->wq,
+ _check_context_queue(drawctxt),
+ msecs_to_jiffies(_context_queue_wait));
+
+ mutex_lock(&drawctxt->mutex);
+ trace_adreno_drawctxt_wake(drawctxt);
+
+ if (ret <= 0) {
+ mutex_unlock(&drawctxt->mutex);
+ return (ret == 0) ? -ETIMEDOUT : (int) ret;
+ }
+
+ /*
+ * Account for the possibility that the context got invalidated
+ * while we were sleeping
+ */
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+ mutex_unlock(&drawctxt->mutex);
+ return -EDEADLK;
+ }
+ }
+
+ /*
+ * If the UMD specified a timestamp then use that under the condition
+ * that it is greater than the last queued timestamp in the context.
+ */
+
+ if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
+ if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0) {
+ mutex_unlock(&drawctxt->mutex);
+ return -ERANGE;
+ }
+
+ drawctxt->timestamp = *timestamp;
+ } else
+ drawctxt->timestamp++;
+
+ cmdbatch->timestamp = drawctxt->timestamp;
+ *timestamp = drawctxt->timestamp;
+
+ /* Put the command into the queue */
+ drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
+ drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
+ ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+ drawctxt->queued++;
+ trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
+
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /* Add the context to the dispatcher pending list */
+ dispatcher_queue_context(adreno_dev, drawctxt);
+
+ /*
+ * Only issue commands if inflight is less than burst - this prevents us
+ * from sitting around waiting for the mutex on a busy system - the work
+ * loop will schedule it for us. Inflight is mutex protected but the
+ * worst that can happen is that it will go to 0 after we check and if
+ * it goes to 0 it is because the work loop decremented it and the work
+ * queue will try to schedule new commands anyway.
+ */
+
+ if (adreno_dev->dispatcher.inflight < _context_cmdbatch_burst)
+ adreno_dispatcher_issuecmds(adreno_dev);
+
+ return 0;
+}
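
A hypothetical caller of the function above (illustration only; example_submit() is a made-up name, and the real submission path lives in the KGSL ioctl code outside this hunk) would queue a batch and read back the timestamp the dispatcher assigned:

/* Hypothetical caller - not part of the patch */
static int example_submit(struct adreno_device *adreno_dev,
		struct adreno_context *drawctxt,
		struct kgsl_cmdbatch *cmdbatch)
{
	uint32_t ts = 0;
	int ret;

	ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch, &ts);
	if (ret)	/* -EINVAL, -ETIMEDOUT, -EDEADLK or -ERANGE per the code above */
		return ret;

	/* ts now holds the timestamp assigned (or accepted, for user
	 * generated timestamps) to this command batch
	 */
	return 0;
}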
+
+/**
+ * dispatcher_do_fault() - Handle a GPU fault and reset the GPU
+ * @device: Pointer to the KGSL device
+ * @cmdbatch: Pointer to the command batch believed to be responsible for the
+ * fault
+ * @invalidate: Non zero if the current command should be invalidated
+ *
+ * Trigger a fault in the dispatcher and start the replay process
+ */
+static void dispatcher_do_fault(struct kgsl_device *device,
+ struct kgsl_cmdbatch *cmdbatch, int invalidate)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ unsigned int reg;
+
+ /* Stop the timers */
+ del_timer_sync(&dispatcher->timer);
+
+ mutex_lock(&device->mutex);
+
+ /*
+ * There is an interesting race condition here - when a command batch
+ * expires and we invalidate before we recover, we run the risk of having
+ * the UMD clean up the context and free memory that the GPU is still
+ * using. Not that it is dangerous because we are a few microseconds
+ * away from resetting, but it still ends up in pagefaults and log
+ * messages and so on and so forth. To avoid this we mark the command
+ * batch itself as invalid and then reset - the context will get
+ * invalidated in the replay.
+ */
+
+ if (invalidate)
+ cmdbatch->invalid = 1;
+
+ /*
+ * Stop the CP in its tracks - this ensures that we don't get activity
+ * while we are trying to dump the state of the system
+ */
+
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, ®);
+ reg |= (1 << 27) | (1 << 28);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
+
+ kgsl_postmortem_dump(device, 0);
+ kgsl_device_snapshot(device, 1);
+ mutex_unlock(&device->mutex);
+
+ /* If we can't replay then bravely run away and die */
+ if (adreno_dispatcher_replay(adreno_dev))
+ BUG();
+}
+
+static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
+ unsigned int consumed, unsigned int retired)
+{
+ return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
+ (timestamp_cmp(retired, cmdbatch->timestamp) < 0));
+}
+
+/**
+ * adreno_dispatcher_work() - Master work handler for the dispatcher
+ * @work: Pointer to the work struct for the current work queue
+ *
+ * Process expired commands and send new ones.
+ */
+static void adreno_dispatcher_work(struct work_struct *work)
+{
+ struct adreno_dispatcher *dispatcher =
+ container_of(work, struct adreno_dispatcher, work);
+ struct adreno_device *adreno_dev =
+ container_of(dispatcher, struct adreno_device, dispatcher);
+ struct kgsl_device *device = &adreno_dev->dev;
+ int inv, count = 0;
+
+ mutex_lock(&dispatcher->mutex);
+
+ while (dispatcher->head != dispatcher->tail) {
+ uint32_t consumed, retired = 0;
+ struct kgsl_cmdbatch *cmdbatch =
+ dispatcher->cmdqueue[dispatcher->head];
+ struct adreno_context *drawctxt;
+ BUG_ON(cmdbatch == NULL);
+
+ drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+
+ /*
+ * First try to expire the timestamp. This happens if the
+ * context is valid and the timestamp expired normally or if the
+ * context was destroyed before the command batch was finished
+	 * in the GPU. Either way, retire the command batch, advance the
+	 * pointers and continue processing the queue
+ */
+
+ if (!kgsl_context_detached(cmdbatch->context))
+ retired = kgsl_readtimestamp(device, cmdbatch->context,
+ KGSL_TIMESTAMP_RETIRED);
+
+ if (kgsl_context_detached(cmdbatch->context) ||
+ (timestamp_cmp(cmdbatch->timestamp, retired) <= 0)) {
+
+ trace_adreno_cmdbatch_retired(cmdbatch,
+ dispatcher->inflight - 1);
+
+ /* Reduce the number of inflight command batches */
+ dispatcher->inflight--;
+
+ /* Zero the old entry*/
+ dispatcher->cmdqueue[dispatcher->head] = NULL;
+
+ /* Advance the buffer head */
+ dispatcher->head = CMDQUEUE_NEXT(dispatcher->head,
+ ADRENO_DISPATCH_CMDQUEUE_SIZE);
+
+ /* Destroy the retired command batch */
+ kgsl_cmdbatch_destroy(cmdbatch);
+
+ /* Update the expire time for the next command batch */
+
+ if (dispatcher->inflight > 0) {
+ cmdbatch =
+ dispatcher->cmdqueue[dispatcher->head];
+ cmdbatch->expires = jiffies +
+ msecs_to_jiffies(_cmdbatch_timeout);
+ }
+
+ count++;
+
+ BUG_ON(dispatcher->inflight == 0 && dispatcher->fault);
+ continue;
+ }
+
+ /*
+ * If we got a fault from the interrupt handler, this command
+ * is to blame. Invalidate it, reset and replay
+ */
+
+ if (dispatcher->fault) {
+ dispatcher_do_fault(device, cmdbatch, 1);
+ goto done;
+ }
+
+ /* Get the last consumed timestamp */
+ consumed = kgsl_readtimestamp(device, cmdbatch->context,
+ KGSL_TIMESTAMP_CONSUMED);
+
+ /* Break here if fault detection is disabled for the context */
+ if (drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
+ break;
+
+ /*
+ * The last line of defense is to check if the command batch has
+ * timed out. If we get this far but the timeout hasn't expired
+ * yet then the GPU is still ticking away
+ */
+
+ if (time_is_after_jiffies(cmdbatch->expires))
+ break;
+
+ /* Boom goes the dynamite */
+
+ pr_err("-----------------------\n");
+
+ pr_err("dispatcher: expired ctx=%d ts=%d consumed=%d retired=%d\n",
+ cmdbatch->context->id, cmdbatch->timestamp, consumed,
+ retired);
+ pr_err("dispatcher: jiffies=%lu expired=%lu\n", jiffies,
+ cmdbatch->expires);
+
+ /*
+ * If execution stopped after the current command batch was
+ * consumed then invalidate the context for the current command
+ * batch
+ */
+
+ inv = cmdbatch_consumed(cmdbatch, consumed, retired);
+
+ dispatcher_do_fault(device, cmdbatch, inv);
+ break;
+ }
+
+	/*
+	 * If we retired commands and nothing is left inflight, release the
+	 * active count - this allows the system to go into suspend even if
+	 * there are queued command batches
+	 */
+
+ if (count && dispatcher->inflight == 0) {
+ mutex_lock(&device->mutex);
+ kgsl_active_count_put(device);
+ mutex_unlock(&device->mutex);
+ }
+
+ /* Dispatch new commands if we have the room */
+ if (dispatcher->inflight < _dispatcher_inflight)
+ _adreno_dispatcher_issuecmds(adreno_dev);
+
+done:
+ /* Either update the timer for the next command batch or disable it */
+ if (dispatcher->inflight) {
+ struct kgsl_cmdbatch *cmdbatch
+ = dispatcher->cmdqueue[dispatcher->head];
+
+ mod_timer(&dispatcher->timer, cmdbatch->expires);
+ } else
+ del_timer_sync(&dispatcher->timer);
+
+ /* Before leaving update the pwrscale information */
+ mutex_lock(&device->mutex);
+ kgsl_pwrscale_idle(device);
+ mutex_unlock(&device->mutex);
+
+ mutex_unlock(&dispatcher->mutex);
+}
+
+void adreno_dispatcher_schedule(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ queue_work(device->work_queue, &dispatcher->work);
+}
+
+/*
+ * This is called when the timer expires - it either means the GPU is hung or
+ * the IB is taking too long to execute
+ */
+void adreno_dispatcher_timer(unsigned long data)
+{
+ struct adreno_device *adreno_dev = (struct adreno_device *) data;
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ adreno_dispatcher_schedule(device);
+}
+
+/**
+ * adreno_dispatcher_fault_irq() - Trigger a fault in the dispatcher
+ * @device: Pointer to the KGSL device
+ *
+ * Called from an interrupt context, this will trigger a fault in the
+ * dispatcher
+ */
+void adreno_dispatcher_fault_irq(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ dispatcher->fault = 1;
+ adreno_dispatcher_schedule(device);
+}
+
+/**
+ * adreno_dispatcher_pause() - stop the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Pause the dispatcher so it doesn't accept any new commands
+ */
+void adreno_dispatcher_pause(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ /*
+	 * This will probably get called while holding other mutexes, so don't
+	 * take the dispatcher mutex. The biggest penalty is that another
+	 * command might be submitted while we are in here, but that's okay
+	 * because whoever is waiting for the drain will just have another
+	 * command batch to wait for
+ */
+
+ dispatcher->state = ADRENO_DISPATCHER_PAUSE;
+}
+
+/**
+ * adreno_dispatcher_start() - activate the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Set the dispatcher active and start the loop once to get things going
+ */
+void adreno_dispatcher_start(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
+
+ /* Schedule the work loop to get things going */
+ adreno_dispatcher_schedule(&adreno_dev->dev);
+}
+
+/**
+ * adreno_dispatcher_stop() - stop the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Stop the dispatcher and close all the timers
+ */
+void adreno_dispatcher_stop(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ del_timer_sync(&dispatcher->timer);
+ dispatcher->state = ADRENO_DISPATCHER_PAUSE;
+}
+
+/**
+ * adreno_dispatcher_close() - close the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Close the dispatcher and free all the outstanding commands and memory
+ */
+void adreno_dispatcher_close(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ mutex_lock(&dispatcher->mutex);
+ del_timer_sync(&dispatcher->timer);
+
+ while (dispatcher->head != dispatcher->tail) {
+ kgsl_cmdbatch_destroy(dispatcher->cmdqueue[dispatcher->head]);
+ dispatcher->head = (dispatcher->head + 1)
+ % ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ }
+
+ mutex_unlock(&dispatcher->mutex);
+
+ kobject_put(&dispatcher->kobj);
+}
+
+struct dispatcher_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct adreno_dispatcher *,
+ struct dispatcher_attribute *, char *);
+ ssize_t (*store)(struct adreno_dispatcher *,
+ struct dispatcher_attribute *, const char *buf,
+ size_t count);
+ unsigned int max;
+ unsigned int *value;
+};
+
+#define DISPATCHER_UINT_ATTR(_name, _mode, _max, _value) \
+ struct dispatcher_attribute dispatcher_attr_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = _mode }, \
+ .show = _show_uint, \
+ .store = _store_uint, \
+ .max = _max, \
+ .value = &(_value), \
+ }
+
+#define to_dispatcher_attr(_a) \
+ container_of((_a), struct dispatcher_attribute, attr)
+#define to_dispatcher(k) container_of(k, struct adreno_dispatcher, kobj)
+
+static ssize_t _store_uint(struct adreno_dispatcher *dispatcher,
+ struct dispatcher_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ int ret = kstrtoul(buf, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (!val || (attr->max && (val > attr->max)))
+ return -EINVAL;
+
+ *((unsigned int *) attr->value) = val;
+ return size;
+}
+
+static ssize_t _show_uint(struct adreno_dispatcher *dispatcher,
+ struct dispatcher_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ *((unsigned int *) attr->value));
+}
+
+static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE,
+ _dispatcher_inflight);
+/*
+ * Our code that "puts back" a command from the context is much cleaner
+ * if we are sure that there will always be enough room in the
+ * ringbuffer so restrict the maximum size of the context queue to
+ * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1
+ */
+static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644,
+ ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size);
+static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0,
+ _context_cmdbatch_burst);
+static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0, _cmdbatch_timeout);
+static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
+
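+/*
+ * The attributes above are registered as the default attributes of the
+ * "dispatch" kobject created in adreno_dispatcher_init(), so they show up
+ * as tunable files under the KGSL device sysfs directory.
+ */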
+static struct attribute *dispatcher_attrs[] = {
+ &dispatcher_attr_inflight.attr,
+ &dispatcher_attr_context_cmdqueue_size.attr,
+ &dispatcher_attr_context_burst_count.attr,
+ &dispatcher_attr_cmdbatch_timeout.attr,
+ &dispatcher_attr_context_queue_wait.attr,
+ NULL,
+};
+
+static ssize_t dispatcher_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct adreno_dispatcher *dispatcher = to_dispatcher(kobj);
+ struct dispatcher_attribute *pattr = to_dispatcher_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (pattr->show)
+ ret = pattr->show(dispatcher, pattr, buf);
+
+ return ret;
+}
+
+static ssize_t dispatcher_sysfs_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adreno_dispatcher *dispatcher = to_dispatcher(kobj);
+ struct dispatcher_attribute *pattr = to_dispatcher_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (pattr->store)
+ ret = pattr->store(dispatcher, pattr, buf, count);
+
+ return ret;
+}
+
+static void dispatcher_sysfs_release(struct kobject *kobj)
+{
+}
+
+static const struct sysfs_ops dispatcher_sysfs_ops = {
+ .show = dispatcher_sysfs_show,
+ .store = dispatcher_sysfs_store
+};
+
+static struct kobj_type ktype_dispatcher = {
+ .sysfs_ops = &dispatcher_sysfs_ops,
+ .default_attrs = dispatcher_attrs,
+ .release = dispatcher_sysfs_release
+};
+
+/**
+ * adreno_dispatcher_init() - Initialize the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Initialize the dispatcher
+ */
+int adreno_dispatcher_init(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int ret;
+
+ memset(dispatcher, 0, sizeof(*dispatcher));
+
+ mutex_init(&dispatcher->mutex);
+
+ setup_timer(&dispatcher->timer, adreno_dispatcher_timer,
+ (unsigned long) adreno_dev);
+
+ INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+
+ plist_head_init(&dispatcher->pending);
+ spin_lock_init(&dispatcher->plist_lock);
+
+ dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
+
+ ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
+ &device->dev->kobj, "dispatch");
+
+ return ret;
+}
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index bf173a7..1a4310e 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -13,10 +13,12 @@
#include <linux/slab.h>
#include <linux/msm_kgsl.h>
+#include <linux/sched.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"
+#include "adreno_trace.h"
#define KGSL_INIT_REFTIMESTAMP 0x7FFFFFFF
@@ -132,6 +134,247 @@
*incmd = cmd;
}
+static void wait_callback(struct kgsl_device *device, void *priv, u32 id,
+ u32 timestamp, u32 type)
+{
+ struct adreno_context *drawctxt = priv;
+ wake_up_interruptible_all(&drawctxt->waiting);
+}
+
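+/*
+ * Pick between the IO-accounted and plain interruptible waits so that idle
+ * time is charged to the right bucket for power accounting (see the io
+ * selection in adreno_drawctxt_wait() below).
+ */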
+#define adreno_wait_event_interruptible_timeout(wq, condition, timeout, io) \
+({ \
+ long __ret = timeout; \
+ if (io) \
+ __wait_io_event_interruptible_timeout(wq, condition, __ret); \
+ else \
+ __wait_event_interruptible_timeout(wq, condition, __ret); \
+ __ret; \
+})
+
+#define adreno_wait_event_interruptible(wq, condition, io) \
+({ \
+ long __ret; \
+ if (io) \
+ __wait_io_event_interruptible(wq, condition, __ret); \
+ else \
+ __wait_event_interruptible(wq, condition, __ret); \
+ __ret; \
+})
+
+static int _check_context_timestamp(struct kgsl_device *device,
+ struct adreno_context *drawctxt, unsigned int timestamp)
+{
+ int ret = 0;
+
+ /* Bail if the drawctxt has been invalidated or destroyed */
+ if (kgsl_context_detached(&drawctxt->base) ||
+ drawctxt->state != ADRENO_CONTEXT_STATE_ACTIVE)
+ return 1;
+
+ mutex_lock(&device->mutex);
+ ret = kgsl_check_timestamp(device, &drawctxt->base, timestamp);
+ mutex_unlock(&device->mutex);
+
+ return ret;
+}
+
+/**
+ * adreno_drawctxt_wait() - sleep until a timestamp expires
+ * @adreno_dev: pointer to the adreno_device struct
+ * @context: Pointer to the KGSL context to sleep for
+ * @timestamp: Timestamp to wait on
+ * @timeout: Number of jiffies to wait (0 for infinite)
+ *
+ * Register an event to wait for a timestamp on a context and sleep until it
+ * has passed. Returns < 0 on error, -ETIMEDOUT if the timeout expires or 0
+ * on success
+ */
+int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
+ struct kgsl_context *context,
+ uint32_t timestamp, unsigned int timeout)
+{
+ static unsigned int io_cnt;
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ int ret, io;
+
+ if (kgsl_context_detached(context))
+ return -EINVAL;
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ return -EDEADLK;
+
+ /* Needs to hold the device mutex */
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ trace_adreno_drawctxt_wait_start(context->id, timestamp);
+
+ ret = kgsl_add_event(device, context->id, timestamp,
+ wait_callback, drawctxt, NULL);
+ if (ret)
+ goto done;
+
+ /*
+ * For proper power accounting sometimes we need to call
+ * io_wait_interruptible_timeout and sometimes we need to call
+ * plain old wait_interruptible_timeout. We call the regular
+ * timeout N times out of 100, where N is a number specified by
+ * the current power level
+ */
+
+ io_cnt = (io_cnt + 1) % 100;
+ io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
+ ? 0 : 1;
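+	/*
+	 * Example: an io_fraction of 33 gives a plain wait for io_cnt 0..32
+	 * and an IO-accounted wait for io_cnt 33..99
+	 */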
+
+ mutex_unlock(&device->mutex);
+
+ if (timeout) {
+ ret = (int) adreno_wait_event_interruptible_timeout(
+ drawctxt->waiting,
+ _check_context_timestamp(device, drawctxt, timestamp),
+ msecs_to_jiffies(timeout), io);
+
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = 0;
+ } else {
+ ret = (int) adreno_wait_event_interruptible(drawctxt->waiting,
+ _check_context_timestamp(device, drawctxt, timestamp),
+ io);
+ }
+
+ mutex_lock(&device->mutex);
+
+ /* -EDEADLK if the context was invalidated while we were waiting */
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ ret = -EDEADLK;
+
+
+ /* Return -EINVAL if the context was detached while we were waiting */
+ if (kgsl_context_detached(context))
+ ret = -EINVAL;
+
+done:
+ trace_adreno_drawctxt_wait_done(context->id, timestamp, ret);
+ return ret;
+}
+
+static void global_wait_callback(struct kgsl_device *device, void *priv, u32 id,
+ u32 timestamp, u32 type)
+{
+ struct adreno_context *drawctxt = priv;
+
+ wake_up_interruptible_all(&drawctxt->waiting);
+ kgsl_context_put(&drawctxt->base);
+}
+
+static int _check_global_timestamp(struct kgsl_device *device,
+ unsigned int timestamp)
+{
+ int ret;
+
+ mutex_lock(&device->mutex);
+ ret = kgsl_check_timestamp(device, NULL, timestamp);
+ mutex_unlock(&device->mutex);
+
+ return ret;
+}
+
+int adreno_drawctxt_wait_global(struct adreno_device *adreno_dev,
+ struct kgsl_context *context,
+ uint32_t timestamp, unsigned int timeout)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ int ret;
+
+ /* Needs to hold the device mutex */
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ _kgsl_context_get(context);
+
+ trace_adreno_drawctxt_wait_start(KGSL_MEMSTORE_GLOBAL, timestamp);
+
+ ret = kgsl_add_event(device, KGSL_MEMSTORE_GLOBAL, timestamp,
+ global_wait_callback, drawctxt, NULL);
+ if (ret) {
+ kgsl_context_put(context);
+ goto done;
+ }
+
+ mutex_unlock(&device->mutex);
+
+ if (timeout) {
+ ret = (int) wait_event_interruptible_timeout(drawctxt->waiting,
+ _check_global_timestamp(device, timestamp),
+ msecs_to_jiffies(timeout));
+
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = 0;
+ } else {
+ ret = (int) wait_event_interruptible(drawctxt->waiting,
+ _check_global_timestamp(device, timestamp));
+ }
+
+ mutex_lock(&device->mutex);
+
+ if (ret)
+ kgsl_cancel_events_timestamp(device, NULL, timestamp);
+
+done:
+ trace_adreno_drawctxt_wait_done(KGSL_MEMSTORE_GLOBAL, timestamp, ret);
+ return ret;
+}
+
+/**
+ * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
+ * @device: Pointer to the KGSL device structure for the GPU
+ * @context: Pointer to the KGSL context structure
+ *
+ * Invalidate the context and remove all queued commands and cancel any pending
+ * waiters
+ */
+void adreno_drawctxt_invalidate(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+
+ trace_adreno_drawctxt_invalidate(drawctxt);
+
+ drawctxt->state = ADRENO_CONTEXT_STATE_INVALID;
+
+ /* Clear the pending queue */
+ mutex_lock(&drawctxt->mutex);
+
+ while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+ struct kgsl_cmdbatch *cmdbatch =
+ drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+ drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
+ ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+ mutex_unlock(&drawctxt->mutex);
+
+ mutex_lock(&device->mutex);
+ kgsl_cancel_events_timestamp(device, context,
+ cmdbatch->timestamp);
+ mutex_unlock(&device->mutex);
+
+ kgsl_cmdbatch_destroy(cmdbatch);
+ mutex_lock(&drawctxt->mutex);
+ }
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /* Give the bad news to everybody waiting around */
+ wake_up_interruptible_all(&drawctxt->waiting);
+ wake_up_interruptible_all(&drawctxt->wq);
+}
+
/**
* adreno_drawctxt_create - create a new adreno draw context
* @dev_priv: the owner of the context
@@ -149,6 +392,7 @@
int ret;
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
+
if (drawctxt == NULL)
return ERR_PTR(-ENOMEM);
@@ -168,22 +412,30 @@
KGSL_CONTEXT_NO_FAULT_TOLERANCE |
KGSL_CONTEXT_TYPE_MASK);
+ /* Always enable per-context timestamps */
+ *flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
+ drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
+
if (*flags & KGSL_CONTEXT_PREAMBLE)
drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
if (*flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
- if (*flags & KGSL_CONTEXT_PER_CONTEXT_TS)
- drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
-
- if (*flags & KGSL_CONTEXT_USER_GENERATED_TS) {
- if (!(*flags & KGSL_CONTEXT_PER_CONTEXT_TS)) {
- ret = -EINVAL;
- goto err;
- }
+ if (*flags & KGSL_CONTEXT_USER_GENERATED_TS)
drawctxt->flags |= CTXT_FLAGS_USER_GENERATED_TS;
- }
+
+ mutex_init(&drawctxt->mutex);
+ init_waitqueue_head(&drawctxt->wq);
+ init_waitqueue_head(&drawctxt->waiting);
+
+ /*
+ * Set up the plist node for the dispatcher. For now all contexts have
+ * the same priority, but later the priority will be set at create time
+ * by the user
+ */
+
+ plist_node_init(&drawctxt->pending, ADRENO_CONTEXT_DEFAULT_PRIORITY);
if (*flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
drawctxt->flags |= CTXT_FLAGS_NO_FAULT_TOLERANCE;
@@ -196,12 +448,6 @@
goto err;
kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(drawctxt->base.id, ref_wait_ts),
- KGSL_INIT_REFTIMESTAMP);
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(drawctxt->base.id, ts_cmp_enable),
- 0);
- kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
0);
kgsl_sharedmem_writel(device, &device->memstore,
@@ -219,18 +465,20 @@
* @context: Generic KGSL context container for the context
*
*/
-void adreno_drawctxt_detach(struct kgsl_context *context)
+int adreno_drawctxt_detach(struct kgsl_context *context)
{
struct kgsl_device *device;
struct adreno_device *adreno_dev;
struct adreno_context *drawctxt;
+ int ret;
if (context == NULL)
- return;
+ return 0;
device = context->device;
adreno_dev = ADRENO_DEVICE(device);
drawctxt = ADRENO_CONTEXT(context);
+
/* deactivate context */
if (adreno_dev->drawctxt_active == drawctxt) {
/* no need to save GMEM or shader, the context is
@@ -246,11 +494,39 @@
adreno_drawctxt_switch(adreno_dev, NULL, 0);
}
- if (device->state != KGSL_STATE_HUNG)
- adreno_idle(device);
+ mutex_lock(&drawctxt->mutex);
+
+ while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+ struct kgsl_cmdbatch *cmdbatch =
+ drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+ drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
+ ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /*
+ * Don't hold the drawctxt mutex while the cmdbatch is being
+ * destroyed because the cmdbatch destroy takes the device
+ * mutex and the world falls in on itself
+ */
+
+ kgsl_cmdbatch_destroy(cmdbatch);
+ mutex_lock(&drawctxt->mutex);
+ }
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /* Wait for the last global timestamp to pass before continuing */
+ ret = adreno_drawctxt_wait_global(adreno_dev, context,
+ drawctxt->internal_timestamp, 10 * 1000);
+
+ adreno_profile_process_results(device);
kgsl_sharedmem_free(&drawctxt->gpustate);
kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
+
+ return ret;
}
@@ -294,11 +570,12 @@
* Switch the current draw context
*/
-void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
unsigned int flags)
{
struct kgsl_device *device = &adreno_dev->dev;
+ int ret = 0;
if (drawctxt) {
if (flags & KGSL_CONTEXT_SAVE_GMEM)
@@ -314,9 +591,9 @@
if (adreno_dev->drawctxt_active == drawctxt) {
if (adreno_dev->gpudev->ctxt_draw_workaround &&
adreno_is_a225(adreno_dev))
- adreno_dev->gpudev->ctxt_draw_workaround(
+ ret = adreno_dev->gpudev->ctxt_draw_workaround(
adreno_dev, drawctxt);
- return;
+ return ret;
}
KGSL_CTXT_INFO(device, "from %d to %d flags %d\n",
@@ -325,7 +602,15 @@
drawctxt ? drawctxt->base.id : 0, flags);
/* Save the old context */
- adreno_dev->gpudev->ctxt_save(adreno_dev, adreno_dev->drawctxt_active);
+ ret = adreno_dev->gpudev->ctxt_save(adreno_dev,
+ adreno_dev->drawctxt_active);
+
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Error in GPU context %d save: %d\n",
+ adreno_dev->drawctxt_active->base.id, ret);
+ return ret;
+ }
/* Put the old instance of the active drawctxt */
if (adreno_dev->drawctxt_active) {
@@ -338,6 +623,14 @@
_kgsl_context_get(&drawctxt->base);
/* Set the new context */
- adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
+ ret = adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Error in GPU context %d restore: %d\n",
+ drawctxt->base.id, ret);
+ return ret;
+ }
+
adreno_dev->drawctxt_active = drawctxt;
+ return 0;
}
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 88d1b8c..f8469e2 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -61,7 +61,20 @@
{ KGSL_CONTEXT_TYPE_GL, "GL" }, \
{ KGSL_CONTEXT_TYPE_CL, "CL" }, \
{ KGSL_CONTEXT_TYPE_C2D, "C2D" }, \
- { KGSL_CONTEXT_TYPE_RS, "RS" }
+ { KGSL_CONTEXT_TYPE_RS, "RS" }, \
+ { KGSL_CONTEXT_TYPE_UNKNOWN, "UNKNOWN" }
+
+struct adreno_context_type {
+ unsigned int type;
+ const char *str;
+};
+
+#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128
+
+#define ADRENO_CONTEXT_DEFAULT_PRIORITY 1
+
+#define ADRENO_CONTEXT_STATE_ACTIVE 0
+#define ADRENO_CONTEXT_STATE_INVALID 1
struct kgsl_device;
struct adreno_device;
@@ -93,18 +106,58 @@
struct kgsl_memdesc quad_vertices_restore;
};
+/**
+ * struct adreno_context - Adreno GPU draw context
+ * @id: Unique integer ID of the context
+ * @timestamp: Last issued context-specific timestamp
+ * @internal_timestamp: Global timestamp of the last issued command
+ * @state: Current state of the context
+ * @flags: Bitfield controlling behavior of the context
+ * @type: Context type (GL, CL, RS)
+ * @mutex: Mutex to protect the cmdqueue
+ * @pagetable: Pointer to the GPU pagetable for the context
+ * @gpustate: Pointer to the GPU scratch memory for context save/restore
+ * @reg_restore: Command buffer for restoring context registers
+ * @shader_save: Command buffer for saving shaders
+ * @shader_restore: Command buffer to restore shaders
+ * @context_gmem_shadow: GMEM shadow structure for save/restore
+ * @reg_save: A2XX command buffer to save context registers
+ * @shader_fixup: A2XX command buffer to "fix" shaders on restore
+ * @chicken_restore: A2XX command buffer to "fix" register restore
+ * @bin_base_offset: Saved value of the A2XX BIN_BASE_OFFSET register
+ * @regconstant_save: A3XX command buffer to save some registers
+ * @constant_restore: A3XX command buffer to restore some registers
+ * @hlsqcontrol_restore: A3XX command buffer to restore HLSQ registers
+ * @save_fixup: A3XX command buffer to "fix" register save
+ * @restore_fixup: A3XX command buffer to restore register save fixes
+ * @shader_load_commands: A3XX GPU memory descriptor for shader load IB
+ * @shader_save_commands: A3XX GPU memory descriptor for shader save IB
+ * @constant_save_commands: A3XX GPU memory descriptor for constant save IB
+ * @constant_load_commands: A3XX GPU memory descriptor for constant load IB
+ * @cond_execs: A3XX GPU memory descriptor for conditional exec IB
+ * @hlsq_restore_commands: A3XX GPU memory descriptor for HLSQ restore IB
+ * @cmdqueue: Queue of command batches waiting to be dispatched for this context
+ * @cmdqueue_head: Head of the context cmdqueue
+ * @cmdqueue_tail: Tail of the context cmdqueue
+ * @pending: Priority list node for the dispatcher list of pending contexts
+ * @wq: Wait queue used by contexts to sleep pending room in the cmdqueue
+ * @waiting: Wait queue for threads waiting for a timestamp or event
+ * @queued: Number of commands queued in the cmdqueue
+ */
struct adreno_context {
struct kgsl_context base;
unsigned int ib_gpu_time_used;
unsigned int timestamp;
+ unsigned int internal_timestamp;
+ int state;
uint32_t flags;
unsigned int type;
+ struct mutex mutex;
struct kgsl_memdesc gpustate;
unsigned int reg_restore[3];
unsigned int shader_save[3];
unsigned int shader_restore[3];
- /* Information of the GMEM shadow that is created in context create */
struct gmem_shadow_t context_gmem_shadow;
/* A2XX specific items */
@@ -125,23 +178,41 @@
struct kgsl_memdesc constant_load_commands[3];
struct kgsl_memdesc cond_execs[4];
struct kgsl_memdesc hlsqcontrol_restore_commands[1];
+
+ /* Dispatcher */
+ struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ int cmdqueue_head;
+ int cmdqueue_tail;
+
+ struct plist_node pending;
+ wait_queue_head_t wq;
+ wait_queue_head_t waiting;
+
+ int queued;
};
struct kgsl_context *adreno_drawctxt_create(struct kgsl_device_private *,
uint32_t *flags);
-void adreno_drawctxt_detach(struct kgsl_context *context);
+int adreno_drawctxt_detach(struct kgsl_context *context);
void adreno_drawctxt_destroy(struct kgsl_context *context);
-void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
unsigned int flags);
void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
struct kgsl_context *context,
unsigned int offset);
+int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
+ struct kgsl_context *context,
+ uint32_t timestamp, unsigned int timeout);
+
+void adreno_drawctxt_invalidate(struct kgsl_device *device,
+ struct kgsl_context *context);
+
/* GPU context switch helper functions */
void build_quad_vtxbuff(struct adreno_context *drawctxt,
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 7a070a6..294ae76 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -22,6 +22,7 @@
#include "adreno_ringbuffer.h"
#include "kgsl_cffdump.h"
#include "kgsl_pwrctrl.h"
+#include "adreno_trace.h"
#include "a2xx_reg.h"
#include "a3xx_reg.h"
@@ -79,6 +80,8 @@
{KGSL_CMD_INTERNAL_IDENTIFIER, "CMD__INT"},
{KGSL_START_OF_IB_IDENTIFIER, "IB_START"},
{KGSL_END_OF_IB_IDENTIFIER, "IB___END"},
+ {KGSL_START_OF_PROFILE_IDENTIFIER, "PRO_STRT"},
+ {KGSL_END_OF_PROFILE_IDENTIFIER, "PRO__END"},
};
static uint32_t adreno_is_pm4_len(uint32_t word)
@@ -457,6 +460,9 @@
adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ),
&cp_ib2_bufsz);
+ trace_adreno_gpu_fault(rbbm_status, cp_rb_rptr, cp_rb_wptr,
+ cp_ib1_base, cp_ib1_bufsz, cp_ib2_base, cp_ib2_bufsz);
+
/* If postmortem dump is not enabled, dump minimal set and return */
if (!device->pm_dump_enable) {
@@ -642,5 +648,9 @@
error_vfree:
vfree(rb_copy);
end:
+ /* Restart the dispatcher after a manually triggered dump */
+ if (manual)
+ adreno_dispatcher_start(adreno_dev);
+
return result;
}
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
new file mode 100644
index 0000000..896b6e8
--- /dev/null
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -0,0 +1,1161 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+
+#include "adreno.h"
+#include "adreno_profile.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+
+#define ASSIGNS_STR_FORMAT "%.8s:%u "
+
+/*
+ * Raw Data for processing later:
+ * : 3 - timestamp, count, context id
+ * [per counter] - data for each counter
+ * : 1 - Register offset
+ * : 2 - Pre IB register hi/lo value
+ * : 2 - Post IB register hi/lo value
+ * [per counter end]
+ */
+#define SIZE_DATA(cnt) (3 + (cnt) * 5)
+
+/*
+ * Pre-IB command size (in dwords):
+ * : 2 - NOP start identifier
+ * : 3 - timestamp
+ * : 3 - count
+ * : 3 - context id
+ * [loop count start] - for each counter to watch
+ * : 3 - Register offset
+ * : 3 - Register read lo
+ * : 3 - Register read high
+ * [loop end]
+ * : 2 - NOP end identifier
+ */
+#define SIZE_PREIB(cnt) (13 + (cnt) * 9)
+
+/*
+ * Post-IB command size (in dwords):
+ * : 2 - NOP start identifier
+ * [loop count start] - for each counter to watch
+ * : 3 - Register read lo
+ * : 3 - Register read high
+ * [loop end]
+ * : 2 - NOP end identifier
+ */
+#define SIZE_POSTIB(cnt) (4 + (cnt) * 6)
+
+/* Counter data + Pre size + post size = total size */
+#define SIZE_SHARED_ENTRY(cnt) (SIZE_DATA(cnt) + SIZE_PREIB(cnt) \
+ + SIZE_POSTIB(cnt))
+
+/*
+ * Space for following string :"%u %u %u %.5s %u "
+ * [count iterations]: "%.8s:%u %llu %llu%c"
+ */
+#define SIZE_PIPE_ENTRY(cnt) (50 + (cnt) * 62)
+#define SIZE_LOG_ENTRY(cnt) (5 + (cnt) * 5)
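+
+/*
+ * Example (hypothetical count of 2 assigned counters): SIZE_DATA(2) = 13,
+ * SIZE_PREIB(2) = 31 and SIZE_POSTIB(2) = 16 dwords, so SIZE_SHARED_ENTRY(2)
+ * is 60 dwords and SIZE_LOG_ENTRY(2) is 15 dwords.
+ */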
+
+static struct adreno_context_type ctxt_type_table[] = {ADRENO_DRAWCTXT_TYPES};
+
+static const char *get_api_type_str(unsigned int type)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(ctxt_type_table) - 1; i++) {
+ if (ctxt_type_table[i].type == type)
+ break;
+ }
+ return ctxt_type_table[i].str;
+}
+
+static inline void _create_ib_ref(struct kgsl_memdesc *memdesc,
+ unsigned int *cmd, unsigned int cnt, unsigned int off)
+{
+ cmd[0] = CP_HDR_INDIRECT_BUFFER_PFD;
+ cmd[1] = memdesc->gpuaddr + off;
+ cmd[2] = cnt;
+}
+
+#define IB_START(cmd) do { \
+ *cmd++ = cp_nop_packet(1); \
+ *cmd++ = KGSL_START_OF_PROFILE_IDENTIFIER; \
+	} while (0)
+
+#define IB_END(cmd) do { \
+ *cmd++ = cp_nop_packet(1); \
+ *cmd++ = KGSL_END_OF_PROFILE_IDENTIFIER; \
+	} while (0)
+
+#define IB_CMD(cmd, type, val1, val2, off) do { \
+ *cmd++ = cp_type3_packet(type, 2); \
+ *cmd++ = val1; \
+ *cmd++ = val2; \
+ off += sizeof(unsigned int); \
+	} while (0)
+
+static void _build_pre_ib_cmds(struct adreno_profile *profile,
+ unsigned int *rbcmds, unsigned int head,
+ unsigned int timestamp, unsigned int ctxt_id)
+{
+ struct adreno_profile_assigns_list *entry;
+ unsigned int *start, *ibcmds;
+ unsigned int count = profile->assignment_count;
+ unsigned int gpuaddr = profile->shared_buffer.gpuaddr;
+ unsigned int ib_offset = head + SIZE_DATA(count);
+ unsigned int data_offset = head * sizeof(unsigned int);
+
+ ibcmds = ib_offset + ((unsigned int *) profile->shared_buffer.hostptr);
+ start = ibcmds;
+
+ /* start of profile identifier */
+ IB_START(ibcmds);
+
+ /* timestamp */
+ IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+ timestamp, data_offset);
+
+ /* count: number of perf counters pairs GPU will write */
+ IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+ profile->assignment_count, data_offset);
+
+ /* context id */
+ IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+ ctxt_id, data_offset);
+
+ /* loop for each countable assigned */
+ list_for_each_entry(entry, &profile->assignments_list, list) {
+ IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+ entry->offset, data_offset);
+ IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset,
+ gpuaddr + data_offset, data_offset);
+ IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset + 1,
+ gpuaddr + data_offset, data_offset);
+
+ /* skip over post_ib counter data */
+ data_offset += sizeof(unsigned int) * 2;
+ }
+
+ /* end of profile identifier */
+ IB_END(ibcmds);
+
+ _create_ib_ref(&profile->shared_buffer, rbcmds,
+ ibcmds - start, ib_offset * sizeof(unsigned int));
+}
+
+static void _build_post_ib_cmds(struct adreno_profile *profile,
+ unsigned int *rbcmds, unsigned int head)
+{
+ struct adreno_profile_assigns_list *entry;
+ unsigned int *start, *ibcmds;
+ unsigned int count = profile->assignment_count;
+ unsigned int gpuaddr = profile->shared_buffer.gpuaddr;
+ unsigned int ib_offset = head + SIZE_DATA(count) + SIZE_PREIB(count);
+ unsigned int data_offset = head * sizeof(unsigned int);
+
+ ibcmds = ib_offset + ((unsigned int *) profile->shared_buffer.hostptr);
+ start = ibcmds;
+ /* end of profile identifier */
+ IB_END(ibcmds);
+
+ /* skip over pre_ib preamble */
+ data_offset += sizeof(unsigned int) * 3;
+
+ /* loop for each countable assigned */
+ list_for_each_entry(entry, &profile->assignments_list, list) {
+ /* skip over pre_ib counter data */
+ data_offset += sizeof(unsigned int) * 3;
+
+ IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset,
+ gpuaddr + data_offset, data_offset);
+ IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset + 1,
+ gpuaddr + data_offset, data_offset);
+ }
+
+ /* end of profile identifier */
+ IB_END(ibcmds);
+
+ _create_ib_ref(&profile->shared_buffer, rbcmds,
+ ibcmds - start, ib_offset * sizeof(unsigned int));
+}
+
+static bool shared_buf_empty(struct adreno_profile *profile)
+{
+ if (profile->shared_buffer.hostptr == NULL ||
+ profile->shared_buffer.size == 0)
+ return true;
+
+ if (profile->shared_head == profile->shared_tail)
+ return true;
+
+ return false;
+}
+
+static inline void shared_buf_inc(unsigned int max_size,
+ unsigned int *offset, size_t inc)
+{
+ *offset = (*offset + inc) % max_size;
+}
+
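+/*
+ * Circular log buffer helpers: advance an offset or pointer and wrap it at
+ * ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS.
+ */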
+static inline void log_buf_wrapcnt(unsigned int cnt, unsigned int *off)
+{
+ *off = (*off + cnt) % ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS;
+}
+
+static inline void log_buf_wrapinc(unsigned int *profile_log_buffer,
+ unsigned int **ptr)
+{
+ *ptr += 1;
+ if (*ptr >= (profile_log_buffer +
+ ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS))
+ *ptr -= ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS;
+}
+
+static inline unsigned int log_buf_available(struct adreno_profile *profile,
+ unsigned int *head_ptr)
+{
+ unsigned int tail, head;
+
+ tail = (unsigned int) profile->log_tail -
+ (unsigned int) profile->log_buffer;
+ head = (unsigned int) head_ptr - (unsigned int) profile->log_buffer;
+ if (tail > head)
+ return (tail - head) / sizeof(unsigned int);
+ else
+ return ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS - ((head - tail) /
+ sizeof(unsigned int));
+}
+
+static inline unsigned int shared_buf_available(struct adreno_profile *profile)
+{
+ if (profile->shared_tail > profile->shared_head)
+ return profile->shared_tail - profile->shared_head;
+ else
+ return profile->shared_size -
+ (profile->shared_head - profile->shared_tail);
+}
+
+static struct adreno_profile_assigns_list *_find_assignment_by_offset(
+ struct adreno_profile *profile, unsigned int offset)
+{
+ struct adreno_profile_assigns_list *entry;
+
+ list_for_each_entry(entry, &profile->assignments_list, list) {
+ if (entry->offset == offset)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static bool _in_assignments_list(struct adreno_profile *profile,
+ unsigned int groupid, unsigned int countable)
+{
+ struct adreno_profile_assigns_list *entry;
+
+ list_for_each_entry(entry, &profile->assignments_list, list) {
+ if (entry->groupid == groupid && entry->countable ==
+ countable)
+ return true;
+ }
+
+ return false;
+}
+
+static bool _add_to_assignments_list(struct adreno_profile *profile,
+ const char *str, unsigned int groupid, unsigned int countable,
+ unsigned int offset)
+{
+ struct adreno_profile_assigns_list *entry;
+
+ /* first make sure we can alloc memory */
+ entry = kmalloc(sizeof(struct adreno_profile_assigns_list), GFP_KERNEL);
+ if (!entry)
+ return false;
+
+ list_add_tail(&entry->list, &profile->assignments_list);
+
+ entry->countable = countable;
+ entry->groupid = groupid;
+ entry->offset = offset;
+
+ strlcpy(entry->name, str, sizeof(entry->name));
+
+ profile->assignment_count++;
+
+ return true;
+}
+
+static void check_close_profile(struct adreno_profile *profile)
+{
+ if (profile->log_buffer == NULL)
+ return;
+
+ if (!adreno_profile_enabled(profile) && shared_buf_empty(profile)) {
+ if (profile->log_head == profile->log_tail) {
+ vfree(profile->log_buffer);
+ profile->log_buffer = NULL;
+ profile->log_head = NULL;
+ profile->log_tail = NULL;
+ }
+ }
+}
+
+static bool results_available(struct kgsl_device *device,
+ unsigned int *shared_buf_tail)
+{
+ unsigned int global_eop;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ unsigned int off = profile->shared_tail;
+ unsigned int *shared_ptr = (unsigned int *)
+ profile->shared_buffer.hostptr;
+ unsigned int ts, cnt;
+ int ts_cmp;
+
+ /*
+	 * If the shared buffer is empty, or the memstore EOP timestamp is less
+	 * than the outstanding counter buffer timestamps, then no results are
+	 * available
+ */
+ if (shared_buf_empty(profile))
+ return false;
+
+ global_eop = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+ do {
+ cnt = *(shared_ptr + off + 1);
+ if (cnt == 0)
+ return false;
+
+ ts = *(shared_ptr + off);
+ ts_cmp = timestamp_cmp(ts, global_eop);
+ if (ts_cmp >= 0) {
+ *shared_buf_tail = off;
+ if (off == profile->shared_tail)
+ return false;
+ else
+ return true;
+ }
+ shared_buf_inc(profile->shared_size, &off,
+ SIZE_SHARED_ENTRY(cnt));
+ } while (off != profile->shared_head);
+
+ *shared_buf_tail = profile->shared_head;
+
+ return true;
+}
+
+static void transfer_results(struct kgsl_device *device,
+ unsigned int shared_buf_tail)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ unsigned int buf_off;
+ unsigned int ts, cnt, ctxt_id, pid, tid, client_type;
+ unsigned int *ptr = (unsigned int *) profile->shared_buffer.hostptr;
+ struct kgsl_context *k_ctxt;
+ unsigned int *log_ptr, *log_base;
+ struct adreno_profile_assigns_list *assigns_list;
+ int i;
+
+ log_ptr = profile->log_head;
+ log_base = profile->log_buffer;
+ if (log_ptr == NULL)
+ return;
+
+ /*
+	 * Go through the counter buffers and format them for writing into
+	 * log_buffer. If the log buffer doesn't have space, just overwrite it
+	 * circularly. shared_buf is guaranteed not to wrap within an entry,
+	 * so plain pointer increments can be used.
+ */
+ while (profile->shared_tail != shared_buf_tail) {
+ buf_off = profile->shared_tail;
+ /*
+ * format: timestamp, count, context_id
+ * count entries: pc_off, pc_start, pc_end
+ */
+ ts = *(ptr + buf_off);
+ cnt = *(ptr + buf_off + 1);
+ ctxt_id = *(ptr + buf_off + 2);
+ /*
+ * if entry overwrites the tail of log_buffer then adjust tail
+ * ptr to make room for the new entry, discarding old entry
+ */
+ while (log_buf_available(profile, log_ptr) <=
+ SIZE_LOG_ENTRY(cnt)) {
+ unsigned int size_tail, boff;
+ size_tail = SIZE_LOG_ENTRY(0xffff &
+ *(profile->log_tail));
+ boff = ((unsigned int) profile->log_tail -
+ (unsigned int) log_base) / sizeof(unsigned int);
+ log_buf_wrapcnt(size_tail, &boff);
+ profile->log_tail = log_base + boff;
+ }
+
+ /* find Adreno ctxt struct */
+ k_ctxt = idr_find(&device->context_idr, ctxt_id);
+ if (k_ctxt == NULL) {
+ shared_buf_inc(profile->shared_size,
+ &profile->shared_tail,
+ SIZE_SHARED_ENTRY(cnt));
+ continue;
+ } else {
+ struct adreno_context *adreno_ctxt =
+ ADRENO_CONTEXT(k_ctxt);
+ pid = k_ctxt->pid; /* pid */
+ tid = k_ctxt->tid; /* tid creator */
+ client_type = adreno_ctxt->type << 16;
+ }
+
+ buf_off += 3;
+ *log_ptr = client_type | cnt;
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = pid;
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = tid;
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = ctxt_id;
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = ts;
+ log_buf_wrapinc(log_base, &log_ptr);
+
+ for (i = 0; i < cnt; i++) {
+ assigns_list = _find_assignment_by_offset(
+ profile, *(ptr + buf_off++));
+ if (assigns_list == NULL) {
+ *log_ptr = (unsigned int) -1;
+ goto err;
+ } else {
+ *log_ptr = assigns_list->groupid << 16 |
+ (assigns_list->countable & 0xffff);
+ }
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = *(ptr + buf_off++); /* perf cntr start hi */
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = *(ptr + buf_off++); /* perf cntr start lo */
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = *(ptr + buf_off++); /* perf cntr end hi */
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = *(ptr + buf_off++); /* perf cntr end lo */
+ log_buf_wrapinc(log_base, &log_ptr);
+
+ }
+ shared_buf_inc(profile->shared_size,
+ &profile->shared_tail,
+ SIZE_SHARED_ENTRY(cnt));
+
+ }
+ profile->log_head = log_ptr;
+ return;
+err:
+ /* reset head/tail to same on error in hopes we work correctly later */
+ profile->log_head = profile->log_tail;
+}
+
+static int profile_enable_get(void *data, u64 *val)
+{
+ struct kgsl_device *device = data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ mutex_lock(&device->mutex);
+ *val = adreno_profile_enabled(&adreno_dev->profile);
+ mutex_unlock(&device->mutex);
+
+ return 0;
+}
+
+static int profile_enable_set(void *data, u64 val)
+{
+ struct kgsl_device *device = data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+
+ mutex_lock(&device->mutex);
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ mutex_unlock(&device->mutex);
+ return 0;
+ }
+
+ profile->enabled = val;
+
+ check_close_profile(profile);
+
+ mutex_unlock(&device->mutex);
+
+ return 0;
+}
+
+static ssize_t profile_assignments_read(struct file *filep,
+ char __user *ubuf, size_t max, loff_t *ppos)
+{
+ struct kgsl_device *device = (struct kgsl_device *) filep->private_data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ struct adreno_profile_assigns_list *entry;
+ int len = 0, max_size = PAGE_SIZE;
+ char *buf, *pos;
+ ssize_t size = 0;
+
+ if (adreno_is_a2xx(adreno_dev))
+ return -EINVAL;
+
+ mutex_lock(&device->mutex);
+
+ buf = kmalloc(max_size, GFP_KERNEL);
+ if (!buf) {
+ mutex_unlock(&device->mutex);
+ return -ENOMEM;
+ }
+
+ pos = buf;
+
+	/* copy all assignments from the list to the string */
+ list_for_each_entry(entry, &profile->assignments_list, list) {
+ len = snprintf(pos, max_size, ASSIGNS_STR_FORMAT,
+ entry->name, entry->countable);
+
+ max_size -= len;
+ pos += len;
+ }
+
+ size = simple_read_from_buffer(ubuf, max, ppos, buf,
+ strlen(buf));
+
+ kfree(buf);
+
+ mutex_unlock(&device->mutex);
+ return size;
+}
+
+static void _remove_assignment(struct adreno_device *adreno_dev,
+ unsigned int groupid, unsigned int countable)
+{
+ struct adreno_profile *profile = &adreno_dev->profile;
+ struct adreno_profile_assigns_list *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &profile->assignments_list, list) {
+ if (entry->groupid == groupid &&
+ entry->countable == countable) {
+ list_del(&entry->list);
+
+ profile->assignment_count--;
+
+ kfree(entry);
+
+ /* remove from perf counter allocation */
+ adreno_perfcounter_put(adreno_dev, groupid, countable,
+ PERFCOUNTER_FLAG_KERNEL);
+ }
+ }
+}
+
+static void _add_assignment(struct adreno_device *adreno_dev,
+ unsigned int groupid, unsigned int countable)
+{
+ struct adreno_profile *profile = &adreno_dev->profile;
+ unsigned int offset;
+ const char *name = NULL;
+
+ name = adreno_perfcounter_get_name(adreno_dev, groupid);
+ if (!name)
+ return;
+
+ /* if already in assigned list skip it */
+ if (_in_assignments_list(profile, groupid, countable))
+ return;
+
+ /* add to perf counter allocation, if fail skip it */
+ if (adreno_perfcounter_get(adreno_dev, groupid,
+ countable, &offset, PERFCOUNTER_FLAG_NONE))
+ return;
+
+ /* add to assignments list, put counter back if error */
+ if (!_add_to_assignments_list(profile, name, groupid,
+ countable, offset))
+ adreno_perfcounter_put(adreno_dev, groupid,
+ countable, PERFCOUNTER_FLAG_KERNEL);
+}
+
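+/*
+ * Parse one "<group>:<countable>" token from the assignments string; a
+ * leading '-' marks the assignment for removal. Returns a pointer just past
+ * the token, or NULL when there is nothing more to parse or a token is bad.
+ */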
+static char *_parse_next_assignment(struct adreno_device *adreno_dev,
+ char *str, int *groupid, int *countable, bool *remove)
+{
+ char *groupid_str, *countable_str;
+ int ret;
+
+ *groupid = -EINVAL;
+ *countable = -EINVAL;
+ *remove = false;
+
+ /* remove spaces */
+ while (*str == ' ')
+ str++;
+
+ /* check if it's a remove assignment */
+ if (*str == '-') {
+ *remove = true;
+ str++;
+ }
+
+ /* get the groupid string */
+ groupid_str = str;
+ while (*str != ':') {
+ if (*str == '\0')
+ return NULL;
+ *str = tolower(*str);
+ str++;
+ }
+ if (groupid_str == str)
+ return NULL;
+
+ *str = '\0';
+ str++;
+
+ /* get the countable string */
+ countable_str = str;
+ while (*str != ' ' && *str != '\0')
+ str++;
+ if (countable_str == str)
+ return NULL;
+
+ *str = '\0';
+ str++;
+
+ /* set results */
+ *groupid = adreno_perfcounter_get_groupid(adreno_dev,
+ groupid_str);
+ if (*groupid < 0)
+ return NULL;
+ ret = kstrtou32(countable_str, 10, countable);
+ if (ret)
+ return NULL;
+
+ return str;
+}
+
+static ssize_t profile_assignments_write(struct file *filep,
+ const char __user *user_buf, size_t len, loff_t *off)
+{
+ struct kgsl_device *device = (struct kgsl_device *) filep->private_data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ size_t size = 0;
+ char *buf, *pbuf;
+ bool remove_assignment = false;
+ int groupid, countable;
+
+ if (len >= PAGE_SIZE || len == 0)
+ return -EINVAL;
+
+ if (adreno_is_a2xx(adreno_dev))
+ return -ENOSPC;
+
+ mutex_lock(&device->mutex);
+
+ if (adreno_profile_enabled(profile)) {
+ size = -EINVAL;
+ goto error_unlock;
+ }
+
+ kgsl_active_count_get(device);
+
+ /*
+	 * When adding/removing assignments, ensure that the GPU is done with
+	 * all of its work. This helps to synchronize the workflow with the
+	 * GPU and avoid races.
+ */
+ if (adreno_idle(device)) {
+ size = -EINVAL;
+ goto error_put;
+ }
+
+ /* clear all shared buffer results */
+ adreno_profile_process_results(device);
+
+ buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!buf) {
+ size = -EINVAL;
+ goto error_put;
+ }
+
+ pbuf = buf;
+
+ /* clear the log buffer */
+ if (profile->log_buffer != NULL) {
+ profile->log_head = profile->log_buffer;
+ profile->log_tail = profile->log_buffer;
+ }
+
+ if (copy_from_user(buf, user_buf, len)) {
+ size = -EFAULT;
+ goto error_free;
+ }
+
+ /* for sanity and parsing, ensure it is null terminated */
+ buf[len] = '\0';
+
+ /* parse file buf and add(remove) to(from) appropriate lists */
+ while (1) {
+ pbuf = _parse_next_assignment(adreno_dev, pbuf, &groupid,
+ &countable, &remove_assignment);
+ if (pbuf == NULL)
+ break;
+
+ if (remove_assignment)
+ _remove_assignment(adreno_dev, groupid, countable);
+ else
+ _add_assignment(adreno_dev, groupid, countable);
+ }
+
+ size = len;
+
+error_free:
+ kfree(buf);
+error_put:
+ kgsl_active_count_put(device);
+error_unlock:
+ mutex_unlock(&device->mutex);
+ return size;
+}
+
+static int _pipe_print_pending(char *ubuf, size_t max)
+{
+ loff_t unused = 0;
+ char str[] = "Operation Would Block!";
+
+ return simple_read_from_buffer(ubuf, max,
+ &unused, str, strlen(str));
+}
+
+static int _pipe_print_results(struct adreno_device *adreno_dev,
+ char *ubuf, size_t max)
+{
+ struct adreno_profile *profile = &adreno_dev->profile;
+ const char *grp_name;
+ char *usr_buf = ubuf;
+ unsigned int *log_ptr = NULL;
+ int len, i;
+ int status = 0;
+ ssize_t size, total_size = 0;
+ unsigned int cnt, api_type, ctxt_id, pid, tid, ts, cnt_reg;
+ unsigned long long pc_start, pc_end;
+ const char *api_str;
+ char format_space;
+ loff_t unused = 0;
+ char pipe_hdr_buf[51]; /* 4 uint32 + 5 space + 5 API type + '\0' */
+ char pipe_cntr_buf[63]; /* 2 uint64 + 1 uint32 + 4 spaces + 8 group */
+
+ /* convert unread entries to ASCII, copy to user-space */
+ log_ptr = profile->log_tail;
+
+ do {
+ cnt = *log_ptr & 0xffff;
+ if (SIZE_PIPE_ENTRY(cnt) > max) {
+ status = 0;
+ goto err;
+ }
+ if ((max - (usr_buf - ubuf)) < SIZE_PIPE_ENTRY(cnt))
+ break;
+
+ api_type = *log_ptr >> 16;
+ api_str = get_api_type_str(api_type);
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ pid = *log_ptr;
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ tid = *log_ptr;
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ ctxt_id = *log_ptr;
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ ts = *log_ptr;
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ len = snprintf(pipe_hdr_buf, sizeof(pipe_hdr_buf) - 1,
+ "%u %u %u %.5s %u ",
+ pid, tid, ctxt_id, api_str, ts);
+ size = simple_read_from_buffer(usr_buf,
+ max - (usr_buf - ubuf),
+ &unused, pipe_hdr_buf, len);
+ if (size < 0) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ unused = 0;
+ usr_buf += size;
+ total_size += size;
+
+ for (i = 0; i < cnt; i++) {
+ grp_name = adreno_perfcounter_get_name(
+ adreno_dev, *log_ptr >> 16);
+ if (grp_name == NULL) {
+ status = -EFAULT;
+ goto err;
+ }
+
+ if (i == cnt - 1)
+ format_space = '\n';
+ else
+ format_space = ' ';
+
+ cnt_reg = *log_ptr & 0xffff;
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ pc_start = *((unsigned long long *) log_ptr);
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ pc_end = *((unsigned long long *) log_ptr);
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+
+ len = snprintf(pipe_cntr_buf,
+ sizeof(pipe_cntr_buf) - 1,
+ "%.8s:%u %llu %llu%c",
+ grp_name, cnt_reg, pc_start,
+ pc_end, format_space);
+
+ size = simple_read_from_buffer(usr_buf,
+ max - (usr_buf - ubuf),
+ &unused, pipe_cntr_buf, len);
+ if (size < 0) {
+ status = size;
+ goto err;
+ }
+ unused = 0;
+ usr_buf += size;
+ total_size += size;
+ }
+ } while (log_ptr != profile->log_head);
+
+ status = total_size;
+err:
+ profile->log_tail = log_ptr;
+
+ return status;
+}
+
+static int profile_pipe_print(struct file *filep, char __user *ubuf,
+ size_t max, loff_t *ppos)
+{
+ struct kgsl_device *device = (struct kgsl_device *) filep->private_data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ char *usr_buf = ubuf;
+ int status = 0;
+
+ if (adreno_is_a2xx(adreno_dev))
+ return 0;
+
+	/*
+	 * This file is not seekable since it only supports streaming; ignore
+	 * any ppos != 0
+	 */
+ /*
+ * format <pid> <tid> <context id> <cnt<<16 | client type> <timestamp>
+ * for each perf counter <cntr_reg_off> <start hi & lo> <end hi & low>
+ */
+
+ mutex_lock(&device->mutex);
+
+ while (1) {
+ /* process any results that are available into the log_buffer */
+ status = adreno_profile_process_results(device);
+ if (status > 0) {
+ /* if we have results, print them and exit */
+ status = _pipe_print_results(adreno_dev, usr_buf, max);
+ break;
+ }
+
+ /* there are no unread results, act accordingly */
+ if (filep->f_flags & O_NONBLOCK) {
+ if (profile->shared_tail != profile->shared_head) {
+ status = _pipe_print_pending(usr_buf, max);
+ break;
+ } else {
+ status = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&device->mutex);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 10);
+ mutex_lock(&device->mutex);
+
+ if (signal_pending(current)) {
+ status = 0;
+ break;
+ }
+ }
+
+ check_close_profile(profile);
+ mutex_unlock(&device->mutex);
+
+ return status;
+}
+
+static int profile_groups_print(struct seq_file *s, void *unused)
+{
+ struct kgsl_device *device = (struct kgsl_device *) s->private;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+ int i, j, used;
+
+ /* perfcounter list not allowed on a2xx */
+ if (adreno_is_a2xx(adreno_dev))
+ return -EINVAL;
+
+ mutex_lock(&device->mutex);
+
+ for (i = 0; i < counters->group_count; ++i) {
+ group = &(counters->groups[i]);
+ /* get number of counters used for this group */
+ used = 0;
+ for (j = 0; j < group->reg_count; j++) {
+ if (group->regs[j].countable !=
+ KGSL_PERFCOUNTER_NOT_USED)
+ used++;
+ }
+
+ seq_printf(s, "%s %d %d\n", group->name,
+ group->reg_count, used);
+ }
+
+ mutex_unlock(&device->mutex);
+
+ return 0;
+}
+
+static int profile_groups_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, profile_groups_print, inode->i_private);
+}
+
+static const struct file_operations profile_groups_fops = {
+ .owner = THIS_MODULE,
+ .open = profile_groups_open,
+ .read = seq_read,
+ .llseek = noop_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations profile_pipe_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = profile_pipe_print,
+ .llseek = noop_llseek,
+};
+
+static const struct file_operations profile_assignments_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = profile_assignments_read,
+ .write = profile_assignments_write,
+ .llseek = noop_llseek,
+};
+
+DEFINE_SIMPLE_ATTRIBUTE(profile_enable_fops,
+ profile_enable_get,
+ profile_enable_set, "%llu\n");
+
+void adreno_profile_init(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ struct dentry *profile_dir;
+ int ret;
+
+ profile->enabled = false;
+
+ /* allocate shared_buffer, which includes pre_ib and post_ib */
+ profile->shared_size = ADRENO_PROFILE_SHARED_BUF_SIZE_DWORDS;
+ ret = kgsl_allocate_contiguous(&profile->shared_buffer,
+ profile->shared_size * sizeof(unsigned int));
+ if (ret) {
+ profile->shared_buffer.hostptr = NULL;
+ profile->shared_size = 0;
+ }
+
+ INIT_LIST_HEAD(&profile->assignments_list);
+
+ /* Create perf counter debugfs */
+ profile_dir = debugfs_create_dir("profiling", device->d_debugfs);
+ if (IS_ERR(profile_dir))
+ return;
+
+ debugfs_create_file("enable", 0644, profile_dir, device,
+ &profile_enable_fops);
+ debugfs_create_file("blocks", 0444, profile_dir, device,
+ &profile_groups_fops);
+ debugfs_create_file("pipe", 0444, profile_dir, device,
+ &profile_pipe_fops);
+ debugfs_create_file("assignments", 0644, profile_dir, device,
+ &profile_assignments_fops);
+}
+
+void adreno_profile_close(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ struct adreno_profile_assigns_list *entry, *tmp;
+
+ profile->enabled = false;
+ vfree(profile->log_buffer);
+ profile->log_buffer = NULL;
+ profile->log_head = NULL;
+ profile->log_tail = NULL;
+ profile->shared_head = 0;
+ profile->shared_tail = 0;
+ kgsl_sharedmem_free(&profile->shared_buffer);
+ profile->shared_buffer.hostptr = NULL;
+ profile->shared_size = 0;
+
+ profile->assignment_count = 0;
+
+ list_for_each_entry_safe(entry, tmp, &profile->assignments_list, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+int adreno_profile_process_results(struct kgsl_device *device)
+{
+
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ unsigned int shared_buf_tail = profile->shared_tail;
+
+ if (!results_available(device, &shared_buf_tail)) {
+ check_close_profile(profile);
+ return 0;
+ }
+
+ /* allocate profile_log_buffer if needed */
+ if (profile->log_buffer == NULL) {
+ profile->log_buffer = vmalloc(ADRENO_PROFILE_LOG_BUF_SIZE);
+ if (profile->log_buffer == NULL)
+ return -ENOMEM;
+ profile->log_tail = profile->log_buffer;
+ profile->log_head = profile->log_buffer;
+ }
+
+ /*
+ * transfer retired results to log_buffer
+ * update shared_buffer tail ptr
+ */
+ transfer_results(device, shared_buf_tail);
+
+ /* check for any cleanup */
+ check_close_profile(profile);
+
+ return 1;
+}
+
+void adreno_profile_preib_processing(struct kgsl_device *device,
+ unsigned int context_id, unsigned int *cmd_flags,
+ unsigned int **rbptr, unsigned int *cmds_gpu)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ int count = profile->assignment_count;
+ unsigned int entry_head = profile->shared_head;
+ unsigned int *shared_ptr;
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned int rbcmds[3] = { cp_nop_packet(2),
+ KGSL_NOP_IB_IDENTIFIER, KGSL_NOP_IB_IDENTIFIER };
+
+ *cmd_flags &= ~KGSL_CMD_FLAGS_PROFILE;
+
+ if (!adreno_profile_assignments_ready(profile))
+ goto done;
+
+ /*
+ * check if space is available; include the post_ib in the space
+ * check so we don't have to undo the pre_ib insertion in the
+ * ringbuffer when only the post_ib fails to find enough space
+ */
+ if (SIZE_SHARED_ENTRY(count) >= shared_buf_available(profile))
+ goto done;
+
+ if (entry_head + SIZE_SHARED_ENTRY(count) > profile->shared_size) {
+ /* entry_head would wrap, start entry_head at 0 in buffer */
+ entry_head = 0;
+ profile->shared_size = profile->shared_head;
+ profile->shared_head = 0;
+ if (profile->shared_tail == profile->shared_size)
+ profile->shared_tail = 0;
+
+ /* recheck space available */
+ if (SIZE_SHARED_ENTRY(count) >= shared_buf_available(profile))
+ goto done;
+ }
+
+ /* zero out the counter area of shared_buffer entry_head */
+ shared_ptr = entry_head + ((unsigned int *)
+ profile->shared_buffer.hostptr);
+ memset(shared_ptr, 0, SIZE_SHARED_ENTRY(count) * sizeof(unsigned int));
+
+ /* reserve space for the pre ib shared buffer */
+ shared_buf_inc(profile->shared_size, &profile->shared_head,
+ SIZE_SHARED_ENTRY(count));
+
+ /* create the shared ibdesc */
+ _build_pre_ib_cmds(profile, rbcmds, entry_head,
+ rb->global_ts + 1, context_id);
+
+ /* set flag to sync with post ib commands */
+ *cmd_flags |= KGSL_CMD_FLAGS_PROFILE;
+
+done:
+ /* write the ibdesc to the ringbuffer */
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[0]);
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[1]);
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[2]);
+}
+
+void adreno_profile_postib_processing(struct kgsl_device *device,
+ unsigned int *cmd_flags, unsigned int **rbptr,
+ unsigned int *cmds_gpu)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ int count = profile->assignment_count;
+ unsigned int entry_head = profile->shared_head -
+ SIZE_SHARED_ENTRY(count);
+ unsigned int rbcmds[3] = { cp_nop_packet(2),
+ KGSL_NOP_IB_IDENTIFIER, KGSL_NOP_IB_IDENTIFIER };
+
+ if (!adreno_profile_assignments_ready(profile))
+ goto done;
+
+ if (!(*cmd_flags & KGSL_CMD_FLAGS_PROFILE))
+ goto done;
+
+ /* create the shared ibdesc */
+ _build_post_ib_cmds(profile, rbcmds, entry_head);
+
+done:
+ /* write the ibdesc to the ringbuffer */
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[0]);
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[1]);
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[2]);
+
+ /* reset the sync flag */
+ *cmd_flags &= ~KGSL_CMD_FLAGS_PROFILE;
+}
+
diff --git a/drivers/gpu/msm/adreno_profile.h b/drivers/gpu/msm/adreno_profile.h
new file mode 100644
index 0000000..d91b09b
--- /dev/null
+++ b/drivers/gpu/msm/adreno_profile.h
@@ -0,0 +1,92 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_PROFILE_H
+#define __ADRENO_PROFILE_H
+#include <linux/seq_file.h>
+
+/**
+ * struct adreno_profile_assigns_list: linked list for assigned perf counters
+ * @list: linkage for nodes in list
+ * @name: group name or GPU name
+ * @groupid: group id
+ * @countable: countable assigned to perfcounter
+ * @offset: perfcounter register address offset
+ */
+struct adreno_profile_assigns_list {
+ struct list_head list;
+ char name[25];
+ unsigned int groupid;
+ unsigned int countable;
+ unsigned int offset; /* LO offset, HI offset is +1 */
+};
+
+struct adreno_profile {
+ struct list_head assignments_list; /* list of all assignments */
+ unsigned int assignment_count; /* Number of assigned counters */
+ unsigned int *log_buffer;
+ unsigned int *log_head;
+ unsigned int *log_tail;
+ bool enabled;
+ /* counter, pre_ib, and post_ib held in one large circular buffer
+ * shared between kgsl and GPU
+ * counter entry 0
+ * pre_ib entry 0
+ * post_ib entry 0
+ * ...
+ * counter entry N
+ * pre_ib entry N
+ * post_ib entry N
+ */
+ struct kgsl_memdesc shared_buffer;
+ unsigned int shared_head;
+ unsigned int shared_tail;
+ unsigned int shared_size;
+};
+
+#define ADRENO_PROFILE_SHARED_BUF_SIZE_DWORDS (48 * 4096 / sizeof(uint))
+/* sized at 48 pages; should allow for at least 50 outstanding IBs, 1755 max */
+
+#define ADRENO_PROFILE_LOG_BUF_SIZE (1024 * 920)
+/* sized for 1024 entries of 45 fully assigned counters in log buffer, 230 pages */
+#define ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS (ADRENO_PROFILE_LOG_BUF_SIZE / \
+ sizeof(unsigned int))
+
+void adreno_profile_init(struct kgsl_device *device);
+void adreno_profile_close(struct kgsl_device *device);
+int adreno_profile_process_results(struct kgsl_device *device);
+void adreno_profile_preib_processing(struct kgsl_device *device,
+ unsigned int context_id, unsigned int *cmd_flags,
+ unsigned int **rbptr, unsigned int *cmds_gpu);
+void adreno_profile_postib_processing(struct kgsl_device *device,
+ unsigned int *cmd_flags, unsigned int **rbptr,
+ unsigned int *cmds_gpu);
+
+static inline bool adreno_profile_enabled(struct adreno_profile *profile)
+{
+ return profile->enabled;
+}
+
+static inline bool adreno_profile_has_assignments(
+ struct adreno_profile *profile)
+{
+ return list_empty(&profile->assignments_list) ? false : true;
+}
+
+static inline bool adreno_profile_assignments_ready(
+ struct adreno_profile *profile)
+{
+ return adreno_profile_enabled(profile) &&
+ adreno_profile_has_assignments(profile);
+}
+
+#endif
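
The profiling interface added above is exposed entirely through debugfs. A rough, hypothetical userspace sketch follows (not part of this patch): the debugfs mount point, the kgsl-3d0 device name and the "SP 7" group/countable pair are assumptions for illustration, and the exact line format accepted by the "assignments" file is whatever profile_assignments_write() in adreno_profile.c parses.

#include <stdio.h>

/*
 * Hypothetical sketch of driving the profiling debugfs files created by
 * adreno_profile_init(): assign a countable, enable profiling, then stream
 * per-IB results from "pipe". Paths and the "SP 7" assignment line are
 * assumptions, not taken from this patch.
 */
int main(void)
{
	const char *base = "/sys/kernel/debug/kgsl/kgsl-3d0/profiling";
	char path[128], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/assignments", base);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "SP 7\n");		/* assumed "<group> <countable>" format */
	fclose(f);

	snprintf(path, sizeof(path), "%s/enable", base);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "1\n");		/* profile_enable_set() parses a u64 */
	fclose(f);

	snprintf(path, sizeof(path), "%s/pipe", base);
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))	/* results retired by the GPU */
		fputs(line, stdout);
	fclose(f);
	return 0;
}
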
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index e03f708..dc1530a 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -67,11 +67,8 @@
unsigned long wait_time;
unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
unsigned long wait_time_part;
- unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
unsigned int rptr;
- memset(prev_reg_val, 0, sizeof(prev_reg_val));
-
/* if wptr ahead, fill the remaining with NOPs */
if (wptr_ahead) {
/* -1 for header */
@@ -91,10 +88,6 @@
rptr = adreno_get_rptr(rb);
} while (!rptr);
- rb->wptr++;
-
- adreno_ringbuffer_submit(rb);
-
rb->wptr = 0;
}
@@ -109,43 +102,13 @@
if (freecmds == 0 || freecmds > numcmds)
break;
- /* Dont wait for timeout, detect hang faster.
- */
- if (time_after(jiffies, wait_time_part)) {
- wait_time_part = jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_PART);
- if ((adreno_ft_detect(rb->device,
- prev_reg_val))){
- KGSL_DRV_ERR(rb->device,
- "Hang detected while waiting for freespace in"
- "ringbuffer rptr: 0x%x, wptr: 0x%x\n",
- rptr, rb->wptr);
- goto err;
- }
- }
-
if (time_after(jiffies, wait_time)) {
KGSL_DRV_ERR(rb->device,
"Timed out while waiting for freespace in ringbuffer "
"rptr: 0x%x, wptr: 0x%x\n", rptr, rb->wptr);
- goto err;
+ return -ETIMEDOUT;
}
- continue;
-
-err:
- if (!adreno_dump_and_exec_ft(rb->device)) {
- if (context && context->flags & CTXT_FLAGS_GPU_HANG) {
- KGSL_CTXT_WARN(rb->device,
- "Context %p caused a gpu hang. Will not accept commands for context %d\n",
- context, context->base.id);
- return -EDEADLK;
- }
- wait_time = jiffies + wait_timeout;
- } else {
- /* GPU is hung and fault tolerance failed */
- BUG();
- }
}
return 0;
}
@@ -184,7 +147,8 @@
if (!ret) {
ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
rb->wptr += numcmds;
- }
+ } else
+ ptr = ERR_PTR(ret);
return ptr;
}
@@ -351,7 +315,6 @@
int _ringbuffer_start_common(struct adreno_ringbuffer *rb)
{
int status;
- /*cp_rb_cntl_u cp_rb_cntl; */
union reg_cp_rb_cntl cp_rb_cntl;
unsigned int rb_cntl;
struct kgsl_device *device = rb->device;
@@ -572,28 +535,44 @@
static int
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
- struct adreno_context *context,
+ struct adreno_context *drawctxt,
unsigned int flags, unsigned int *cmds,
- int sizedwords)
+ int sizedwords, uint32_t timestamp)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
unsigned int *ringcmds;
unsigned int total_sizedwords = sizedwords;
unsigned int i;
unsigned int rcmd_gpu;
- unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
+ unsigned int context_id;
unsigned int gpuaddr = rb->device->memstore.gpuaddr;
- unsigned int timestamp;
+ bool profile_ready;
/*
- * if the context was not created with per context timestamp
- * support, we must use the global timestamp since issueibcmds
- * will be returning that one, or if an internal issue then
- * use global timestamp.
+ * If in-stream IB profiling is enabled and there are counters
+ * assigned, then space needs to be reserved for profiling. This
+ * space in the ringbuffer is always consumed (it might be filled
+ * with NOPs in the error case). profile_ready needs to be consistent
+ * through the _addcmds call since it is allocating additional
+ * ringbuffer command space.
*/
- if ((context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) &&
- !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
- context_id = context->base.id;
+ profile_ready = !adreno_is_a2xx(adreno_dev) &&
+ adreno_profile_assignments_ready(&adreno_dev->profile) &&
+ !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE);
+
+ /* The global timestamp always needs to be incremented */
+ rb->global_ts++;
+
+ /* If this is an internal IB, use the global timestamp for it */
+ if (!drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+ timestamp = rb->global_ts;
+ context_id = KGSL_MEMSTORE_GLOBAL;
+ } else {
+ context_id = drawctxt->base.id;
+ }
+
+ if (drawctxt)
+ drawctxt->internal_timestamp = rb->global_ts;
/* reserve space to temporarily turn off protected mode
* error checking if needed
@@ -604,13 +583,8 @@
/* internal ib command identifier for the ringbuffer */
total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
- /* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
- total_sizedwords += context ? 13 : 0;
-
- if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
- (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
- KGSL_CMD_FLAGS_GET_INT)))
- total_sizedwords += 2;
+ /* Add two dwords for the CP_INTERRUPT */
+ total_sizedwords += drawctxt ? 2 : 0;
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
@@ -618,13 +592,16 @@
if (adreno_is_a2xx(adreno_dev))
total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */
- total_sizedwords += 2; /* scratchpad ts for fault tolerance */
total_sizedwords += 3; /* sop timestamp */
total_sizedwords += 4; /* eop timestamp */
- if (KGSL_MEMSTORE_GLOBAL != context_id)
+ if (adreno_is_a20x(adreno_dev))
+ total_sizedwords += 2; /* CACHE_FLUSH */
+
+ if (drawctxt) {
total_sizedwords += 3; /* global timestamp without cache
* flush for non-zero context */
+ }
if (adreno_is_a20x(adreno_dev))
total_sizedwords += 2; /* CACHE_FLUSH */
@@ -632,8 +609,14 @@
if (flags & KGSL_CMD_FLAGS_EOF)
total_sizedwords += 2;
- ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
- if (!ringcmds)
+ if (profile_ready)
+ total_sizedwords += 6; /* space for pre_ib and post_ib */
+
+ ringcmds = adreno_ringbuffer_allocspace(rb, drawctxt, total_sizedwords);
+
+ if (IS_ERR(ringcmds))
+ return PTR_ERR(ringcmds);
+ if (ringcmds == NULL)
return -ENOSPC;
rcmd_gpu = rb->buffer_desc.gpuaddr
@@ -648,20 +631,10 @@
KGSL_CMD_INTERNAL_IDENTIFIER);
}
- /* always increment the global timestamp. once. */
- rb->global_ts++;
-
- if (KGSL_MEMSTORE_GLOBAL != context_id)
- timestamp = context->timestamp;
- else
- timestamp = rb->global_ts;
-
- /* scratchpad ts for fault tolerance */
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type0_packet(adreno_getreg(adreno_dev,
- ADRENO_REG_CP_TIMESTAMP), 1));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- rb->global_ts);
+ /* Add any IB required for profiling if it is enabled */
+ if (profile_ready)
+ adreno_profile_preib_processing(rb->device, drawctxt->base.id,
+ &flags, &ringcmds, &rcmd_gpu);
/* start-of-pipeline timestamp */
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
@@ -714,6 +687,12 @@
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00);
}
+ /* Add any post-IB commands required for profiling if it is enabled
+ and has assigned counters */
+ if (profile_ready)
+ adreno_profile_postib_processing(rb->device, &flags,
+ &ringcmds, &rcmd_gpu);
+
/*
* end-of-pipeline timestamp. If per context timestamps is not
* enabled, then context_id will be KGSL_MEMSTORE_GLOBAL so all
@@ -726,7 +705,7 @@
KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp);
- if (KGSL_MEMSTORE_GLOBAL != context_id) {
+ if (drawctxt) {
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
@@ -742,56 +721,13 @@
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, CACHE_FLUSH);
}
- if (context) {
- /* Conditional execution based on memory values */
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type3_packet(CP_COND_EXEC, 4));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(
- context_id, ts_cmp_enable)) >> 2);
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(
- context_id, ref_wait_ts)) >> 2);
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp);
- /* # of conditional command DWORDs */
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 8);
-
- /* Clear the ts_cmp_enable for the context */
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type3_packet(CP_MEM_WRITE, 2));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, gpuaddr +
- KGSL_MEMSTORE_OFFSET(
- context_id, ts_cmp_enable));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x0);
-
- /* Clear the ts_cmp_enable for the global timestamp */
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type3_packet(CP_MEM_WRITE, 2));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, gpuaddr +
- KGSL_MEMSTORE_OFFSET(
- KGSL_MEMSTORE_GLOBAL, ts_cmp_enable));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x0);
-
- /* Trigger the interrupt */
+ if (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
cp_type3_packet(CP_INTERRUPT, 1));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
CP_INT_CNTL__RB_INT_MASK);
}
- /*
- * If per context timestamps are enabled and any of the kgsl
- * internal commands want INT to be generated trigger the INT
- */
- if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
- (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
- KGSL_CMD_FLAGS_GET_INT))) {
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type3_packet(CP_INTERRUPT, 1));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- CP_INT_CNTL__RB_INT_MASK);
- }
-
if (adreno_is_a3xx(adreno_dev)) {
/* Dummy set-constant to trigger context rollover */
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
@@ -801,12 +737,6 @@
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0);
}
- if (flags & KGSL_CMD_FLAGS_EOF) {
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- KGSL_END_OF_FRAME_IDENTIFIER);
- }
-
adreno_ringbuffer_submit(rb);
return 0;
@@ -822,14 +752,10 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- if (device->state & KGSL_STATE_HUNG)
- return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
- KGSL_TIMESTAMP_RETIRED);
-
flags |= KGSL_CMD_FLAGS_INTERNAL_ISSUE;
return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds,
- sizedwords);
+ sizedwords, 0);
}
static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
@@ -1022,39 +948,92 @@
return ret;
}
-int
-adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context,
- struct kgsl_ibdesc *ibdesc,
- unsigned int numibs,
- uint32_t *timestamp,
- unsigned int flags)
+/**
+ * _ringbuffer_verify_ib() - parse an IB and verify that it is correct
+ * @dev_priv: Pointer to the process struct
+ * @ibdesc: Pointer to the IB descriptor
+ *
+ * This function only gets called if debugging is enabled - it walks the IB and
+ * does an additional level of parsing and verification beyond what the KGSL
+ * core does.
+ */
+static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv,
+ struct kgsl_ibdesc *ibdesc)
{
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int *link = 0;
+
+ /* Check that the size of the IBs is under the allowable limit */
+ if (ibdesc->sizedwords == 0 || ibdesc->sizedwords > 0xFFFFF) {
+ KGSL_DRV_ERR(device, "Invalid IB size 0x%X\n",
+ ibdesc->sizedwords);
+ return false;
+ }
+
+ if (unlikely(adreno_dev->ib_check_level >= 1) &&
+ !_parse_ibs(dev_priv, ibdesc->gpuaddr, ibdesc->sizedwords)) {
+ KGSL_DRV_ERR(device, "Could not verify the IBs\n");
+ return false;
+ }
+
+ return true;
+}
+
+int
+adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context,
+ struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ int i, ret;
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ return -EDEADLK;
+
+ /* Verify the IBs before they get queued */
+
+ for (i = 0; i < cmdbatch->ibcount; i++) {
+ if (!_ringbuffer_verify_ib(dev_priv, &cmdbatch->ibdesc[i]))
+ return -EINVAL;
+ }
+
+ /* Queue the command in the ringbuffer */
+ ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
+ timestamp);
+
+ if (ret)
+ KGSL_DRV_ERR(device,
+ "adreno_dispatcher_queue_cmd returned %d\n", ret);
+
+ return ret;
+}
+
+/* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */
+int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct kgsl_ibdesc *ibdesc;
+ unsigned int numibs;
+ unsigned int *link;
unsigned int *cmds;
unsigned int i;
- struct adreno_context *drawctxt = NULL;
+ struct kgsl_context *context;
+ struct adreno_context *drawctxt;
unsigned int start_index = 0;
int ret;
- if (device->state & KGSL_STATE_HUNG) {
- ret = -EBUSY;
- goto done;
- }
-
- if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
- context == NULL || ibdesc == 0 || numibs == 0) {
- ret = -EINVAL;
- goto done;
- }
+ context = cmdbatch->context;
drawctxt = ADRENO_CONTEXT(context);
- if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
- ret = -EDEADLK;
- goto done;
- }
+ ibdesc = cmdbatch->ibdesc;
+ numibs = cmdbatch->ibcount;
+
+ /* process any profiling results that are available into the log_buf */
+ adreno_profile_process_results(device);
/*When preamble is enabled, the preamble buffer with state restoration
commands are stored in the first node of the IB chain. We can skip that
@@ -1064,15 +1043,6 @@
adreno_dev->drawctxt_active == drawctxt)
start_index = 1;
- if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
- if (flags & KGSL_CMD_FLAGS_EOF)
- drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
- if (start_index)
- numibs = 1;
- else
- numibs = 0;
- }
-
cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
GFP_KERNEL);
if (!link) {
@@ -1091,18 +1061,6 @@
*cmds++ = ibdesc[0].sizedwords;
}
for (i = start_index; i < numibs; i++) {
- if (unlikely(adreno_dev->ib_check_level >= 1 &&
- !_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
- ibdesc[i].sizedwords))) {
- ret = -EINVAL;
- goto done;
- }
-
- if (ibdesc[i].sizedwords == 0) {
- ret = -EINVAL;
- goto done;
- }
-
*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
*cmds++ = ibdesc[i].gpuaddr;
*cmds++ = ibdesc[i].sizedwords;
@@ -1111,253 +1069,44 @@
*cmds++ = cp_nop_packet(1);
*cmds++ = KGSL_END_OF_IB_IDENTIFIER;
- kgsl_setstate(&device->mmu, context->id,
+ ret = kgsl_setstate(&device->mmu, context->id,
kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
device->id));
- adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
-
- if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
- if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0) {
- KGSL_DRV_ERR(device,
- "Invalid user generated ts <%d:0x%x>, "
- "less than last issued ts <%d:0x%x>\n",
- context->id, *timestamp, context->id,
- drawctxt->timestamp);
- return -ERANGE;
- }
- drawctxt->timestamp = *timestamp;
- } else
- drawctxt->timestamp++;
-
- ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
- drawctxt,
- (flags & KGSL_CMD_FLAGS_EOF),
- &link[0], (cmds - link));
if (ret)
goto done;
- if (drawctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
- *timestamp = drawctxt->timestamp;
- else
- *timestamp = adreno_dev->ringbuffer.global_ts;
+ ret = adreno_drawctxt_switch(adreno_dev, drawctxt, cmdbatch->flags);
+
+ /*
+ * In the unlikely event of an error in the drawctxt switch,
+ * treat it like a hang
+ */
+ if (ret)
+ goto done;
+
+ ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
+ drawctxt,
+ cmdbatch->flags,
+ &link[0], (cmds - link),
+ cmdbatch->timestamp);
#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+ if (ret)
+ goto done;
/*
* insert wait for idle after every IB1
* this is conservative but works reliably and is ok
* even for performance simulations
*/
- adreno_idle(device);
+ ret = adreno_idle(device);
#endif
- /*
- * If context hung and recovered then return error so that the
- * application may handle it
- */
- if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_FT) {
- drawctxt->flags &= ~CTXT_FLAGS_GPU_HANG_FT;
- ret = -EPROTO;
- } else
- ret = 0;
-
done:
- device->pwrctrl.irq_last = 0;
- kgsl_trace_issueibcmds(device, context ? context->id : 0, ibdesc,
- numibs, *timestamp, flags, ret,
- drawctxt ? drawctxt->type : 0);
+ kgsl_trace_issueibcmds(device, context->id, cmdbatch,
+ cmdbatch->timestamp, cmdbatch->flags, ret,
+ drawctxt->type);
kfree(link);
return ret;
}
-
-static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
- unsigned int rb_rptr)
-{
- unsigned int temp_rb_rptr = rb_rptr;
- unsigned int size = rb->buffer_desc.size;
- unsigned int val[2];
- int i = 0;
- bool check = false;
- bool cmd_start = false;
-
- /* Go till the start of the ib sequence and turn on preamble */
- while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
- if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
- /* decrement i */
- i = (i + 1) % 2;
- if (val[i] == cp_nop_packet(4)) {
- temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
- temp_rb_rptr, size);
- kgsl_sharedmem_writel(rb->device,
- &rb->buffer_desc,
- temp_rb_rptr, cp_nop_packet(1));
- }
- KGSL_FT_INFO(rb->device,
- "Turned preamble on at offset 0x%x\n",
- temp_rb_rptr / 4);
- break;
- }
- /* If you reach beginning of next command sequence then exit
- * First command encountered is the current one so don't break
- * on that. */
- if (KGSL_CMD_IDENTIFIER == val[i]) {
- if (cmd_start)
- break;
- cmd_start = true;
- }
-
- i = (i + 1) % 2;
- if (1 == i)
- check = true;
- temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
- size);
- }
-}
-
-void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
- struct adreno_ft_data *ft_data)
-{
- struct kgsl_device *device = rb->device;
- unsigned int rb_rptr = ft_data->start_of_replay_cmds;
- unsigned int good_rb_idx = 0, bad_rb_idx = 0, temp_rb_idx = 0;
- unsigned int last_good_cmd_end_idx = 0, last_bad_cmd_end_idx = 0;
- unsigned int cmd_start_idx = 0;
- unsigned int val1 = 0;
- int copy_rb_contents = 0;
- unsigned int temp_rb_rptr;
- struct kgsl_context *k_ctxt;
- struct adreno_context *a_ctxt;
- unsigned int size = rb->buffer_desc.size;
- unsigned int *temp_rb_buffer = ft_data->rb_buffer;
- int *rb_size = &ft_data->rb_size;
- unsigned int *bad_rb_buffer = ft_data->bad_rb_buffer;
- int *bad_rb_size = &ft_data->bad_rb_size;
- unsigned int *good_rb_buffer = ft_data->good_rb_buffer;
- int *good_rb_size = &ft_data->good_rb_size;
-
- /*
- * If the start index from where commands need to be copied is invalid
- * then no need to save off any commands
- */
- if (0xFFFFFFFF == ft_data->start_of_replay_cmds)
- return;
-
- k_ctxt = kgsl_context_get(device, ft_data->context_id);
-
- if (k_ctxt) {
- a_ctxt = ADRENO_CONTEXT(k_ctxt);
- if (a_ctxt->flags & CTXT_FLAGS_PREAMBLE)
- _turn_preamble_on_for_ib_seq(rb, rb_rptr);
- kgsl_context_put(k_ctxt);
- }
- k_ctxt = NULL;
-
- /* Walk the rb from the context switch. Omit any commands
- * for an invalid context. */
- while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
-
- if (KGSL_CMD_IDENTIFIER == val1) {
- /* Start is the NOP dword that comes before
- * KGSL_CMD_IDENTIFIER */
- cmd_start_idx = temp_rb_idx - 1;
- if ((copy_rb_contents) && (good_rb_idx))
- last_good_cmd_end_idx = good_rb_idx - 1;
- if ((!copy_rb_contents) && (bad_rb_idx))
- last_bad_cmd_end_idx = bad_rb_idx - 1;
- }
-
- /* check for context switch indicator */
- if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
- unsigned int temp_idx, val2;
- /* increment by 3 to get to the context_id */
- temp_rb_rptr = rb_rptr + (3 * sizeof(unsigned int)) %
- size;
- kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
- temp_rb_rptr);
-
- /* if context switches to a context that did not cause
- * hang then start saving the rb contents as those
- * commands can be executed */
- k_ctxt = kgsl_context_get(rb->device, val2);
-
- if (k_ctxt) {
- a_ctxt = ADRENO_CONTEXT(k_ctxt);
-
- /* If we are changing to a good context and were not
- * copying commands then copy over commands to the good
- * context */
- if (!copy_rb_contents && ((k_ctxt &&
- !(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
- !k_ctxt)) {
- for (temp_idx = cmd_start_idx;
- temp_idx < temp_rb_idx;
- temp_idx++)
- good_rb_buffer[good_rb_idx++] =
- temp_rb_buffer[temp_idx];
- ft_data->last_valid_ctx_id = val2;
- copy_rb_contents = 1;
- /* remove the good commands from bad buffer */
- bad_rb_idx = last_bad_cmd_end_idx;
- } else if (copy_rb_contents && k_ctxt &&
- (a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
-
- /* If we are changing back to a bad context
- * from good ctxt and were not copying commands
- * to bad ctxt then copy over commands to
- * the bad context */
- for (temp_idx = cmd_start_idx;
- temp_idx < temp_rb_idx;
- temp_idx++)
- bad_rb_buffer[bad_rb_idx++] =
- temp_rb_buffer[temp_idx];
- /* If we are changing to bad context then
- * remove the dwords we copied for this
- * sequence from the good buffer */
- good_rb_idx = last_good_cmd_end_idx;
- copy_rb_contents = 0;
- }
- }
- kgsl_context_put(k_ctxt);
- }
-
- if (copy_rb_contents)
- good_rb_buffer[good_rb_idx++] = val1;
- else
- bad_rb_buffer[bad_rb_idx++] = val1;
-
- /* Copy both good and bad commands to temp buffer */
- temp_rb_buffer[temp_rb_idx++] = val1;
-
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
- }
- *good_rb_size = good_rb_idx;
- *bad_rb_size = bad_rb_idx;
- *rb_size = temp_rb_idx;
-}
-
-void
-adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
- int num_rb_contents)
-{
- int i;
- unsigned int *ringcmds;
- unsigned int rcmd_gpu;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
-
- if (!num_rb_contents)
- return;
-
- if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_RPTR, 0);
- BUG_ON(num_rb_contents > rb->buffer_desc.size);
- }
- ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
- rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
- for (i = 0; i < num_rb_contents; i++)
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, rb_buff[i]);
- rb->wptr += num_rb_contents;
- adreno_ringbuffer_submit(rb);
-}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 9634e32..3aa0101 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -27,7 +27,6 @@
struct kgsl_device;
struct kgsl_device_private;
-struct adreno_ft_data;
#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
struct kgsl_rbmemptrs {
@@ -99,10 +98,11 @@
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_ibdesc *ibdesc,
- unsigned int numibs,
- uint32_t *timestamp,
- unsigned int flags);
+ struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp);
+
+int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch);
int adreno_ringbuffer_init(struct kgsl_device *device);
@@ -124,13 +124,6 @@
void kgsl_cp_intrcallback(struct kgsl_device *device);
-void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
- struct adreno_ft_data *ft_data);
-
-void
-adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
- int num_rb_contents);
-
unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
struct adreno_context *context,
unsigned int numcmds);
diff --git a/arch/arm/boot/dts/apq8074-v2-liquid.dts b/drivers/gpu/msm/adreno_trace.c
similarity index 70%
copy from arch/arm/boot/dts/apq8074-v2-liquid.dts
copy to drivers/gpu/msm/adreno_trace.c
index a0ecb50..607ba8c 100644
--- a/arch/arm/boot/dts/apq8074-v2-liquid.dts
+++ b/drivers/gpu/msm/adreno_trace.c
@@ -8,15 +8,11 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
*/
-/dts-v1/;
+#include "adreno.h"
-/include/ "apq8074-v2.dtsi"
-/include/ "msm8974-liquid.dtsi"
-
-/ {
- model = "Qualcomm APQ 8074v2 LIQUID";
- compatible = "qcom,apq8074-liquid", "qcom,apq8074", "qcom,liquid";
- qcom,msm-id = <184 9 0x20000>;
-};
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "adreno_trace.h"
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
new file mode 100644
index 0000000..59aca2e
--- /dev/null
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -0,0 +1,174 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_ADRENO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ADRENO_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE adreno_trace
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(adreno_cmdbatch_queued,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued),
+ TP_ARGS(cmdbatch, queued),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, queued)
+ ),
+ TP_fast_assign(
+ __entry->id = cmdbatch->context->id;
+ __entry->timestamp = cmdbatch->timestamp;
+ __entry->queued = queued;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u queued=%u",
+ __entry->id, __entry->timestamp, __entry->queued
+ )
+);
+
+DECLARE_EVENT_CLASS(adreno_cmdbatch_template,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+ TP_ARGS(cmdbatch, inflight),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, inflight)
+ ),
+ TP_fast_assign(
+ __entry->id = cmdbatch->context->id;
+ __entry->timestamp = cmdbatch->timestamp;
+ __entry->inflight = inflight;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u inflight=%u",
+ __entry->id, __entry->timestamp,
+ __entry->inflight
+ )
+);
+
+DEFINE_EVENT(adreno_cmdbatch_template, adreno_cmdbatch_retired,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+ TP_ARGS(cmdbatch, inflight)
+);
+
+DEFINE_EVENT(adreno_cmdbatch_template, adreno_cmdbatch_submitted,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+ TP_ARGS(cmdbatch, inflight)
+);
+
+DECLARE_EVENT_CLASS(adreno_drawctxt_template,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ ),
+ TP_fast_assign(
+ __entry->id = drawctxt->base.id;
+ ),
+ TP_printk("ctx=%u", __entry->id)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_sleep,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_wake,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, dispatch_queue_context,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_invalidate,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+TRACE_EVENT(adreno_drawctxt_wait_start,
+ TP_PROTO(unsigned int id, unsigned int ts),
+ TP_ARGS(id, ts),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, ts)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->ts = ts;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u",
+ __entry->id, __entry->ts
+ )
+);
+
+TRACE_EVENT(adreno_drawctxt_wait_done,
+ TP_PROTO(unsigned int id, unsigned int ts, int status),
+ TP_ARGS(id, ts, status),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, ts)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->ts = ts;
+ __entry->status = status;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u status=%d",
+ __entry->id, __entry->ts, __entry->status
+ )
+);
+
+TRACE_EVENT(adreno_gpu_fault,
+ TP_PROTO(unsigned int status, unsigned int rptr, unsigned int wptr,
+ unsigned int ib1base, unsigned int ib1size,
+ unsigned int ib2base, unsigned int ib2size),
+ TP_ARGS(status, rptr, wptr, ib1base, ib1size, ib2base, ib2size),
+ TP_STRUCT__entry(
+ __field(unsigned int, status)
+ __field(unsigned int, rptr)
+ __field(unsigned int, wptr)
+ __field(unsigned int, ib1base)
+ __field(unsigned int, ib1size)
+ __field(unsigned int, ib2base)
+ __field(unsigned int, ib2size)
+ ),
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->rptr = rptr;
+ __entry->wptr = wptr;
+ __entry->ib1base = ib1base;
+ __entry->ib1size = ib1size;
+ __entry->ib2base = ib2base;
+ __entry->ib2size = ib2size;
+ ),
+ TP_printk("status=%X RB=%X/%X IB1=%X/%X IB2=%X/%X",
+ __entry->status, __entry->wptr, __entry->rptr,
+ __entry->ib1base, __entry->ib1size, __entry->ib2base,
+ __entry->ib2size)
+);
+
+#endif /* _ADRENO_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
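
Since TRACE_EVENT() and DEFINE_EVENT() generate trace_<name>() helpers, the events declared above are emitted from driver code roughly as in the sketch below. Only the trace_*() names and their argument types come from this header; the wrapper function and its call sites are illustrative assumptions, not part of this patch.

#include "adreno.h"
#include "adreno_trace.h"

/* Illustrative only: how the generated tracepoint helpers are invoked. */
static void example_emit_adreno_events(struct kgsl_cmdbatch *cmdbatch,
				       struct adreno_context *drawctxt,
				       unsigned int queued, int inflight)
{
	/* one event per queued command batch */
	trace_adreno_cmdbatch_queued(cmdbatch, queued);

	/* submit/retire events share the adreno_cmdbatch_template class */
	trace_adreno_cmdbatch_submitted(cmdbatch, inflight);
	trace_adreno_cmdbatch_retired(cmdbatch, inflight);

	/* per-context events from the adreno_drawctxt_template class */
	trace_adreno_drawctxt_sleep(drawctxt);
	trace_adreno_drawctxt_wake(drawctxt);
}
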
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 0b99b30..2624c16 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -62,59 +62,10 @@
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);
/**
- * kgsl_hang_check() - Check for GPU hang
- * data: KGSL device structure
- *
- * This function is called every KGSL_TIMEOUT_PART time when
- * GPU is active to check for hang. If a hang is detected we
- * trigger fault tolerance.
- */
-void kgsl_hang_check(struct work_struct *work)
-{
- struct kgsl_device *device = container_of(work, struct kgsl_device,
- hang_check_ws);
- static unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
-
- mutex_lock(&device->mutex);
-
- if (device->state == KGSL_STATE_ACTIVE) {
-
- /* Check to see if the GPU is hung */
- if (adreno_ft_detect(device, prev_reg_val))
- adreno_dump_and_exec_ft(device);
-
- mod_timer(&device->hang_timer,
- (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
- }
-
- mutex_unlock(&device->mutex);
-}
-
-/**
- * hang_timer() - Hang timer function
- * data: KGSL device structure
- *
- * This function is called when hang timer expires, in this
- * function we check if GPU is in active state and queue the
- * work on device workqueue to check for the hang. We restart
- * the timer after KGSL_TIMEOUT_PART time.
- */
-void hang_timer(unsigned long data)
-{
- struct kgsl_device *device = (struct kgsl_device *) data;
-
- if (device->state == KGSL_STATE_ACTIVE) {
- /* Have work run in a non-interrupt context. */
- queue_work(device->work_queue, &device->hang_check_ws);
- }
-}
-
-/**
* kgsl_trace_issueibcmds() - Call trace_issueibcmds by proxy
* device: KGSL device
* id: ID of the context submitting the command
- * ibdesc: Pointer to the list of IB descriptors
- * numib: Number of IBs in the list
+ * cmdbatch: Pointer to kgsl_cmdbatch describing these commands
* timestamp: Timestamp assigned to the command batch
* flags: Flags sent by the user
* result: Result of the submission attempt
@@ -124,11 +75,11 @@
* GPU specific modules.
*/
void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
- struct kgsl_ibdesc *ibdesc, int numibs,
+ struct kgsl_cmdbatch *cmdbatch,
unsigned int timestamp, unsigned int flags,
int result, unsigned int type)
{
- trace_kgsl_issueibcmds(device, id, ibdesc, numibs,
+ trace_kgsl_issueibcmds(device, id, cmdbatch,
timestamp, flags, result, type);
}
EXPORT_SYMBOL(kgsl_trace_issueibcmds);
@@ -498,8 +449,9 @@
kref_init(&context->refcount);
context->device = dev_priv->device;
context->pagetable = dev_priv->process_priv->pagetable;
-
- context->pid = dev_priv->process_priv->pid;
+ context->dev_priv = dev_priv;
+ context->pid = task_tgid_nr(current);
+ context->tid = task_pid_nr(current);
ret = kgsl_sync_timeline_create(context);
if (ret)
@@ -529,8 +481,8 @@
EXPORT_SYMBOL(kgsl_context_init);
/**
- * kgsl_context_detach - Release the "master" context reference
- * @context - The context that will be detached
+ * kgsl_context_detach() - Release the "master" context reference
+ * @context: The context that will be detached
*
* This is called when a context becomes unusable, because userspace
* has requested for it to be destroyed. The context itself may
@@ -539,14 +491,12 @@
* detached by checking the KGSL_CONTEXT_DETACHED bit in
* context->priv.
*/
-void
-kgsl_context_detach(struct kgsl_context *context)
+int kgsl_context_detach(struct kgsl_context *context)
{
- struct kgsl_device *device;
- if (context == NULL)
- return;
+ int ret;
- device = context->device;
+ if (context == NULL)
+ return -EINVAL;
/*
* Mark the context as detached to keep others from using
@@ -554,19 +504,22 @@
* we don't try to detach twice.
*/
if (test_and_set_bit(KGSL_CONTEXT_DETACHED, &context->priv))
- return;
+ return -EINVAL;
- trace_kgsl_context_detach(device, context);
+ trace_kgsl_context_detach(context->device, context);
- device->ftbl->drawctxt_detach(context);
+ ret = context->device->ftbl->drawctxt_detach(context);
+
/*
* Cancel events after the device-specific context is
* detached, to avoid possibly freeing memory while
* it is still in use by the GPU.
*/
- kgsl_context_cancel_events(device, context);
+ kgsl_context_cancel_events(context->device, context);
kgsl_context_put(context);
+
+ return ret;
}
void
@@ -578,6 +531,8 @@
trace_kgsl_context_destroy(device, context);
+ BUG_ON(!kgsl_context_detached(context));
+
write_lock(&device->context_lock);
if (context->id != KGSL_CONTEXT_INVALID) {
idr_remove(&device->context_idr, context->id);
@@ -648,11 +603,12 @@
policy_saved = device->pwrscale.policy;
device->pwrscale.policy = NULL;
kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
- /*
- * Make sure no user process is waiting for a timestamp
- * before supending.
- */
- kgsl_active_count_wait(device);
+
+ /* Tell the device to drain the submission queue */
+ device->ftbl->drain(device);
+
+ /* Wait for the active count to hit zero */
+ kgsl_active_count_wait(device, 0);
/*
* An interrupt could have snuck in and requested NAP in
@@ -662,13 +618,10 @@
/* Don't let the timer wake us during suspended sleep. */
del_timer_sync(&device->idle_timer);
- del_timer_sync(&device->hang_timer);
switch (device->state) {
case KGSL_STATE_INIT:
break;
case KGSL_STATE_ACTIVE:
- /* Wait for the device to become idle */
- device->ftbl->idle(device);
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
/* make sure power is on to stop the device */
@@ -812,7 +765,7 @@
list_del(&private->list);
mutex_unlock(&kgsl_driver.process_mutex);
- if (private->kobj.ktype)
+ if (private->kobj.state_in_sysfs)
kgsl_process_uninit_sysfs(private);
if (private->debug_root)
debugfs_remove_recursive(private->debug_root);
@@ -926,21 +879,23 @@
pt_name = task_tgid_nr(current);
private->pagetable = kgsl_mmu_getpagetable(mmu, pt_name);
- if (private->pagetable == NULL) {
- mutex_unlock(&private->process_private_mutex);
- kgsl_put_process_private(cur_dev_priv->device,
- private);
- return NULL;
- }
+ if (private->pagetable == NULL)
+ goto error;
}
- kgsl_process_init_sysfs(private);
- kgsl_process_init_debugfs(private);
+ if (kgsl_process_init_sysfs(cur_dev_priv->device, private))
+ goto error;
+ if (kgsl_process_init_debugfs(private))
+ goto error;
done:
mutex_unlock(&private->process_private_mutex);
-
return private;
+
+error:
+ mutex_unlock(&private->process_private_mutex);
+ kgsl_put_process_private(cur_dev_priv->device, private);
+ return NULL;
}
int kgsl_close_device(struct kgsl_device *device)
@@ -948,7 +903,13 @@
int result = 0;
device->open_count--;
if (device->open_count == 0) {
+
+ /* Wait for the active count to go to 1 */
+ kgsl_active_count_wait(device, 1);
+
+ /* Fail if the wait times out */
BUG_ON(atomic_read(&device->active_cnt) > 1);
+
result = device->ftbl->stop(device);
kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
/*
@@ -986,8 +947,16 @@
if (context == NULL)
break;
- if (context->pid == private->pid)
+ if (context->dev_priv == dev_priv) {
+ /*
+ * Hold a reference to the context in case somebody
+ * tries to put it while we are detaching
+ */
+
+ _kgsl_context_get(context);
kgsl_context_detach(context);
+ kgsl_context_put(context);
+ }
next = next + 1;
}
@@ -1001,6 +970,7 @@
result = kgsl_close_device(device);
mutex_unlock(&device->mutex);
+
kfree(dev_priv);
kgsl_put_process_private(device, private);
@@ -1033,7 +1003,6 @@
* Make sure the gates are open, so they don't block until
* we start suspend or FT.
*/
- complete_all(&device->ft_gate);
complete_all(&device->hwaccess_gate);
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
kgsl_active_count_put(device);
@@ -1429,93 +1398,179 @@
return result;
}
+/**
+ * kgsl_cmdbatch_create() - Create a new cmdbatch structure
+ * @context: Pointer to a KGSL context struct
+ * @numibs: Number of indirect buffers to make room for in the cmdbatch
+ *
+ * Allocate a new cmdbatch structure and add enough room to store the list of
+ * indirect buffers
+ */
+struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_context *context,
+ int numibs)
+{
+ struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
+ if (cmdbatch == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ cmdbatch->ibdesc = kzalloc(sizeof(*cmdbatch->ibdesc) * numibs,
+ GFP_KERNEL);
+ if (cmdbatch->ibdesc == NULL) {
+ kfree(cmdbatch);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cmdbatch->ibcount = numibs;
+ cmdbatch->context = context;
+
+ /*
+ * Increase the reference count on the context so it doesn't disappear
+ * during the lifetime of this command batch
+ */
+ _kgsl_context_get(context);
+
+ return cmdbatch;
+}
+
+/**
+ * _kgsl_cmdbatch_verify() - Perform a quick sanity check on a command batch
+ * @dev_priv: Pointer to the device private struct for the process that
+ * submitted the command batch
+ * @cmdbatch: Pointer to the command batch being verified
+ *
+ * Do a quick sanity test on the list of indirect buffers in a command batch,
+ * verifying that the size and GPU address of each IB are valid.
+ */
+static bool _kgsl_cmdbatch_verify(struct kgsl_device_private *dev_priv,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ int i;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+
+ for (i = 0; i < cmdbatch->ibcount; i++) {
+ if (cmdbatch->ibdesc[i].sizedwords == 0) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "IB verification failed: Invalid size\n");
+ return false;
+ }
+
+ if (!kgsl_mmu_gpuaddr_in_range(private->pagetable,
+ cmdbatch->ibdesc[i].gpuaddr)) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "IB verification failed: invalid address 0x%X\n",
+ cmdbatch->ibdesc[i].gpuaddr);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * _kgsl_cmdbatch_create_legacy() - Create a cmdbatch from a legacy ioctl struct
+ * @context: Pointer to the KGSL context that issued the command batch
+ * @param: Pointer to the kgsl_ringbuffer_issueibcmds struct that the user sent
+ *
+ * Create a command batch from the legacy issueibcmds format.
+ */
+static struct kgsl_cmdbatch *_kgsl_cmdbatch_create_legacy(
+ struct kgsl_context *context,
+ struct kgsl_ringbuffer_issueibcmds *param)
+{
+ struct kgsl_cmdbatch *cmdbatch = kgsl_cmdbatch_create(context, 1);
+
+ if (IS_ERR(cmdbatch))
+ return cmdbatch;
+
+ cmdbatch->ibdesc[0].gpuaddr = param->ibdesc_addr;
+ cmdbatch->ibdesc[0].sizedwords = param->numibs;
+ cmdbatch->ibcount = 1;
+ cmdbatch->flags = param->flags;
+
+ return cmdbatch;
+}
+
+/**
+ * _kgsl_cmdbatch_create() - Create a cmdbatch from an ioctl struct
+ * @device: Pointer to the KGSL device for the GPU
+ * @context: Pointer to the KGSL context that issued the command batch
+ * @param: Pointer to the kgsl_ringbuffer_issueibcmds struct that the user sent
+ *
+ * Create a command batch from the standard issueibcmds format sent by the user.
+ */
+struct kgsl_cmdbatch *_kgsl_cmdbatch_create(struct kgsl_device *device,
+ struct kgsl_context *context,
+ struct kgsl_ringbuffer_issueibcmds *param)
+{
+ struct kgsl_cmdbatch *cmdbatch =
+ kgsl_cmdbatch_create(context, param->numibs);
+
+ if (IS_ERR(cmdbatch))
+ return cmdbatch;
+
+ if (copy_from_user(cmdbatch->ibdesc, (void *)param->ibdesc_addr,
+ sizeof(struct kgsl_ibdesc) * param->numibs)) {
+ KGSL_DRV_ERR(device,
+ "Unable to copy the IB userspace commands\n");
+ kgsl_cmdbatch_destroy(cmdbatch);
+ return ERR_PTR(-EFAULT);
+ }
+
+ cmdbatch->flags = param->flags & ~KGSL_CONTEXT_SUBMIT_IB_LIST;
+
+ return cmdbatch;
+}
+
static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
- int result = 0;
- int i = 0;
struct kgsl_ringbuffer_issueibcmds *param = data;
- struct kgsl_ibdesc *ibdesc;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
+ struct kgsl_cmdbatch *cmdbatch;
+ long result = -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
if (context == NULL) {
- result = -EINVAL;
+ KGSL_DRV_ERR(device,
+ "Could not find context %d\n", param->drawctxt_id);
goto done;
}
if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
- if (!param->numibs) {
- result = -EINVAL;
- goto done;
- }
-
/*
- * Put a reasonable upper limit on the number of IBs that can be
- * submitted
+ * Do a quick sanity check on the number of IBs in the
+ * submission
*/
- if (param->numibs > 10000) {
- result = -EINVAL;
+ if (param->numibs == 0 || param->numibs > 100000) {
+ KGSL_DRV_ERR(device,
+ "Invalid number of IBs %d\n", param->numibs);
goto done;
}
- ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
- GFP_KERNEL);
- if (!ibdesc) {
- KGSL_MEM_ERR(dev_priv->device,
- "kzalloc(%d) failed\n",
- sizeof(struct kgsl_ibdesc) * param->numibs);
- result = -ENOMEM;
- goto done;
- }
+ cmdbatch = _kgsl_cmdbatch_create(device, context, param);
+ } else
+ cmdbatch = _kgsl_cmdbatch_create_legacy(context, param);
- if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
- sizeof(struct kgsl_ibdesc) * param->numibs)) {
- result = -EFAULT;
- KGSL_DRV_ERR(dev_priv->device,
- "copy_from_user failed\n");
- goto free_ibdesc;
- }
- } else {
- KGSL_DRV_INFO(dev_priv->device,
- "Using single IB submission mode for ib submission\n");
- /* If user space driver is still using the old mode of
- * submitting single ib then we need to support that as well */
- ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
- if (!ibdesc) {
- KGSL_MEM_ERR(dev_priv->device,
- "kzalloc(%d) failed\n",
- sizeof(struct kgsl_ibdesc));
- result = -ENOMEM;
- goto done;
- }
- ibdesc[0].gpuaddr = param->ibdesc_addr;
- ibdesc[0].sizedwords = param->numibs;
- param->numibs = 1;
+ if (IS_ERR(cmdbatch)) {
+ result = PTR_ERR(cmdbatch);
+ goto done;
}
- for (i = 0; i < param->numibs; i++) {
- struct kgsl_pagetable *pt = dev_priv->process_priv->pagetable;
-
- if (!kgsl_mmu_gpuaddr_in_range(pt, ibdesc[i].gpuaddr)) {
- result = -ERANGE;
- KGSL_DRV_ERR(dev_priv->device,
- "invalid ib base GPU virtual addr %x\n",
- ibdesc[i].gpuaddr);
- goto free_ibdesc;
- }
+ /* Run basic sanity checking on the command */
+ if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch)) {
+ KGSL_DRV_ERR(device, "Unable to verify the IBs\n");
+ goto free_cmdbatch;
}
- result = dev_priv->device->ftbl->issueibcmds(dev_priv,
- context,
- ibdesc,
- param->numibs,
- &param->timestamp,
- param->flags);
+ result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
+ cmdbatch, &param->timestamp);
-free_ibdesc:
- kfree(ibdesc);
+free_cmdbatch:
+ if (result)
+ kgsl_cmdbatch_destroy(cmdbatch);
+
done:
kgsl_context_put(context);
return result;
@@ -1656,14 +1711,11 @@
{
struct kgsl_drawctxt_destroy *param = data;
struct kgsl_context *context;
- long result = -EINVAL;
+ long result;
context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
- if (context) {
- kgsl_context_detach(context);
- result = 0;
- }
+ result = kgsl_context_detach(context);
kgsl_context_put(context);
return result;
@@ -2770,8 +2822,7 @@
kgsl_ioctl_device_waittimestamp_ctxtid,
KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
- kgsl_ioctl_rb_issueibcmds,
- KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ kgsl_ioctl_rb_issueibcmds, 0),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
kgsl_ioctl_cmdstream_readtimestamp,
KGSL_IOCTL_LOCK),
@@ -3453,7 +3504,6 @@
setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
- setup_timer(&device->hang_timer, hang_timer, (unsigned long) device);
status = kgsl_create_device_workqueue(device);
if (status)
goto error_pwrctrl_close;
@@ -3509,11 +3559,10 @@
/* For a manual dump, make sure that the system is idle */
if (manual) {
- kgsl_active_count_wait(device);
+ kgsl_active_count_wait(device, 0);
if (device->state == KGSL_STATE_ACTIVE)
kgsl_idle(device);
-
}
if (device->pm_dump_enable) {
@@ -3527,13 +3576,12 @@
pwr->power_flags, pwr->active_pwrlevel);
KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
- pwr->interval_timeout);
+ pwr->interval_timeout);
}
/* Disable the idle timer so we don't get interrupted */
del_timer_sync(&device->idle_timer);
- del_timer_sync(&device->hang_timer);
/* Force on the clocks */
kgsl_pwrctrl_wake(device);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 8d390a9..de647d5 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -141,6 +141,7 @@
struct kgsl_pagetable;
struct kgsl_memdesc;
+struct kgsl_cmdbatch;
struct kgsl_memdesc_ops {
int (*vmflags)(struct kgsl_memdesc *);
@@ -205,7 +206,6 @@
#define MMU_CONFIG 1
#endif
-void kgsl_hang_check(struct work_struct *work);
void kgsl_mem_entry_destroy(struct kref *kref);
int kgsl_postmortem_dump(struct kgsl_device *device, int manual);
@@ -237,7 +237,7 @@
unsigned int value);
void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
- struct kgsl_ibdesc *ibdesc, int numibs,
+ struct kgsl_cmdbatch *cmdbatch,
unsigned int timestamp, unsigned int flags,
int result, unsigned int type);
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 2a77632..110264b 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -123,7 +123,6 @@
KGSL_DEBUGFS_LOG(ctxt_log);
KGSL_DEBUGFS_LOG(mem_log);
KGSL_DEBUGFS_LOG(pwr_log);
-KGSL_DEBUGFS_LOG(ft_log);
static int memfree_hist_print(struct seq_file *s, void *unused)
{
@@ -185,7 +184,6 @@
device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
- device->ft_log = KGSL_LOG_LEVEL_DEFAULT;
debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
&cmd_log_fops);
@@ -199,8 +197,6 @@
&pwr_log_fops);
debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
&memfree_hist_fops);
- debugfs_create_file("log_level_ft", 0644, device->d_debugfs, device,
- &ft_log_fops);
/* Create postmortem dump control files */
@@ -323,16 +319,53 @@
.release = single_release,
};
-void
+
+/**
+ * kgsl_process_init_debugfs() - Initialize debugfs for a process
+ * @private: Pointer to process private structure created for the process
+ *
+ * @returns: 0 on success, error code otherwise
+ *
+ * kgsl_process_init_debugfs() is called at the time of creating the
+ * process struct when a process opens kgsl device for the first time.
+ * The function creates the debugfs files for the process. If debugfs is
+ * disabled in the kernel, we ignore that error and return as successful.
+ */
+int
kgsl_process_init_debugfs(struct kgsl_process_private *private)
{
unsigned char name[16];
+ int ret = 0;
+ struct dentry *dentry;
snprintf(name, sizeof(name), "%d", private->pid);
private->debug_root = debugfs_create_dir(name, proc_d_debugfs);
- debugfs_create_file("mem", 0400, private->debug_root, private,
+
+ if (!private->debug_root)
+ return -EINVAL;
+
+ /*
+ * debugfs_create_dir() and debugfs_create_file() both
+ * return -ENODEV if debugfs is disabled in the kernel.
+ * We make a distinction between these two functions
+ * failing and debugfs being disabled in the kernel.
+ * In the first case, we abort process private struct
+ * creation, in the second we continue without any changes.
+ * So if debugfs is disabled in the kernel, we return
+ * success.
+ */
+ dentry = debugfs_create_file("mem", 0400, private->debug_root, private,
&process_mem_fops);
+
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+
+ if (ret == -ENODEV)
+ ret = 0;
+ }
+
+ return ret;
}
void kgsl_core_debugfs_init(void)
diff --git a/drivers/gpu/msm/kgsl_debugfs.h b/drivers/gpu/msm/kgsl_debugfs.h
index ae5601f..b2f137c 100644
--- a/drivers/gpu/msm/kgsl_debugfs.h
+++ b/drivers/gpu/msm/kgsl_debugfs.h
@@ -21,7 +21,7 @@
void kgsl_core_debugfs_init(void);
void kgsl_core_debugfs_close(void);
-void kgsl_device_debugfs_init(struct kgsl_device *device);
+int kgsl_device_debugfs_init(struct kgsl_device *device);
extern struct dentry *kgsl_debugfs_dir;
static inline struct dentry *kgsl_get_debugfs_dir(void)
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index d9aea30..f5b27d0 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -13,6 +13,7 @@
#ifndef __KGSL_DEVICE_H
#define __KGSL_DEVICE_H
+#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pm_qos.h>
#include <linux/sched.h>
@@ -76,6 +77,7 @@
struct kgsl_context;
struct kgsl_power_stats;
struct kgsl_event;
+struct kgsl_cmdbatch;
struct kgsl_functable {
/* Mandatory functions - these functions must be implemented
@@ -87,7 +89,7 @@
void (*regwrite) (struct kgsl_device *device,
unsigned int offsetwords, unsigned int value);
int (*idle) (struct kgsl_device *device);
- unsigned int (*isidle) (struct kgsl_device *device);
+ bool (*isidle) (struct kgsl_device *device);
int (*suspend_context) (struct kgsl_device *device);
int (*init) (struct kgsl_device *device);
int (*start) (struct kgsl_device *device);
@@ -101,9 +103,8 @@
unsigned int (*readtimestamp) (struct kgsl_device *device,
struct kgsl_context *context, enum kgsl_timestamp_type type);
int (*issueibcmds) (struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_ibdesc *ibdesc,
- unsigned int sizedwords, uint32_t *timestamp,
- unsigned int flags);
+ struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamps);
int (*setup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*cleanup_pt)(struct kgsl_device *device,
@@ -115,14 +116,15 @@
void * (*snapshot)(struct kgsl_device *device, void *snapshot,
int *remain, int hang);
irqreturn_t (*irq_handler)(struct kgsl_device *device);
+ int (*drain)(struct kgsl_device *device);
/* Optional functions - these functions are not mandatory. The
driver will check that the function pointer is not NULL before
calling the hook */
- void (*setstate) (struct kgsl_device *device, unsigned int context_id,
+ int (*setstate) (struct kgsl_device *device, unsigned int context_id,
uint32_t flags);
struct kgsl_context *(*drawctxt_create) (struct kgsl_device_private *,
uint32_t *flags);
- void (*drawctxt_detach) (struct kgsl_context *context);
+ int (*drawctxt_detach) (struct kgsl_context *context);
void (*drawctxt_destroy) (struct kgsl_context *context);
long (*ioctl) (struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
@@ -155,6 +157,26 @@
unsigned int created;
};
+/**
+ * struct kgsl_cmdbatch - KGSL command descriptor
+ * @context: KGSL context that created the command
+ * @timestamp: Timestamp assigned to the command (currently unused)
+ * @flags: KGSL_CONTEXT_* flags passed in with the command
+ * @ibcount: Number of IBs in the command list
+ * @ibdesc: Pointer to the list of IBs
+ * @expires: Point in time when the cmdbatch is considered to be hung
+ * @invalid: non-zero if the dispatcher determines the command and the owning
+ * context should be invalidated
+ */
+struct kgsl_cmdbatch {
+ struct kgsl_context *context;
+ uint32_t timestamp;
+ uint32_t flags;
+ uint32_t ibcount;
+ struct kgsl_ibdesc *ibdesc;
+ unsigned long expires;
+ int invalid;
+};
struct kgsl_device {
struct device *dev;
@@ -190,9 +212,7 @@
struct completion hwaccess_gate;
const struct kgsl_functable *ftbl;
struct work_struct idle_check_ws;
- struct work_struct hang_check_ws;
struct timer_list idle_timer;
- struct timer_list hang_timer;
struct kgsl_pwrctrl pwrctrl;
int open_count;
@@ -201,12 +221,11 @@
uint32_t requested_state;
atomic_t active_cnt;
- struct completion suspend_gate;
wait_queue_head_t wait_queue;
+ wait_queue_head_t active_cnt_wq;
struct workqueue_struct *work_queue;
struct device *parentdev;
- struct completion ft_gate;
struct dentry *d_debugfs;
struct idr context_idr;
rwlock_t context_lock;
@@ -233,7 +252,6 @@
int drv_log;
int mem_log;
int pwr_log;
- int ft_log;
int pm_dump_enable;
struct kgsl_pwrscale pwrscale;
struct kobject pwrscale_kobj;
@@ -254,18 +272,15 @@
#define KGSL_DEVICE_COMMON_INIT(_dev) \
.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
- .suspend_gate = COMPLETION_INITIALIZER((_dev).suspend_gate),\
- .ft_gate = COMPLETION_INITIALIZER((_dev).ft_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
- .hang_check_ws = __WORK_INITIALIZER((_dev).hang_check_ws,\
- kgsl_hang_check),\
.ts_expired_ws = __WORK_INITIALIZER((_dev).ts_expired_ws,\
kgsl_process_events),\
.context_idr = IDR_INIT((_dev).context_idr),\
.events = LIST_HEAD_INIT((_dev).events),\
.events_pending_list = LIST_HEAD_INIT((_dev).events_pending_list), \
.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
+ .active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
.mutex = __MUTEX_INITIALIZER((_dev).mutex),\
.state = KGSL_STATE_INIT,\
.ver_major = DRIVER_VERSION_MAJOR,\
@@ -293,6 +308,7 @@
* @events: list head of pending events for this context
* @events_list: list node for the list of all contexts that have pending events
* @pid: process that owns this context.
+ * @tid: task that created this context.
* @pagefault: flag set if this context caused a pagefault.
* @pagefault_ts: global timestamp of the pagefault, if KGSL_CONTEXT_PAGEFAULT
* is set.
@@ -301,6 +317,8 @@
struct kref refcount;
uint32_t id;
pid_t pid;
+ pid_t tid;
+ struct kgsl_device_private *dev_priv;
unsigned long priv;
struct kgsl_device *device;
struct kgsl_pagetable *pagetable;
@@ -437,8 +455,6 @@
return 0;
}
-
-
int kgsl_check_timestamp(struct kgsl_device *device,
struct kgsl_context *context, unsigned int timestamp);
@@ -594,4 +610,21 @@
{
kgsl_signal_event(device, context, timestamp, KGSL_EVENT_CANCELLED);
}
+
+/**
+ * kgsl_cmdbatch_destroy() - Destroy a command batch structure
+ * @cmdbatch: Pointer to the command batch to destroy
+ *
+ * Destroy and free a command batch
+ */
+static inline void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
+{
+ if (cmdbatch) {
+ kgsl_context_put(cmdbatch->context);
+ kfree(cmdbatch->ibdesc);
+ }
+
+ kfree(cmdbatch);
+}
+
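The construction side of a cmdbatch is not part of this hunk. Purely for illustration, a hypothetical helper that builds a batch which kgsl_cmdbatch_destroy() above can later release; the name and error handling are assumptions, not the driver's actual code.

static struct kgsl_cmdbatch *example_cmdbatch_create(struct kgsl_context *context,
		uint32_t flags, uint32_t ibcount)
{
	struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);

	if (cmdbatch == NULL)
		return NULL;

	cmdbatch->ibdesc = kcalloc(ibcount, sizeof(*cmdbatch->ibdesc),
			GFP_KERNEL);
	if (cmdbatch->ibdesc == NULL) {
		kfree(cmdbatch);
		return NULL;
	}

	/*
	 * Hold a reference on the context for the lifetime of the batch;
	 * kgsl_cmdbatch_destroy() drops it with kgsl_context_put().
	 */
	kref_get(&context->refcount);
	cmdbatch->context = context;
	cmdbatch->flags = flags;
	cmdbatch->ibcount = ibcount;

	return cmdbatch;
}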
#endif /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm/kgsl_drm.c b/drivers/gpu/msm/kgsl_drm.c
index 1fc7467..c221c4a 100644
--- a/drivers/gpu/msm/kgsl_drm.c
+++ b/drivers/gpu/msm/kgsl_drm.c
@@ -385,6 +385,7 @@
kgsl_gem_free_memory(obj);
drm_gem_object_release(obj);
kfree(obj->driver_private);
+ kfree(obj);
}
int
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 68052b1..2634e4f 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -482,15 +482,17 @@
return NULL;
}
-static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
+static int kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
uint32_t flags)
{
struct kgsl_gpummu_pt *gpummu_pt;
if (!kgsl_mmu_enabled())
- return;
+ return 0;
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
- kgsl_idle(mmu->device);
+ int ret = kgsl_idle(mmu->device);
+ if (ret)
+ return ret;
gpummu_pt = mmu->hwpagetable->priv;
kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
gpummu_pt->base.gpuaddr);
@@ -500,12 +502,16 @@
/* Invalidate all and tc */
kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
}
+
+ return 0;
}
-static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
+static int kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id)
{
+ int ret = 0;
+
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* page table not current, then setup mmu to use new
* specified page table
@@ -518,10 +524,13 @@
kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);
/* call device specific set page table */
- kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
+ ret = kgsl_setstate(mmu, context_id,
+ KGSL_MMUFLAGS_TLBFLUSH |
KGSL_MMUFLAGS_PTUPDATE);
}
}
+
+ return ret;
}
static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
@@ -563,6 +572,7 @@
struct kgsl_device *device = mmu->device;
struct kgsl_gpummu_pt *gpummu_pt;
+ int ret;
if (mmu->flags & KGSL_FLAGS_STARTED)
return 0;
@@ -574,9 +584,6 @@
/* setup MMU and sub-client behavior */
kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
- /* idle device */
- kgsl_idle(device);
-
/* enable axi interrupts */
kgsl_regwrite(device, MH_INTERRUPT_MASK,
GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
@@ -607,10 +614,12 @@
kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
(KGSL_PAGETABLE_BASE |
(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
- kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
- mmu->flags |= KGSL_FLAGS_STARTED;
- return 0;
+ ret = kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
+ if (!ret)
+ mmu->flags |= KGSL_FLAGS_STARTED;
+
+ return ret;
}
static int
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index ecda5a7..103736d 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1205,10 +1205,12 @@
return 0;
}
-static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
+static int kgsl_iommu_setstate(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id)
{
+ int ret = 0;
+
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* page table not current, then setup mmu to use new
* specified page table
@@ -1219,10 +1221,12 @@
flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
mmu->device->id) |
KGSL_MMUFLAGS_TLBFLUSH;
- kgsl_setstate(mmu, context_id,
+ ret = kgsl_setstate(mmu, context_id,
KGSL_MMUFLAGS_PTUPDATE | flags);
}
}
+
+ return ret;
}
/*
@@ -1892,31 +1896,40 @@
* cpu
 * Return - 0 on success else error code
*/
-static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
+static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
uint32_t flags)
{
struct kgsl_iommu *iommu = mmu->priv;
int temp;
int i;
+ int ret = 0;
phys_addr_t pt_base = kgsl_iommu_get_pt_base_addr(mmu,
mmu->hwpagetable);
phys_addr_t pt_val;
- if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
+ ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+
+ if (ret) {
KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
- return;
+ return ret;
}
/* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
- if (msm_soc_version_supports_iommu_v0())
- kgsl_idle(mmu->device);
+ if (msm_soc_version_supports_iommu_v0()) {
+ ret = kgsl_idle(mmu->device);
+ if (ret)
+ return ret;
+ }
/* Acquire GPU-CPU sync Lock here */
_iommu_lock();
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
- if (!msm_soc_version_supports_iommu_v0())
- kgsl_idle(mmu->device);
+ if (!msm_soc_version_supports_iommu_v0()) {
+ ret = kgsl_idle(mmu->device);
+ if (ret)
+ goto unlock;
+ }
for (i = 0; i < iommu->unit_count; i++) {
/* get the lsb value which should not change when
* changing ttbr0 */
@@ -1977,12 +1990,13 @@
}
}
}
-
+unlock:
/* Release GPU-CPU sync Lock here */
_iommu_unlock();
/* Disable smmu clock */
kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ return ret;
}
/*
@@ -2039,6 +2053,7 @@
.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
.mmu_enable_clk = kgsl_iommu_enable_clk,
+ .mmu_disable_clk = kgsl_iommu_disable_clk,
.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
.mmu_get_default_ttbr0 = kgsl_iommu_get_default_ttbr0,
.mmu_get_reg_gpuaddr = kgsl_iommu_get_reg_gpuaddr,
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
index a7832e4..3a32953 100644
--- a/drivers/gpu/msm/kgsl_log.h
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -103,15 +103,6 @@
#define KGSL_PWR_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
-#define KGSL_FT_INFO(_dev, fmt, args...) \
-KGSL_LOG_INFO(_dev->dev, _dev->ft_log, fmt, ##args)
-#define KGSL_FT_WARN(_dev, fmt, args...) \
-KGSL_LOG_WARN(_dev->dev, _dev->ft_log, fmt, ##args)
-#define KGSL_FT_ERR(_dev, fmt, args...) \
-KGSL_LOG_ERR(_dev->dev, _dev->ft_log, fmt, ##args)
-#define KGSL_FT_CRIT(_dev, fmt, args...) \
-KGSL_LOG_CRIT(_dev->dev, _dev->ft_log, fmt, ##args)
-
/* Core error messages - these are for core KGSL functions that have
no device associated with them (such as memory) */
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 952019f..6635a7c 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -566,7 +566,7 @@
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
-void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
uint32_t flags)
{
struct kgsl_device *device = mmu->device;
@@ -574,14 +574,16 @@
if (!(flags & (KGSL_MMUFLAGS_TLBFLUSH | KGSL_MMUFLAGS_PTUPDATE))
&& !adreno_is_a2xx(adreno_dev))
- return;
+ return 0;
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
- return;
+ return 0;
else if (device->ftbl->setstate)
- device->ftbl->setstate(device, context_id, flags);
+ return device->ftbl->setstate(device, context_id, flags);
else if (mmu->mmu_ops->mmu_device_setstate)
- mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+ return mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+
+ return 0;
}
EXPORT_SYMBOL(kgsl_setstate);
@@ -590,7 +592,6 @@
struct kgsl_mh *mh = &device->mh;
/* force mmu off to for now*/
kgsl_regwrite(device, MH_MMU_CONFIG, 0);
- kgsl_idle(device);
/* define physical memory range accessible by the core */
kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index faba81e..a30ee3f 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -133,10 +133,10 @@
int (*mmu_close) (struct kgsl_mmu *mmu);
int (*mmu_start) (struct kgsl_mmu *mmu);
void (*mmu_stop) (struct kgsl_mmu *mmu);
- void (*mmu_setstate) (struct kgsl_mmu *mmu,
+ int (*mmu_setstate) (struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id);
- void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
+ int (*mmu_device_setstate) (struct kgsl_mmu *mmu,
uint32_t flags);
void (*mmu_pagefault) (struct kgsl_mmu *mmu);
phys_addr_t (*mmu_get_current_ptbase)
@@ -147,6 +147,8 @@
(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
+ void (*mmu_disable_clk)
+ (struct kgsl_mmu *mmu);
phys_addr_t (*mmu_get_default_ttbr0)(struct kgsl_mmu *mmu,
unsigned int unit_id,
enum kgsl_iommu_context_id ctx_id);
@@ -231,7 +233,7 @@
int kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
-void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
uint32_t flags);
int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu,
phys_addr_t pt_base);
@@ -260,19 +262,23 @@
return 0;
}
-static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
+static inline int kgsl_mmu_setstate(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
- mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
+ return mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
+
+ return 0;
}
-static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
+static inline int kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
uint32_t flags)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_device_setstate)
- mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+ return mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+
+ return 0;
}
static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu)
@@ -320,6 +326,12 @@
return 0;
}
+static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
+ mmu->mmu_ops->mmu_disable_clk(mmu);
+}
+
static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
unsigned int ts, bool ts_valid)
{
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 5479ae9..07131f7 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1215,9 +1215,6 @@
} else {
device->pwrctrl.irq_last = 0;
}
- } else if (device->state & (KGSL_STATE_HUNG |
- KGSL_STATE_DUMP_AND_FT)) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
}
mutex_unlock(&device->mutex);
@@ -1273,7 +1270,6 @@
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
return -EBUSY;
}
- del_timer_sync(&device->hang_timer);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
@@ -1343,7 +1339,6 @@
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
del_timer_sync(&device->idle_timer);
- del_timer_sync(&device->hang_timer);
/* make sure power is on to stop the device*/
kgsl_pwrctrl_enable(device);
device->ftbl->suspend_context(device);
@@ -1435,8 +1430,6 @@
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
- mod_timer(&device->hang_timer,
- (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
device->pwrctrl.pm_qos_latency);
case KGSL_STATE_ACTIVE:
@@ -1504,10 +1497,6 @@
return "SLEEP";
case KGSL_STATE_SUSPEND:
return "SUSPEND";
- case KGSL_STATE_HUNG:
- return "HUNG";
- case KGSL_STATE_DUMP_AND_FT:
- return "DNR";
case KGSL_STATE_SLUMBER:
return "SLUMBER";
default:
@@ -1539,7 +1528,6 @@
(device->state != KGSL_STATE_ACTIVE)) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->hwaccess_gate);
- wait_for_completion(&device->ft_gate);
mutex_lock(&device->mutex);
/* Stop the idle timer */
@@ -1595,8 +1583,6 @@
kgsl_pwrscale_idle(device);
if (atomic_dec_and_test(&device->active_cnt)) {
- INIT_COMPLETION(device->suspend_gate);
-
if (device->state == KGSL_STATE_ACTIVE &&
device->requested_state == KGSL_STATE_NONE) {
kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
@@ -1605,29 +1591,41 @@
mod_timer(&device->idle_timer,
jiffies + device->pwrctrl.interval_timeout);
-
- complete(&device->suspend_gate);
}
trace_kgsl_active_count(device,
(unsigned long) __builtin_return_address(0));
+
+ wake_up(&device->active_cnt_wq);
}
EXPORT_SYMBOL(kgsl_active_count_put);
+static int _check_active_count(struct kgsl_device *device, int count)
+{
+ /* Return 0 if the active count is greater than the desired value */
+ return atomic_read(&device->active_cnt) > count ? 0 : 1;
+}
+
/**
* kgsl_active_count_wait() - Wait for activity to finish.
* @device: Pointer to a KGSL device
+ * @count: Active count value to wait for
*
- * Block until all active_cnt users put() their reference.
+ * Block until the active_cnt value hits the desired value
*/
-void kgsl_active_count_wait(struct kgsl_device *device)
+int kgsl_active_count_wait(struct kgsl_device *device, int count)
{
+ int ret = 0;
+
BUG_ON(!mutex_is_locked(&device->mutex));
- if (atomic_read(&device->active_cnt) != 0) {
+ if (atomic_read(&device->active_cnt) > count) {
mutex_unlock(&device->mutex);
- wait_for_completion(&device->suspend_gate);
+ ret = wait_event_timeout(device->active_cnt_wq,
+ _check_active_count(device, count), HZ);
mutex_lock(&device->mutex);
}
+
+ return ret == 0 ? -ETIMEDOUT : 0;
}
EXPORT_SYMBOL(kgsl_active_count_wait);
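A sketch of a caller of the new interface, assuming device->mutex is already held as the BUG_ON above requires; the wrapper name is made up.

static int example_drain_active_users(struct kgsl_device *device)
{
	/* Wait for every kgsl_active_count_get() to be balanced by a put */
	int ret = kgsl_active_count_wait(device, 0);

	if (ret == -ETIMEDOUT)
		KGSL_PWR_ERR(device, "Timed out waiting for the GPU to idle\n");

	return ret;
}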
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index b7d9226..71a0fdd 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -123,6 +123,6 @@
int kgsl_active_count_get(struct kgsl_device *device);
int kgsl_active_count_get_light(struct kgsl_device *device);
void kgsl_active_count_put(struct kgsl_device *device);
-void kgsl_active_count_wait(struct kgsl_device *device);
+int kgsl_active_count_wait(struct kgsl_device *device, int count);
#endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index e5e23f0..47554c4 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -48,9 +48,6 @@
#ifdef CONFIG_MSM_SLEEP_STATS_DEVICE
&kgsl_pwrscale_policy_idlestats,
#endif
-#ifdef CONFIG_MSM_DCVS
- &kgsl_pwrscale_policy_msm,
-#endif
NULL
};
diff --git a/drivers/gpu/msm/kgsl_pwrscale_msm.c b/drivers/gpu/msm/kgsl_pwrscale_msm.c
deleted file mode 100644
index 073e474..0000000
--- a/drivers/gpu/msm/kgsl_pwrscale_msm.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/slab.h>
-#include <mach/msm_dcvs.h>
-#include "kgsl.h"
-#include "kgsl_pwrscale.h"
-#include "kgsl_device.h"
-#include "a2xx_reg.h"
-#include "kgsl_trace.h"
-
-struct msm_priv {
- struct kgsl_device *device;
- int enabled;
- unsigned int cur_freq;
- unsigned int req_level;
- int floor_level;
- struct msm_dcvs_core_info *core_info;
- int gpu_busy;
- int dcvs_core_id;
-};
-
-/* reference to be used in idle and freq callbacks */
-static struct msm_priv *the_msm_priv;
-
-static int msm_idle_enable(int type_core_num,
- enum msm_core_control_event event)
-{
- struct msm_priv *priv = the_msm_priv;
-
- switch (event) {
- case MSM_DCVS_ENABLE_IDLE_PULSE:
- priv->enabled = true;
- break;
- case MSM_DCVS_DISABLE_IDLE_PULSE:
- priv->enabled = false;
- break;
- case MSM_DCVS_ENABLE_HIGH_LATENCY_MODES:
- case MSM_DCVS_DISABLE_HIGH_LATENCY_MODES:
- break;
- }
- return 0;
-}
-
-/* Set the requested frequency if it is within 5MHz (delta) of a
- * supported frequency.
- */
-static int msm_set_freq(int core_num, unsigned int freq)
-{
- int i, delta = 5000000;
- struct msm_priv *priv = the_msm_priv;
- struct kgsl_device *device = priv->device;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-
- /* msm_dcvs manager uses frequencies in kHz */
- freq *= 1000;
- for (i = 0; i < pwr->num_pwrlevels; i++)
- if (abs(pwr->pwrlevels[i].gpu_freq - freq) < delta)
- break;
- if (i == pwr->num_pwrlevels)
- return 0;
-
- mutex_lock(&device->mutex);
- priv->req_level = i;
- if (priv->req_level <= priv->floor_level) {
- kgsl_pwrctrl_pwrlevel_change(device, priv->req_level);
- priv->cur_freq = pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq;
- }
- mutex_unlock(&device->mutex);
-
- /* return current frequency in kHz */
- return priv->cur_freq / 1000;
-}
-
-static int msm_set_min_freq(int core_num, unsigned int freq)
-{
- int i, delta = 5000000;
- struct msm_priv *priv = the_msm_priv;
- struct kgsl_device *device = priv->device;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-
- /* msm_dcvs manager uses frequencies in kHz */
- freq *= 1000;
- for (i = 0; i < pwr->num_pwrlevels; i++)
- if (abs(pwr->pwrlevels[i].gpu_freq - freq) < delta)
- break;
- if (i == pwr->num_pwrlevels)
- return 0;
-
- mutex_lock(&device->mutex);
- priv->floor_level = i;
- if (priv->floor_level <= priv->req_level)
- kgsl_pwrctrl_pwrlevel_change(device, priv->floor_level);
- else if (priv->floor_level > priv->req_level)
- kgsl_pwrctrl_pwrlevel_change(device, priv->req_level);
-
- priv->cur_freq = pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq;
- mutex_unlock(&device->mutex);
-
- /* return current frequency in kHz */
- return priv->cur_freq / 1000;
-}
-
-static unsigned int msm_get_freq(int core_num)
-{
- struct msm_priv *priv = the_msm_priv;
-
- /* return current frequency in kHz */
- return priv->cur_freq / 1000;
-}
-
-static void msm_busy(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- struct msm_priv *priv = pwrscale->priv;
- if (priv->enabled && !priv->gpu_busy) {
- msm_dcvs_idle(priv->dcvs_core_id, MSM_DCVS_IDLE_EXIT, 0);
- trace_kgsl_mpdcvs(device, 1);
- priv->gpu_busy = 1;
- }
- return;
-}
-
-static void msm_idle(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- struct msm_priv *priv = pwrscale->priv;
-
- if (priv->enabled && priv->gpu_busy)
- if (device->ftbl->isidle(device)) {
- msm_dcvs_idle(priv->dcvs_core_id,
- MSM_DCVS_IDLE_ENTER, 0);
- trace_kgsl_mpdcvs(device, 0);
- priv->gpu_busy = 0;
- }
- return;
-}
-
-static void msm_sleep(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- struct msm_priv *priv = pwrscale->priv;
-
- if (priv->enabled && priv->gpu_busy) {
- msm_dcvs_idle(priv->dcvs_core_id, MSM_DCVS_IDLE_ENTER, 0);
- trace_kgsl_mpdcvs(device, 0);
- priv->gpu_busy = 0;
- }
-
- return;
-}
-
-static void msm_set_io_fraction(struct kgsl_device *device,
- unsigned int value)
-{
- int i;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-
- for (i = 0; i < pwr->num_pwrlevels; i++)
- pwr->pwrlevels[i].io_fraction = value;
-
-}
-
-static void msm_restore_io_fraction(struct kgsl_device *device)
-{
- int i;
- struct kgsl_device_platform_data *pdata =
- kgsl_device_get_drvdata(device);
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-
- for (i = 0; i < pdata->num_levels; i++)
- pwr->pwrlevels[i].io_fraction =
- pdata->pwrlevel[i].io_fraction;
-}
-
-static int msm_init(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- struct msm_priv *priv;
- struct msm_dcvs_freq_entry *tbl;
- int i, ret = -EINVAL, low_level;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- struct platform_device *pdev =
- container_of(device->parentdev, struct platform_device, dev);
- struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
-
- if (the_msm_priv) {
- priv = pwrscale->priv = the_msm_priv;
- } else {
- priv = pwrscale->priv = kzalloc(sizeof(struct msm_priv),
- GFP_KERNEL);
- if (pwrscale->priv == NULL)
- return -ENOMEM;
-
- priv->core_info = pdata->core_info;
- tbl = priv->core_info->freq_tbl;
- priv->floor_level = pwr->num_pwrlevels - 1;
- /* Fill in frequency table from low to high, reversing order. */
- low_level = pwr->num_pwrlevels - KGSL_PWRLEVEL_LAST_OFFSET;
- for (i = 0; i <= low_level; i++)
- tbl[i].freq =
- pwr->pwrlevels[low_level - i].gpu_freq / 1000;
- priv->dcvs_core_id =
- msm_dcvs_register_core(MSM_DCVS_CORE_TYPE_GPU,
- 0,
- priv->core_info,
- msm_set_freq, msm_get_freq, msm_idle_enable,
- msm_set_min_freq,
- priv->core_info->sensors[0]);
- if (priv->dcvs_core_id < 0) {
- KGSL_PWR_ERR(device, "msm_dcvs_register_core failed");
- goto err;
- }
- the_msm_priv = priv;
- }
- priv->device = device;
- ret = msm_dcvs_freq_sink_start(priv->dcvs_core_id);
- if (ret >= 0) {
- if (device->ftbl->isidle(device)) {
- priv->gpu_busy = 0;
- msm_dcvs_idle(priv->dcvs_core_id,
- MSM_DCVS_IDLE_ENTER, 0);
- } else {
- priv->gpu_busy = 1;
- }
- msm_set_io_fraction(device, 0);
- return 0;
- }
-
- KGSL_PWR_ERR(device, "msm_dcvs_freq_sink_register failed\n");
-
-err:
- if (!the_msm_priv)
- kfree(pwrscale->priv);
- pwrscale->priv = NULL;
-
- return ret;
-}
-
-static void msm_close(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- struct msm_priv *priv = pwrscale->priv;
-
- if (pwrscale->priv == NULL)
- return;
- msm_dcvs_freq_sink_stop(priv->dcvs_core_id);
- pwrscale->priv = NULL;
- msm_restore_io_fraction(device);
-}
-
-struct kgsl_pwrscale_policy kgsl_pwrscale_policy_msm = {
- .name = "msm",
- .init = msm_init,
- .idle = msm_idle,
- .busy = msm_busy,
- .sleep = msm_sleep,
- .close = msm_close,
-};
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index 40649d2..8fc1753 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -235,8 +235,10 @@
tz_pwrlevels[0] = j;
ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
sizeof(tz_pwrlevels), NULL, 0);
- if (ret)
+ if (ret) {
+ KGSL_DRV_ERR(device, "Falling back to idle based GPU DCVS algorithm\n");
priv->idle_dcvs = 1;
+ }
return 0;
}
#else
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 2939df6..5950451 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -170,17 +170,32 @@
kobject_put(&private->kobj);
}
-void
-kgsl_process_init_sysfs(struct kgsl_process_private *private)
+/**
+ * kgsl_process_init_sysfs() - Initialize and create sysfs files for a process
+ *
+ * @device: Pointer to kgsl device struct
+ * @private: Pointer to the structure for the process
+ *
+ * @returns: 0 on success, error code otherwise
+ *
+ * kgsl_process_init_sysfs() is called at the time of creating the
+ * process struct when a process opens the kgsl device for the first time.
+ * This function creates the sysfs files for the process.
+ */
+int
+kgsl_process_init_sysfs(struct kgsl_device *device,
+ struct kgsl_process_private *private)
{
unsigned char name[16];
- int i, ret;
+ int i, ret = 0;
snprintf(name, sizeof(name), "%d", private->pid);
- if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
- kgsl_driver.prockobj, name))
- return;
+ ret = kobject_init_and_add(&private->kobj, &ktype_mem_entry,
+ kgsl_driver.prockobj, name);
+
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
/* We need to check the value of sysfs_create_file, but we
@@ -191,6 +206,7 @@
ret = sysfs_create_file(&private->kobj,
&mem_stats[i].max_attr.attr);
}
+ return ret;
}
static int kgsl_drv_memstat_show(struct device *dev,
@@ -589,13 +605,16 @@
/*
* Allocate space to store the list of pages to send to vmap.
- * This is an array of pointers so we can track 1024 pages per page of
- * allocation which means we can handle up to a 8MB buffer request with
- * two pages; well within the acceptable limits for using kmalloc.
+ * This is an array of pointers so we can track 1024 pages per page
+ * of allocation. Since allocations can be as large as the user dares,
+ * we have to use the kmalloc/vmalloc trick here to make sure we can
+ * get the memory we need.
*/
- pages = kmalloc(memdesc->sglen_alloc * sizeof(struct page *),
- GFP_KERNEL);
+ if ((memdesc->sglen_alloc * sizeof(struct page *)) > PAGE_SIZE)
+ pages = vmalloc(memdesc->sglen_alloc * sizeof(struct page *));
+ else
+ pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (pages == NULL) {
ret = -ENOMEM;
@@ -706,7 +725,10 @@
kgsl_driver.stats.histogram[order]++;
done:
- kfree(pages);
+ if ((memdesc->sglen_alloc * sizeof(struct page *)) > PAGE_SIZE)
+ vfree(pages);
+ else
+ kfree(pages);
if (ret)
kgsl_sharedmem_free(memdesc);
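The hunk above sizes the page-pointer array first and only falls back to vmalloc() when it no longer fits in a single page, then frees with the matching call. The same pairing as a small sketch with made-up helper names (assumes <linux/vmalloc.h> is already included in this file).

static void *example_alloc_page_array(unsigned int entries)
{
	size_t size = entries * sizeof(struct page *);

	/* Large arrays come from vmalloc, small ones from kmalloc */
	if (size > PAGE_SIZE)
		return vmalloc(size);

	return kmalloc(size, GFP_KERNEL);
}

static void example_free_page_array(void *pages, unsigned int entries)
{
	/* The free must match whichever allocator was used above */
	if ((entries * sizeof(struct page *)) > PAGE_SIZE)
		vfree(pages);
	else
		kfree(pages);
}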
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 9f84690..3986c61 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -67,7 +67,8 @@
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);
-void kgsl_process_init_sysfs(struct kgsl_process_private *private);
+int kgsl_process_init_sysfs(struct kgsl_device *device,
+ struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
int kgsl_sharedmem_init_sysfs(void);
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 6094e04..333089a 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -544,6 +544,16 @@
int remain = device->snapshot_maxsize - sizeof(*header);
void *snapshot;
struct timespec boot;
+ int ret = 0;
+
+ /*
+ * Bail out if we fail to get the active count for the GPU;
+ * the caller can try again later.
+ */
+ if (kgsl_active_count_get(device)) {
+ KGSL_DRV_ERR(device, "Failed to get GPU active count\n");
+ return -EINVAL;
+ }
/* increment the hang count (on hang) for good book keeping */
if (hang)
@@ -558,19 +568,23 @@
* of the state and never frozen.
*/
- if (hang && device->snapshot_frozen == 1)
- return 0;
+ if (hang && device->snapshot_frozen == 1) {
+ ret = 0;
+ goto done;
+ }
if (device->snapshot == NULL) {
KGSL_DRV_ERR(device,
"snapshot: No snapshot memory available\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto done;
}
if (remain < sizeof(*header)) {
KGSL_DRV_ERR(device,
"snapshot: Not enough memory for the header\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto done;
}
header->magic = SNAPSHOT_MAGIC;
@@ -606,7 +620,10 @@
__pa(device->snapshot), device->snapshot_size);
if (hang)
sysfs_notify(&device->snapshot_kobj, NULL, "timestamp");
- return 0;
+
+done:
+ kgsl_active_count_put(device);
+ return ret;
}
EXPORT_SYMBOL(kgsl_device_snapshot);
@@ -715,7 +732,10 @@
{
if (device && count > 0) {
mutex_lock(&device->mutex);
- kgsl_device_snapshot(device, 0);
+ if (!kgsl_active_count_get(device)) {
+ kgsl_device_snapshot(device, 0);
+ kgsl_active_count_put(device);
+ }
mutex_unlock(&device->mutex);
}
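The sysfs trigger above now brackets the snapshot with an active-count reference. The same get/work/put shape as a stand-alone sketch; the wrapper name is hypothetical.

static void example_trigger_snapshot(struct kgsl_device *device)
{
	mutex_lock(&device->mutex);

	/* Only take the snapshot if the device can be pinned active */
	if (!kgsl_active_count_get(device)) {
		kgsl_device_snapshot(device, 0);
		kgsl_active_count_put(device);
	}

	mutex_unlock(&device->mutex);
}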
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 5379670..b7d7235 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -220,6 +220,15 @@
snprintf(str, size, "%u", kpt->timestamp);
}
+static void kgsl_sync_timeline_release_obj(struct sync_timeline *sync_timeline)
+{
+ /*
+ * Make sure the timeline is freed only after the destroy flag is set.
+ * This avoids any further access to the timeline from KGSL and also
+ * catches any unbalanced kref on the timeline.
+ */
+ BUG_ON(sync_timeline && (sync_timeline->destroyed != true));
+}
static const struct sync_timeline_ops kgsl_sync_timeline_ops = {
.driver_name = "kgsl-timeline",
.dup = kgsl_sync_pt_dup,
@@ -227,6 +236,7 @@
.compare = kgsl_sync_pt_compare,
.timeline_value_str = kgsl_sync_timeline_value_str,
.pt_value_str = kgsl_sync_pt_value_str,
+ .release_obj = kgsl_sync_timeline_release_obj,
};
int kgsl_sync_timeline_create(struct kgsl_context *context)
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 831b13f..179a72b 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -37,14 +37,13 @@
TP_PROTO(struct kgsl_device *device,
int drawctxt_id,
- struct kgsl_ibdesc *ibdesc,
- int numibs,
+ struct kgsl_cmdbatch *cmdbatch,
int timestamp,
int flags,
int result,
unsigned int type),
- TP_ARGS(device, drawctxt_id, ibdesc, numibs, timestamp, flags,
+ TP_ARGS(device, drawctxt_id, cmdbatch, timestamp, flags,
result, type),
TP_STRUCT__entry(
@@ -61,8 +60,8 @@
TP_fast_assign(
__assign_str(device_name, device->name);
__entry->drawctxt_id = drawctxt_id;
- __entry->ibdesc_addr = ibdesc[0].gpuaddr;
- __entry->numibs = numibs;
+ __entry->ibdesc_addr = cmdbatch->ibdesc[0].gpuaddr;
+ __entry->numibs = cmdbatch->ibcount;
__entry->timestamp = timestamp;
__entry->flags = flags;
__entry->result = result;
@@ -262,29 +261,6 @@
)
);
-TRACE_EVENT(kgsl_mpdcvs,
-
- TP_PROTO(struct kgsl_device *device, unsigned int state),
-
- TP_ARGS(device, state),
-
- TP_STRUCT__entry(
- __string(device_name, device->name)
- __field(unsigned int, state)
- ),
-
- TP_fast_assign(
- __assign_str(device_name, device->name);
- __entry->state = state;
- ),
-
- TP_printk(
- "d_name=%s %s",
- __get_str(device_name),
- __entry->state ? "BUSY" : "IDLE"
- )
-);
-
TRACE_EVENT(kgsl_gpubusy,
TP_PROTO(struct kgsl_device *device, unsigned int busy,
unsigned int elapsed),
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index 883417f..0af57aa 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -353,7 +353,13 @@
return ts_diff < Z180_PACKET_COUNT;
}
-static int z180_idle(struct kgsl_device *device)
+/**
+ * z180_idle() - Idle the 2D device
+ * @device: Pointer to the KGSL device struct for the Z180
+ *
+ * Wait until the z180 submission queue is idle.
+ */
+int z180_idle(struct kgsl_device *device)
{
int status = 0;
struct z180_device *z180_dev = Z180_DEVICE(device);
@@ -373,10 +379,8 @@
int
z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_ibdesc *ibdesc,
- unsigned int numibs,
- uint32_t *timestamp,
- unsigned int ctrl)
+ struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp)
{
long result = 0;
unsigned int ofs = PACKETSIZE_STATESTREAM * sizeof(unsigned int);
@@ -389,6 +393,20 @@
struct kgsl_pagetable *pagetable = dev_priv->process_priv->pagetable;
struct z180_device *z180_dev = Z180_DEVICE(device);
unsigned int sizedwords;
+ unsigned int numibs;
+ struct kgsl_ibdesc *ibdesc;
+
+ mutex_lock(&device->mutex);
+
+ kgsl_active_count_get(device);
+
+ if (cmdbatch == NULL) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ ibdesc = cmdbatch->ibdesc;
+ numibs = cmdbatch->ibcount;
if (device->state & KGSL_STATE_HUNG) {
result = -EINVAL;
@@ -430,7 +448,7 @@
context->id, cmd, sizedwords);
/* context switch */
if ((context->id != (int)z180_dev->ringbuffer.prevctx) ||
- (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
+ (cmdbatch->flags & KGSL_CONTEXT_CTX_SWITCH)) {
KGSL_CMD_INFO(device, "context switch %d -> %d\n",
context->id, z180_dev->ringbuffer.prevctx);
kgsl_mmu_setstate(&device->mmu, pagetable,
@@ -438,10 +456,13 @@
cnt = PACKETSIZE_STATESTREAM;
ofs = 0;
}
- kgsl_setstate(&device->mmu,
+
+ result = kgsl_setstate(&device->mmu,
KGSL_MEMSTORE_GLOBAL,
kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
device->id));
+ if (result < 0)
+ goto error;
result = wait_event_interruptible_timeout(device->wait_queue,
room_in_rb(z180_dev),
@@ -482,9 +503,12 @@
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
error:
+ kgsl_trace_issueibcmds(device, context->id, cmdbatch,
+ *timestamp, cmdbatch->flags, result, 0);
- kgsl_trace_issueibcmds(device, context->id, ibdesc, numibs,
- *timestamp, ctrl, result, 0);
+ kgsl_active_count_put(device);
+
+ mutex_unlock(&device->mutex);
return (int)result;
}
@@ -595,8 +619,12 @@
static int z180_stop(struct kgsl_device *device)
{
+ int ret;
+
device->ftbl->irqctrl(device, 0);
- z180_idle(device);
+ ret = z180_idle(device);
+ if (ret)
+ return ret;
del_timer_sync(&device->idle_timer);
@@ -662,7 +690,7 @@
return status;
}
-static unsigned int z180_isidle(struct kgsl_device *device)
+static bool z180_isidle(struct kgsl_device *device)
{
struct z180_device *z180_dev = Z180_DEVICE(device);
@@ -875,7 +903,7 @@
return context;
}
-static void
+static int
z180_drawctxt_detach(struct kgsl_context *context)
{
struct kgsl_device *device;
@@ -889,9 +917,13 @@
if (z180_dev->ringbuffer.prevctx == context->id) {
z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
device->mmu.hwpagetable = device->mmu.defaultpagetable;
+
+ /* Ignore the result - we are going down anyway */
kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL,
KGSL_MMUFLAGS_PTUPDATE);
}
+
+ return 0;
}
static void
@@ -965,6 +997,7 @@
.irqctrl = z180_irqctrl,
.gpuid = z180_gpuid,
.irq_handler = z180_irq_handler,
+ .drain = z180_idle, /* drain == idle for the z180 */
/* Optional functions */
.drawctxt_create = z180_drawctxt_create,
.drawctxt_detach = z180_drawctxt_detach,
diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h
index 1be0870..a36e92d 100644
--- a/drivers/gpu/msm/z180.h
+++ b/drivers/gpu/msm/z180.h
@@ -45,5 +45,6 @@
};
int z180_dump(struct kgsl_device *, int);
+int z180_idle(struct kgsl_device *);
#endif /* __Z180_H */
diff --git a/drivers/gpu/msm/z180_postmortem.c b/drivers/gpu/msm/z180_postmortem.c
index 5d929cf..bc53c0e 100644
--- a/drivers/gpu/msm/z180_postmortem.c
+++ b/drivers/gpu/msm/z180_postmortem.c
@@ -58,6 +58,8 @@
unsigned int i;
unsigned int reg_val;
+ z180_idle(device);
+
KGSL_LOG_DUMP(device, "Z180 Register Dump\n");
for (i = 0; i < ARRAY_SIZE(regs_to_dump); i++) {
kgsl_regread(device,
diff --git a/drivers/gud/mobicore_driver/api.c b/drivers/gud/mobicore_driver/api.c
index 871f6cc..b47383a0 100644
--- a/drivers/gud/mobicore_driver/api.c
+++ b/drivers/gud/mobicore_driver/api.c
@@ -98,7 +98,11 @@
*/
struct mc_instance *mobicore_open(void)
{
- return mc_alloc_instance();
+ struct mc_instance *instance = mc_alloc_instance();
+ if (instance)
+ instance->admin = true;
+ return instance;
}
EXPORT_SYMBOL(mobicore_open);
diff --git a/drivers/gud/mobicore_driver/build_tag.h b/drivers/gud/mobicore_driver/build_tag.h
index 2a7772e..4a24275 100644
--- a/drivers/gud/mobicore_driver/build_tag.h
+++ b/drivers/gud/mobicore_driver/build_tag.h
@@ -26,4 +26,4 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define MOBICORE_COMPONENT_BUILD_TAG \
- "*** GC_MSM8960_Release_V019 ###"
+ "*** t-base-202_V001 ###"
diff --git a/drivers/gud/mobicore_driver/main.c b/drivers/gud/mobicore_driver/main.c
index 6f91974..0451452 100644
--- a/drivers/gud/mobicore_driver/main.c
+++ b/drivers/gud/mobicore_driver/main.c
@@ -47,7 +47,7 @@
/* Define a MobiCore device structure for use with dev_debug() etc */
struct device_driver mcd_debug_name = {
- .name = "mcdrvkmod"
+ .name = "MobiCore"
};
struct device mcd_debug_subname = {
diff --git a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
index 4768f39..7854fc5 100644
--- a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
+++ b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
@@ -41,7 +41,15 @@
/* Enable the use of vm_unamp instead of the deprecated do_munmap
* and other 3.7 features
*/
+#ifndef CONFIG_ARCH_MSM8960
#define MC_VM_UNMAP
+#endif
+
+
+#if defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MSM8226)
+/* Perform clock enable/disable */
+#define MC_CRYPTO_CLOCK_MANAGEMENT
+#endif
/* Enable Power Management for Crypto Engine */
#define MC_CRYPTO_CLOCK_MANAGEMENT
diff --git a/drivers/gud/mobicore_driver/pm.c b/drivers/gud/mobicore_driver/pm.c
index 3ad2015..55a1ef7 100644
--- a/drivers/gud/mobicore_driver/pm.c
+++ b/drivers/gud/mobicore_driver/pm.c
@@ -67,8 +67,8 @@
MCDRV_DBG(mcd, "MobiCore IDLE=%d!", flags->schedule);
MCDRV_DBG(mcd,
"MobiCore Request Sleep=%d!", flags->sleep_mode.SleepReq);
- MCDRV_DBG(mcd, "MobiCore Sleep Ready=%d!",
- flags->sleep_mode.ReadyToSleep);
+ MCDRV_DBG(mcd,
+ "MobiCore Sleep Ready=%d!", flags->sleep_mode.ReadyToSleep);
}
static int mc_suspend_notifier(struct notifier_block *nb,
diff --git a/drivers/gud/mobicore_driver/public/mc_linux.h b/drivers/gud/mobicore_driver/public/mc_linux.h
index 9c49aef..af027dc 100644
--- a/drivers/gud/mobicore_driver/public/mc_linux.h
+++ b/drivers/gud/mobicore_driver/public/mc_linux.h
@@ -43,6 +43,10 @@
#include "version.h"
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
+
#define MC_ADMIN_DEVNODE "mobicore"
#define MC_USER_DEVNODE "mobicore-user"
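mc_linux.h is shared with user space, so the guarded <stdint.h> include above is what lets a plain user-space build see the fixed-width types. A trivial, hypothetical consumer:

#include <stdio.h>
#include "mc_linux.h"	/* pulls in <stdint.h> itself when !__KERNEL__ */

int main(void)
{
	printf("admin devnode: /dev/%s\n", MC_ADMIN_DEVNODE);
	return 0;
}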
diff --git a/drivers/gud/mobicore_kernelapi/clientlib.c b/drivers/gud/mobicore_kernelapi/clientlib.c
index 7038e02..16b52e5 100644
--- a/drivers/gud/mobicore_kernelapi/clientlib.c
+++ b/drivers/gud/mobicore_kernelapi/clientlib.c
@@ -299,7 +299,8 @@
{
session->device_id,
*uuid,
- (uint32_t)wsm->phys_addr,
+ (uint32_t)(wsm->phys_addr) & 0xFFF,
+ wsm->handle,
len
}
};
@@ -926,7 +927,8 @@
{
session->session_id,
handle,
- (uint32_t)(map_info->secure_virt_addr)
+ (uint32_t)(map_info->secure_virt_addr),
+ map_info->secure_virt_len
}
};
@@ -956,11 +958,11 @@
break;
}
- struct mc_drv_rsp_unmap_bulk_mem_payload_t
+ /*struct mc_drv_rsp_unmap_bulk_mem_payload_t
rsp_unmap_bulk_mem_payload;
connection_read_datablock(dev_con,
&rsp_unmap_bulk_mem_payload,
- sizeof(rsp_unmap_bulk_mem_payload));
+ sizeof(rsp_unmap_bulk_mem_payload));*/
/*
* Unregister mapped bulk buffer from Kernel Module and
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h b/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
index 3b8eb4b..eaf7e6c 100644
--- a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
+++ b/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
@@ -109,6 +109,7 @@
uint32_t device_id;
struct mc_uuid_t uuid;
uint32_t tci;
+ uint32_t handle;
uint32_t len;
};
@@ -119,10 +120,8 @@
struct mc_drv_rsp_open_session_payload_t {
- uint32_t device_id;
uint32_t session_id;
uint32_t device_session_id;
- uint32_t mc_result;
uint32_t session_magic;
};
@@ -186,7 +185,6 @@
struct mc_drv_rsp_map_bulk_mem_payload_t {
uint32_t session_id;
uint32_t secure_virtual_adr;
- uint32_t mc_result;
};
struct mc_drv_rsp_map_bulk_mem_t {
@@ -210,7 +208,6 @@
struct mc_drv_rsp_unmap_bulk_mem_payload_t {
uint32_t response_id;
uint32_t session_id;
- uint32_t mc_result;
};
struct mc_drv_rsp_unmap_bulk_mem_t {
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index 27818b4..a453159 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -146,9 +146,10 @@
bool iadc_mode_sel;
struct qpnp_iadc_comp iadc_comp;
struct sensor_device_attribute sens_attr[0];
+ bool skip_auto_calibrations;
};
-struct qpnp_iadc_drv *qpnp_iadc;
+static struct qpnp_iadc_drv *qpnp_iadc;
static int32_t qpnp_iadc_read_reg(uint32_t reg, u8 *data)
{
@@ -499,10 +500,13 @@
return 0;
}
+#define IADC_CENTER 0xC000
+#define IADC_READING_RESOLUTION_N 542535
+#define IADC_READING_RESOLUTION_D 100000
static int32_t qpnp_convert_raw_offset_voltage(void)
{
struct qpnp_iadc_drv *iadc = qpnp_iadc;
- uint32_t num = 0;
+ s64 numerator;
if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
pr_err("raw offset errors! raw_gain:0x%x and raw_offset:0x%x\n",
@@ -510,19 +514,23 @@
return -EINVAL;
}
- iadc->adc->calib.offset_uv = 0;
+ numerator = iadc->adc->calib.offset_raw - IADC_CENTER;
+ numerator *= IADC_READING_RESOLUTION_N;
+ iadc->adc->calib.offset_uv = div_s64(numerator,
+ IADC_READING_RESOLUTION_D);
- num = iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw;
+ numerator = iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw;
+ numerator *= IADC_READING_RESOLUTION_N;
- iadc->adc->calib.gain_uv = (num * QPNP_ADC_GAIN_NV)/
- (iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
+ iadc->adc->calib.gain_uv = div_s64(numerator,
+ IADC_READING_RESOLUTION_D);
pr_debug("gain_uv:%d offset_uv:%d\n",
iadc->adc->calib.gain_uv, iadc->adc->calib.offset_uv);
return 0;
}
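The conversion above scales the raw readings by the fixed 542535/100000 factor around the 0xC000 mid-code. A stand-alone arithmetic check of the two formulas; the raw calibration values below are made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t offset_raw = 0xC010;	/* made-up calibration readings */
	int64_t gain_raw   = 0xE000;
	int64_t offset_uv, gain_uv;

	/* offset_uv = (offset_raw - IADC_CENTER) * RESOLUTION_N / RESOLUTION_D */
	offset_uv = (offset_raw - 0xC000) * 542535 / 100000;

	/* gain_uv = (gain_raw - offset_raw) * RESOLUTION_N / RESOLUTION_D */
	gain_uv = (gain_raw - offset_raw) * 542535 / 100000;

	printf("offset_uv=%lld gain_uv=%lld\n",
	       (long long)offset_uv, (long long)gain_uv);
	return 0;
}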
-int32_t qpnp_iadc_calibrate_for_trim(void)
+int32_t qpnp_iadc_calibrate_for_trim(bool batfet_closed)
{
struct qpnp_iadc_drv *iadc = qpnp_iadc;
uint8_t rslt_lsb, rslt_msb;
@@ -544,7 +552,14 @@
iadc->adc->calib.gain_raw = raw_data;
- if (iadc->external_rsense) {
+ /*
+ * There is a feature in the BMS where, if the batfet is open,
+ * BMS reads of INTERNAL_RSENSE (channel 0) actually go to
+ * OFFSET_CALIBRATION_CSP_CSN (channel 5). Hence, if the batfet is
+ * open, we have to calibrate based on OFFSET_CALIBRATION_CSP_CSN
+ * even for an internal rsense.
+ */
+ if (!batfet_closed || iadc->external_rsense) {
/* external offset calculation */
rc = qpnp_iadc_configure(OFFSET_CALIBRATION_CSP_CSN,
&raw_data, mode_sel);
@@ -621,13 +636,15 @@
struct qpnp_iadc_drv *iadc = qpnp_iadc;
int rc = 0;
- rc = qpnp_iadc_calibrate_for_trim();
- if (rc)
- pr_debug("periodic IADC calibration failed\n");
- else
- schedule_delayed_work(&iadc->iadc_work,
- round_jiffies_relative(msecs_to_jiffies
- (QPNP_IADC_CALIB_SECONDS)));
+ if (!iadc->skip_auto_calibrations) {
+ rc = qpnp_iadc_calibrate_for_trim(true);
+ if (rc)
+ pr_debug("periodic IADC calibration failed\n");
+ }
+
+ schedule_delayed_work(&iadc->iadc_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (QPNP_IADC_CALIB_SECONDS)));
return;
}
@@ -716,11 +733,12 @@
die_temp_offset = -die_temp_offset;
if (die_temp_offset > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
- iadc->die_temp =
- result_pmic_therm.physical;
- rc = qpnp_iadc_calibrate_for_trim();
- if (rc)
- pr_err("periodic IADC calibration failed\n");
+ iadc->die_temp = result_pmic_therm.physical;
+ if (!iadc->skip_auto_calibrations) {
+ rc = qpnp_iadc_calibrate_for_trim(true);
+ if (rc)
+ pr_err("IADC calibration failed rc = %d\n", rc);
+ }
}
return rc;
@@ -820,6 +838,30 @@
}
EXPORT_SYMBOL(qpnp_iadc_get_gain_and_offset);
+int qpnp_iadc_skip_calibration(void)
+{
+ struct qpnp_iadc_drv *iadc = qpnp_iadc;
+
+ if (!iadc || !iadc->iadc_initialized)
+ return -EPROBE_DEFER;
+
+ iadc->skip_auto_calibrations = true;
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_skip_calibration);
+
+int qpnp_iadc_resume_calibration(void)
+{
+ struct qpnp_iadc_drv *iadc = qpnp_iadc;
+
+ if (!iadc || !iadc->iadc_initialized)
+ return -EPROBE_DEFER;
+
+ iadc->skip_auto_calibrations = false;
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_resume_calibration);
+
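A hypothetical caller of the two new exports, pausing the periodic auto-calibration around an explicit trim calibration; the wrapper itself is not part of this patch.

static int example_force_recalibration(void)
{
	int rc = qpnp_iadc_skip_calibration();

	if (rc)		/* -EPROBE_DEFER until the IADC is initialized */
		return rc;

	rc = qpnp_iadc_calibrate_for_trim(true);

	qpnp_iadc_resume_calibration();
	return rc;
}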
int32_t qpnp_iadc_vadc_sync_read(
enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
@@ -1008,7 +1050,7 @@
}
iadc->iadc_initialized = true;
- rc = qpnp_iadc_calibrate_for_trim();
+ rc = qpnp_iadc_calibrate_for_trim(true);
if (rc)
dev_err(&spmi->dev, "failed to calibrate for USR trim\n");
schedule_delayed_work(&iadc->iadc_work,
diff --git a/drivers/input/misc/stk3x1x.c b/drivers/input/misc/stk3x1x.c
new file mode 100644
index 0000000..eee9a28
--- /dev/null
+++ b/drivers/input/misc/stk3x1x.c
@@ -0,0 +1,2020 @@
+/*
+ * stk3x1x.c - Linux kernel modules for sensortek stk301x, stk321x and stk331x
+ * proximity/ambient light sensor
+ *
+ * Copyright (C) 2012 Lex Hsieh / sensortek <lex_hsieh@sitronix.com.tw> or
+ * <lex_hsieh@sensortek.com.tw>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/kdev_t.h>
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/wakelock.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#include "linux/stk3x1x.h"
+
+#define DRIVER_VERSION "3.4.4ts"
+
+/* Driver Settings */
+#define CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+#ifdef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+#define STK_ALS_CHANGE_THD 20 /* The threshold to trigger ALS interrupt, unit: lux */
+#endif /* #ifdef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD */
+#define STK_INT_PS_MODE 1 /* 1, 2, or 3 */
+#define STK_POLL_PS
+#define STK_POLL_ALS /* ALS interrupt is valid only when STK_INT_PS_MODE = 1 or 4 */
+
+#define STK_DEBUG_PRINTF
+
+/* Define Register Map */
+#define STK_STATE_REG 0x00
+#define STK_PSCTRL_REG 0x01
+#define STK_ALSCTRL_REG 0x02
+#define STK_LEDCTRL_REG 0x03
+#define STK_INT_REG 0x04
+#define STK_WAIT_REG 0x05
+#define STK_THDH1_PS_REG 0x06
+#define STK_THDH2_PS_REG 0x07
+#define STK_THDL1_PS_REG 0x08
+#define STK_THDL2_PS_REG 0x09
+#define STK_THDH1_ALS_REG 0x0A
+#define STK_THDH2_ALS_REG 0x0B
+#define STK_THDL1_ALS_REG 0x0C
+#define STK_THDL2_ALS_REG 0x0D
+#define STK_FLAG_REG 0x10
+#define STK_DATA1_PS_REG 0x11
+#define STK_DATA2_PS_REG 0x12
+#define STK_DATA1_ALS_REG 0x13
+#define STK_DATA2_ALS_REG 0x14
+#define STK_DATA1_OFFSET_REG 0x15
+#define STK_DATA2_OFFSET_REG 0x16
+#define STK_DATA1_IR_REG 0x17
+#define STK_DATA2_IR_REG 0x18
+#define STK_PDT_ID_REG 0x3E
+#define STK_RSRVD_REG 0x3F
+#define STK_SW_RESET_REG 0x80
+
+
+/* Define state reg */
+#define STK_STATE_EN_IRS_SHIFT 7
+#define STK_STATE_EN_AK_SHIFT 6
+#define STK_STATE_EN_ASO_SHIFT 5
+#define STK_STATE_EN_IRO_SHIFT 4
+#define STK_STATE_EN_WAIT_SHIFT 2
+#define STK_STATE_EN_ALS_SHIFT 1
+#define STK_STATE_EN_PS_SHIFT 0
+
+#define STK_STATE_EN_IRS_MASK 0x80
+#define STK_STATE_EN_AK_MASK 0x40
+#define STK_STATE_EN_ASO_MASK 0x20
+#define STK_STATE_EN_IRO_MASK 0x10
+#define STK_STATE_EN_WAIT_MASK 0x04
+#define STK_STATE_EN_ALS_MASK 0x02
+#define STK_STATE_EN_PS_MASK 0x01
+
+/* Define PS ctrl reg */
+#define STK_PS_PRS_SHIFT 6
+#define STK_PS_GAIN_SHIFT 4
+#define STK_PS_IT_SHIFT 0
+
+#define STK_PS_PRS_MASK 0xC0
+#define STK_PS_GAIN_MASK 0x30
+#define STK_PS_IT_MASK 0x0F
+
+/* Define ALS ctrl reg */
+#define STK_ALS_PRS_SHIFT 6
+#define STK_ALS_GAIN_SHIFT 4
+#define STK_ALS_IT_SHIFT 0
+
+#define STK_ALS_PRS_MASK 0xC0
+#define STK_ALS_GAIN_MASK 0x30
+#define STK_ALS_IT_MASK 0x0F
+
+/* Define LED ctrl reg */
+#define STK_LED_IRDR_SHIFT 6
+#define STK_LED_DT_SHIFT 0
+
+#define STK_LED_IRDR_MASK 0xC0
+#define STK_LED_DT_MASK 0x3F
+
+/* Define interrupt reg */
+#define STK_INT_CTRL_SHIFT 7
+#define STK_INT_OUI_SHIFT 4
+#define STK_INT_ALS_SHIFT 3
+#define STK_INT_PS_SHIFT 0
+
+#define STK_INT_CTRL_MASK 0x80
+#define STK_INT_OUI_MASK 0x10
+#define STK_INT_ALS_MASK 0x08
+#define STK_INT_PS_MASK 0x07
+
+#define STK_INT_ALS 0x08
+
+/* Define flag reg */
+#define STK_FLG_ALSDR_SHIFT 7
+#define STK_FLG_PSDR_SHIFT 6
+#define STK_FLG_ALSINT_SHIFT 5
+#define STK_FLG_PSINT_SHIFT 4
+#define STK_FLG_OUI_SHIFT 2
+#define STK_FLG_IR_RDY_SHIFT 1
+#define STK_FLG_NF_SHIFT 0
+
+#define STK_FLG_ALSDR_MASK 0x80
+#define STK_FLG_PSDR_MASK 0x40
+#define STK_FLG_ALSINT_MASK 0x20
+#define STK_FLG_PSINT_MASK 0x10
+#define STK_FLG_OUI_MASK 0x04
+#define STK_FLG_IR_RDY_MASK 0x02
+#define STK_FLG_NF_MASK 0x01
+
+/* misc define */
+#define MIN_ALS_POLL_DELAY_NS 110000000
+
+#define DEVICE_NAME "stk_ps"
+#define ALS_NAME "lightsensor-level"
+#define PS_NAME "proximity"
+
+struct stk3x1x_data {
+ struct i2c_client *client;
+#if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS))
+ int32_t irq;
+ struct work_struct stk_work;
+ struct workqueue_struct *stk_wq;
+#endif
+ int int_pin;
+ uint8_t wait_reg;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend stk_early_suspend;
+#endif
+ uint16_t ps_thd_h;
+ uint16_t ps_thd_l;
+ struct mutex io_lock;
+ struct input_dev *ps_input_dev;
+ int32_t ps_distance_last;
+ bool ps_enabled;
+ struct wake_lock ps_wakelock;
+ struct work_struct stk_ps_work;
+ struct workqueue_struct *stk_ps_wq;
+#ifdef STK_POLL_PS
+ struct wake_lock ps_nosuspend_wl;
+#endif
+ struct input_dev *als_input_dev;
+ int32_t als_lux_last;
+ uint32_t als_transmittance;
+ bool als_enabled;
+ struct hrtimer als_timer;
+ struct hrtimer ps_timer;
+ ktime_t als_poll_delay;
+ ktime_t ps_poll_delay;
+#ifdef STK_POLL_ALS
+ struct work_struct stk_als_work;
+ struct workqueue_struct *stk_als_wq;
+#endif
+};
+
+#if( !defined(CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD))
+static uint32_t lux_threshold_table[] =
+{
+ 3,
+ 10,
+ 40,
+ 65,
+ 145,
+ 300,
+ 550,
+ 930,
+ 1250,
+ 1700,
+};
+
+#define LUX_THD_TABLE_SIZE (sizeof(lux_threshold_table)/sizeof(uint32_t)+1)
+static uint16_t code_threshold_table[LUX_THD_TABLE_SIZE+1];
+#endif
+
+static int32_t stk3x1x_enable_ps(struct stk3x1x_data *ps_data, uint8_t enable);
+static int32_t stk3x1x_enable_als(struct stk3x1x_data *ps_data, uint8_t enable);
+static int32_t stk3x1x_set_ps_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l);
+static int32_t stk3x1x_set_ps_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h);
+static int32_t stk3x1x_set_als_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l);
+static int32_t stk3x1x_set_als_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h);
+//static int32_t stk3x1x_set_ps_aoffset(struct stk3x1x_data *ps_data, uint16_t offset);
+
+inline uint32_t stk_alscode2lux(struct stk3x1x_data *ps_data, uint32_t alscode)
+{
+ alscode += ((alscode<<7)+(alscode<<3)+(alscode>>1));
+ alscode<<=3;
+ alscode/=ps_data->als_transmittance;
+ return alscode;
+}
+
+inline uint32_t stk_lux2alscode(struct stk3x1x_data *ps_data, uint32_t lux)
+{
+ lux*=ps_data->als_transmittance;
+ lux/=1100;
+ if (unlikely(lux>=(1<<16)))
+ lux = (1<<16) -1;
+ return lux;
+}
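The shift-and-add sequence above is fixed-point arithmetic: (1 + 128 + 8 + 1/2) * 8 = 1100, so stk_alscode2lux() is roughly alscode * 1100 / transmittance and stk_lux2alscode() is its inverse. A stand-alone check with a made-up transmittance value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t transmittance = 500;	/* platform-specific, made up here */
	uint32_t alscode = 1000;
	uint32_t lux;

	/* Same steps as stk_alscode2lux(): ~ alscode * 1100 / transmittance */
	lux = alscode + ((alscode << 7) + (alscode << 3) + (alscode >> 1));
	lux <<= 3;
	lux /= transmittance;
	printf("alscode=%u -> lux=%u\n", alscode, lux);

	/* Inverse mapping used by stk_lux2alscode(): lux * transmittance / 1100 */
	printf("lux=%u -> alscode=%u\n", lux, lux * transmittance / 1100);
	return 0;
}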
+
+#ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+static void stk_init_code_threshold_table(struct stk3x1x_data *ps_data)
+{
+ uint32_t i,j;
+ uint32_t alscode;
+
+ code_threshold_table[0] = 0;
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "alscode[0]=%d\n",0);
+#endif
+ for (i=1,j=0;i<LUX_THD_TABLE_SIZE;i++,j++)
+ {
+ alscode = stk_lux2alscode(ps_data, lux_threshold_table[j]);
+ printk(KERN_INFO "alscode[%d]=%d\n",i,alscode);
+ code_threshold_table[i] = (uint16_t)(alscode);
+ }
+ code_threshold_table[i] = 0xffff;
+ printk(KERN_INFO "alscode[%d]=%d\n",i,alscode);
+}
+
+static uint32_t stk_get_lux_interval_index(uint16_t alscode)
+{
+ uint32_t i;
+ for (i=1;i<=LUX_THD_TABLE_SIZE;i++)
+ {
+ if ((alscode>=code_threshold_table[i-1])&&(alscode<code_threshold_table[i]))
+ {
+ return i;
+ }
+ }
+ return LUX_THD_TABLE_SIZE;
+}
+#else
+inline void stk_als_set_new_thd(struct stk3x1x_data *ps_data, uint16_t alscode)
+{
+ int32_t high_thd,low_thd;
+ high_thd = alscode + stk_lux2alscode(ps_data, STK_ALS_CHANGE_THD);
+ low_thd = alscode - stk_lux2alscode(ps_data, STK_ALS_CHANGE_THD);
+ if (high_thd >= (1<<16))
+ high_thd = (1<<16) -1;
+ if (low_thd <0)
+ low_thd = 0;
+ stk3x1x_set_als_thd_h(ps_data, (uint16_t)high_thd);
+ stk3x1x_set_als_thd_l(ps_data, (uint16_t)low_thd);
+}
+#endif // CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+
+
+static int32_t stk3x1x_init_all_reg(struct stk3x1x_data *ps_data, struct stk3x1x_platform_data *plat_data)
+{
+ int32_t ret;
+ uint8_t w_reg;
+
+ w_reg = plat_data->state_reg;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ ps_data->ps_thd_h = plat_data->ps_thd_h;
+ ps_data->ps_thd_l = plat_data->ps_thd_l;
+
+ w_reg = plat_data->psctrl_reg;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_PSCTRL_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ w_reg = plat_data->alsctrl_reg;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_ALSCTRL_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ w_reg = plat_data->ledctrl_reg;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_LEDCTRL_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ ps_data->wait_reg = plat_data->wait_reg;
+
+ if(ps_data->wait_reg < 2)
+ {
+ printk(KERN_WARNING "%s: wait_reg should be at least 2, forcing it to 2\n", __func__);
+ ps_data->wait_reg = 2;
+ }
+ else if (ps_data->wait_reg > 0xFF)
+ {
+		printk(KERN_WARNING "%s: wait_reg should be at most 0xFF, force to write 0xFF\n", __func__);
+ ps_data->wait_reg = 0xFF;
+ }
+	w_reg = ps_data->wait_reg;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_WAIT_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ stk3x1x_set_ps_thd_h(ps_data, ps_data->ps_thd_h);
+ stk3x1x_set_ps_thd_l(ps_data, ps_data->ps_thd_l);
+
+ w_reg = 0;
+#ifndef STK_POLL_PS
+ w_reg |= STK_INT_PS_MODE;
+#else
+ w_reg |= 0x01;
+#endif
+
+#if (!defined(STK_POLL_ALS) && (STK_INT_PS_MODE != 0x02) && (STK_INT_PS_MODE != 0x03))
+ w_reg |= STK_INT_ALS;
+#endif
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_INT_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ return 0;
+}
+
+static int32_t stk3x1x_check_pid(struct stk3x1x_data *ps_data)
+{
+ int32_t err1, err2;
+
+ err1 = i2c_smbus_read_byte_data(ps_data->client,STK_PDT_ID_REG);
+ if (err1 < 0)
+ {
+ printk(KERN_ERR "%s: read i2c error, err=%d\n", __func__, err1);
+ return err1;
+ }
+
+ err2 = i2c_smbus_read_byte_data(ps_data->client,STK_RSRVD_REG);
+ if (err2 < 0)
+ {
+ printk(KERN_ERR "%s: read i2c error, err=%d\n", __func__, err2);
+ return -1;
+ }
+ printk(KERN_INFO "%s: PID=0x%x, RID=0x%x\n", __func__, err1, err2);
+ if(err2 == 0xC0)
+		printk(KERN_INFO "%s: RID=0xC0 detected\n", __func__);
+
+ return 0;
+}
+
+
+static int32_t stk3x1x_software_reset(struct stk3x1x_data *ps_data)
+{
+ int32_t r;
+ uint8_t w_reg;
+
+ w_reg = 0x7F;
+ r = i2c_smbus_write_byte_data(ps_data->client,STK_WAIT_REG,w_reg);
+ if (r<0)
+ {
+ printk(KERN_ERR "%s: software reset: write i2c error, ret=%d\n", __func__, r);
+ return r;
+ }
+ r = i2c_smbus_read_byte_data(ps_data->client,STK_WAIT_REG);
+ if (w_reg != r)
+ {
+ printk(KERN_ERR "%s: software reset: read-back value is not the same\n", __func__);
+ return -1;
+ }
+
+ r = i2c_smbus_write_byte_data(ps_data->client,STK_SW_RESET_REG,0);
+ if (r<0)
+ {
+		printk(KERN_ERR "%s: software reset: write error after reset\n", __func__);
+ return r;
+ }
+ msleep(1);
+ return 0;
+}
+
+
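+/* The 16-bit threshold registers store the high byte at the lower address,
+ * while i2c_smbus_write_word_data() sends the low byte first, so the bytes
+ * are swapped before writing. */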
+static int32_t stk3x1x_set_als_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&thd_l;
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ return i2c_smbus_write_word_data(ps_data->client,STK_THDL1_ALS_REG,thd_l);
+}
+static int32_t stk3x1x_set_als_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&thd_h;
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ return i2c_smbus_write_word_data(ps_data->client,STK_THDH1_ALS_REG,thd_h);
+}
+
+static int32_t stk3x1x_set_ps_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&thd_l;
+
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ ps_data->ps_thd_l = thd_l;
+ return i2c_smbus_write_word_data(ps_data->client,STK_THDL1_PS_REG,thd_l);
+}
+
+static int32_t stk3x1x_set_ps_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&thd_h;
+
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ ps_data->ps_thd_h = thd_h;
+ return i2c_smbus_write_word_data(ps_data->client,STK_THDH1_PS_REG,thd_h);
+}
+
+/*
+static int32_t stk3x1x_set_ps_foffset(struct stk3x1x_data *ps_data, uint16_t offset)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&offset;
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ return i2c_smbus_write_word_data(ps_data->client,STK_DATA1_OFFSET_REG,offset);
+}
+
+static int32_t stk3x1x_set_ps_aoffset(struct stk3x1x_data *ps_data, uint16_t offset)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&offset;
+ int ret;
+ uint8_t w_state_reg;
+ uint8_t re_en;
+
+ ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ re_en = (ret & STK_STATE_EN_AK_MASK) ? 1: 0;
+ if(re_en)
+ {
+ w_state_reg = (uint8_t)(ret & (~STK_STATE_EN_AK_MASK));
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ msleep(1);
+ }
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ ret = i2c_smbus_write_word_data(ps_data->client,0x0E,offset);
+ if(!re_en)
+ return ret;
+
+ w_state_reg |= STK_STATE_EN_AK_MASK;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+*/
+
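+/* The 16-bit data registers likewise return the high byte first, so the
+ * little-endian SMBus word read is swapped back to host order. */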
+static inline uint32_t stk3x1x_get_ps_reading(struct stk3x1x_data *ps_data)
+{
+ int32_t word_data, tmp_word_data;
+
+ tmp_word_data = i2c_smbus_read_word_data(ps_data->client,STK_DATA1_PS_REG);
+ if(tmp_word_data < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data);
+ return tmp_word_data;
+ }
+ word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ;
+ return word_data;
+}
+
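+/* Latched interrupt flags in STK_FLAG_REG are cleared by writing 0: keep every
+ * flag bit set to 1 and clear only the bits requested in 'clr'. */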
+static int32_t stk3x1x_set_flag(struct stk3x1x_data *ps_data, uint8_t org_flag_reg, uint8_t clr)
+{
+ uint8_t w_flag;
+ w_flag = org_flag_reg | (STK_FLG_ALSINT_MASK | STK_FLG_PSINT_MASK | STK_FLG_OUI_MASK | STK_FLG_IR_RDY_MASK);
+ w_flag &= (~clr);
+ //printk(KERN_INFO "%s: org_flag_reg=0x%x, w_flag = 0x%x\n", __func__, org_flag_reg, w_flag);
+ return i2c_smbus_write_byte_data(ps_data->client,STK_FLAG_REG, w_flag);
+}
+
+static int32_t stk3x1x_get_flag(struct stk3x1x_data *ps_data)
+{
+ return i2c_smbus_read_byte_data(ps_data->client,STK_FLAG_REG);
+}
+
+static int32_t stk3x1x_enable_ps(struct stk3x1x_data *ps_data, uint8_t enable)
+{
+	int32_t ret;
+	uint8_t w_state_reg;
+	uint8_t curr_ps_enable;
+#ifndef STK_POLL_PS
+	int32_t near_far_state;
+	uint32_t reading;
+#endif
+ curr_ps_enable = ps_data->ps_enabled?1:0;
+ if(curr_ps_enable == enable)
+ return 0;
+
+ ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
+ if (ret < 0)
+ {
+		printk(KERN_ERR "%s: read i2c error, ret=%d\n", __func__, ret);
+ return ret;
+ }
+ w_state_reg = ret;
+ w_state_reg &= ~(STK_STATE_EN_PS_MASK | STK_STATE_EN_WAIT_MASK | 0x60);
+ if(enable)
+ {
+ w_state_reg |= STK_STATE_EN_PS_MASK;
+ if(!(ps_data->als_enabled))
+ w_state_reg |= STK_STATE_EN_WAIT_MASK;
+ }
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ if(enable)
+ {
+#ifdef STK_POLL_PS
+ hrtimer_start(&ps_data->ps_timer, ps_data->ps_poll_delay, HRTIMER_MODE_REL);
+ ps_data->ps_distance_last = -1;
+#endif
+ ps_data->ps_enabled = true;
+#ifndef STK_POLL_PS
+#ifndef STK_POLL_ALS
+ if(!(ps_data->als_enabled))
+#endif /* #ifndef STK_POLL_ALS */
+ enable_irq(ps_data->irq);
+ msleep(1);
+ ret = stk3x1x_get_flag(ps_data);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: read i2c error, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ near_far_state = ret & STK_FLG_NF_MASK;
+ ps_data->ps_distance_last = near_far_state;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state);
+ input_sync(ps_data->ps_input_dev);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+ reading = stk3x1x_get_ps_reading(ps_data);
+ printk(KERN_INFO "%s: ps input event=%d, ps code = %d\n",__func__, near_far_state, reading);
+#endif /* #ifndef STK_POLL_PS */
+ }
+ else
+ {
+#ifdef STK_POLL_PS
+ hrtimer_cancel(&ps_data->ps_timer);
+#else
+#ifndef STK_POLL_ALS
+ if(!(ps_data->als_enabled))
+#endif
+ disable_irq(ps_data->irq);
+#endif
+ ps_data->ps_enabled = false;
+ }
+ return ret;
+}
+
+static int32_t stk3x1x_enable_als(struct stk3x1x_data *ps_data, uint8_t enable)
+{
+ int32_t ret;
+ uint8_t w_state_reg;
+ uint8_t curr_als_enable = (ps_data->als_enabled)?1:0;
+
+ if(curr_als_enable == enable)
+ return 0;
+
+#ifndef STK_POLL_ALS
+ if (enable)
+ {
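+		/* Start with out-of-range thresholds so the first conversion
+		 * raises an ALS interrupt and reports an initial value. */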
+ stk3x1x_set_als_thd_h(ps_data, 0x0000);
+ stk3x1x_set_als_thd_l(ps_data, 0xFFFF);
+ }
+#endif
+ ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
+ if (ret < 0)
+ {
+		printk(KERN_ERR "%s: read i2c error\n", __func__);
+ return ret;
+ }
+ w_state_reg = (uint8_t)(ret & (~(STK_STATE_EN_ALS_MASK | STK_STATE_EN_WAIT_MASK)));
+ if(enable)
+ w_state_reg |= STK_STATE_EN_ALS_MASK;
+ else if (ps_data->ps_enabled)
+ w_state_reg |= STK_STATE_EN_WAIT_MASK;
+
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ if (enable)
+ {
+ ps_data->als_enabled = true;
+#ifdef STK_POLL_ALS
+ hrtimer_start(&ps_data->als_timer, ps_data->als_poll_delay, HRTIMER_MODE_REL);
+#else
+#ifndef STK_POLL_PS
+ if(!(ps_data->ps_enabled))
+#endif
+ enable_irq(ps_data->irq);
+#endif
+ }
+ else
+ {
+ ps_data->als_enabled = false;
+#ifdef STK_POLL_ALS
+ hrtimer_cancel(&ps_data->als_timer);
+#else
+#ifndef STK_POLL_PS
+ if(!(ps_data->ps_enabled))
+#endif
+ disable_irq(ps_data->irq);
+#endif
+ }
+ return ret;
+}
+
+static inline int32_t stk3x1x_get_als_reading(struct stk3x1x_data *ps_data)
+{
+ int32_t word_data, tmp_word_data;
+ tmp_word_data = i2c_smbus_read_word_data(ps_data->client, STK_DATA1_ALS_REG);
+ if(tmp_word_data < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data);
+ return tmp_word_data;
+ }
+ word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ;
+ return word_data;
+}
+
+static int32_t stk3x1x_get_ir_reading(struct stk3x1x_data *ps_data)
+{
+ int32_t word_data, tmp_word_data;
+ int32_t ret;
+ uint8_t w_reg, retry = 0;
+
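+	/* Temporarily disable PS, trigger one IR measurement via the IRS bit,
+	 * poll until the IR data is ready, then restore the PS state. */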
+ if(ps_data->ps_enabled)
+ {
+ stk3x1x_enable_ps(ps_data, 0);
+ ps_data->ps_enabled = true;
+ }
+ ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
+ if (ret < 0)
+ {
+		printk(KERN_ERR "%s: read i2c error\n", __func__);
+ return ret;
+ }
+ w_reg = (uint8_t)(ret & (~STK_STATE_EN_IRS_MASK));
+ w_reg |= STK_STATE_EN_IRS_MASK;
+
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ msleep(100);
+
+ do
+ {
+ msleep(50);
+ ret = stk3x1x_get_flag(ps_data);
+ if (ret < 0)
+ {
+			printk(KERN_ERR "%s: read i2c error\n", __func__);
+ return ret;
+ }
+ retry++;
+ }while(retry < 5 && ((ret&STK_FLG_IR_RDY_MASK) == 0));
+
+ if(retry == 5)
+ {
+ printk(KERN_ERR "%s: ir data is not ready for 300ms\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = stk3x1x_get_flag(ps_data);
+ if (ret < 0)
+ {
+		printk(KERN_ERR "%s: read i2c error\n", __func__);
+ return ret;
+ }
+
+ ret = stk3x1x_set_flag(ps_data, ret, STK_FLG_IR_RDY_MASK);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ tmp_word_data = i2c_smbus_read_word_data(ps_data->client, STK_DATA1_IR_REG);
+ if(tmp_word_data < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data);
+ return tmp_word_data;
+ }
+ word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ;
+
+ if(ps_data->ps_enabled)
+ stk3x1x_enable_ps(ps_data, 1);
+ return word_data;
+}
+
+
+static ssize_t stk_als_code_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t reading;
+
+ reading = stk3x1x_get_als_reading(ps_data);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reading);
+}
+
+
+static ssize_t stk_als_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t enable, ret;
+
+ mutex_lock(&ps_data->io_lock);
+ enable = (ps_data->als_enabled)?1:0;
+ mutex_unlock(&ps_data->io_lock);
+ ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG);
+ ret = (ret & STK_STATE_EN_ALS_MASK)?1:0;
+
+ if(enable != ret)
+ printk(KERN_ERR "%s: driver and sensor mismatch! driver_enable=0x%x, sensor_enable=%x\n", __func__, enable, ret);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t stk_als_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint8_t en;
+ if (sysfs_streq(buf, "1"))
+ en = 1;
+ else if (sysfs_streq(buf, "0"))
+ en = 0;
+ else
+ {
+ printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "%s: Enable ALS : %d\n", __func__, en);
+ mutex_lock(&ps_data->io_lock);
+ stk3x1x_enable_als(ps_data, en);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_als_lux_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t als_reading;
+ uint32_t als_lux;
+ als_reading = stk3x1x_get_als_reading(ps_data);
+ mutex_lock(&ps_data->io_lock);
+ als_lux = stk_alscode2lux(ps_data, als_reading);
+ mutex_unlock(&ps_data->io_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d lux\n", als_lux);
+}
+
+static ssize_t stk_als_lux_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 16, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ ps_data->als_lux_last = value;
+ input_report_abs(ps_data->als_input_dev, ABS_MISC, value);
+ input_sync(ps_data->als_input_dev);
+ mutex_unlock(&ps_data->io_lock);
+ printk(KERN_INFO "%s: als input event %ld lux\n",__func__, value);
+
+ return size;
+}
+
+
+static ssize_t stk_als_transmittance_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t transmittance;
+ mutex_lock(&ps_data->io_lock);
+ transmittance = ps_data->als_transmittance;
+ mutex_unlock(&ps_data->io_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", transmittance);
+}
+
+
+static ssize_t stk_als_transmittance_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ ps_data->als_transmittance = value;
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_als_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%lld\n", ktime_to_ns(ps_data->als_poll_delay));
+}
+
+
+static ssize_t stk_als_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ uint64_t value = 0;
+ int ret;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ ret = strict_strtoull(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoull failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: set als poll delay=%lld\n", __func__, value);
+#endif
+ if(value < MIN_ALS_POLL_DELAY_NS)
+ {
+ printk(KERN_ERR "%s: delay is too small\n", __func__);
+ value = MIN_ALS_POLL_DELAY_NS;
+ }
+ mutex_lock(&ps_data->io_lock);
+ if(value != ktime_to_ns(ps_data->als_poll_delay))
+ ps_data->als_poll_delay = ns_to_ktime(value);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_als_ir_code_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t reading;
+ reading = stk3x1x_get_ir_reading(ps_data);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reading);
+}
+
+static ssize_t stk_ps_code_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint32_t reading;
+ reading = stk3x1x_get_ps_reading(ps_data);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reading);
+}
+
+static ssize_t stk_ps_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t enable, ret;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+
+ mutex_lock(&ps_data->io_lock);
+ enable = (ps_data->ps_enabled)?1:0;
+ mutex_unlock(&ps_data->io_lock);
+ ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG);
+ ret = (ret & STK_STATE_EN_PS_MASK)?1:0;
+
+ if(enable != ret)
+ printk(KERN_ERR "%s: driver and sensor mismatch! driver_enable=0x%x, sensor_enable=%x\n", __func__, enable, ret);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t stk_ps_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint8_t en;
+ if (sysfs_streq(buf, "1"))
+ en = 1;
+ else if (sysfs_streq(buf, "0"))
+ en = 0;
+ else
+ {
+ printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "%s: Enable PS : %d\n", __func__, en);
+ mutex_lock(&ps_data->io_lock);
+ stk3x1x_enable_ps(ps_data, en);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_ps_enable_aso_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t ret;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+
+ ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG);
+ ret = (ret & STK_STATE_EN_ASO_MASK)?1:0;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t stk_ps_enable_aso_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint8_t en;
+ int32_t ret;
+ uint8_t w_state_reg;
+
+ if (sysfs_streq(buf, "1"))
+ en = 1;
+ else if (sysfs_streq(buf, "0"))
+ en = 0;
+ else
+ {
+ printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "%s: Enable PS ASO : %d\n", __func__, en);
+
+ ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
+ if (ret < 0)
+ {
+		printk(KERN_ERR "%s: read i2c error\n", __func__);
+ return ret;
+ }
+ w_state_reg = (uint8_t)(ret & (~STK_STATE_EN_ASO_MASK));
+ if(en)
+ w_state_reg |= STK_STATE_EN_ASO_MASK;
+
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ return size;
+}
+
+
+static ssize_t stk_ps_offset_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t word_data, tmp_word_data;
+
+ tmp_word_data = i2c_smbus_read_word_data(ps_data->client, STK_DATA1_OFFSET_REG);
+ if(tmp_word_data < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data);
+ return tmp_word_data;
+ }
+ word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", word_data);
+}
+
+static ssize_t stk_ps_offset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ uint16_t offset;
+
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ if(value > 65535)
+ {
+ printk(KERN_ERR "%s: invalid value, offset=%ld\n", __func__, value);
+ return -EINVAL;
+ }
+
+ offset = (uint16_t) ((value&0x00FF) << 8) | ((value&0xFF00) >>8);
+ ret = i2c_smbus_write_word_data(ps_data->client,STK_DATA1_OFFSET_REG,offset);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ return size;
+}
+
+
+static ssize_t stk_ps_distance_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t dist=1, ret;
+
+ mutex_lock(&ps_data->io_lock);
+ ret = stk3x1x_get_flag(ps_data);
+	if(ret < 0)
+	{
+		mutex_unlock(&ps_data->io_lock);
+		printk(KERN_ERR "%s: stk3x1x_get_flag failed, ret=0x%x\n", __func__, ret);
+		return ret;
+	}
+ dist = (ret & STK_FLG_NF_MASK)?1:0;
+
+ ps_data->ps_distance_last = dist;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, dist);
+ input_sync(ps_data->ps_input_dev);
+ mutex_unlock(&ps_data->io_lock);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+ printk(KERN_INFO "%s: ps input event %d cm\n",__func__, dist);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", dist);
+}
+
+
+static ssize_t stk_ps_distance_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ ps_data->ps_distance_last = value;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, value);
+ input_sync(ps_data->ps_input_dev);
+ mutex_unlock(&ps_data->io_lock);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+ printk(KERN_INFO "%s: ps input event %ld cm\n",__func__, value);
+ return size;
+}
+
+
+static ssize_t stk_ps_code_thd_l_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t ps_thd_l1_reg, ps_thd_l2_reg;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ mutex_lock(&ps_data->io_lock);
+ ps_thd_l1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL1_PS_REG);
+	if(ps_thd_l1_reg < 0)
+	{
+		mutex_unlock(&ps_data->io_lock);
+		printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_l1_reg);
+		return -EINVAL;
+	}
+	ps_thd_l2_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL2_PS_REG);
+	if(ps_thd_l2_reg < 0)
+	{
+		mutex_unlock(&ps_data->io_lock);
+		printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_l2_reg);
+		return -EINVAL;
+	}
+ mutex_unlock(&ps_data->io_lock);
+ ps_thd_l1_reg = ps_thd_l1_reg<<8 | ps_thd_l2_reg;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ps_thd_l1_reg);
+}
+
+
+static ssize_t stk_ps_code_thd_l_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ stk3x1x_set_ps_thd_l(ps_data, value);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_ps_code_thd_h_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t ps_thd_h1_reg, ps_thd_h2_reg;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ mutex_lock(&ps_data->io_lock);
+ ps_thd_h1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH1_PS_REG);
+	if(ps_thd_h1_reg < 0)
+	{
+		mutex_unlock(&ps_data->io_lock);
+		printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_h1_reg);
+		return -EINVAL;
+	}
+	ps_thd_h2_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH2_PS_REG);
+	if(ps_thd_h2_reg < 0)
+	{
+		mutex_unlock(&ps_data->io_lock);
+		printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_h2_reg);
+		return -EINVAL;
+	}
+ mutex_unlock(&ps_data->io_lock);
+ ps_thd_h1_reg = ps_thd_h1_reg<<8 | ps_thd_h2_reg;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ps_thd_h1_reg);
+}
+
+
+static ssize_t stk_ps_code_thd_h_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ stk3x1x_set_ps_thd_h(ps_data, value);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+#if 0
+static ssize_t stk_als_lux_thd_l_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t als_thd_l0_reg,als_thd_l1_reg;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint32_t als_lux;
+
+ mutex_lock(&ps_data->io_lock);
+ als_thd_l0_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL1_ALS_REG);
+ als_thd_l1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL2_ALS_REG);
+	if(als_thd_l0_reg < 0)
+	{
+		mutex_unlock(&ps_data->io_lock);
+		printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_l0_reg);
+		return -EINVAL;
+	}
+	if(als_thd_l1_reg < 0)
+	{
+		mutex_unlock(&ps_data->io_lock);
+		printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_l1_reg);
+		return -EINVAL;
+	}
+ als_thd_l0_reg|=(als_thd_l1_reg<<8);
+ als_lux = stk_alscode2lux(ps_data, als_thd_l0_reg);
+ mutex_unlock(&ps_data->io_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", als_lux);
+}
+
+
+static ssize_t stk_als_lux_thd_l_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ value = stk_lux2alscode(ps_data, value);
+ stk3x1x_set_als_thd_l(ps_data, value);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_als_lux_thd_h_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t als_thd_h0_reg,als_thd_h1_reg;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint32_t als_lux;
+
+ mutex_lock(&ps_data->io_lock);
+ als_thd_h0_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH1_ALS_REG);
+ als_thd_h1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH2_ALS_REG);
+	if(als_thd_h0_reg < 0)
+	{
+		mutex_unlock(&ps_data->io_lock);
+		printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_h0_reg);
+		return -EINVAL;
+	}
+	if(als_thd_h1_reg < 0)
+	{
+		mutex_unlock(&ps_data->io_lock);
+		printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_h1_reg);
+		return -EINVAL;
+	}
+ als_thd_h0_reg|=(als_thd_h1_reg<<8);
+ als_lux = stk_alscode2lux(ps_data, als_thd_h0_reg);
+ mutex_unlock(&ps_data->io_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", als_lux);
+}
+
+
+static ssize_t stk_als_lux_thd_h_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ value = stk_lux2alscode(ps_data, value);
+ stk3x1x_set_als_thd_h(ps_data, value);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+#endif
+
+
+static ssize_t stk_all_reg_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t ps_reg[27];
+ uint8_t cnt;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ mutex_lock(&ps_data->io_lock);
+ for(cnt=0;cnt<25;cnt++)
+ {
+ ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, (cnt));
+ if(ps_reg[cnt] < 0)
+ {
+ mutex_unlock(&ps_data->io_lock);
+ printk(KERN_ERR "stk_all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]);
+ return -EINVAL;
+ }
+ else
+ {
+ printk(KERN_INFO "reg[0x%2X]=0x%2X\n", cnt, ps_reg[cnt]);
+ }
+ }
+ ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, STK_PDT_ID_REG);
+ if(ps_reg[cnt] < 0)
+ {
+ mutex_unlock(&ps_data->io_lock);
+ printk( KERN_ERR "all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]);
+ return -EINVAL;
+ }
+ printk( KERN_INFO "reg[0x%x]=0x%2X\n", STK_PDT_ID_REG, ps_reg[cnt]);
+ cnt++;
+ ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, STK_RSRVD_REG);
+ if(ps_reg[cnt] < 0)
+ {
+ mutex_unlock(&ps_data->io_lock);
+ printk( KERN_ERR "all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]);
+ return -EINVAL;
+ }
+ printk( KERN_INFO "reg[0x%x]=0x%2X\n", STK_RSRVD_REG, ps_reg[cnt]);
+ mutex_unlock(&ps_data->io_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X\n",
+ ps_reg[0], ps_reg[1], ps_reg[2], ps_reg[3], ps_reg[4], ps_reg[5], ps_reg[6], ps_reg[7], ps_reg[8],
+ ps_reg[9], ps_reg[10], ps_reg[11], ps_reg[12], ps_reg[13], ps_reg[14], ps_reg[15], ps_reg[16], ps_reg[17],
+ ps_reg[18], ps_reg[19], ps_reg[20], ps_reg[21], ps_reg[22], ps_reg[23], ps_reg[24], ps_reg[25], ps_reg[26]);
+}
+
+static ssize_t stk_recv_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+
+
+static ssize_t stk_recv_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ unsigned long value = 0;
+ int ret;
+ int32_t recv_data;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+
+ if((ret = strict_strtoul(buf, 16, &value)) < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ recv_data = i2c_smbus_read_byte_data(ps_data->client,value);
+ printk("%s: reg 0x%x=0x%x\n", __func__, (int)value, recv_data);
+ return size;
+}
+
+
+static ssize_t stk_send_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+
+
+static ssize_t stk_send_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long addr, cmd;
+ u8 addr_u8, cmd_u8;
+ int32_t ret, i;
+ char *token[10];
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+
+ for (i = 0; i < 2; i++)
+ token[i] = strsep((char **)&buf, " ");
+	if((ret = strict_strtoul(token[0], 16, &addr)) < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+	if((ret = strict_strtoul(token[1], 16, &cmd)) < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+	printk(KERN_INFO "%s: write reg 0x%lx=0x%lx\n", __func__, addr, cmd);
+
+ addr_u8 = (u8) addr;
+ cmd_u8 = (u8) cmd;
+ //mutex_lock(&ps_data->io_lock);
+ ret = i2c_smbus_write_byte_data(ps_data->client,addr_u8,cmd_u8);
+ //mutex_unlock(&ps_data->io_lock);
+ if (0 != ret)
+ {
+ printk(KERN_ERR "%s: i2c_smbus_write_byte_data fail\n", __func__);
+ return ret;
+ }
+
+ return size;
+}
+
+
+static struct device_attribute als_enable_attribute = __ATTR(enable,0664,stk_als_enable_show,stk_als_enable_store);
+static struct device_attribute als_lux_attribute = __ATTR(lux,0664,stk_als_lux_show,stk_als_lux_store);
+static struct device_attribute als_code_attribute = __ATTR(code, 0444, stk_als_code_show, NULL);
+static struct device_attribute als_transmittance_attribute = __ATTR(transmittance,0664,stk_als_transmittance_show,stk_als_transmittance_store);
+static struct device_attribute als_poll_delay_attribute = __ATTR(delay,0664,stk_als_delay_show,stk_als_delay_store);
+static struct device_attribute als_ir_code_attribute = __ATTR(ircode,0444,stk_als_ir_code_show,NULL);
+
+
+static struct attribute *stk_als_attrs [] =
+{
+ &als_enable_attribute.attr,
+ &als_lux_attribute.attr,
+ &als_code_attribute.attr,
+ &als_transmittance_attribute.attr,
+ &als_poll_delay_attribute.attr,
+ &als_ir_code_attribute.attr,
+ NULL
+};
+
+static struct attribute_group stk_als_attribute_group = {
+ .name = "driver",
+ .attrs = stk_als_attrs,
+};
+
+
+static struct device_attribute ps_enable_attribute = __ATTR(enable,0664,stk_ps_enable_show,stk_ps_enable_store);
+static struct device_attribute ps_enable_aso_attribute = __ATTR(enableaso,0664,stk_ps_enable_aso_show,stk_ps_enable_aso_store);
+static struct device_attribute ps_distance_attribute = __ATTR(distance,0664,stk_ps_distance_show, stk_ps_distance_store);
+static struct device_attribute ps_offset_attribute = __ATTR(offset,0664,stk_ps_offset_show, stk_ps_offset_store);
+static struct device_attribute ps_code_attribute = __ATTR(code, 0444, stk_ps_code_show, NULL);
+static struct device_attribute ps_code_thd_l_attribute = __ATTR(codethdl,0664,stk_ps_code_thd_l_show,stk_ps_code_thd_l_store);
+static struct device_attribute ps_code_thd_h_attribute = __ATTR(codethdh,0664,stk_ps_code_thd_h_show,stk_ps_code_thd_h_store);
+static struct device_attribute recv_attribute = __ATTR(recv,0664,stk_recv_show,stk_recv_store);
+static struct device_attribute send_attribute = __ATTR(send,0664,stk_send_show, stk_send_store);
+static struct device_attribute all_reg_attribute = __ATTR(allreg, 0444, stk_all_reg_show, NULL);
+
+static struct attribute *stk_ps_attrs [] =
+{
+ &ps_enable_attribute.attr,
+ &ps_enable_aso_attribute.attr,
+ &ps_distance_attribute.attr,
+ &ps_offset_attribute.attr,
+ &ps_code_attribute.attr,
+ &ps_code_thd_l_attribute.attr,
+ &ps_code_thd_h_attribute.attr,
+ &recv_attribute.attr,
+ &send_attribute.attr,
+ &all_reg_attribute.attr,
+ NULL
+};
+
+static struct attribute_group stk_ps_attribute_group = {
+ .name = "driver",
+ .attrs = stk_ps_attrs,
+};
+
+#ifdef STK_POLL_ALS
+static enum hrtimer_restart stk_als_timer_func(struct hrtimer *timer)
+{
+ struct stk3x1x_data *ps_data = container_of(timer, struct stk3x1x_data, als_timer);
+ queue_work(ps_data->stk_als_wq, &ps_data->stk_als_work);
+ hrtimer_forward_now(&ps_data->als_timer, ps_data->als_poll_delay);
+ return HRTIMER_RESTART;
+}
+
+static void stk_als_work_func(struct work_struct *work)
+{
+ struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_als_work);
+ int32_t reading;
+
+ mutex_lock(&ps_data->io_lock);
+ reading = stk3x1x_get_als_reading(ps_data);
+	if(reading < 0)
+	{
+		mutex_unlock(&ps_data->io_lock);
+		return;
+	}
+ ps_data->als_lux_last = stk_alscode2lux(ps_data, reading);
+ input_report_abs(ps_data->als_input_dev, ABS_MISC, ps_data->als_lux_last);
+ input_sync(ps_data->als_input_dev);
+ mutex_unlock(&ps_data->io_lock);
+ //printk(KERN_INFO "%s: als input event %d lux\n",__func__, ps_data->als_lux_last);
+}
+#endif
+
+static enum hrtimer_restart stk_ps_timer_func(struct hrtimer *timer)
+{
+ struct stk3x1x_data *ps_data = container_of(timer, struct stk3x1x_data, ps_timer);
+ queue_work(ps_data->stk_ps_wq, &ps_data->stk_ps_work);
+#ifdef STK_POLL_PS
+ hrtimer_forward_now(&ps_data->ps_timer, ps_data->ps_poll_delay);
+ return HRTIMER_RESTART;
+#else
+ hrtimer_cancel(&ps_data->ps_timer);
+ return HRTIMER_NORESTART;
+#endif
+}
+
+static void stk_ps_work_func(struct work_struct *work)
+{
+ struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_ps_work);
+ uint32_t reading;
+ int32_t near_far_state;
+ uint8_t org_flag_reg;
+ int32_t ret;
+ uint8_t disable_flag = 0;
+ mutex_lock(&ps_data->io_lock);
+
+	ret = stk3x1x_get_flag(ps_data);
+	if(ret < 0)
+	{
+		printk(KERN_ERR "%s: get_status_reg fail, ret=%d", __func__, ret);
+		goto err_i2c_rw;
+	}
+	org_flag_reg = (uint8_t)ret;
+ near_far_state = (org_flag_reg & STK_FLG_NF_MASK)?1:0;
+ reading = stk3x1x_get_ps_reading(ps_data);
+ if(ps_data->ps_distance_last != near_far_state)
+ {
+ ps_data->ps_distance_last = near_far_state;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state);
+ input_sync(ps_data->ps_input_dev);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: ps input event %d cm, ps code = %d\n",__func__, near_far_state, reading);
+#endif
+ }
+ ret = stk3x1x_set_flag(ps_data, org_flag_reg, disable_flag);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:stk3x1x_set_flag fail, ret=%d\n", __func__, ret);
+ goto err_i2c_rw;
+ }
+
+ mutex_unlock(&ps_data->io_lock);
+ return;
+
+err_i2c_rw:
+ mutex_unlock(&ps_data->io_lock);
+ msleep(30);
+ return;
+}
+
+
+#if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS))
+static void stk_work_func(struct work_struct *work)
+{
+	int32_t reading;
+#if ((STK_INT_PS_MODE != 0x03) && (STK_INT_PS_MODE != 0x02))
+ int32_t ret;
+ uint8_t disable_flag = 0;
+ uint8_t org_flag_reg;
+#endif /* #if ((STK_INT_PS_MODE != 0x03) && (STK_INT_PS_MODE != 0x02)) */
+
+#ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+ uint32_t nLuxIndex;
+#endif
+ struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_work);
+ int32_t near_far_state;
+
+ mutex_lock(&ps_data->io_lock);
+
+#if (STK_INT_PS_MODE == 0x03)
+ near_far_state = gpio_get_value(ps_data->int_pin);
+#elif (STK_INT_PS_MODE == 0x02)
+ near_far_state = !(gpio_get_value(ps_data->int_pin));
+#endif
+
+#if ((STK_INT_PS_MODE == 0x03) || (STK_INT_PS_MODE == 0x02))
+ ps_data->ps_distance_last = near_far_state;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state);
+ input_sync(ps_data->ps_input_dev);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+ reading = stk3x1x_get_ps_reading(ps_data);
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: ps input event %d cm, ps code = %d\n",__func__, near_far_state, reading);
+#endif
+#else
+ /* mode 0x01 or 0x04 */
+	ret = stk3x1x_get_flag(ps_data);
+	if(ret < 0)
+	{
+		printk(KERN_ERR "%s: get_status_reg fail, ret=%d", __func__, ret);
+		goto err_i2c_rw;
+	}
+	org_flag_reg = (uint8_t)ret;
+
+ if (org_flag_reg & STK_FLG_ALSINT_MASK)
+ {
+ disable_flag |= STK_FLG_ALSINT_MASK;
+ reading = stk3x1x_get_als_reading(ps_data);
+ if(reading < 0)
+ {
+ printk(KERN_ERR "%s: stk3x1x_get_als_reading fail, ret=%d", __func__, reading);
+ goto err_i2c_rw;
+ }
+#ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+ nLuxIndex = stk_get_lux_interval_index(reading);
+ stk3x1x_set_als_thd_h(ps_data, code_threshold_table[nLuxIndex]);
+ stk3x1x_set_als_thd_l(ps_data, code_threshold_table[nLuxIndex-1]);
+#else
+ stk_als_set_new_thd(ps_data, reading);
+#endif //CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+ ps_data->als_lux_last = stk_alscode2lux(ps_data, reading);
+ input_report_abs(ps_data->als_input_dev, ABS_MISC, ps_data->als_lux_last);
+ input_sync(ps_data->als_input_dev);
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: als input event %d lux\n",__func__, ps_data->als_lux_last);
+#endif
+ }
+ if (org_flag_reg & STK_FLG_PSINT_MASK)
+ {
+ disable_flag |= STK_FLG_PSINT_MASK;
+ near_far_state = (org_flag_reg & STK_FLG_NF_MASK)?1:0;
+
+ ps_data->ps_distance_last = near_far_state;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state);
+ input_sync(ps_data->ps_input_dev);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+ reading = stk3x1x_get_ps_reading(ps_data);
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: ps input event=%d, ps code = %d\n",__func__, near_far_state, reading);
+#endif
+ }
+
+ ret = stk3x1x_set_flag(ps_data, org_flag_reg, disable_flag);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:reset_int_flag fail, ret=%d\n", __func__, ret);
+ goto err_i2c_rw;
+ }
+#endif
+
+ msleep(1);
+ enable_irq(ps_data->irq);
+ mutex_unlock(&ps_data->io_lock);
+ return;
+
+err_i2c_rw:
+ mutex_unlock(&ps_data->io_lock);
+ msleep(30);
+ enable_irq(ps_data->irq);
+ return;
+}
+#endif
+
+#if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS))
+static irqreturn_t stk_oss_irq_handler(int irq, void *data)
+{
+ struct stk3x1x_data *pData = data;
+ disable_irq_nosync(irq);
+ queue_work(pData->stk_wq,&pData->stk_work);
+ return IRQ_HANDLED;
+}
+#endif /* #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) */
+static int32_t stk3x1x_init_all_setting(struct i2c_client *client, struct stk3x1x_platform_data *plat_data)
+{
+ int32_t ret;
+ struct stk3x1x_data *ps_data = i2c_get_clientdata(client);
+
+ mutex_lock(&ps_data->io_lock);
+ ps_data->als_enabled = false;
+ ps_data->ps_enabled = false;
+ mutex_unlock(&ps_data->io_lock);
+
+ ret = stk3x1x_software_reset(ps_data);
+ if(ret < 0)
+ return ret;
+
+	ret = stk3x1x_check_pid(ps_data);
+ if(ret < 0)
+ return ret;
+
+ ret = stk3x1x_init_all_reg(ps_data, plat_data);
+ if(ret < 0)
+ return ret;
+#ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+ stk_init_code_threshold_table(ps_data);
+#endif
+ return 0;
+}
+
+#if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS))
+static int stk3x1x_setup_irq(struct i2c_client *client)
+{
+ int irq, err = -EIO;
+ struct stk3x1x_data *ps_data = i2c_get_clientdata(client);
+
+ irq = gpio_to_irq(ps_data->int_pin);
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: int pin #=%d, irq=%d\n",__func__, ps_data->int_pin, irq);
+#endif
+ if (irq <= 0)
+ {
+ printk(KERN_ERR "irq number is not specified, irq # = %d, int pin=%d\n",irq, ps_data->int_pin);
+ return irq;
+ }
+ ps_data->irq = irq;
+ err = gpio_request(ps_data->int_pin,"stk-int");
+ if(err < 0)
+ {
+ printk(KERN_ERR "%s: gpio_request, err=%d", __func__, err);
+ return err;
+ }
+ err = gpio_direction_input(ps_data->int_pin);
+	if(err < 0)
+	{
+		printk(KERN_ERR "%s: gpio_direction_input, err=%d", __func__, err);
+		gpio_free(ps_data->int_pin);
+		return err;
+	}
+#if ((STK_INT_PS_MODE == 0x03) || (STK_INT_PS_MODE == 0x02))
+ err = request_any_context_irq(irq, stk_oss_irq_handler, IRQF_TRIGGER_FALLING|IRQF_TRIGGER_RISING, DEVICE_NAME, ps_data);
+#else
+ err = request_any_context_irq(irq, stk_oss_irq_handler, IRQF_TRIGGER_LOW, DEVICE_NAME, ps_data);
+#endif
+ if (err < 0)
+ {
+ printk(KERN_WARNING "%s: request_any_context_irq(%d) failed for (%d)\n", __func__, irq, err);
+ goto err_request_any_context_irq;
+ }
+ disable_irq(irq);
+
+ return 0;
+err_request_any_context_irq:
+ gpio_free(ps_data->int_pin);
+ return err;
+}
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void stk3x1x_early_suspend(struct early_suspend *h)
+{
+ struct stk3x1x_data *ps_data = container_of(h, struct stk3x1x_data, stk_early_suspend);
+#ifndef STK_POLL_PS
+ int err;
+#endif
+
+ printk(KERN_INFO "%s", __func__);
+ mutex_lock(&ps_data->io_lock);
+ if(ps_data->als_enabled)
+ {
+ stk3x1x_enable_als(ps_data, 0);
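+		/* Keep als_enabled set so stk3x1x_late_resume() re-enables ALS. */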
+ ps_data->als_enabled = true;
+ }
+ if(ps_data->ps_enabled)
+ {
+#ifdef STK_POLL_PS
+ wake_lock(&ps_data->ps_nosuspend_wl);
+#else
+ err = enable_irq_wake(ps_data->irq);
+ if (err)
+ printk(KERN_WARNING "%s: set_irq_wake(%d) failed, err=(%d)\n", __func__, ps_data->irq, err);
+#endif
+ }
+ mutex_unlock(&ps_data->io_lock);
+ return;
+}
+
+static void stk3x1x_late_resume(struct early_suspend *h)
+{
+ struct stk3x1x_data *ps_data = container_of(h, struct stk3x1x_data, stk_early_suspend);
+#ifndef STK_POLL_PS
+ int err;
+#endif
+
+ printk(KERN_INFO "%s", __func__);
+ mutex_lock(&ps_data->io_lock);
+ if(ps_data->als_enabled)
+ stk3x1x_enable_als(ps_data, 1);
+
+ if(ps_data->ps_enabled)
+ {
+#ifdef STK_POLL_PS
+		wake_unlock(&ps_data->ps_nosuspend_wl);
+#else
+ err = disable_irq_wake(ps_data->irq);
+ if (err)
+ printk(KERN_WARNING "%s: disable_irq_wake(%d) failed, err=(%d)\n", __func__, ps_data->irq, err);
+#endif
+ }
+ mutex_unlock(&ps_data->io_lock);
+ return;
+}
+#endif //#ifdef CONFIG_HAS_EARLYSUSPEND
+
+
+static int stk3x1x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err = -ENODEV;
+ struct stk3x1x_data *ps_data;
+ struct stk3x1x_platform_data *plat_data;
+ printk(KERN_INFO "%s: driver version = %s\n", __func__, DRIVER_VERSION);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ {
+ printk(KERN_ERR "%s: No Support for I2C_FUNC_SMBUS_BYTE_DATA\n", __func__);
+ return -ENODEV;
+ }
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ {
+ printk(KERN_ERR "%s: No Support for I2C_FUNC_SMBUS_WORD_DATA\n", __func__);
+ return -ENODEV;
+ }
+
+ ps_data = kzalloc(sizeof(struct stk3x1x_data),GFP_KERNEL);
+ if(!ps_data)
+ {
+ printk(KERN_ERR "%s: failed to allocate stk3x1x_data\n", __func__);
+ return -ENOMEM;
+ }
+ ps_data->client = client;
+ i2c_set_clientdata(client,ps_data);
+ mutex_init(&ps_data->io_lock);
+ wake_lock_init(&ps_data->ps_wakelock,WAKE_LOCK_SUSPEND, "stk_input_wakelock");
+
+#ifdef STK_POLL_PS
+ wake_lock_init(&ps_data->ps_nosuspend_wl,WAKE_LOCK_SUSPEND, "stk_nosuspend_wakelock");
+#endif
+ if(client->dev.platform_data != NULL)
+ {
+ plat_data = client->dev.platform_data;
+ ps_data->als_transmittance = plat_data->transmittance;
+ ps_data->int_pin = plat_data->int_pin;
+ if(ps_data->als_transmittance == 0)
+ {
+ printk(KERN_ERR "%s: Please set als_transmittance in platform data\n", __func__);
+ goto err_als_input_allocate;
+ }
+ }
+ else
+ {
+ printk(KERN_ERR "%s: no stk3x1x platform data!\n", __func__);
+ goto err_als_input_allocate;
+ }
+
+ ps_data->als_input_dev = input_allocate_device();
+ if (ps_data->als_input_dev==NULL)
+ {
+ printk(KERN_ERR "%s: could not allocate als device\n", __func__);
+ err = -ENOMEM;
+ goto err_als_input_allocate;
+ }
+ ps_data->ps_input_dev = input_allocate_device();
+ if (ps_data->ps_input_dev==NULL)
+ {
+ printk(KERN_ERR "%s: could not allocate ps device\n", __func__);
+ err = -ENOMEM;
+ goto err_ps_input_allocate;
+ }
+ ps_data->als_input_dev->name = ALS_NAME;
+ ps_data->ps_input_dev->name = PS_NAME;
+ set_bit(EV_ABS, ps_data->als_input_dev->evbit);
+ set_bit(EV_ABS, ps_data->ps_input_dev->evbit);
+ input_set_abs_params(ps_data->als_input_dev, ABS_MISC, 0, stk_alscode2lux(ps_data, (1<<16)-1), 0, 0);
+ input_set_abs_params(ps_data->ps_input_dev, ABS_DISTANCE, 0,1, 0, 0);
+ err = input_register_device(ps_data->als_input_dev);
+ if (err<0)
+ {
+ printk(KERN_ERR "%s: can not register als input device\n", __func__);
+ goto err_als_input_register;
+ }
+ err = input_register_device(ps_data->ps_input_dev);
+ if (err<0)
+ {
+ printk(KERN_ERR "%s: can not register ps input device\n", __func__);
+ goto err_ps_input_register;
+ }
+
+ err = sysfs_create_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group);
+ if (err < 0)
+ {
+ printk(KERN_ERR "%s:could not create sysfs group for als\n", __func__);
+ goto err_als_sysfs_create_group;
+ }
+ err = sysfs_create_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group);
+ if (err < 0)
+ {
+ printk(KERN_ERR "%s:could not create sysfs group for ps\n", __func__);
+ goto err_ps_sysfs_create_group;
+ }
+ input_set_drvdata(ps_data->als_input_dev, ps_data);
+ input_set_drvdata(ps_data->ps_input_dev, ps_data);
+
+#ifdef STK_POLL_ALS
+ ps_data->stk_als_wq = create_singlethread_workqueue("stk_als_wq");
+ INIT_WORK(&ps_data->stk_als_work, stk_als_work_func);
+ hrtimer_init(&ps_data->als_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ps_data->als_poll_delay = ns_to_ktime(110 * NSEC_PER_MSEC);
+ ps_data->als_timer.function = stk_als_timer_func;
+#endif
+
+ ps_data->stk_ps_wq = create_singlethread_workqueue("stk_ps_wq");
+ INIT_WORK(&ps_data->stk_ps_work, stk_ps_work_func);
+ hrtimer_init(&ps_data->ps_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ps_data->ps_poll_delay = ns_to_ktime(110 * NSEC_PER_MSEC);
+ ps_data->ps_timer.function = stk_ps_timer_func;
+#if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS))
+ ps_data->stk_wq = create_singlethread_workqueue("stk_wq");
+ INIT_WORK(&ps_data->stk_work, stk_work_func);
+ err = stk3x1x_setup_irq(client);
+ if(err < 0)
+ goto err_stk3x1x_setup_irq;
+#endif
+
+ err = stk3x1x_init_all_setting(client, plat_data);
+ if(err < 0)
+ goto err_init_all_setting;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ps_data->stk_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ps_data->stk_early_suspend.suspend = stk3x1x_early_suspend;
+ ps_data->stk_early_suspend.resume = stk3x1x_late_resume;
+ register_early_suspend(&ps_data->stk_early_suspend);
+#endif
+	printk(KERN_INFO "%s: probe successful\n", __func__);
+ return 0;
+
+err_init_all_setting:
+#ifndef STK_POLL_PS
+ free_irq(ps_data->irq, ps_data);
+ gpio_free(plat_data->int_pin);
+#endif
+#if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS))
+err_stk3x1x_setup_irq:
+#endif
+#ifdef STK_POLL_ALS
+ hrtimer_try_to_cancel(&ps_data->als_timer);
+ destroy_workqueue(ps_data->stk_als_wq);
+#endif
+ destroy_workqueue(ps_data->stk_ps_wq);
+#if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS))
+ destroy_workqueue(ps_data->stk_wq);
+#endif
+ sysfs_remove_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group);
+err_ps_sysfs_create_group:
+ sysfs_remove_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group);
+err_als_sysfs_create_group:
+ input_unregister_device(ps_data->ps_input_dev);
+err_ps_input_register:
+ input_unregister_device(ps_data->als_input_dev);
+err_als_input_register:
+ input_free_device(ps_data->ps_input_dev);
+err_ps_input_allocate:
+ input_free_device(ps_data->als_input_dev);
+err_als_input_allocate:
+#ifdef STK_POLL_PS
+ wake_lock_destroy(&ps_data->ps_nosuspend_wl);
+#endif
+ wake_lock_destroy(&ps_data->ps_wakelock);
+ mutex_destroy(&ps_data->io_lock);
+ kfree(ps_data);
+ return err;
+}
+
+
+static int stk3x1x_remove(struct i2c_client *client)
+{
+ struct stk3x1x_data *ps_data = i2c_get_clientdata(client);
+#ifndef STK_POLL_PS
+ free_irq(ps_data->irq, ps_data);
+ gpio_free(ps_data->int_pin);
+#endif
+#ifdef STK_POLL_ALS
+ hrtimer_try_to_cancel(&ps_data->als_timer);
+ destroy_workqueue(ps_data->stk_als_wq);
+#endif
+ destroy_workqueue(ps_data->stk_ps_wq);
+#if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS))
+ destroy_workqueue(ps_data->stk_wq);
+#endif
+ sysfs_remove_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group);
+ sysfs_remove_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group);
+ input_unregister_device(ps_data->ps_input_dev);
+ input_unregister_device(ps_data->als_input_dev);
+ input_free_device(ps_data->ps_input_dev);
+ input_free_device(ps_data->als_input_dev);
+#ifdef STK_POLL_PS
+ wake_lock_destroy(&ps_data->ps_nosuspend_wl);
+#endif
+ wake_lock_destroy(&ps_data->ps_wakelock);
+ mutex_destroy(&ps_data->io_lock);
+ kfree(ps_data);
+
+ return 0;
+}
+
+static const struct i2c_device_id stk_ps_id[] =
+{
+ { "stk_ps", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, stk_ps_id);
+
+static struct i2c_driver stk_ps_driver =
+{
+ .driver = {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = stk3x1x_probe,
+ .remove = stk3x1x_remove,
+ .id_table = stk_ps_id,
+};
+
+
+static int __init stk3x1x_init(void)
+{
+ int ret;
+ ret = i2c_add_driver(&stk_ps_driver);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void __exit stk3x1x_exit(void)
+{
+ i2c_del_driver(&stk_ps_driver);
+}
+
+module_init(stk3x1x_init);
+module_exit(stk3x1x_exit);
+MODULE_AUTHOR("Lex Hsieh <lex_hsieh@sitronix.com.tw>");
+MODULE_DESCRIPTION("Sensortek stk3x1x Proximity Sensor driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 5415f4e..b725200 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1996,7 +1996,7 @@
if (atomic_read(&data->st_enabled) == 0)
break;
- pm_runtime_put(&data->client->adapter->dev);
+ pm_runtime_put(data->client->adapter->dev.parent);
atomic_set(&data->st_enabled, 0);
complete(&data->st_completion);
mxt_interrupt(data->client->irq, data);
@@ -2015,8 +2015,9 @@
}
INIT_COMPLETION(data->st_completion);
INIT_COMPLETION(data->st_powerdown);
- atomic_set(&data->st_pending_irqs, 0);
atomic_set(&data->st_enabled, 1);
+ synchronize_irq(data->client->irq);
+ atomic_set(&data->st_pending_irqs, 0);
break;
default:
dev_err(&data->client->dev, "unsupported value: %lu\n", value);
diff --git a/drivers/input/touchscreen/ft5x06_ts.c b/drivers/input/touchscreen/ft5x06_ts.c
index 8de6b1e..25228a6 100644
--- a/drivers/input/touchscreen/ft5x06_ts.c
+++ b/drivers/input/touchscreen/ft5x06_ts.c
@@ -490,6 +490,7 @@
{
struct ft5x06_ts_data *data = dev_get_drvdata(dev);
char txbuf[2], i;
+ int err;
if (data->loading_fw) {
dev_info(dev, "Firmware loading in process...\n");
@@ -517,20 +518,58 @@
ft5x06_i2c_write(data->client, txbuf, sizeof(txbuf));
}
+ if (data->pdata->power_on) {
+ err = data->pdata->power_on(false);
+ if (err) {
+ dev_err(dev, "power off failed");
+ goto pwr_off_fail;
+ }
+ } else {
+ err = ft5x06_power_on(data, false);
+ if (err) {
+ dev_err(dev, "power off failed");
+ goto pwr_off_fail;
+ }
+ }
+
data->suspended = true;
return 0;
+
+pwr_off_fail:
+ if (gpio_is_valid(data->pdata->reset_gpio)) {
+ gpio_set_value_cansleep(data->pdata->reset_gpio, 0);
+ msleep(FT_RESET_DLY);
+ gpio_set_value_cansleep(data->pdata->reset_gpio, 1);
+ }
+ enable_irq(data->client->irq);
+ return err;
}
static int ft5x06_ts_resume(struct device *dev)
{
struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+ int err;
if (!data->suspended) {
dev_info(dev, "Already in awake state\n");
return 0;
}
+ if (data->pdata->power_on) {
+ err = data->pdata->power_on(true);
+ if (err) {
+ dev_err(dev, "power on failed");
+ return err;
+ }
+ } else {
+ err = ft5x06_power_on(data, true);
+ if (err) {
+ dev_err(dev, "power on failed");
+ return err;
+ }
+ }
+
if (gpio_is_valid(data->pdata->reset_gpio)) {
gpio_set_value_cansleep(data->pdata->reset_gpio, 0);
msleep(FT_RESET_DLY);
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.c b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
index ba0be2b..908d0d7 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.c
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
@@ -1056,6 +1056,8 @@
rmi4_pdata->i2c_pull_up = of_property_read_bool(np,
"synaptics,i2c-pull-up");
+ rmi4_pdata->power_down_enable = of_property_read_bool(np,
+ "synaptics,power-down");
rmi4_pdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
rmi4_pdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
@@ -2003,7 +2005,7 @@
error_reg_en_vcc_i2c:
if (rmi4_data->board->i2c_pull_up)
- reg_set_optimum_mode_check(rmi4_data->vdd, 0);
+ reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
error_reg_opt_i2c:
regulator_disable(rmi4_data->vdd);
error_reg_en_vdd:
@@ -2592,29 +2594,53 @@
bool on)
{
int retval;
+ int load_ua;
if (on == false)
goto regulator_hpm;
- retval = reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_LPM_LOAD_UA);
+ load_ua = rmi4_data->board->power_down_enable ? 0 : RMI4_LPM_LOAD_UA;
+ retval = reg_set_optimum_mode_check(rmi4_data->vdd, load_ua);
if (retval < 0) {
dev_err(&rmi4_data->i2c_client->dev,
- "Regulator vcc_ana set_opt failed rc=%d\n",
+ "Regulator vdd_ana set_opt failed rc=%d\n",
retval);
goto fail_regulator_lpm;
}
- if (rmi4_data->board->i2c_pull_up) {
- retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
- RMI4_I2C_LPM_LOAD_UA);
- if (retval < 0) {
+ if (rmi4_data->board->power_down_enable) {
+ retval = regulator_disable(rmi4_data->vdd);
+ if (retval) {
dev_err(&rmi4_data->i2c_client->dev,
- "Regulator vcc_i2c set_opt failed rc=%d\n",
+ "Regulator vdd disable failed rc=%d\n",
retval);
goto fail_regulator_lpm;
}
}
+ if (rmi4_data->board->i2c_pull_up) {
+ load_ua = rmi4_data->board->power_down_enable ?
+ 0 : RMI4_I2C_LPM_LOAD_UA;
+ retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
+ load_ua);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "Regulator vcc_i2c set_opt failed " \
+ "rc=%d\n", retval);
+ goto fail_regulator_lpm;
+ }
+
+ if (rmi4_data->board->power_down_enable) {
+ retval = regulator_disable(rmi4_data->vcc_i2c);
+ if (retval) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "Regulator vcc_i2c disable failed " \
+ "rc=%d\n", retval);
+ goto fail_regulator_lpm;
+ }
+ }
+ }
+
return 0;
regulator_hpm:
@@ -2628,6 +2654,16 @@
goto fail_regulator_hpm;
}
+ if (rmi4_data->board->power_down_enable) {
+ retval = regulator_enable(rmi4_data->vdd);
+ if (retval) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "Regulator vdd enable failed rc=%d\n",
+ retval);
+ goto fail_regulator_hpm;
+ }
+ }
+
if (rmi4_data->board->i2c_pull_up) {
retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
RMI4_I2C_LOAD_UA);
@@ -2637,6 +2673,26 @@
retval);
goto fail_regulator_hpm;
}
+
+ if (rmi4_data->board->power_down_enable) {
+ retval = regulator_enable(rmi4_data->vcc_i2c);
+ if (retval) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "Regulator vcc_i2c enable failed " \
+ "rc=%d\n", retval);
+ goto fail_regulator_hpm;
+ }
+ }
+ }
+
+ if (rmi4_data->board->power_down_enable) {
+ retval = synaptics_rmi4_reset_device(rmi4_data);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to issue reset command, rc = %d\n",
+ __func__, retval);
+ return retval;
+ }
}
return 0;
diff --git a/drivers/iommu/msm_iommu-v1.c b/drivers/iommu/msm_iommu-v1.c
index b9c4cae..53c7c30 100644
--- a/drivers/iommu/msm_iommu-v1.c
+++ b/drivers/iommu/msm_iommu-v1.c
@@ -692,7 +692,6 @@
if (ret)
goto fail;
- ret = __flush_iotlb_va(domain, va);
fail:
mutex_unlock(&msm_iommu_lock);
return ret;
@@ -742,7 +741,6 @@
if (ret)
goto fail;
- __flush_iotlb(domain);
fail:
mutex_unlock(&msm_iommu_lock);
return ret;
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index 474efdf..78fffb2 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -371,7 +371,7 @@
map.info.ctx_id = ctx_drvdata->num;
map.info.va = va;
map.info.size = len;
- map.flags = IOMMU_TLBINVAL_FLAG;
+ map.flags = 0;
flush_va = &pa;
flush_pa = virt_to_phys(&pa);
@@ -421,7 +421,7 @@
map.info.ctx_id = ctx_drvdata->num;
map.info.va = va;
map.info.size = len;
- map.flags = IOMMU_TLBINVAL_FLAG;
+ map.flags = 0;
if (sg->length == len) {
pa = get_phys_addr(sg);
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index e84b477..bdc476e 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -378,7 +378,7 @@
* @second_addr - address of secondary flash to be written
* @safety_timer - enable safety timer or watchdog timer
* @torch_enable - enable flash LED torch mode
- * @regulator_get - regulator attached or not
+ * @flash_reg_get - flash regulator attached or not
* @flash_on - flash status, on or off
* @flash_boost_reg - boost regulator for flash
*/
@@ -395,7 +395,7 @@
u16 second_addr;
bool safety_timer;
bool torch_enable;
- bool regulator_get;
+ bool flash_reg_get;
bool flash_on;
struct regulator *flash_boost_reg;
};
@@ -403,15 +403,22 @@
/**
* kpdbl_config_data - kpdbl configuration data
* @pwm_cfg - device pwm configuration
- * @row_src_sel_val - select source, 0 for vph_pwr and 1 for vbst
- * @row_scan_en - enable row scan
- * @row_scan_val - map to enable needed rows
+ * @mode - running mode: pwm or lut
+ * @row_id - row id of the led
+ * @row_src_vbst - 0 for vph_pwr and 1 for vbst
+ * @row_src_en - enable row source
+ * @always_on - always on row
+ * @lut_params - lut parameters to be used by pwm driver
+ * @duty_cycles - duty cycles for lut
*/
struct kpdbl_config_data {
struct pwm_config_data *pwm_cfg;
- u32 row_src_sel_val;
- u32 row_scan_en;
- u32 row_scan_val;
+ u32 row_id;
+ bool row_src_vbst;
+ bool row_src_en;
+ bool always_on;
+ struct pwm_duty_cycles *duty_cycles;
+ struct lut_params lut_params;
};
/**
@@ -458,6 +465,8 @@
int turn_off_delay_ms;
};
+static int num_kpbl_leds_on;
+
static int
qpnp_led_masked_write(struct qpnp_led_data *led, u16 addr, u8 mask, u8 val)
{
@@ -672,6 +681,66 @@
return 0;
}
+static int qpnp_flash_regulator_operate(struct qpnp_led_data *led, bool on)
+{
+ int rc, i;
+ struct qpnp_led_data *led_array;
+ bool regulator_on = false;
+
+ led_array = dev_get_drvdata(&led->spmi_dev->dev);
+ if (!led_array) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to get LED array\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < led->num_leds; i++)
+ regulator_on |= led_array[i].flash_cfg->flash_on;
+
+ if (!on)
+ goto regulator_turn_off;
+
+ if (!regulator_on && !led->flash_cfg->flash_on) {
+ for (i = 0; i < led->num_leds; i++) {
+ if (led_array[i].flash_cfg->flash_reg_get) {
+ rc = regulator_enable(
+ led_array[i].flash_cfg->\
+ flash_boost_reg);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Regulator enable failed(%d)\n",
+ rc);
+ return rc;
+ }
+ led->flash_cfg->flash_on = true;
+ }
+ break;
+ }
+ }
+
+ return 0;
+
+regulator_turn_off:
+ if (regulator_on && led->flash_cfg->flash_on) {
+ for (i = 0; i < led->num_leds; i++) {
+ if (led_array[i].flash_cfg->flash_reg_get) {
+ rc = regulator_disable(led_array[i].flash_cfg->\
+ flash_boost_reg);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Regulator disable failed(%d)\n",
+ rc);
+ return rc;
+ }
+ led->flash_cfg->flash_on = false;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int qpnp_flash_set(struct qpnp_led_data *led)
{
int rc;
@@ -750,6 +819,14 @@
return rc;
}
} else {
+ rc = qpnp_flash_regulator_operate(led, true);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash regulator operate failed(%d)\n",
+ rc);
+ return rc;
+ }
+
/* Set flash safety timer */
rc = qpnp_led_masked_write(led,
FLASH_SAFETY_TIMER(led->base),
@@ -890,6 +967,13 @@
"Enable reg write failed(%d)\n", rc);
return rc;
}
+
+ rc = qpnp_flash_regulator_operate(led, false);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash regulator operate failed(%d)\n", rc);
+ return rc;
+ }
}
qpnp_dump_regs(led, flash_debug_regs, ARRAY_SIZE(flash_debug_regs));
@@ -906,35 +990,73 @@
if (!led->kpdbl_cfg->pwm_cfg->blinking)
led->kpdbl_cfg->pwm_cfg->mode =
led->kpdbl_cfg->pwm_cfg->default_mode;
- rc = qpnp_led_masked_write(led, KPDBL_ENABLE(led->base),
- KPDBL_MODULE_EN_MASK, KPDBL_MODULE_EN);
- duty_us = (led->kpdbl_cfg->pwm_cfg->pwm_period_us *
- led->cdev.brightness) / KPDBL_MAX_LEVEL;
- rc = pwm_config(led->kpdbl_cfg->pwm_cfg->pwm_dev, duty_us,
- led->kpdbl_cfg->pwm_cfg->pwm_period_us);
- if (rc < 0) {
- dev_err(&led->spmi_dev->dev, "pwm config failed\n");
- return rc;
+ if (!num_kpbl_leds_on) {
+ rc = qpnp_led_masked_write(led, KPDBL_ENABLE(led->base),
+ KPDBL_MODULE_EN_MASK, KPDBL_MODULE_EN);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Enable reg write failed(%d)\n", rc);
+ return rc;
+ }
}
+
+ if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) {
+ duty_us = (led->kpdbl_cfg->pwm_cfg->pwm_period_us *
+ led->cdev.brightness) / KPDBL_MAX_LEVEL;
+ rc = pwm_config(led->kpdbl_cfg->pwm_cfg->pwm_dev,
+ duty_us,
+ led->kpdbl_cfg->pwm_cfg->pwm_period_us);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev, "pwm config failed\n");
+ return rc;
+ }
+ }
+
rc = pwm_enable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
if (rc < 0) {
dev_err(&led->spmi_dev->dev, "pwm enable failed\n");
return rc;
}
+
+ num_kpbl_leds_on++;
+
} else {
led->kpdbl_cfg->pwm_cfg->mode =
led->kpdbl_cfg->pwm_cfg->default_mode;
- pwm_disable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
- rc = qpnp_led_masked_write(led, KPDBL_ENABLE(led->base),
- KPDBL_MODULE_EN_MASK, KPDBL_MODULE_DIS);
- if (rc) {
- dev_err(&led->spmi_dev->dev,
- "Failed to write led enable reg\n");
- return rc;
+
+ if (led->kpdbl_cfg->always_on) {
+ rc = pwm_config(led->kpdbl_cfg->pwm_cfg->pwm_dev, 0,
+ led->kpdbl_cfg->pwm_cfg->pwm_period_us);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "pwm config failed\n");
+ return rc;
+ }
+
+ rc = pwm_enable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev, "pwm enable failed\n");
+ return rc;
+ }
+ } else
+ pwm_disable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
+
+ if (num_kpbl_leds_on > 0)
+ num_kpbl_leds_on--;
+
+ if (!num_kpbl_leds_on) {
+ rc = qpnp_led_masked_write(led, KPDBL_ENABLE(led->base),
+ KPDBL_MODULE_EN_MASK, KPDBL_MODULE_DIS);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable reg\n");
+ return rc;
+ }
}
}
led->kpdbl_cfg->pwm_cfg->blinking = false;
+
qpnp_dump_regs(led, kpdbl_debug_regs, ARRAY_SIZE(kpdbl_debug_regs));
return 0;
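
The kpdbl changes above introduce num_kpbl_leds_on so the shared KPDBL module is enabled only when the first LED turns on and disabled only after the last one turns off. A minimal standalone sketch of that reference-counting idea follows; kpdbl_module_write and the counter are illustrative, not the driver's symbols.

    #include <stdio.h>

    /* Illustrative stand-in for the KPDBL module enable register write. */
    static int kpdbl_module_write(int enable)
    {
        printf("KPDBL module %s\n", enable ? "enabled" : "disabled");
        return 0;
    }

    static int num_kpdbl_leds_on;

    /* Enable the shared module only for the first LED that turns on. */
    static int kpdbl_led_on(void)
    {
        if (!num_kpdbl_leds_on) {
            int rc = kpdbl_module_write(1);
            if (rc)
                return rc;
        }
        num_kpdbl_leds_on++;
        return 0;
    }

    /* Disable the module only after the last LED turns off. */
    static int kpdbl_led_off(void)
    {
        if (num_kpdbl_leds_on > 0)
            num_kpdbl_leds_on--;
        if (!num_kpdbl_leds_on)
            return kpdbl_module_write(0);
        return 0;
    }

    int main(void)
    {
        kpdbl_led_on();
        kpdbl_led_on();
        kpdbl_led_off();   /* module stays on: one LED still active */
        kpdbl_led_off();   /* last user gone: module disabled */
        return 0;
    }
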
@@ -1000,11 +1122,14 @@
struct qpnp_led_data *led;
led = container_of(led_cdev, struct qpnp_led_data, cdev);
- if (value < LED_OFF || value > led->cdev.max_brightness) {
+ if (value < LED_OFF) {
dev_err(&led->spmi_dev->dev, "Invalid brightness value\n");
return;
}
+ if (value > led->cdev.max_brightness)
+ value = led->cdev.max_brightness;
+
led->cdev.brightness = value;
schedule_work(&led->work);
}
@@ -1012,35 +1137,7 @@
static void __qpnp_led_work(struct qpnp_led_data *led,
enum led_brightness value)
{
- int rc, i;
- struct qpnp_led_data *led_array;
-
- if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1) {
- if (!led->flash_cfg->flash_on && value > 0) {
- led_array = dev_get_drvdata(&led->spmi_dev->dev);
- if (!led_array) {
- dev_err(&led->spmi_dev->dev,
- "Unable to unable to get array\n");
- return;
- }
-
- for (i = 0; i < led->num_leds; i++) {
- if (led_array[i].flash_cfg->regulator_get) {
- rc = regulator_enable(led_array[i].\
- flash_cfg->\
- flash_boost_reg);
- if (rc) {
- dev_err(&led->spmi_dev->dev,
- "Regulator enable" \
- "failed(%d)\n",
- rc);
- return;
- }
- }
- }
- led->flash_cfg->flash_on = true;
- }
- }
+ int rc;
mutex_lock(&led->lock);
@@ -1084,31 +1181,6 @@
}
mutex_unlock(&led->lock);
- if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1) {
- if (led->flash_cfg->flash_on && !value) {
- led_array = dev_get_drvdata(&led->spmi_dev->dev);
- if (!led_array) {
- dev_err(&led->spmi_dev->dev,
- "Unable to get LED array\n");
- return;
- }
-
- for (i = 0; i < led->num_leds; i++) {
- if (led_array[i].flash_cfg->regulator_get) {
- rc = regulator_disable(led_array[i]\
- .flash_cfg->flash_boost_reg);
- if (rc) {
- dev_err(&led->spmi_dev->dev,
- "Unable to disable" \
- " regulator(%d)\n",
- rc);
- return;
- }
- }
- }
- led->flash_cfg->flash_on = false;
- }
- }
}
static void qpnp_led_work(struct work_struct *work)
@@ -2013,16 +2085,31 @@
int rc;
u8 val;
- /* enable row source selct */
- rc = qpnp_led_masked_write(led, KPDBL_ROW_SRC_SEL(led->base),
- KPDBL_ROW_SRC_SEL_VAL_MASK, led->kpdbl_cfg->row_src_sel_val);
+ /* select row source - vbst or vph */
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ KPDBL_ROW_SRC_SEL(led->base), &val, 1);
if (rc) {
dev_err(&led->spmi_dev->dev,
- "Enable row src sel write failed(%d)\n", rc);
+ "Unable to read from addr=%x, rc(%d)\n",
+ KPDBL_ROW_SRC_SEL(led->base), rc);
return rc;
}
- /* row source */
+ if (led->kpdbl_cfg->row_src_vbst)
+ val |= 1 << led->kpdbl_cfg->row_id;
+ else
+ val &= ~(1 << led->kpdbl_cfg->row_id);
+
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ KPDBL_ROW_SRC_SEL(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read from addr=%x, rc(%d)\n",
+ KPDBL_ROW_SRC_SEL(led->base), rc);
+ return rc;
+ }
+
+ /* row source enable */
rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid,
KPDBL_ROW_SRC(led->base), &val, 1);
if (rc) {
@@ -2032,12 +2119,10 @@
return rc;
}
- val &= ~KPDBL_ROW_SCAN_VAL_MASK;
- val |= led->kpdbl_cfg->row_scan_val;
-
- led->kpdbl_cfg->row_scan_en <<= KPDBL_ROW_SCAN_EN_SHIFT;
- val &= ~KPDBL_ROW_SCAN_EN_MASK;
- val |= led->kpdbl_cfg->row_scan_en;
+ if (led->kpdbl_cfg->row_src_en)
+ val |= KPDBL_ROW_SCAN_EN_MASK | (1 << led->kpdbl_cfg->row_id);
+ else
+ val &= ~(1 << led->kpdbl_cfg->row_id);
rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid,
KPDBL_ROW_SRC(led->base), &val, 1);
@@ -2317,10 +2402,10 @@
"Regulator get failed(%d)\n", rc);
return rc;
}
- led->flash_cfg->regulator_get = true;
+ led->flash_cfg->flash_reg_get = true;
*reg_set = true;
} else
- led->flash_cfg->regulator_get = false;
+ led->flash_cfg->flash_reg_get = false;
} else if (led->id == QPNP_ID_FLASH1_LED1) {
led->flash_cfg->enable_module = FLASH_ENABLE_ALL;
led->flash_cfg->current_addr = FLASH_LED_1_CURR(led->base);
@@ -2336,10 +2421,10 @@
"Regulator get failed(%d)\n", rc);
return rc;
}
- led->flash_cfg->regulator_get = true;
+ led->flash_cfg->flash_reg_get = true;
*reg_set = true;
} else
- led->flash_cfg->regulator_get = false;
+ led->flash_cfg->flash_reg_get = false;
} else {
dev_err(&led->spmi_dev->dev, "Unknown flash LED name given\n");
return -EINVAL;
@@ -2565,6 +2650,7 @@
dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n");
return -ENOMEM;
}
+
rc = of_property_read_string(node, "qcom,mode", &mode);
if (!rc) {
led_mode = qpnp_led_get_mode(mode);
@@ -2590,23 +2676,20 @@
if (rc < 0)
return rc;
- rc = of_property_read_u32(node, "qcom,row-src-sel-val", &val);
+ rc = of_property_read_u32(node, "qcom,row-id", &val);
if (!rc)
- led->kpdbl_cfg->row_src_sel_val = val;
+ led->kpdbl_cfg->row_id = val;
else
return rc;
- rc = of_property_read_u32(node, "qcom,row-scan-val", &val);
- if (!rc)
- led->kpdbl_cfg->row_scan_val = val;
- else
- return rc;
+ led->kpdbl_cfg->row_src_vbst =
+ of_property_read_bool(node, "qcom,row-src-vbst");
- rc = of_property_read_u32(node, "qcom,row-scan-en", &val);
- if (!rc)
- led->kpdbl_cfg->row_scan_en = val;
- else
- return rc;
+ led->kpdbl_cfg->row_src_en =
+ of_property_read_bool(node, "qcom,row-src-en");
+
+ led->kpdbl_cfg->always_on =
+ of_property_read_bool(node, "qcom,always-on");
return 0;
}
@@ -2856,6 +2939,7 @@
goto fail_id_check;
}
} else if (strncmp(led_label, "kpdbl", sizeof("kpdbl")) == 0) {
+ num_kpbl_leds_on = 0;
rc = qpnp_get_config_kpdbl(led, temp);
if (rc < 0) {
dev_err(&led->spmi_dev->dev,
@@ -2985,7 +3069,7 @@
break;
case QPNP_ID_FLASH1_LED0:
case QPNP_ID_FLASH1_LED1:
- if (led_array[i].flash_cfg->regulator_get)
+ if (led_array[i].flash_cfg->flash_reg_get)
regulator_put(led_array[i].flash_cfg-> \
flash_boost_reg);
sysfs_remove_group(&led_array[i].cdev.dev->kobj,
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 692a04e..9d606a1 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -128,6 +128,20 @@
DMX_IDX_H264_NON_IDR_START
};
+static const struct dvb_dmx_video_patterns h264_non_access_unit_del = {
+ {0x00, 0x00, 0x01, 0x09},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_ACCESS_UNIT_DEL
+};
+
+static const struct dvb_dmx_video_patterns h264_non_sei = {
+ {0x00, 0x00, 0x01, 0x06},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_SEI
+};
+
static const struct dvb_dmx_video_patterns vc1_seq_hdr = {
{0x00, 0x00, 0x01, 0x0F},
{0xFF, 0xFF, 0xFF, 0xFF},
@@ -1791,6 +1805,12 @@
case DMX_IDX_H264_NON_IDR_START:
return &h264_non_idr;
+ case DMX_IDX_H264_ACCESS_UNIT_DEL:
+ return &h264_non_access_unit_del;
+
+ case DMX_IDX_H264_SEI:
+ return &h264_non_sei;
+
case DMX_IDX_VC1_SEQ_HEADER:
return &vc1_seq_hdr;
@@ -1913,6 +1933,20 @@
}
if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_ACCESS_UNIT_DEL)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_ACCESS_UNIT_DEL);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_SEI)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
(feed->idx_params.types &
(DMX_IDX_VC1_SEQ_HEADER |
DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
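
The two new pattern tables above key off the H.264 NAL header that follows the 00 00 01 start code: the 0x1F mask isolates nal_unit_type, which is 9 for an access unit delimiter and 6 for SEI. A small standalone sketch of that check follows; h264_nal_type is an illustrative helper, not demux code.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Return the H.264 nal_unit_type following a 00 00 01 start code. */
    static int h264_nal_type(const uint8_t *buf, size_t len)
    {
        if (len < 4 || buf[0] != 0x00 || buf[1] != 0x00 || buf[2] != 0x01)
            return -1;            /* not a start code */
        return buf[3] & 0x1F;     /* low five bits, as the 0x1F mask above */
    }

    int main(void)
    {
        const uint8_t aud[] = { 0x00, 0x00, 0x01, 0x09, 0x10 };
        const uint8_t sei[] = { 0x00, 0x00, 0x01, 0x06, 0x05 };

        /* Prints 9 (access unit delimiter) and 6 (SEI). */
        printf("aud=%d sei=%d\n",
               h264_nal_type(aud, sizeof(aud)),
               h264_nal_type(sei, sizeof(sei)));
        return 0;
    }
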
diff --git a/drivers/media/platform/msm/camera_v2/camera/camera.c b/drivers/media/platform/msm/camera_v2/camera/camera.c
index d3618c0..c70e151 100644
--- a/drivers/media/platform/msm/camera_v2/camera/camera.c
+++ b/drivers/media/platform/msm/camera_v2/camera/camera.c
@@ -328,6 +328,10 @@
pr_debug("%s: num planes :%c\n", __func__,
user_fmt->num_planes);
+ /* num_planes needs to be bounds-checked, otherwise the for loop
+ below can read past the plane_sizes array */
+ if (WARN_ON(user_fmt->num_planes > VIDEO_MAX_PLANES))
+ return -EINVAL;
for (i = 0; i < user_fmt->num_planes; i++)
pr_debug("%s: plane size[%d]\n", __func__,
user_fmt->plane_sizes[i]);
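
The WARN_ON added above caps a user-supplied num_planes before it is used as a loop limit. A standalone sketch of the same guard; VIDEO_MAX_PLANES is redefined locally here purely for illustration.

    #include <stdio.h>

    #define VIDEO_MAX_PLANES 8    /* local copy of the limit, for illustration */

    /* Reject a user-controlled count before using it as a loop bound. */
    static int dump_plane_sizes(const unsigned int *sizes, unsigned int num_planes)
    {
        unsigned int i;

        if (num_planes > VIDEO_MAX_PLANES)
            return -1;            /* would walk past the fixed-size array */
        for (i = 0; i < num_planes; i++)
            printf("plane[%u] = %u\n", i, sizes[i]);
        return 0;
    }

    int main(void)
    {
        unsigned int sizes[VIDEO_MAX_PLANES] = { 4096, 2048 };

        dump_plane_sizes(sizes, 2);                          /* accepted */
        return dump_plane_sizes(sizes, 100) == -1 ? 0 : 1;   /* rejected */
    }
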
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index f1f4c17..8c42ed2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -90,7 +90,8 @@
void (*enable_wm) (struct vfe_device *vfe_dev,
uint8_t wm_idx, uint8_t enable);
void (*cfg_io_format) (struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info);
+ enum msm_vfe_axi_stream_src stream_src,
+ uint32_t io_format);
void (*cfg_framedrop) (struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info);
void (*clear_framedrop) (struct vfe_device *vfe_dev,
@@ -289,6 +290,7 @@
enum msm_vfe_inputmux input_mux;
uint32_t width;
long pixel_clock;
+ uint32_t input_format;
};
enum msm_wm_ub_cfg_type {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 07a66e6..aac973e 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -144,7 +144,7 @@
/* CGC_OVERRIDE */
msm_camera_io_w(0x07FFFFFF, vfe_dev->vfe_base + 0xC);
/* BUS_CFG */
- msm_camera_io_w(0x00000001, vfe_dev->vfe_base + 0x3C);
+ msm_camera_io_w(0x00000009, vfe_dev->vfe_base + 0x3C);
msm_camera_io_w(0x01000025, vfe_dev->vfe_base + 0x1C);
msm_camera_io_w_mb(0x1CFFFFFF, vfe_dev->vfe_base + 0x20);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
@@ -481,11 +481,11 @@
}
static void msm_vfe32_cfg_io_format(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info)
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
{
int bpp, bpp_reg = 0;
uint32_t io_format_reg;
- bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+ bpp = msm_isp_get_bit_per_pixel(io_format);
switch (bpp) {
case 8:
@@ -499,7 +499,9 @@
break;
}
io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x6F8);
- switch (stream_info->stream_src) {
+ switch (stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
case CAMIF_RAW:
io_format_reg &= 0xFFFFCFFF;
io_format_reg |= bpp_reg << 12;
@@ -508,8 +510,6 @@
io_format_reg &= 0xFFFFFFC8;
io_format_reg |= bpp_reg << 4;
break;
- case PIX_ENCODER:
- case PIX_VIEWFINDER:
case RDI_INTF_0:
case RDI_INTF_1:
case RDI_INTF_2:
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 2db25a6..84b95f1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -688,11 +688,11 @@
}
static void msm_vfe40_cfg_io_format(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info)
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
{
int bpp, bpp_reg = 0;
uint32_t io_format_reg;
- bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+ bpp = msm_isp_get_bit_per_pixel(io_format);
switch (bpp) {
case 8:
@@ -706,7 +706,9 @@
break;
}
io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x54);
- switch (stream_info->stream_src) {
+ switch (stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
case CAMIF_RAW:
io_format_reg &= 0xFFFFCFFF;
io_format_reg |= bpp_reg << 12;
@@ -715,8 +717,6 @@
io_format_reg &= 0xFFFFFFC8;
io_format_reg |= bpp_reg << 4;
break;
- case PIX_ENCODER:
- case PIX_VIEWFINDER:
case RDI_INTF_0:
case RDI_INTF_1:
case RDI_INTF_2:
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index d3138ed..5b7658d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -474,6 +474,7 @@
int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0, i;
+ uint32_t io_format = 0;
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
struct msm_vfe_axi_stream *stream_info;
@@ -497,10 +498,20 @@
stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
msm_isp_axi_reserve_wm(&vfe_dev->axi_data, stream_info);
- if (stream_cfg_cmd->stream_src == CAMIF_RAW ||
- stream_cfg_cmd->stream_src == IDEAL_RAW)
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_io_format(vfe_dev, stream_info);
+ if (stream_info->stream_src < RDI_INTF_0) {
+ io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
+ if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == IDEAL_RAW) {
+ if (stream_info->stream_src == CAMIF_RAW &&
+ io_format != stream_info->output_format)
+ pr_warn("%s: Overriding input format\n",
+ __func__);
+
+ io_format = stream_info->output_format;
+ }
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_io_format(
+ vfe_dev, stream_info->stream_src, io_format);
+ }
msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
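
The request-stream change above picks the IO format per stream: pixel-interface streams use the CAMIF input format recorded when the PIX input is configured, while CAMIF_RAW and IDEAL_RAW dumps override it with the stream's own output format, warning when the two differ. A standalone sketch of that selection; the enum and values are local stand-ins for msm_vfe_axi_stream_src, not driver code.

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for the driver's stream source enum. */
    enum stream_src { PIX_ENCODER, PIX_VIEWFINDER, CAMIF_RAW, IDEAL_RAW };

    /* Pixel streams use the CAMIF input format; raw dumps override it. */
    static uint32_t select_io_format(enum stream_src src,
                                     uint32_t camif_input_format,
                                     uint32_t stream_output_format)
    {
        uint32_t io_format = camif_input_format;

        if (src == CAMIF_RAW || src == IDEAL_RAW) {
            if (src == CAMIF_RAW && io_format != stream_output_format)
                printf("overriding input format\n");
            io_format = stream_output_format;
        }
        return io_format;
    }

    int main(void)
    {
        printf("pix: %u, camif raw: %u\n",
               (unsigned)select_io_format(PIX_ENCODER, 10, 12),
               (unsigned)select_io_format(CAMIF_RAW, 10, 12));
        return 0;
    }
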
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index d857a14..33f63b3 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -150,6 +150,12 @@
stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
get_stats_idx(stream_req_cmd->stats_type);
+ if ((stats_idx > MSM_ISP_STATS_MAX) ||
+ (stats_idx == -EINVAL)) {
+ pr_err("%s: Stats idx Error\n", __func__);
+ return rc;
+ }
+
stream_info = &stats_data->stream_info[stats_idx];
if (stream_info->state != STATS_AVALIABLE) {
pr_err("%s: Stats already requested\n", __func__);
@@ -188,7 +194,7 @@
int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
{
- int rc = 0;
+ int rc = -1;
struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
struct msm_vfe_stats_stream *stream_info = NULL;
struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
@@ -202,6 +208,11 @@
}
stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
+ if (stats_idx > MSM_ISP_STATS_MAX) {
+ pr_err("%s: Stats idx Error\n", __func__);
+ return rc;
+ }
+
stream_info = &stats_data->stream_info[stats_idx];
framedrop_period = msm_isp_get_framedrop_period(
@@ -228,9 +239,14 @@
struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg;
struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
int stats_idx = STATS_IDX(stream_release_cmd->stream_handle);
- struct msm_vfe_stats_stream *stream_info =
- &stats_data->stream_info[stats_idx];
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ if (stats_idx > MSM_ISP_STATS_MAX) {
+ pr_err("%s: Stats idx Error\n", __func__);
+ return rc;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
if (stream_info->state == STATS_AVALIABLE) {
pr_err("%s: stream already release\n", __func__);
return rc;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 908d3c6..590b636 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -273,6 +273,9 @@
return rc;
}
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
+ input_cfg->d.pix_cfg.input_format;
+
vfe_dev->hw_info->vfe_ops.core_ops.cfg_camif(
vfe_dev, &input_cfg->d.pix_cfg);
return rc;
@@ -527,9 +530,11 @@
int i;
uint32_t *data_ptr = cfg_data +
reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
- for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++)
+ for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
*data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
- reg_cfg_cmd->u.rw_info.reg_offset++);
+ reg_cfg_cmd->u.rw_info.reg_offset);
+ reg_cfg_cmd->u.rw_info.reg_offset += 4;
+ }
break;
}
}
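
The loop fix above matters because reg_offset is a byte offset into 32-bit registers: post-incrementing it by one only advanced a single byte per iteration, so it must step by four. A standalone sketch, with plain memory standing in for the VFE register window.

    #include <stdint.h>
    #include <stdio.h>

    /* Read len_bytes worth of consecutive 32-bit registers starting at
     * reg_offset (a byte offset): the offset must advance by 4 per word,
     * not by 1 as the old post-increment did. */
    static void read_reg_block(const uint8_t *mmio_base, uint32_t reg_offset,
                               uint32_t len_bytes, uint32_t *out)
    {
        uint32_t i;

        for (i = 0; i < len_bytes / 4; i++) {
            out[i] = *(const uint32_t *)(mmio_base + reg_offset);
            reg_offset += 4;    /* next 32-bit register */
        }
    }

    int main(void)
    {
        uint32_t regs[4] = { 0x11, 0x22, 0x33, 0x44 };
        uint32_t out[4];

        read_reg_block((const uint8_t *)regs, 0, sizeof(regs), out);
        printf("%x %x %x %x\n", out[0], out[1], out[2], out[3]);
        return 0;
    }
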
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 8b0f6f1..822c0c8 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -1036,7 +1036,7 @@
}
#endif
-void msm_cpp_do_timeout_work(struct work_struct *work)
+static void msm_cpp_do_timeout_work(struct work_struct *work)
{
int ret;
uint32_t i = 0;
@@ -1046,6 +1046,11 @@
pr_err("cpp_timer_callback called idx:%d. (jiffies=%lu)\n",
del_timer_idx, jiffies);
+ if (!work || !this_frame) {
+ pr_err("Invalid work:%p, this_frame:%p, del_idx:%d\n",
+ work, this_frame, del_timer_idx);
+ return;
+ }
pr_err("fatal: cpp_timer expired for identity=0x%x, frame_id=%03d",
this_frame->identity, this_frame->frame_id);
cpp_timers[del_timer_idx].used = 0;
@@ -1214,6 +1219,14 @@
goto ERROR1;
}
+ if ((new_frame->msg_len == 0) ||
+ (new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
+ pr_err("%s:%d: Invalid frame len:%d\n", __func__,
+ __LINE__, new_frame->msg_len);
+ rc = -EINVAL;
+ goto ERROR1;
+ }
+
cpp_frame_msg = kzalloc(sizeof(uint32_t)*new_frame->msg_len,
GFP_KERNEL);
if (!cpp_frame_msg) {
@@ -1304,7 +1317,8 @@
(cpp_frame_msg[12] & 0x3FF);
fw_version_1_2_x = 0;
- if (cpp_dev->hw_info.cpp_hw_version == 0x10010000)
+ if ((cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_1_1_0) ||
+ (cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_1_1_1))
fw_version_1_2_x = 2;
for (i = 0; i < num_stripes; i++) {
@@ -1374,7 +1388,10 @@
pr_err("ioctl_ptr is null\n");
return -EINVAL;
}
-
+ if (cpp_dev == NULL) {
+ pr_err("cpp_dev is null\n");
+ return -EINVAL;
+ }
mutex_lock(&cpp_dev->mutex);
CPP_DBG("E cmd: %d\n", cmd);
switch (cmd) {
@@ -1390,8 +1407,16 @@
case VIDIOC_MSM_CPP_LOAD_FIRMWARE: {
if (cpp_dev->is_firmware_loaded == 0) {
- kfree(cpp_dev->fw_name_bin);
- cpp_dev->fw_name_bin = NULL;
+ if (cpp_dev->fw_name_bin != NULL) {
+ kfree(cpp_dev->fw_name_bin);
+ cpp_dev->fw_name_bin = NULL;
+ }
+ if ((ioctl_ptr->len == 0) ||
+ (ioctl_ptr->len > MSM_CPP_MAX_FW_NAME_LEN)) {
+ pr_err("ioctl_ptr->len is 0\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
cpp_dev->fw_name_bin = kzalloc(ioctl_ptr->len+1,
GFP_KERNEL);
if (!cpp_dev->fw_name_bin) {
@@ -1400,13 +1425,9 @@
mutex_unlock(&cpp_dev->mutex);
return -EINVAL;
}
-
if (ioctl_ptr->ioctl_ptr == NULL) {
pr_err("ioctl_ptr->ioctl_ptr=NULL\n");
- return -EINVAL;
- }
- if (ioctl_ptr->len == 0) {
- pr_err("ioctl_ptr->len is 0\n");
+ mutex_unlock(&cpp_dev->mutex);
return -EINVAL;
}
rc = (copy_from_user(cpp_dev->fw_name_bin,
@@ -1420,11 +1441,6 @@
return -EINVAL;
}
*(cpp_dev->fw_name_bin+ioctl_ptr->len) = '\0';
- if (cpp_dev == NULL) {
- pr_err("cpp_dev is null\n");
- return -EINVAL;
- }
-
disable_irq(cpp_dev->irq->start);
cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
enable_irq(cpp_dev->irq->start);
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
index 0a70d37..796bede 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
@@ -21,6 +21,14 @@
#include <media/v4l2-subdev.h>
#include "msm_sd.h"
+/* CPP HW version info:
+ * 31:28  Major version
+ * 27:16  Minor version
+ * 15:0   Revision bits
+ */
+#define CPP_HW_VERSION_1_1_0 0x10010000
+#define CPP_HW_VERSION_1_1_1 0x10010001
+
#define MAX_ACTIVE_CPP_INSTANCE 8
#define MAX_CPP_PROCESSING_FRAME 2
#define MAX_CPP_V4l2_EVENTS 30
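
The new version macros follow the 31:28 / 27:16 / 15:0 bit layout documented above. A standalone sketch that decodes a version word into its fields, confirming that 0x10010000 and 0x10010001 are v1.1 revisions 0 and 1.

    #include <stdint.h>
    #include <stdio.h>

    #define CPP_HW_VERSION_1_1_0 0x10010000
    #define CPP_HW_VERSION_1_1_1 0x10010001

    /* Decode a version word using the bit layout documented above. */
    static void cpp_decode_version(uint32_t v)
    {
        unsigned major = (v >> 28) & 0xF;
        unsigned minor = (v >> 16) & 0xFFF;
        unsigned rev   = v & 0xFFFF;

        printf("cpp hw v%u.%u rev %u\n", major, minor, rev);
    }

    int main(void)
    {
        cpp_decode_version(CPP_HW_VERSION_1_1_0);   /* v1.1 rev 0 */
        cpp_decode_version(CPP_HW_VERSION_1_1_1);   /* v1.1 rev 1 */
        return 0;
    }
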
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
index a27ca99..4fa3085 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
@@ -46,30 +46,44 @@
static void msm_cci_set_clk_param(struct cci_device *cci_dev)
{
- struct msm_cci_clk_params_t *clk_params = &cci_dev->cci_clk_params;
+ struct msm_cci_clk_params_t *clk_params = NULL;
+ uint8_t count = 0;
- msm_camera_io_w(clk_params->hw_thigh << 16 | clk_params->hw_tlow,
- cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR);
- msm_camera_io_w(clk_params->hw_tsu_sto << 16 | clk_params->hw_tsu_sta,
- cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR);
- msm_camera_io_w(clk_params->hw_thd_dat << 16 | clk_params->hw_thd_sta,
- cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR);
- msm_camera_io_w(clk_params->hw_tbuf,
- cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR);
- msm_camera_io_w(clk_params->hw_scl_stretch_en << 8 |
- clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
- cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
- msm_camera_io_w(clk_params->hw_thigh << 16 | clk_params->hw_tlow,
- cci_dev->base + CCI_I2C_M1_SCL_CTL_ADDR);
- msm_camera_io_w(clk_params->hw_tsu_sto << 16 | clk_params->hw_tsu_sta,
- cci_dev->base + CCI_I2C_M1_SDA_CTL_0_ADDR);
- msm_camera_io_w(clk_params->hw_thd_dat << 16 | clk_params->hw_thd_sta,
- cci_dev->base + CCI_I2C_M1_SDA_CTL_1_ADDR);
- msm_camera_io_w(clk_params->hw_tbuf,
- cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
- msm_camera_io_w(clk_params->hw_scl_stretch_en << 8 |
- clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
- cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
+ for (count = 0; count < MASTER_MAX; count++) {
+ if (MASTER_0 == count) {
+ clk_params = &cci_dev->cci_clk_params[count];
+ msm_camera_io_w(clk_params->hw_thigh << 16 |
+ clk_params->hw_tlow,
+ cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR);
+ msm_camera_io_w(clk_params->hw_tsu_sto << 16 |
+ clk_params->hw_tsu_sta,
+ cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR);
+ msm_camera_io_w(clk_params->hw_thd_dat << 16 |
+ clk_params->hw_thd_sta,
+ cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR);
+ msm_camera_io_w(clk_params->hw_tbuf,
+ cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR);
+ msm_camera_io_w(clk_params->hw_scl_stretch_en << 8 |
+ clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+ cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
+ } else if (MASTER_1 == count) {
+ clk_params = &cci_dev->cci_clk_params[count];
+ msm_camera_io_w(clk_params->hw_thigh << 16 |
+ clk_params->hw_tlow,
+ cci_dev->base + CCI_I2C_M1_SCL_CTL_ADDR);
+ msm_camera_io_w(clk_params->hw_tsu_sto << 16 |
+ clk_params->hw_tsu_sta,
+ cci_dev->base + CCI_I2C_M1_SDA_CTL_0_ADDR);
+ msm_camera_io_w(clk_params->hw_thd_dat << 16 |
+ clk_params->hw_thd_sta,
+ cci_dev->base + CCI_I2C_M1_SDA_CTL_1_ADDR);
+ msm_camera_io_w(clk_params->hw_tbuf,
+ cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+ msm_camera_io_w(clk_params->hw_scl_stretch_en << 8 |
+ clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+ cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
+ }
+ }
return;
}
@@ -910,58 +924,96 @@
{
int32_t rc = 0;
uint32_t val = 0;
+ uint8_t count = 0;
struct device_node *of_node = cci_dev->pdev->dev.of_node;
+ struct device_node *src_node = NULL;
- rc = of_property_read_u32(of_node, "qcom,hw-thigh", &val);
- CDBG("%s qcom,hw-thigh %d, rc %d\n", __func__, val, rc);
- if (!rc)
- cci_dev->cci_clk_params.hw_thigh = val;
+ for (count = 0; count < MASTER_MAX; count++) {
- rc = of_property_read_u32(of_node, "qcom,hw-tlow", &val);
- CDBG("%s qcom,hw-tlow %d, rc %d\n", __func__, val, rc);
- if (!rc)
- cci_dev->cci_clk_params.hw_tlow = val;
+ if (MASTER_0 == count)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,cci-master0");
+ else if (MASTER_1 == count)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,cci-master1");
+ else
+ return;
- rc = of_property_read_u32(of_node, "qcom,hw-tsu-sto", &val);
- CDBG("%s qcom,hw-tsu-sto %d, rc %d\n", __func__, val, rc);
- if (!rc)
- cci_dev->cci_clk_params.hw_tsu_sto = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-thigh", &val);
+ CDBG("%s qcom,hw-thigh %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ cci_dev->cci_clk_params[count].hw_thigh = val;
+ else
+ cci_dev->cci_clk_params[count].hw_thigh = 78;
- rc = of_property_read_u32(of_node, "qcom,hw-tsu-sta", &val);
- CDBG("%s qcom,hw-tsu-sta %d, rc %d\n", __func__, val, rc);
- if (!rc)
- cci_dev->cci_clk_params.hw_tsu_sta = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tlow", &val);
+ CDBG("%s qcom,hw-tlow %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ cci_dev->cci_clk_params[count].hw_tlow = val;
+ else
+ cci_dev->cci_clk_params[count].hw_tlow = 114;
- rc = of_property_read_u32(of_node, "qcom,hw-thd-dat", &val);
- CDBG("%s qcom,hw-thd-dat %d, rc %d\n", __func__, val, rc);
- if (!rc)
- cci_dev->cci_clk_params.hw_thd_dat = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tsu-sto", &val);
+ CDBG("%s qcom,hw-tsu-sto %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ cci_dev->cci_clk_params[count].hw_tsu_sto = val;
+ else
+ cci_dev->cci_clk_params[count].hw_tsu_sto = 28;
- rc = of_property_read_u32(of_node, "qcom,hw-thd-sta", &val);
- CDBG("%s qcom,hwthd-sta %d, rc %d\n", __func__, val, rc);
- if (!rc)
- cci_dev->cci_clk_params.hw_thd_sta = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tsu-sta", &val);
+ CDBG("%s qcom,hw-tsu-sta %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ cci_dev->cci_clk_params[count].hw_tsu_sta = val;
+ else
+ cci_dev->cci_clk_params[count].hw_tsu_sta = 28;
- rc = of_property_read_u32(of_node, "qcom,hw-tbuf", &val);
- CDBG("%s qcom,hw-tbuf %d, rc %d\n", __func__, val, rc);
- if (!rc)
- cci_dev->cci_clk_params.hw_tbuf = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-thd-dat", &val);
+ CDBG("%s qcom,hw-thd-dat %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ cci_dev->cci_clk_params[count].hw_thd_dat = val;
+ else
+ cci_dev->cci_clk_params[count].hw_thd_dat = 10;
- rc = of_property_read_u32(of_node, "qcom,hw-scl-stretch-en", &val);
- CDBG("%s qcom,hw-scl-stretch-en %d, rc %d\n", __func__, val, rc);
- if (!rc)
- cci_dev->cci_clk_params.hw_scl_stretch_en = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-thd-sta", &val);
+ CDBG("%s qcom,hwthd-sta %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ cci_dev->cci_clk_params[count].hw_thd_sta = val;
+ else
+ cci_dev->cci_clk_params[count].hw_thd_sta = 77;
- rc = of_property_read_u32(of_node, "qcom,hw-trdhld", &val);
- CDBG("%s qcom,hw-trdhld %d, rc %d\n", __func__, val, rc);
- if (!rc)
- cci_dev->cci_clk_params.hw_trdhld = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tbuf", &val);
+ CDBG("%s qcom,hw-tbuf %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ cci_dev->cci_clk_params[count].hw_tbuf = val;
+ else
+ cci_dev->cci_clk_params[count].hw_tbuf = 118;
- rc = of_property_read_u32(of_node, "qcom,hw-tsp", &val);
- CDBG("%s qcom,hw-tsp %d, rc %d\n", __func__, val, rc);
- if (!rc)
- cci_dev->cci_clk_params.hw_tsp = val;
+ rc = of_property_read_u32(src_node,
+ "qcom,hw-scl-stretch-en", &val);
+ CDBG("%s qcom,hw-scl-stretch-en %d, rc %d\n",
+ __func__, val, rc);
+ if (!rc)
+ cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
+ else
+ cci_dev->cci_clk_params[count].hw_scl_stretch_en = 0;
+ rc = of_property_read_u32(src_node, "qcom,hw-trdhld", &val);
+ CDBG("%s qcom,hw-trdhld %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ cci_dev->cci_clk_params[count].hw_trdhld = val;
+ else
+ cci_dev->cci_clk_params[count].hw_trdhld = 6;
+
+ rc = of_property_read_u32(src_node, "qcom,hw-tsp", &val);
+ CDBG("%s qcom,hw-tsp %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ cci_dev->cci_clk_params[count].hw_tsp = val;
+ else
+ cci_dev->cci_clk_params[count].hw_tsp = 1;
+
+ of_node_put(src_node);
+ src_node = NULL;
+ }
return;
}
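
The CCI probe code above now reads each timing property per master from the qcom,cci-master0/1 subnodes and falls back to a hard-coded default when a property is absent. The repeated read-or-default pattern could be captured by a small helper like the sketch below; fake_read_u32 and read_u32_default are illustrative stand-ins, not of_property_read_u32 itself.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for of_property_read_u32(): 0 on success, negative otherwise. */
    static int fake_read_u32(const char *prop, uint32_t *val)
    {
        if (strncmp(prop, "qcom,", 5) == 0) {   /* pretend these exist */
            *val = 42;
            return 0;
        }
        return -22;
    }

    /* Read a property, falling back to a per-master default when absent. */
    static uint32_t read_u32_default(const char *prop, uint32_t def)
    {
        uint32_t val;

        return fake_read_u32(prop, &val) ? def : val;
    }

    int main(void)
    {
        printf("hw-thigh=%u hw-tlow=%u\n",
               (unsigned)read_u32_default("qcom,hw-thigh", 78),
               (unsigned)read_u32_default("missing-prop", 114));
        return 0;
    }
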
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
index f9e40f1..16edaae 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
@@ -139,7 +139,7 @@
struct msm_camera_cci_i2c_queue_info
cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
struct msm_camera_cci_master_info cci_master_info[NUM_MASTERS];
- struct msm_cci_clk_params_t cci_clk_params;
+ struct msm_cci_clk_params_t cci_clk_params[MASTER_MAX];
struct gpio *cci_gpio_tbl;
uint8_t cci_gpio_tbl_size;
};
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index 0fbe238..21b9cdc 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -400,7 +400,6 @@
struct msm_camera_csi_lane_params *csi_lane_params;
uint16_t csi_lane_mask;
csi_lane_params = (struct msm_camera_csi_lane_params *)arg;
- csi_lane_mask = csi_lane_params->csi_lane_mask;
if (!csiphy_dev || !csiphy_dev->ref_count) {
pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
@@ -413,19 +412,29 @@
return -EINVAL;
}
- CDBG("%s csiphy_params, lane assign %x mask = %x\n",
- __func__,
- csi_lane_params->csi_lane_assign,
- csi_lane_params->csi_lane_mask);
-
if (csiphy_dev->hw_version < CSIPHY_VERSION_V30) {
csiphy_dev->lane_mask[csiphy_dev->pdev->id] = 0;
for (i = 0; i < 4; i++)
msm_camera_io_w(0x0, csiphy_dev->base +
MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i);
} else {
+ if (!csi_lane_params) {
+ pr_err("%s:%d failed: csi_lane_params %p\n", __func__,
+ __LINE__, csi_lane_params);
+ return -EINVAL;
+ }
+ csi_lane_mask = csi_lane_params->csi_lane_mask;
+
+ CDBG("%s csiphy_params, lane assign %x mask = %x\n",
+ __func__,
+ csi_lane_params->csi_lane_assign,
+ csi_lane_params->csi_lane_mask);
+
+ if (!csi_lane_mask)
+ csi_lane_mask = 0x1f;
+
csiphy_dev->lane_mask[csiphy_dev->pdev->id] &=
- ~(csi_lane_params->csi_lane_mask);
+ ~(csi_lane_mask);
i = 0;
while (csi_lane_mask & 0x1F) {
if (csi_lane_mask & 0x1) {
@@ -475,7 +484,6 @@
struct msm_camera_csi_lane_params *csi_lane_params;
uint16_t csi_lane_mask;
csi_lane_params = (struct msm_camera_csi_lane_params *)arg;
- csi_lane_mask = csi_lane_params->csi_lane_mask;
if (!csiphy_dev || !csiphy_dev->ref_count) {
pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
@@ -488,19 +496,29 @@
return -EINVAL;
}
- CDBG("%s csiphy_params, lane assign %x mask = %x\n",
- __func__,
- csi_lane_params->csi_lane_assign,
- csi_lane_params->csi_lane_mask);
-
if (csiphy_dev->hw_version < CSIPHY_VERSION_V30) {
csiphy_dev->lane_mask[csiphy_dev->pdev->id] = 0;
for (i = 0; i < 4; i++)
msm_camera_io_w(0x0, csiphy_dev->base +
MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i);
} else {
+ if (!csi_lane_params) {
+ pr_err("%s:%d failed: csi_lane_params %p\n", __func__,
+ __LINE__, csi_lane_params);
+ return -EINVAL;
+ }
+ csi_lane_mask = csi_lane_params->csi_lane_mask;
+
+ CDBG("%s csiphy_params, lane assign %x mask = %x\n",
+ __func__,
+ csi_lane_params->csi_lane_assign,
+ csi_lane_params->csi_lane_mask);
+
+ if (!csi_lane_mask)
+ csi_lane_mask = 0x1f;
+
csiphy_dev->lane_mask[csiphy_dev->pdev->id] &=
- ~(csi_lane_params->csi_lane_mask);
+ ~(csi_lane_mask);
i = 0;
while (csi_lane_mask & 0x1F) {
if (csi_lane_mask & 0x1) {
diff --git a/drivers/media/platform/msm/camera_v2/sensor/gc0339.c b/drivers/media/platform/msm/camera_v2/sensor/gc0339.c
index 8cba04c..cc38b56 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/gc0339.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/gc0339.c
@@ -490,6 +490,11 @@
break;
}
+ if (conf_array.addr_type == MSM_CAMERA_I2C_WORD_ADDR
+ || conf_array.data_type == MSM_CAMERA_I2C_WORD_DATA
+ || !conf_array.size)
+ break;
+
reg_setting = kzalloc(conf_array.size *
(sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
if (!reg_setting) {
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index a3d6dd8..2bc460b 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -99,6 +99,14 @@
static int mpq_sdmx_debug;
module_param(mpq_sdmx_debug, int, S_IRUGO | S_IWUSR);
+/*
+ * Indicates whether the demux should search for frame boundaries
+ * and notify on video packets on frame-basis or whether to provide
+ * only video PES packet payloads as-is.
+ */
+static int video_framing = 1;
+module_param(video_framing, int, S_IRUGO | S_IWUSR);
+
/* Global data-structure for managing demux devices */
static struct
{
@@ -112,13 +120,6 @@
struct mpq_streambuffer
decoder_buffers[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
- /*
- * Indicates whether the video decoder handles framing
- * or we are required to provide framing information
- * in the meta-data passed to the decoder.
- */
- int decoder_framing;
-
/* Indicates whether secure demux TZ application is available */
int secure_demux_app_loaded;
} mpq_dmx_info;
@@ -197,7 +198,8 @@
patterns[1] = dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
patterns[2] = dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START);
patterns[3] = dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START);
- *patterns_num = 4;
+ patterns[4] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+ *patterns_num = 5;
break;
case DMX_VIDEO_CODEC_VC1:
@@ -466,6 +468,12 @@
&mpq_demux->decoder_ts_errors);
debugfs_create_u32(
+ "decoder_cc_errors",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->decoder_cc_errors);
+
+ debugfs_create_u32(
"sdmx_process_count",
S_IRUGO | S_IWUSR | S_IWGRP,
mpq_demux->demux.dmx.debugfs_demux_dir,
@@ -616,13 +624,6 @@
mpq_dmx_info.secure_demux_app_loaded = 0;
- /*
- * TODO: the following should be set based on the decoder:
- * 0 means the decoder doesn't handle framing, so framing
- * is done by demux. 1 means the decoder handles framing.
- */
- mpq_dmx_info.decoder_framing = 0;
-
/* Allocate memory for all MPQ devices */
mpq_dmx_info.devices =
vzalloc(mpq_demux_device_num*sizeof(struct mpq_demux));
@@ -1339,7 +1340,7 @@
struct mpq_streambuffer *stream_buffer;
/* get and store framing information if required */
- if (!mpq_dmx_info.decoder_framing) {
+ if (video_framing) {
mpq_dmx_get_pattern_params(
mpq_feed->dvb_demux_feed->video_codec,
feed_data->patterns, &feed_data->patterns_num);
@@ -1457,6 +1458,7 @@
mpq_demux->decoder_out_interval_sum = 0;
mpq_demux->decoder_out_interval_max = 0;
mpq_demux->decoder_ts_errors = 0;
+ mpq_demux->decoder_cc_errors = 0;
return 0;
@@ -2346,6 +2348,14 @@
meta_data.info.framing.pattern_type =
feed_data->last_framing_match_type;
meta_data.info.framing.stc = feed_data->last_framing_match_stc;
+ meta_data.info.framing.continuity_error_counter =
+ feed_data->continuity_errs;
+ meta_data.info.framing.transport_error_indicator_counter =
+ feed_data->tei_errs;
+ meta_data.info.framing.ts_dropped_bytes =
+ feed_data->ts_dropped_bytes;
+ meta_data.info.framing.ts_packets_num =
+ feed_data->ts_packets_num;
mpq_streambuffer_get_buffer_handle(stream_buffer,
0, /* current write buffer handle */
@@ -2415,7 +2425,6 @@
mpq_dmx_write_pts_dts(feed_data,
&(meta_data.info.pes.pts_dts_info));
- mpq_dmx_save_pts_dts(feed_data);
meta_data.packet_type = DMX_PES_PACKET;
meta_data.info.pes.stc = feed_data->prev_stc;
@@ -2644,6 +2653,7 @@
mpq_dmx_check_continuity(feed_data,
ts_header->continuity_counter,
discontinuity_indicator);
+ mpq_demux->decoder_cc_errors += feed_data->continuity_errs;
/* Need to back-up the PTS information of the very first frame */
if (feed_data->first_pts_dts_copy) {
@@ -2788,6 +2798,15 @@
feed_data->last_framing_match_type;
meta_data.info.framing.stc =
feed_data->last_framing_match_stc;
+ meta_data.info.framing.continuity_error_counter =
+ feed_data->continuity_errs;
+ meta_data.info.framing.
+ transport_error_indicator_counter =
+ feed_data->tei_errs;
+ meta_data.info.framing.ts_dropped_bytes =
+ feed_data->ts_dropped_bytes;
+ meta_data.info.framing.ts_packets_num =
+ feed_data->ts_packets_num;
mpq_streambuffer_get_buffer_handle(
stream_buffer,
@@ -2933,7 +2952,9 @@
mpq_dmx_write_pts_dts(feed_data,
&(meta_data.info.pes.pts_dts_info));
- mpq_dmx_save_pts_dts(feed_data);
+
+ /* Mark that we detected the start of a new PES */
+ feed_data->first_pts_dts_copy = 1;
meta_data.packet_type = DMX_PES_PACKET;
meta_data.info.pes.stc = feed_data->prev_stc;
@@ -3038,7 +3059,7 @@
/*
* Need to back-up the PTS information
- * of the very first PES
+ * of the start of a new PES
*/
if (feed_data->first_pts_dts_copy) {
mpq_dmx_save_pts_dts(feed_data);
@@ -3052,6 +3073,7 @@
mpq_dmx_check_continuity(feed_data,
ts_header->continuity_counter,
discontinuity_indicator);
+ mpq_demux->decoder_cc_errors += feed_data->continuity_errs;
if (mpq_streambuffer_data_write(
stream_buffer,
@@ -3143,7 +3165,7 @@
curr_stc *= 256; /* convert from 105.47 KHZ to 27MHz */
}
- if (mpq_dmx_info.decoder_framing)
+ if (!video_framing)
return mpq_dmx_process_video_packet_no_framing(feed, buf,
curr_stc);
else
@@ -4885,7 +4907,7 @@
event.status = DMX_OK_EOS;
if (!feed->secure_mode.is_secured) {
if (dvb_dmx_is_video_feed(feed)) {
- if (mpq_dmx_info.decoder_framing)
+ if (!video_framing)
mpq_dmx_decoder_pes_closure(mpq_demux,
mpq_feed);
else
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
index f095e00..adc4261 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -388,6 +388,8 @@
* successive video frames output, exposed in debugfs.
* @decoder_ts_errors: Counter for number of decoder packets with TEI bit
* set, exposed in debugfs.
+ * @decoder_cc_errors: Counter for number of decoder packets with continuity
+ * counter errors, exposed in debugfs.
* @sdmx_process_count: Total number of times sdmx_process is called.
* @sdmx_process_time_sum: Total time sdmx_process takes.
* @sdmx_process_time_average: Average time sdmx_process takes.
@@ -445,6 +447,7 @@
u32 decoder_out_interval_average;
u32 decoder_out_interval_max;
u32 decoder_ts_errors;
+ u32 decoder_cc_errors;
u32 sdmx_process_count;
u32 sdmx_process_time_sum;
u32 sdmx_process_time_average;
diff --git a/drivers/media/platform/msm/dvb/include/mpq_adapter.h b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
index a2ade18..86f36a4 100644
--- a/drivers/media/platform/msm/dvb/include/mpq_adapter.h
+++ b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -67,6 +67,24 @@
/** STC value attached to first TS packet holding the pattern */
u64 stc;
+
+ /*
+ * Number of TS packets with Transport Error Indicator (TEI)
+ * found while constructing the frame.
+ */
+ __u32 transport_error_indicator_counter;
+
+ /* Number of continuity errors found while constructing the frame */
+ __u32 continuity_error_counter;
+
+ /*
+ * Number of dropped bytes due to insufficient buffer space,
+ * since last reported frame.
+ */
+ __u32 ts_dropped_bytes;
+
+ /* Total number of TS packets holding the frame */
+ __u32 ts_packets_num;
};
struct dmx_pes_packet_info {
diff --git a/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c b/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
index 0908a6e..9ddb9b7 100644
--- a/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
+++ b/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
@@ -134,6 +134,8 @@
case DMX_IDX_H264_SPS:
case DMX_IDX_MPEG_SEQ_HEADER:
case DMX_IDX_VC1_SEQ_HEADER:
+ case DMX_IDX_H264_ACCESS_UNIT_DEL:
+ case DMX_IDX_H264_SEI:
DBG("SPS FOUND\n");
frame_found = false;
break;
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index d92a9c1..02b36f8 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -191,7 +191,7 @@
(struct ocmem_buf *) resource_value;
pkt->resource_type = HFI_RESOURCE_OCMEM;
- pkt->size += sizeof(struct hfi_resource_ocmem);
+ pkt->size += sizeof(struct hfi_resource_ocmem) - sizeof(u32);
hfioc_mem->size = (u32) ocmem->len;
hfioc_mem->mem = (u8 *) ocmem->addr;
break;
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 44105ad..653ba46 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -45,6 +45,7 @@
case HFI_ERR_SESSION_UNSUPPORTED_PROPERTY:
case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
case HFI_ERR_SESSION_INSUFFICIENT_RESOURCES:
+ case HFI_ERR_SESSION_UNSUPPORTED_STREAM:
vidc_err = VIDC_ERR_NOT_SUPPORTED;
break;
case HFI_ERR_SYS_MAX_SESSIONS_REACHED:
@@ -786,6 +787,8 @@
data_done.input_done.offset = pkt->offset;
data_done.input_done.filled_len = pkt->filled_len;
data_done.input_done.packet_buffer = pkt->packet_buffer;
+ data_done.input_done.status =
+ hfi_map_err_status((u32) pkt->error_type);
callback(SESSION_ETB_DONE, &data_done);
}
@@ -1003,7 +1006,6 @@
struct hfi_msg_sys_session_end_done_packet *pkt)
{
struct msm_vidc_cb_cmd_done cmd_done;
- struct hal_session *sess_close;
dprintk(VIDC_DBG, "RECEIVED:SESSION_END_DONE");
@@ -1021,12 +1023,6 @@
cmd_done.status = hfi_map_err_status((u32)pkt->error_type);
cmd_done.data = NULL;
cmd_done.size = 0;
- sess_close = (struct hal_session *)pkt->session_id;
- dprintk(VIDC_INFO, "deleted the session: 0x%x",
- sess_close->session_id);
- list_del(&sess_close->list);
- kfree(sess_close);
- sess_close = NULL;
callback(SESSION_END_DONE, &cmd_done);
}
@@ -1035,7 +1031,6 @@
struct hfi_msg_sys_session_abort_done_packet *pkt)
{
struct msm_vidc_cb_cmd_done cmd_done;
- struct hal_session *sess_close;
dprintk(VIDC_DBG, "RECEIVED:SESSION_ABORT_DONE");
@@ -1053,16 +1048,6 @@
cmd_done.data = NULL;
cmd_done.size = 0;
- sess_close = (struct hal_session *)pkt->session_id;
- if (!sess_close) {
- dprintk(VIDC_ERR, "%s: invalid session pointer\n", __func__);
- return;
- }
- dprintk(VIDC_ERR, "deleted the session: 0x%x",
- sess_close->session_id);
- list_del(&sess_close->list);
- kfree(sess_close);
- sess_close = NULL;
callback(SESSION_ABORT_DONE, &cmd_done);
}
@@ -1230,7 +1215,7 @@
hfi_msg_sys_session_abort_done_packet*) msg_hdr);
break;
default:
- dprintk(VIDC_ERR, "UNKNOWN_MSG_TYPE : %d", msg_hdr->packet);
+ dprintk(VIDC_DBG, "UNKNOWN_MSG_TYPE : %d", msg_hdr->packet);
break;
}
return rc;
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 96d95989..cf96ca2 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -387,7 +387,6 @@
goto exit;
}
for (i = 0; i < b->length; ++i) {
- buffer_type = HAL_BUFFER_OUTPUT;
if (EXTRADATA_IDX(b->length) &&
(i == EXTRADATA_IDX(b->length)) &&
!b->m.planes[i].length) {
@@ -404,8 +403,20 @@
kfree(binfo);
goto exit;
}
- if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- buffer_type = HAL_BUFFER_INPUT;
+
+ if (vidc_inst->session_type == MSM_VIDC_DECODER) {
+ if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ buffer_type = HAL_BUFFER_INPUT;
+ else /* V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE */
+ buffer_type = HAL_BUFFER_OUTPUT;
+ } else {
+ /* FIXME in the future. See the comment in
+ * msm_comm_get_domain_partition. Same problem here. */
+ if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ buffer_type = HAL_BUFFER_OUTPUT;
+ else /* V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE */
+ buffer_type = HAL_BUFFER_INPUT;
+ }
temp = get_same_fd_buffer(&v4l2_inst->registered_bufs,
b->m.planes[i].reserved[0], &plane);
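
The branch added above maps a V4L2 port to a HAL buffer type based on the session type: for a decoder the OUTPUT (bitstream) port feeds the core and CAPTURE returns frames, while for an encoder the mapping is reversed. A standalone sketch of that table; the enums are local stand-ins for the V4L2/HAL types.

    #include <stdio.h>

    enum session_type { DECODER, ENCODER };
    enum v4l2_port { OUTPUT_MPLANE, CAPTURE_MPLANE };
    enum hal_buffer { HAL_BUFFER_INPUT, HAL_BUFFER_OUTPUT };

    /* Decoder: OUTPUT (bitstream) feeds the core, CAPTURE returns frames.
     * Encoder: the roles are swapped. */
    static enum hal_buffer map_buffer_type(enum session_type s, enum v4l2_port p)
    {
        if (s == DECODER)
            return p == OUTPUT_MPLANE ? HAL_BUFFER_INPUT : HAL_BUFFER_OUTPUT;
        return p == OUTPUT_MPLANE ? HAL_BUFFER_OUTPUT : HAL_BUFFER_INPUT;
    }

    int main(void)
    {
        printf("decoder/output=%d encoder/output=%d\n",
               map_buffer_type(DECODER, OUTPUT_MPLANE),
               map_buffer_type(ENCODER, OUTPUT_MPLANE));
        return 0;
    }
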
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 1c43f1e..b6d031a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -477,14 +477,14 @@
if (rc) {
dprintk(VIDC_ERR,
"Failed to initialize vb2 queue on capture port\n");
- goto fail_init;
+ goto fail_bufq_capture;
}
rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
session_type);
if (rc) {
dprintk(VIDC_ERR,
"Failed to initialize vb2 queue on capture port\n");
- goto fail_init;
+ goto fail_bufq_output;
}
rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT);
if (rc) {
@@ -502,6 +502,14 @@
mutex_unlock(&core->lock);
return inst;
fail_init:
+ vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq);
+fail_bufq_output:
+ vb2_queue_release(&inst->bufq[CAPTURE_PORT].vb2_bufq);
+fail_bufq_capture:
+ if (session_type == MSM_VIDC_DECODER)
+ msm_vdec_ctrl_deinit(inst);
+ else if (session_type == MSM_VIDC_ENCODER)
+ msm_venc_ctrl_deinit(inst);
msm_smem_delete_client(inst->mem_client);
fail_mem_client:
kfree(inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 57b98dc..f94b6f1 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -627,6 +627,8 @@
if (inst->core)
hdev = inst->core->device;
if (hdev && inst->session) {
+ dprintk(VIDC_DBG,
+ "cleaning up inst: 0x%p", inst);
rc = call_hfi_op(hdev, session_clean,
(void *) inst->session);
if (rc)
@@ -693,10 +695,24 @@
{
struct msm_vidc_cb_cmd_done *response = data;
struct msm_vidc_inst *inst;
+ struct hfi_device *hdev = NULL;
+
if (response) {
inst = (struct msm_vidc_inst *)response->session_id;
- signal_session_msg_receipt(cmd, inst);
+ if (!inst || !inst->core || !inst->core->device) {
+ dprintk(VIDC_ERR, "%s invalid params\n", __func__);
+ return;
+ }
+ hdev = inst->core->device;
+ mutex_lock(&inst->lock);
+ if (inst->session) {
+ dprintk(VIDC_DBG, "cleaning up inst: 0x%p", inst);
+ call_hfi_op(hdev, session_clean,
+ (void *) inst->session);
+ }
inst->session = NULL;
+ mutex_unlock(&inst->lock);
+ signal_session_msg_receipt(cmd, inst);
show_stats(inst);
} else {
dprintk(VIDC_ERR,
@@ -737,22 +753,45 @@
struct msm_vidc_cb_data_done *response = data;
struct vb2_buffer *vb;
struct msm_vidc_inst *inst;
+ struct vidc_hal_ebd *empty_buf_done;
+
if (!response) {
dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
return;
}
vb = response->clnt_data;
inst = (struct msm_vidc_inst *)response->session_id;
+ if (!inst) {
+ dprintk(VIDC_ERR, "%s Invalid response from vidc_hal\n",
+ __func__);
+ return;
+ }
if (vb) {
vb->v4l2_planes[0].bytesused = response->input_done.filled_len;
vb->v4l2_planes[0].data_offset = response->input_done.offset;
if (vb->v4l2_planes[0].data_offset > vb->v4l2_planes[0].length)
- dprintk(VIDC_ERR, "Error: data_offset overflow\n");
+ dprintk(VIDC_INFO, "data_offset overflow length\n");
if (vb->v4l2_planes[0].bytesused > vb->v4l2_planes[0].length)
- dprintk(VIDC_ERR, "Error: buffer overflow\n");
+ dprintk(VIDC_INFO, "bytesused overflow length\n");
if ((u8 *)vb->v4l2_planes[0].m.userptr !=
response->input_done.packet_buffer)
- dprintk(VIDC_ERR, "Error: unexpected buffer address\n");
+ dprintk(VIDC_INFO, "Unexpected buffer address\n");
+ vb->v4l2_buf.flags = 0;
+ empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
+ if (empty_buf_done) {
+ if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
+ dprintk(VIDC_INFO,
+ "Failed : Unsupported input stream\n");
+ vb->v4l2_buf.flags |=
+ V4L2_QCOM_BUF_INPUT_UNSUPPORTED;
+ }
+ if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) {
+ dprintk(VIDC_INFO,
+ "Failed : Corrupted input stream\n");
+ vb->v4l2_buf.flags |=
+ V4L2_QCOM_BUF_DATA_CORRUPT;
+ }
+ }
mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 5c22552..010f15d 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -936,6 +936,7 @@
u32 timestamp_hi;
u32 timestamp_lo;
u32 flags;
+ u32 status;
u32 mark_target;
u32 mark_data;
u32 stats;
diff --git a/drivers/media/platform/msm/wfd/enc-venus-subdev.c b/drivers/media/platform/msm/wfd/enc-venus-subdev.c
index 8121471..9cd199b 100644
--- a/drivers/media/platform/msm/wfd/enc-venus-subdev.c
+++ b/drivers/media/platform/msm/wfd/enc-venus-subdev.c
@@ -296,24 +296,13 @@
static long set_default_properties(struct venc_inst *inst)
{
struct v4l2_control ctrl = {0};
- int rc;
/* Set the IDR period as 1. The venus core doesn't give
* the sps/pps for I-frames, only IDR. */
ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD;
ctrl.value = 1;
- rc = msm_vidc_s_ctrl(inst->vidc_context, &ctrl);
- if (rc)
- WFD_MSG_WARN("Failed to set IDR period\n");
- /* Set the default rc mode to VBR/VFR, client can change later */
- ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL;
- ctrl.value = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR;
- rc = msm_vidc_s_ctrl(inst->vidc_context, &ctrl);
- if (rc)
- WFD_MSG_WARN("Failed to set rc mode\n");
-
- return 0;
+ return msm_vidc_s_ctrl(inst->vidc_context, &ctrl);
}
static int subscribe_events(struct venc_inst *inst)
diff --git a/drivers/media/platform/msm/wfd/vsg-subdev.c b/drivers/media/platform/msm/wfd/vsg-subdev.c
index 6ffaffa..0f2fbbb 100644
--- a/drivers/media/platform/msm/wfd/vsg-subdev.c
+++ b/drivers/media/platform/msm/wfd/vsg-subdev.c
@@ -424,7 +424,8 @@
struct timespec diff = timespec_sub(buf_info->time,
context->last_buffer->time);
struct timespec temp = ns_to_timespec(
- context->frame_interval);
+ context->frame_interval -
+ context->frame_interval_variance);
if (timespec_compare(&diff, &temp) >= 0)
push = true;
@@ -633,6 +634,61 @@
return 0;
}
+static long vsg_set_frame_interval_variance(struct v4l2_subdev *sd, void *arg)
+{
+ struct vsg_context *context = NULL;
+ int64_t variance;
+
+ if (!arg || !sd) {
+ WFD_MSG_ERR("ERROR, invalid arguments into %s\n", __func__);
+ return -EINVAL;
+ }
+
+ context = (struct vsg_context *)sd->dev_priv;
+ variance = *(int64_t *)arg;
+
+ if (variance < 0 || variance > 100) {
+ WFD_MSG_ERR("ERROR, invalid variance %lld%% into %s\n",
+ variance, __func__);
+ return -EINVAL;
+ } else if (context->mode == VSG_MODE_CFR) {
+ WFD_MSG_ERR("Setting FPS variance not supported in CFR mode\n");
+ return -ENOTSUPP;
+ }
+
+ mutex_lock(&context->mutex);
+
+ /* Convert from a percentage to a value in nanoseconds */
+ variance *= context->frame_interval;
+ do_div(variance, 100);
+
+ context->frame_interval_variance = variance;
+ mutex_unlock(&context->mutex);
+
+ return 0;
+}
+
+static long vsg_get_frame_interval_variance(struct v4l2_subdev *sd, void *arg)
+{
+ struct vsg_context *context = NULL;
+ int64_t variance;
+
+ if (!arg || !sd) {
+ WFD_MSG_ERR("ERROR, invalid arguments into %s\n", __func__);
+ return -EINVAL;
+ }
+
+ context = (struct vsg_context *)sd->dev_priv;
+
+ mutex_lock(&context->mutex);
+ variance = context->frame_interval_variance * 100;
+ do_div(variance, context->frame_interval);
+ *(int64_t *)arg = variance;
+ mutex_unlock(&context->mutex);
+
+ return 0;
+}
+
static long vsg_set_mode(struct v4l2_subdev *sd, void *arg)
{
struct vsg_context *context = NULL;
@@ -702,6 +758,12 @@
case VSG_SET_FRAME_INTERVAL:
rc = vsg_set_frame_interval(sd, arg);
break;
+ case VSG_SET_FRAME_INTERVAL_VARIANCE:
+ rc = vsg_set_frame_interval_variance(sd, arg);
+ break;
+ case VSG_GET_FRAME_INTERVAL_VARIANCE:
+ rc = vsg_get_frame_interval_variance(sd, arg);
+ break;
case VSG_GET_MAX_FRAME_INTERVAL:
rc = vsg_get_max_frame_interval(sd, arg);
break;
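
The new VSG ioctls express the frame-interval variance as a percentage of the nominal interval and store it internally in nanoseconds, which the timespec comparison earlier in this file subtracts from the interval before deciding whether to push a frame early. A standalone sketch of the conversion and the resulting threshold check; the function names are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    /* Convert a variance given as a percentage of the nominal frame
     * interval into nanoseconds. */
    static int64_t variance_pct_to_ns(int64_t pct, int64_t frame_interval_ns)
    {
        return pct * frame_interval_ns / 100;
    }

    /* A frame may be pushed once at least (interval - variance) has elapsed. */
    static int should_push(int64_t elapsed_ns, int64_t frame_interval_ns,
                           int64_t variance_ns)
    {
        return elapsed_ns >= frame_interval_ns - variance_ns;
    }

    int main(void)
    {
        int64_t interval = 33333333;                    /* ~30 fps */
        int64_t var = variance_pct_to_ns(10, interval); /* 10% -> ~3.3 ms */

        printf("variance=%lld ns, push early: %d\n",
               (long long)var, should_push(31000000, interval, var));
        return 0;
    }
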
diff --git a/drivers/media/platform/msm/wfd/vsg-subdev.h b/drivers/media/platform/msm/wfd/vsg-subdev.h
index f5e4f5d..3347e5b 100644
--- a/drivers/media/platform/msm/wfd/vsg-subdev.h
+++ b/drivers/media/platform/msm/wfd/vsg-subdev.h
@@ -59,7 +59,7 @@
struct vsg_buf_info free_queue, busy_queue;
struct vsg_msg_ops vmops;
/* All time related values below in nanosecs */
- int64_t frame_interval, max_frame_interval;
+ int64_t frame_interval, max_frame_interval, frame_interval_variance;
struct workqueue_struct *work_queue;
struct hrtimer threshold_timer;
struct mutex mutex;
@@ -90,9 +90,11 @@
/* Time related arguments for frame interval ioctls are always in nanosecs*/
#define VSG_SET_FRAME_INTERVAL _IOW(VSG_MAGIC_IOCTL, 9, int64_t *)
#define VSG_GET_FRAME_INTERVAL _IOR(VSG_MAGIC_IOCTL, 10, int64_t *)
-#define VSG_SET_MAX_FRAME_INTERVAL _IOW(VSG_MAGIC_IOCTL, 11, int64_t *)
-#define VSG_GET_MAX_FRAME_INTERVAL _IOR(VSG_MAGIC_IOCTL, 12, int64_t *)
-#define VSG_SET_MODE _IOW(VSG_MAGIC_IOCTL, 13, enum vsg_modes *)
+#define VSG_SET_FRAME_INTERVAL_VARIANCE _IOW(VSG_MAGIC_IOCTL, 11, int64_t *)
+#define VSG_GET_FRAME_INTERVAL_VARIANCE _IOR(VSG_MAGIC_IOCTL, 12, int64_t *)
+#define VSG_SET_MAX_FRAME_INTERVAL _IOW(VSG_MAGIC_IOCTL, 13, int64_t *)
+#define VSG_GET_MAX_FRAME_INTERVAL _IOR(VSG_MAGIC_IOCTL, 14, int64_t *)
+#define VSG_SET_MODE _IOW(VSG_MAGIC_IOCTL, 15, enum vsg_modes *)
extern int vsg_init(struct v4l2_subdev *sd, u32 val);
extern long vsg_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
diff --git a/drivers/media/platform/msm/wfd/wfd-ioctl.c b/drivers/media/platform/msm/wfd/wfd-ioctl.c
index 6554947..58e008d 100644
--- a/drivers/media/platform/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/platform/msm/wfd/wfd-ioctl.c
@@ -1124,7 +1124,9 @@
struct wfd_device *wfd_dev = video_drvdata(filp);
struct wfd_inst *inst = file_to_inst(filp);
struct v4l2_qcom_frameskip frameskip;
- int64_t frame_interval, max_frame_interval;
+ int64_t frame_interval = 0,
+ max_frame_interval = 0,
+ frame_interval_variance = 0;
void *extendedmode = NULL;
enum vsg_modes vsg_mode = VSG_MODE_VFR;
enum venc_framerate_modes venc_mode = VENC_MODE_VFR;
@@ -1177,6 +1179,7 @@
goto set_parm_fail;
max_frame_interval = (int64_t)frameskip.maxframeinterval;
+ frame_interval_variance = frameskip.fpsvariance;
vsg_mode = VSG_MODE_VFR;
venc_mode = VENC_MODE_VFR;
@@ -1206,6 +1209,16 @@
goto set_parm_fail;
}
+ if (frame_interval_variance) {
+ rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
+ ioctl, VSG_SET_FRAME_INTERVAL_VARIANCE,
+ &frame_interval_variance);
+ if (rc) {
+ WFD_MSG_ERR("Setting FR variance for VSG failed\n");
+ goto set_parm_fail;
+ }
+ }
+
set_parm_fail:
return rc;
}
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index 28f9e80..b9eb8f9 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -1635,8 +1635,9 @@
if (status)
return;
- if (radio->mode != FM_CALIB)
+ if ((radio->mode != FM_CALIB) && (radio->mode != FM_OFF))
iris_q_event(radio, IRIS_EVT_RADIO_DISABLED);
+ radio->mode = FM_OFF;
radio_hci_req_complete(hdev, status);
}
@@ -2420,9 +2421,11 @@
FMDERR("AF list size received more than available size");
return;
}
- memcpy(&ev.af_list[0], &skb->data[AF_LIST_OFFSET], ev.af_size);
+ memcpy(&ev.af_list[0], &skb->data[AF_LIST_OFFSET],
+ ev.af_size * sizeof(int));
iris_q_event(radio, IRIS_EVT_NEW_AF_LIST);
- iris_q_evt_data(radio, (char *)&ev, sizeof(ev), IRIS_BUF_AF_LIST);
+ iris_q_evt_data(radio, (char *)&ev, (7 + ev.af_size * sizeof(int)),
+ IRIS_BUF_AF_LIST);
}
static void hci_ev_rds_lock_status(struct radio_hci_dev *hdev,
@@ -2692,7 +2695,7 @@
radio->fm_hdev);
if (retval < 0)
FMDERR("Disable Failed after calibration %d", retval);
- radio->mode = FM_OFF;
+ radio->mode = FM_TURNING_OFF;
return retval;
}
static int iris_vidioc_g_ctrl(struct file *file, void *priv,
@@ -3238,7 +3241,7 @@
" %d\n", retval);
return retval;
}
- radio->mode = FM_OFF;
+ radio->mode = FM_TURNING_OFF;
break;
case FM_TRANS:
retval = hci_cmd(HCI_FM_DISABLE_TRANS_CMD,
@@ -3249,7 +3252,7 @@
" %d\n", retval);
return retval;
}
- radio->mode = FM_OFF;
+ radio->mode = FM_TURNING_OFF;
break;
default:
retval = -EINVAL;
@@ -4040,16 +4043,18 @@
if (radio->mode == FM_OFF)
return 0;
- if (radio->mode == FM_RECV)
+ if (radio->mode == FM_RECV) {
+ radio->mode = FM_OFF;
retval = hci_cmd(HCI_FM_DISABLE_RECV_CMD,
radio->fm_hdev);
- else if (radio->mode == FM_TRANS)
+ } else if (radio->mode == FM_TRANS) {
+ radio->mode = FM_OFF;
retval = hci_cmd(HCI_FM_DISABLE_TRANS_CMD,
radio->fm_hdev);
+ }
if (retval < 0)
FMDERR("Err on disable FM %d\n", retval);
- radio->mode = FM_OFF;
return retval;
}
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index bd838fc..b750602 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -2647,6 +2647,11 @@
struct qseecom_dev_handle *data = file->private_data;
void __user *argp = (void __user *) arg;
+ if (!data) {
+ pr_err("Invalid/uninitialized device handle\n");
+ return -EINVAL;
+ }
+
if (data->abort) {
pr_err("Aborting qseecom driver\n");
return -ENODEV;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9527249..578cc14 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -130,6 +130,33 @@
pr_info(DRIVER_NAME ": ===========================================\n");
}
+#define MAX_PM_QOS_TIMEOUT_VALUE 100000 /* 100 ms */
+static ssize_t
+show_sdhci_pm_qos_tout(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d us\n", host->pm_qos_timeout_us);
+}
+
+static ssize_t
+store_sdhci_pm_qos_tout(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ uint32_t value;
+ unsigned long flags;
+
+ if (!kstrtou32(buf, 0, &value)) {
+ spin_lock_irqsave(&host->lock, flags);
+ if (value <= MAX_PM_QOS_TIMEOUT_VALUE)
+ host->pm_qos_timeout_us = value;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return count;
+}
+
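A minimal user-space sketch (not part of the patch) of exercising the new pm_qos_unvote_delay attribute registered further down in sdhci_add_host(); the sysfs path depends on the platform device name, so it is taken from the command line rather than assumed:

#include <stdio.h>

int main(int argc, char **argv)
{
	char buf[64];
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <sysfs-path> [new-timeout-us]\n", argv[0]);
		return 1;
	}

	if (argc > 2) {
		/* Write a new unvote delay; values above 100000 us are
		 * silently ignored by the store handler above. */
		f = fopen(argv[1], "w");
		if (!f) {
			perror("open for write");
			return 1;
		}
		fprintf(f, "%s\n", argv[2]);
		fclose(f);
	}

	/* Read back; the show handler prints e.g. "10000 us" */
	f = fopen(argv[1], "r");
	if (!f) {
		perror("open for read");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s", buf);
	fclose(f);
	return 0;
}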
/*****************************************************************************\
* *
* Low level functions *
@@ -1361,15 +1388,55 @@
{
struct sdhci_host *host = mmc_priv(mmc);
- if (host->cpu_dma_latency_us)
- pm_qos_update_request(&host->pm_qos_req_dma,
+ if (host->cpu_dma_latency_us) {
+ /*
+ * In performance mode, release the QoS vote only after a timeout
+ * so that back-to-back requests do not pay the latency of waking
+ * the CPU from the low power mode it would otherwise enter as
+ * soon as the vote is released.
+ */
+ if (host->power_policy == SDHCI_PERFORMANCE_MODE)
+ pm_qos_update_request_timeout(&host->pm_qos_req_dma,
+ host->cpu_dma_latency_us,
+ host->pm_qos_timeout_us);
+ else
+ pm_qos_update_request(&host->pm_qos_req_dma,
PM_QOS_DEFAULT_VALUE);
+ }
+
if (host->ops->platform_bus_voting)
host->ops->platform_bus_voting(host, 0);
return 0;
}
+static inline void sdhci_update_power_policy(struct sdhci_host *host,
+ enum sdhci_power_policy policy)
+{
+ host->power_policy = policy;
+}
+
+static int sdhci_notify_load(struct mmc_host *mmc, enum mmc_load state)
+{
+ int err = 0;
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ switch (state) {
+ case MMC_LOAD_HIGH:
+ sdhci_update_power_policy(host, SDHCI_PERFORMANCE_MODE);
+ break;
+ case MMC_LOAD_LOW:
+ sdhci_update_power_policy(host, SDHCI_POWER_SAVE_MODE);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
bool is_first_req)
{
@@ -1496,9 +1563,11 @@
int vdd_bit = -1;
u8 ctrl;
+ mutex_lock(&host->ios_mutex);
if (host->flags & SDHCI_DEVICE_DEAD) {
if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
+ mutex_unlock(&host->ios_mutex);
return;
}
@@ -1508,6 +1577,7 @@
spin_lock_irqsave(&host->lock, flags);
if (!host->clock) {
spin_unlock_irqrestore(&host->lock, flags);
+ mutex_unlock(&host->ios_mutex);
return;
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -1664,6 +1734,7 @@
sdhci_set_clock(host, ios->clock);
mmiowb();
+ mutex_unlock(&host->ios_mutex);
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -2196,6 +2267,7 @@
.disable = sdhci_disable,
.stop_request = sdhci_stop_request,
.get_xfer_remain = sdhci_get_xfer_remain,
+ .notify_load = sdhci_notify_load,
};
/*****************************************************************************\
@@ -2900,6 +2972,7 @@
host->mmc = mmc;
spin_lock_init(&host->lock);
+ mutex_init(&host->ios_mutex);
return host;
}
@@ -3355,9 +3428,22 @@
mmiowb();
- if (host->cpu_dma_latency_us)
+ if (host->cpu_dma_latency_us) {
+ host->pm_qos_timeout_us = 10000; /* default value */
pm_qos_add_request(&host->pm_qos_req_dma,
PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
+ host->pm_qos_tout.show = show_sdhci_pm_qos_tout;
+ host->pm_qos_tout.store = store_sdhci_pm_qos_tout;
+ sysfs_attr_init(&host->pm_qos_tout.attr);
+ host->pm_qos_tout.attr.name = "pm_qos_unvote_delay";
+ host->pm_qos_tout.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(mmc_dev(mmc), &host->pm_qos_tout);
+ if (ret)
+ pr_err("%s: cannot create pm_qos_unvote_delay %d\n",
+ mmc_hostname(mmc), ret);
+ }
+
mmc_add_host(mmc);
pr_info("%s: SDHCI controller on %s [%s] using %s\n",
diff --git a/drivers/net/ethernet/msm/msm_rmnet_bam.c b/drivers/net/ethernet/msm/msm_rmnet_bam.c
index 83f486c..3f3d76a 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_bam.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_bam.c
@@ -55,7 +55,7 @@
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
/* Configure device instances */
-#define RMNET_DEVICE_COUNT (8)
+#define RMNET_DEVICE_COUNT 9
/* allow larger frames */
#define RMNET_DATA_LEN 2000
@@ -85,6 +85,7 @@
u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
uint8_t device_up;
uint8_t in_reset;
+ struct platform_driver *bam_pdev;
};
#ifdef CONFIG_MSM_RMNET_DEBUG
@@ -401,6 +402,14 @@
__func__, p->ch_id, r);
return -ENODEV;
}
+
+ r = platform_driver_register(p->bam_pdev);
+ if (r) {
+ pr_err("%s: bam pdev registration failed n=%d rc=%d\n",
+ __func__, p->ch_id, r);
+ msm_bam_dmux_close(p->ch_id);
+ return r;
+ }
}
p->device_up = DEVICE_ACTIVE;
@@ -711,6 +720,11 @@
break;
}
+ if (i >= RMNET_DEVICE_COUNT) {
+ pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
+ return -ENODEV;
+ }
+
p = netdev_priv(netdevs[i]);
if (p->in_reset) {
p->in_reset = 0;
@@ -766,7 +780,7 @@
if (i >= RMNET_REV_DEVICE_COUNT) {
pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
- return 0;
+ return -ENODEV;
}
p = netdev_priv(netdevs_rev[i]);
@@ -871,8 +885,13 @@
#endif
for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+ const char *dev_name = "rmnet%d";
+
+ if (n == BAM_DMUX_USB_RMNET_0)
+ dev_name = "rmnet_usb%d";
+
dev = alloc_netdev(sizeof(struct rmnet_private),
- "rmnet%d", rmnet_setup);
+ dev_name, rmnet_setup);
if (!dev) {
pr_err("%s: no memory for netdev %d\n", __func__, n);
@@ -898,6 +917,7 @@
if (ret) {
pr_err("%s: unable to register netdev"
" %d rc=%d\n", __func__, n, ret);
+ netdevs[n] = NULL;
free_netdev(dev);
return ret;
}
@@ -921,18 +941,16 @@
bam_rmnet_drivers[n].probe = bam_rmnet_probe;
bam_rmnet_drivers[n].remove = bam_rmnet_remove;
tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
- if (tempname == NULL)
- return -ENOMEM;
+ if (tempname == NULL) {
+ netdevs[n] = NULL;
+ ret = -ENOMEM;
+ goto error;
+ }
scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
n);
bam_rmnet_drivers[n].driver.name = tempname;
bam_rmnet_drivers[n].driver.owner = THIS_MODULE;
- ret = platform_driver_register(&bam_rmnet_drivers[n]);
- if (ret) {
- pr_err("%s: registration failed n=%d rc=%d\n",
- __func__, n, ret);
- return ret;
- }
+ p->bam_pdev = &bam_rmnet_drivers[n];
}
/*Support for new rmnet ports */
for (n = 0; n < RMNET_REV_DEVICE_COUNT; n++) {
@@ -960,6 +978,7 @@
if (ret) {
pr_err("%s: unable to register rev netdev %d rc=%d\n",
__func__, n, ret);
+ netdevs_rev[n] = NULL;
free_netdev(dev);
return ret;
}
@@ -968,20 +987,23 @@
bam_rmnet_rev_drivers[n].probe = bam_rmnet_rev_probe;
bam_rmnet_rev_drivers[n].remove = bam_rmnet_rev_remove;
tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
- if (tempname == NULL)
- return -ENOMEM;
+ if (tempname == NULL) {
+ netdevs_rev[n] = NULL;
+ ret = -ENOMEM;
+ goto error;
+ }
scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
(n+BAM_DMUX_DATA_REV_RMNET_0));
bam_rmnet_rev_drivers[n].driver.name = tempname;
bam_rmnet_rev_drivers[n].driver.owner = THIS_MODULE;
- ret = platform_driver_register(&bam_rmnet_rev_drivers[n]);
- if (ret) {
- pr_err("%s: new rev driver registration failed n=%d rc=%d\n",
- __func__, n, ret);
- return ret;
- }
+ p->bam_pdev = &bam_rmnet_rev_drivers[n];
}
return 0;
+
+error:
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return ret;
}
module_init(rmnet_init);
diff --git a/drivers/nfc/nfc-nci.c b/drivers/nfc/nfc-nci.c
index 9e9a4ea..e832716 100644
--- a/drivers/nfc/nfc-nci.c
+++ b/drivers/nfc/nfc-nci.c
@@ -27,7 +27,6 @@
#include <linux/regulator/consumer.h>
#include "nfc-nci.h"
-
struct qca199x_platform_data {
unsigned int irq_gpio;
unsigned int dis_gpio;
@@ -62,6 +61,7 @@
bool irq_enabled;
spinlock_t irq_enabled_lock;
unsigned int count_irq;
+ enum nfcc_state state;
};
/*
@@ -69,6 +69,12 @@
* IOCTL NFC_KERNEL_LOGGING_MODE.
*/
static int logging_level;
+/*
+ * FTM-RAW-I2C RD/WR MODE
+ */
+static struct devicemode device_mode;
+static int ftm_raw_write_mode;
+static int ftm_werr_code;
static void qca199x_init_stat(struct qca199x_dev *qca199x_dev)
{
@@ -104,7 +110,6 @@
struct qca199x_dev *qca199x_dev = dev_id;
unsigned long flags;
-
spin_lock_irqsave(&qca199x_dev->irq_enabled_lock, flags);
qca199x_dev->count_irq++;
spin_unlock_irqrestore(&qca199x_dev->irq_enabled_lock, flags);
@@ -119,7 +124,6 @@
unsigned int mask = 0;
unsigned long flags;
-
poll_wait(filp, &qca199x_dev->read_wq, wait);
spin_lock_irqsave(&qca199x_dev->irq_enabled_lock, flags);
@@ -129,17 +133,33 @@
}
spin_unlock_irqrestore(&qca199x_dev->irq_enabled_lock, flags);
-
return mask;
}
+/*
+ * ONLY for FTM-RAW-I2C Mode
+ * Required to trigger a read requested by the DT layer. This means we need
+ * to spoof an interrupt and send a wake-up event.
+ */
+void ftm_raw_trigger_read(struct qca199x_dev *qca199x_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qca199x_dev->irq_enabled_lock, flags);
+ qca199x_dev->count_irq++;
+ spin_unlock_irqrestore(&qca199x_dev->irq_enabled_lock, flags);
+ wake_up(&qca199x_dev->read_wq);
+}
+
static ssize_t nfc_read(struct file *filp, char __user *buf,
size_t count, loff_t *offset)
{
struct qca199x_dev *qca199x_dev = filp->private_data;
- unsigned char tmp[MAX_BUFFER_SIZE];
+ unsigned char tmp[MAX_BUFFER_SIZE], rd_byte;
unsigned char len[PAYLOAD_HEADER_LENGTH];
int total, length, ret;
+ int ftm_rerr_code;
+ enum ehandler_mode dmode;
total = 0;
length = 0;
@@ -147,6 +167,36 @@
count = MAX_BUFFER_SIZE;
mutex_lock(&qca199x_dev->read_mutex);
+ dmode = device_mode.handle_flavour;
+ /* FTM-RAW-I2C RD/WR MODE - Special Case */
+ if ((dmode == UNSOLICITED_FTM_RAW_MODE) ||
+ (dmode == SOLICITED_FTM_RAW_MODE)) {
+ /* READ */
+ if ((ftm_raw_write_mode == 0) && (ftm_werr_code == 0)) {
+ ftm_rerr_code = i2c_master_recv(qca199x_dev->client,
+ &rd_byte, 1);
+ if (ftm_rerr_code == 0x1)
+ ftm_rerr_code = 0;
+ tmp[0] = (unsigned char)ftm_rerr_code;
+ tmp[1] = rd_byte;
+ total = 2;
+ ret = copy_to_user(buf, tmp, total);
+ }
+ /* WRITE */
+ else if ((ftm_raw_write_mode == 1) || (ftm_werr_code != 0)) {
+ tmp[0] = (unsigned char)ftm_werr_code;
+ total = 1;
+ ret = copy_to_user(buf, tmp, total);
+ } else {
+ /* Invalid case */
+ total = 0;
+ ret = copy_to_user(buf, tmp, total);
+ }
+ mutex_unlock(&qca199x_dev->read_mutex);
+ goto done;
+ }
+
+ /* NORMAL NCI Behaviour */
/* Read the header */
ret = i2c_master_recv(qca199x_dev->client, len, PAYLOAD_HEADER_LENGTH);
if (ret != PAYLOAD_HEADER_LENGTH)
@@ -174,7 +224,7 @@
err:
if (ret < 0)
mutex_unlock(&qca199x_dev->read_mutex);
-
+done:
return total;
}
@@ -183,7 +233,8 @@
{
struct qca199x_dev *qca199x_dev = filp->private_data;
char tmp[MAX_BUFFER_SIZE];
- int ret;
+ int ret = 0;
+ enum ehandler_mode dmode;
if (count > MAX_BUFFER_SIZE) {
dev_err(&qca199x_dev->client->dev, "out of memory\n");
@@ -195,10 +246,39 @@
return -EFAULT;
}
mutex_lock(&qca199x_dev->read_mutex);
- ret = i2c_master_send(qca199x_dev->client, tmp, count);
+ dmode = device_mode.handle_flavour;
+ /* FTM-DIRECT-I2C RD/WR MODE */
+ /* This is a special FTM-i2c mode case, where the tester is not using NCI */
+ if ((dmode == UNSOLICITED_FTM_RAW_MODE) ||
+ (dmode == SOLICITED_FTM_RAW_MODE)) {
+ /* Read From Register */
+ if (count == 1) {
+ ftm_raw_write_mode = 0;
+ ret = i2c_master_send(qca199x_dev->client, tmp, count);
+ if (ret == 1)
+ ftm_werr_code = 0;
+ else
+ ftm_werr_code = ret;
+ ftm_raw_trigger_read(qca199x_dev);
+ }
+ /* Write to Register */
+ if (count == 2) {
+ ftm_raw_write_mode = 1;
+ ret = i2c_master_send(qca199x_dev->client, tmp, count);
+ if (ret == 2)
+ ftm_werr_code = 0;
+ else
+ ftm_werr_code = ret;
+ ftm_raw_trigger_read(qca199x_dev);
+ }
+ } else {
+ /* NORMAL NCI behaviour - NB: we can be in FTM mode here also */
+ ret = i2c_master_send(qca199x_dev->client, tmp, count);
+ }
if (ret != count) {
dev_err(&qca199x_dev->client->dev,
- "NFC: failed to write %d\n", ret);
+ "NFC: failed to write %d\n", ret);
ret = -EIO;
}
mutex_unlock(&qca199x_dev->read_mutex);
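A hedged user-space sketch of the FTM raw I2C flow implemented by the read and write handlers above, assuming FTM raw mode has already been selected through the NFCC_MODE ioctl; the device node path is a placeholder, not something this patch defines:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char reg = 0x00;		/* register address to read */
	unsigned char wr[2] = { 0x01, 0xAB };	/* register address + value to write */
	unsigned char rsp[2];
	int fd = open("/dev/nfc-nci", O_RDWR);	/* placeholder device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* A 1-byte write selects a register to read; the following read()
	 * returns { status, data } as assembled in nfc_read() above. */
	if (write(fd, &reg, 1) == 1 && read(fd, rsp, 2) == 2)
		printf("read:  status=%d data=0x%02x\n", (signed char)rsp[0], rsp[1]);

	/* A 2-byte write writes a register; the following read() returns a
	 * single status byte (0 on success). */
	if (write(fd, wr, 2) == 2 && read(fd, rsp, 1) == 1)
		printf("write: status=%d\n", (signed char)rsp[0]);

	close(fd);
	return 0;
}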
@@ -225,7 +305,7 @@
/*
* Wake/Sleep Mode
*/
-int nfcc_wake(int level, struct nfc_info *info)
+int nfcc_wake(int level, struct file *filp)
{
int r = 0;
unsigned char raw_nci_sleep[] = {0x2F, 0x03, 0x00};
@@ -233,32 +313,32 @@
unsigned char raw_nci_wake[] = {0x10, 0x0F};
unsigned short slave_addr = 0xE;
unsigned short curr_addr;
+ struct qca199x_dev *qca199x_dev = filp->private_data;
- struct i2c_client *client = info->i2c_dev;
-
- dev_dbg(&client->dev, "nfcc_wake: %s: info: %p\n", __func__, info);
+ dev_dbg(&qca199x_dev->client->dev, "nfcc_wake: %s: info: %p\n",
+ __func__, qca199x_dev);
if (level == NFCC_SLEEP) {
- r = nfc_i2c_write(client, &raw_nci_sleep[0],
+ r = i2c_master_send(qca199x_dev->client, &raw_nci_sleep[0],
sizeof(raw_nci_sleep));
+ r = sizeof(raw_nci_sleep);
if (r != sizeof(raw_nci_sleep))
return -EMSGSIZE;
- info->state = NFCC_STATE_NORMAL_SLEEP;
+ qca199x_dev->state = NFCC_STATE_NORMAL_SLEEP;
} else {
- curr_addr = client->addr;
- client->addr = slave_addr;
- r = nfc_i2c_write(client, &raw_nci_wake[0],
+ curr_addr = qca199x_dev->client->addr;
+ qca199x_dev->client->addr = slave_addr;
+ r = nfc_i2c_write(qca199x_dev->client, &raw_nci_wake[0],
sizeof(raw_nci_wake));
/* Restore original NFCC slave I2C address */
- client->addr = curr_addr;
-
- if (r != sizeof(raw_nci_sleep))
+ qca199x_dev->client->addr = curr_addr;
+ r = sizeof(raw_nci_wake);
+ if (r != sizeof(raw_nci_wake))
return -EMSGSIZE;
-
- info->state = NFCC_STATE_NORMAL_WAKE;
+ qca199x_dev->state = NFCC_STATE_NORMAL_WAKE;
}
- msleep(20);
+
return r;
}
@@ -284,41 +364,35 @@
{
int r = 0;
struct qca199x_dev *qca199x_dev = filp->private_data;
- struct nfc_info *info = container_of(filp->private_data,
- struct nfc_info, miscdev);
-
- struct i2c_client *client = info->i2c_dev;
-
- r = gpio_request(qca199x_dev->dis_gpio, "nfc_reset_gpio");
- if (r) {
- dev_err(&client->dev, "unable to request gpio [%d]\n",
- qca199x_dev->dis_gpio);
- goto err_req;
- }
- gpio_set_value(qca199x_dev->dis_gpio, 0);
- r = gpio_direction_output(qca199x_dev->dis_gpio, 1);
- if (r) {
- dev_err(&client->dev, "unable to set direction for gpio [%d]\n",
- qca199x_dev->irq_gpio);
- goto err_req;
- }
if (arg == 0) {
gpio_set_value(qca199x_dev->dis_gpio, 0);
- msleep(20);
+ r = gpio_direction_output(qca199x_dev->dis_gpio, 1);
+ if (r) {
+ dev_err(&qca199x_dev->client->dev,
+ "unable to set direction for gpio [%d]\n",
+ qca199x_dev->dis_gpio);
+ goto err_req;
+ }
+ gpio_set_value(qca199x_dev->dis_gpio, 0);
} else if (arg == 1) {
+ gpio_set_value(qca199x_dev->dis_gpio, 0);
+ r = gpio_direction_output(qca199x_dev->dis_gpio, 1);
+ if (r) {
+ dev_err(&qca199x_dev->client->dev,
+ "unable to set direction for gpio [%d]\n",
+ qca199x_dev->dis_gpio);
+ goto err_req;
+ }
gpio_set_value(qca199x_dev->dis_gpio, 1);
- msleep(20);
} else if (arg == 2) {
msleep(20);
} else if (arg == 3) {
msleep(20);
} else if (arg == 4) {
- nfcc_wake(NFCC_WAKE, info);
- msleep(20);
+ nfcc_wake(NFCC_WAKE, filp);
} else if (arg == 5) {
- nfcc_wake(NFCC_SLEEP, info);
- msleep(20);
+ nfcc_wake(NFCC_SLEEP, filp);
} else {
r = -ENOIOCTLCMD;
}
@@ -327,6 +401,63 @@
return r;
}
+
+/*
+ * Inside nfc_ioctl_nfcc_mode
+ *
+ * @brief nfc_ioctl_nfcc_mode
+ *
+ * (arg = 0) ; UNSOLICITED_MODE - Standard mode, unsolicited read behaviour
+ * (arg = 1) ; SOLICITED_MODE - As above but reads are solicited from User Land
+ * (arg = 2) ; UNSOLICITED_FTM_RAW_MODE - As UNSOLICITED_MODE but messages come
+ * from FTM and not the NCI Host.
+ * (arg = 3) ; SOLICITED_FTM_RAW_MODE - As SOLICITED_MODE but messages come
+ * from FTM and not the NCI Host.
+ */
+int nfc_ioctl_nfcc_mode(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+
+ static unsigned short nci_addr;
+ struct qca199x_dev *qca199x_dev = filp->private_data;
+ struct qca199x_platform_data *platform_data;
+
+ platform_data = qca199x_dev->client->dev.platform_data;
+
+ if (arg == 0) {
+ device_mode.handle_flavour = UNSOLICITED_MODE;
+ qca199x_dev->client->addr = NCI_I2C_SLAVE;
+ /* enable interrupts again */
+ qca199x_enable_irq(qca199x_dev);
+ } else if (arg == 1) {
+ device_mode.handle_flavour = SOLICITED_MODE;
+ qca199x_dev->client->addr = qca199x_dev->client->addr;
+ /* enable interrupts again */
+ qca199x_enable_irq(qca199x_dev);
+ } else if (arg == 2) {
+ device_mode.handle_flavour = UNSOLICITED_FTM_RAW_MODE;
+ nci_addr = qca199x_dev->client->addr;
+ /* replace with new client slave address */
+ qca199x_dev->client->addr = 0xE;
+ /* We also need to disable interrupts */
+ qca199x_disable_irq(qca199x_dev);
+ } else if (arg == 3) {
+ device_mode.handle_flavour = SOLICITED_FTM_RAW_MODE;
+ nci_addr = qca199x_dev->client->addr;
+ /* replace with new client slave address */
+ qca199x_dev->client->addr = 0xE;
+ /* We also need to disable interrupts */
+ qca199x_disable_irq(qca199x_dev);
+ } else {
+ device_mode.handle_flavour = UNSOLICITED_MODE;
+ qca199x_dev->client->addr = NCI_I2C_SLAVE;
+ }
+ return retval;
+}
+
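For reference, a hedged user-space sketch of the mode selection handled above. NFCC_MODE and the device node name come from the driver's header and platform setup, not from this hunk, so the request code is guarded and the path is a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Argument values accepted by the handler above; names mirror enum
 * ehandler_mode in nfc-nci.h. */
enum nfcc_mode_arg {
	ARG_UNSOLICITED		= 0,	/* normal NCI, IRQ driven */
	ARG_SOLICITED		= 1,	/* reads solicited from user land */
	ARG_UNSOLICITED_FTM_RAW	= 2,	/* FTM raw I2C, IRQs disabled */
	ARG_SOLICITED_FTM_RAW	= 3,	/* FTM raw I2C, IRQs disabled */
};

int main(void)
{
	int fd = open("/dev/nfc-nci", O_RDWR);	/* placeholder device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
#ifdef NFCC_MODE	/* request code is defined by the driver header */
	if (ioctl(fd, NFCC_MODE, ARG_UNSOLICITED_FTM_RAW))
		perror("NFCC_MODE");
#endif
	close(fd);
	return 0;
}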
/*
* Inside nfc_ioctl_kernel_logging
*
@@ -371,6 +502,7 @@
nfc_ioctl_power_states(pfile, cmd, arg);
break;
case NFCC_MODE:
+ nfc_ioctl_nfcc_mode(pfile, cmd, arg);
break;
case NFC_KERNEL_LOGGING_MODE:
nfc_ioctl_kernel_logging(arg, pfile);
@@ -447,66 +579,75 @@
unsigned char raw_s73[] = {0x73, 0x02};
unsigned char raw_slave1_rd = {0x0};
unsigned char raw_1P8_PAD_CFG_CLK_REQ[] = {0xA5, 0x1};
- unsigned char buf[4];
+ unsigned char raw_1P8_PAD_CFG_PWR_REQ[] = {0xA7, 0x1};
+ unsigned char buf = 0;
- /* Set I2C address to enable configuration of QCA1990 */
client->addr = curr_addr;
- RAW(s73, 0x02);
+ r = i2c_master_send(client, &buf, 1);
+ buf = 0;
+ r = i2c_master_recv(client, &buf, 1);
+ if (0x10 != (0x10 & buf)) {
+ RAW(s73, 0x02);
- r = nfc_i2c_write(client, &raw_s73[0], sizeof(raw_s73));
- usleep(1000);
- RAW(1p8_CONTROL_011, XTAL_CLOCK | 0x01);
+ r = nfc_i2c_write(client, &raw_s73[0], sizeof(raw_s73));
+ usleep(1000);
+ RAW(1p8_CONTROL_011, XTAL_CLOCK | 0x01);
- r = nfc_i2c_write(client, &raw_1p8_CONTROL_011[0],
+ r = nfc_i2c_write(client, &raw_1p8_CONTROL_011[0],
sizeof(raw_1p8_CONTROL_011));
- usleep(1000);
- RAW(1P8_CONTROL_010, (0x8));
- r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
- sizeof(raw_1P8_CONTROL_010));
-
- usleep(10000); /* 10ms wait */
- RAW(1P8_CONTROL_010, (0xC));
- r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
- sizeof(raw_1P8_CONTROL_010));
- usleep(100); /* 100uS wait */
- RAW(1P8_X0_0B0, (FREQ_SEL_19));
- r = nfc_i2c_write(client, &raw_1P8_X0_0B0[0], sizeof(raw_1P8_X0_0B0));
- usleep(1000);
-
- /* PWR_EN = 1 */
- RAW(1P8_CONTROL_010, (0xd));
- r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
+ usleep(1000);
+ RAW(1P8_CONTROL_010, (0x8));
+ r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
sizeof(raw_1P8_CONTROL_010));
- usleep(20000); /* 20ms wait */
- /* LS_EN = 1 */
- RAW(1P8_CONTROL_010, 0xF);
- r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
- sizeof(raw_1P8_CONTROL_010));
- usleep(20000); /* 20ms wait */
- /* Enable the PMIC clock */
- RAW(1P8_PAD_CFG_CLK_REQ, (0x1));
- r = nfc_i2c_write(client, &raw_1P8_PAD_CFG_CLK_REQ[0],
+ usleep(10000); /* 10ms wait */
+ RAW(1P8_CONTROL_010, (0xC));
+ r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
+ sizeof(raw_1P8_CONTROL_010));
+ usleep(100); /* 100uS wait */
+ RAW(1P8_X0_0B0, (FREQ_SEL_19));
+ r = nfc_i2c_write(client, &raw_1P8_X0_0B0[0],
+ sizeof(raw_1P8_X0_0B0));
+ usleep(1000);
+
+ /* PWR_EN = 1 */
+ RAW(1P8_CONTROL_010, (0xd));
+ r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
+ sizeof(raw_1P8_CONTROL_010));
+ usleep(20000); /* 20ms wait */
+ /* LS_EN = 1 */
+ RAW(1P8_CONTROL_010, 0xF);
+ r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
+ sizeof(raw_1P8_CONTROL_010));
+ usleep(20000); /* 20ms wait */
+
+ /* Enable the PMIC clock */
+ RAW(1P8_PAD_CFG_CLK_REQ, (0x1));
+ r = nfc_i2c_write(client, &raw_1P8_PAD_CFG_CLK_REQ[0],
sizeof(raw_1P8_PAD_CFG_CLK_REQ));
- usleep(1000);
+ usleep(1000);
- RAW(slave2, 0x10);
- r = nfc_i2c_write(client, &raw_slave2[0], sizeof(raw_slave2));
- usleep(1000);
- {
- r = i2c_master_send(client, buf, 1);
- memset(buf, 0xAA, sizeof(buf));
- r = i2c_master_recv(client, buf, 1);
+ RAW(1P8_PAD_CFG_PWR_REQ, (0x1));
+ r = nfc_i2c_write(client, &raw_1P8_PAD_CFG_PWR_REQ[0],
+ sizeof(raw_1P8_PAD_CFG_PWR_REQ));
+ usleep(1000);
+
+ RAW(slave2, 0x10);
+ r = nfc_i2c_write(client, &raw_slave2[0], sizeof(raw_slave2));
+ usleep(1000);
+
+ RAW(slave1, NCI_I2C_SLAVE);
+ r = nfc_i2c_write(client, &raw_slave1[0], sizeof(raw_slave1));
+ usleep(1000);
+
+ /* QCA199x NFCC CPU should now boot... */
+ r = i2c_master_recv(client, &raw_slave1_rd, 1);
+ /* Talk on NCI slave address NCI_I2C_SLAVE 0x2C*/
+ client->addr = NCI_I2C_SLAVE;
+ r = 0;
+ } else {
+ r = 1;
}
- RAW(slave1, NCI_I2C_SLAVE);
- r = nfc_i2c_write(client, &raw_slave1[0], sizeof(raw_slave1));
- usleep(1000);
-
- /* QCA199x NFCC CPU should now boot... */
- r = i2c_master_recv(client, &raw_slave1_rd, 1);
- /* Talk on NCI slave address NCI_I2C_SLAVE 0x2C*/
- client->addr = NCI_I2C_SLAVE;
-
return r;
}
@@ -623,6 +764,9 @@
dev_err(&client->dev, "dis gpio not provided\n");
goto err_irq;
}
+ gpio_set_value(qca199x_dev->dis_gpio, 1);
+ msleep(20);
+ gpio_set_value(qca199x_dev->dis_gpio, 0);
nfc_clk = clk_get(&client->dev, "ref_clk");
@@ -640,7 +784,7 @@
r = gpio_request(platform_data->ven_gpio, "nfc_ven_gpio");
if (r) {
dev_err(&client->dev, "unable to request gpio [%d]\n",
- platform_data->irq_gpio);
+ platform_data->ven_gpio);
goto err_ven_gpio;
}
r = gpio_direction_input(platform_data->ven_gpio);
@@ -648,7 +792,7 @@
dev_err(&client->dev,
"unable to set direction for gpio [%d]\n",
- platform_data->irq_gpio);
+ platform_data->ven_gpio);
goto err_ven_gpio;
}
@@ -681,7 +825,12 @@
/* request irq. The irq is set whenever the chip has data available
* for reading. It is cleared when all data has been read.
*/
- nfcc_initialise(client, platform_data->reg);
+ device_mode.handle_flavour = UNSOLICITED_MODE;
+ r = nfcc_initialise(client, platform_data->reg);
+ if (r) {
+ dev_err(&client->dev, "nfc-nci probe: request nfcc initialise failed\n");
+ goto err_nfcc_init_failed;
+ }
qca199x_dev->irq_enabled = true;
r = request_irq(client->irq, qca199x_dev_irq_handler,
@@ -697,6 +846,7 @@
__func__);
return 0;
+err_nfcc_init_failed:
err_request_irq_failed:
misc_deregister(&qca199x_dev->qca199x_device);
err_misc_register:
diff --git a/drivers/nfc/nfc-nci.h b/drivers/nfc/nfc-nci.h
index 4398df7..c3cabc2 100644
--- a/drivers/nfc/nfc-nci.h
+++ b/drivers/nfc/nfc-nci.h
@@ -30,8 +30,10 @@
};
enum ehandler_mode {
- UNSOLICITED_READ_MODE = 0,
- SOLICITED_READ_MODE
+ UNSOLICITED_MODE = 0,
+ SOLICITED_MODE,
+ UNSOLICITED_FTM_RAW_MODE,
+ SOLICITED_FTM_RAW_MODE
};
enum ekernel_logging_mode {
@@ -43,9 +45,9 @@
LEVEL_5
};
-struct DeviceMode {
+struct devicemode {
enum ehandler_mode handle_flavour;
-} tDeviceMode;
+} tdevicemode;
#define NFC_DRIVER_NAME "nfc-nci"
#define NFC_I2C_DRIVER_NAME "NCI NFC I2C Interface",
diff --git a/drivers/of/of_batterydata.c b/drivers/of/of_batterydata.c
index c2585a7..977a1e0 100644
--- a/drivers/of/of_batterydata.c
+++ b/drivers/of/of_batterydata.c
@@ -157,6 +157,8 @@
#define OF_PROP_READ(property, qpnp_dt_property, node, rc, optional) \
do { \
+ if (rc) \
+ break; \
rc = of_property_read_u32(node, "qcom," qpnp_dt_property, \
&property); \
\
@@ -166,7 +168,6 @@
} else if (rc) { \
pr_err("Error reading " #qpnp_dt_property \
" property rc = %d\n", rc); \
- return rc; \
} \
} while (0)
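The early break turns each OF_PROP_READ into a no-op once a previous read has failed, so the caller can issue a run of reads and check rc once at the end, as the next hunk does. A tiny stand-alone sketch of that accumulate-and-skip pattern, with a hypothetical fake_read() helper standing in for of_property_read_u32():

#include <stdio.h>

/* Hypothetical stand-in for of_property_read_u32(). */
static int fake_read(const char *name, int *out, int fail)
{
	if (fail)
		return -22;	/* pretend -EINVAL */
	*out = 42;
	printf("read %s\n", name);
	return 0;
}

#define PROP_READ(prop, name, rc, fail)			\
	do {						\
		if (rc)					\
			break;				\
		rc = fake_read(name, &(prop), fail);	\
	} while (0)

int main(void)
{
	int a = 0, b = 0, rc = 0;

	PROP_READ(a, "rpull-up-kohm", rc, 0);
	PROP_READ(b, "vref-batt-therm", rc, 1);	/* fails and latches rc */
	PROP_READ(a, "never-read", rc, 0);	/* skipped: rc already set */

	if (rc)
		printf("missing required property, rc=%d\n", rc);
	return rc ? 1 : 0;
}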
@@ -234,6 +235,8 @@
node = batterydata_container_node;
OF_PROP_READ(rpull_up_kohm, "rpull-up-kohm", node, rc, false);
OF_PROP_READ(vadc_vdd_uv, "vref-batt-therm", node, rc, false);
+ if (rc)
+ return rc;
batt_id_kohm = of_batterydata_convert_battery_id_kohm(batt_id_uv,
rpull_up_kohm, vadc_vdd_uv);
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
index 8c0adbd..8d6d5e6 100644
--- a/drivers/platform/msm/ipa/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -74,23 +74,6 @@
rule_hdr->u.hdr.hdr_offset = 0;
}
buf += sizeof(struct ipa_rt_rule_hw_hdr);
- if ((ip == IPA_IP_v4) &&
- (entry->rule.attrib.attrib_mask & IPA_FLT_TOS)) {
- entry->rule.attrib.tos_value =
- (entry->rule.attrib.u.v4.tos << 5);
- entry->rule.attrib.tos_mask = 0xe0;
- entry->rule.attrib.attrib_mask &= ~IPA_FLT_TOS;
- entry->rule.attrib.attrib_mask |= IPA_FLT_TOS_MASKED;
- }
-
- if ((ip == IPA_IP_v6) &&
- (entry->rule.attrib.attrib_mask & IPA_FLT_TC)) {
- entry->rule.attrib.tos_value =
- (entry->rule.attrib.u.v6.tc << 5);
- entry->rule.attrib.tos_mask = 0xe0;
- entry->rule.attrib.attrib_mask &= ~IPA_FLT_TC;
- entry->rule.attrib.attrib_mask |= IPA_FLT_TOS_MASKED;
- }
if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
IPAERR("fail to generate hw rule\n");
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 9911750..408681c 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -1069,10 +1069,11 @@
info.connect_complete = 1;
spin_unlock(&usb_bam_ipa_handshake_info_lock);
- if (info.cur_cons_state[HSUSB_BAM] == IPA_RM_RESOURCE_GRANTED) {
- pr_debug("%s: Notify CONS_GRANTED\n", __func__);
+ if (info.cur_cons_state[cur_bam] == IPA_RM_RESOURCE_GRANTED) {
+ pr_debug("%s: Notify %s_CONS_GRANTED\n", __func__,
+ bam_enable_strings[cur_bam]);
ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
- ipa_rm_resource_cons[HSUSB_BAM]);
+ ipa_rm_resource_cons[cur_bam]);
}
}
@@ -1505,7 +1506,7 @@
ctx.pipes_enabled_per_bam[cur_bam] += 1;
spin_unlock(&usb_bam_lock);
- if (ipa_params->dir == PEER_PERIPHERAL_TO_USB && cur_bam == HSUSB_BAM)
+ if (ipa_params->dir == PEER_PERIPHERAL_TO_USB)
notify_usb_connected(cur_bam);
if (cur_bam == HSUSB_BAM)
@@ -2160,6 +2161,7 @@
u8 i = 0;
bool reset_bam;
enum usb_bam bam;
+ u32 addr;
ctx.max_connections = 0;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
@@ -2176,9 +2178,11 @@
}
rc = of_property_read_u32(node, "qcom,usb-bam-fifo-baseaddr",
- &pdata->usb_bam_fifo_baseaddr);
+ &addr);
if (rc)
pr_debug("%s: Invalid usb base address property\n", __func__);
+ else
+ pdata->usb_bam_fifo_baseaddr = addr;
pdata->ignore_core_reset_ack = of_property_read_bool(node,
"qcom,ignore-core-reset-ack");
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index b518f1f..c246036 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -1970,6 +1970,11 @@
pr_debug("new delta ocv = %d\n", delta_ocv_uv);
}
+ if (wake_lock_active(&chip->low_voltage_wake_lock)) {
+ pr_debug("Low Voltage, apply only ibat limited corrections\n");
+ goto skip_limiting_corrections;
+ }
+
if (chip->last_ocv_uv > 3800000)
correction_limit_uv = the_chip->high_ocv_correction_limit_uv;
else
@@ -1986,6 +1991,7 @@
pr_debug("new delta ocv = %d\n", delta_ocv_uv);
}
+skip_limiting_corrections:
chip->last_ocv_uv -= delta_ocv_uv;
if (chip->last_ocv_uv >= chip->max_voltage_uv)
@@ -2278,7 +2284,6 @@
int new_calculated_soc;
static int firsttime = 1;
- calib_hkadc_check(chip, batt_temp);
calculate_soc_params(chip, raw, batt_temp, chargecycles,
&fcc_uah,
&unusable_charge_uah,
@@ -2426,6 +2431,7 @@
get_batt_temp(chip, &batt_temp);
mutex_lock(&chip->last_ocv_uv_mutex);
+ calib_hkadc_check(chip, batt_temp);
read_soc_params_raw(chip, &raw, batt_temp);
soc = calculate_state_of_charge(chip, &raw,
@@ -2762,6 +2768,7 @@
get_batt_temp(the_chip, &batt_temp);
mutex_lock(&the_chip->last_ocv_uv_mutex);
+ calib_hkadc_check(the_chip, batt_temp);
read_soc_params_raw(the_chip, &raw, batt_temp);
mutex_unlock(&the_chip->last_ocv_uv_mutex);
@@ -2907,6 +2914,7 @@
mutex_lock(&the_chip->last_ocv_uv_mutex);
+ calib_hkadc_check(the_chip, batt_temp);
read_soc_params_raw(the_chip, &raw, batt_temp);
calculate_cc_uah(the_chip, raw.cc, &bms_end_cc_uah);
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index d5b2cc6..f3f59e6 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -298,6 +298,7 @@
bool disable_aicl;
int usb_type;
bool disable_chg_rmvl_wrkarnd;
+ struct msm_xo_voter *voter;
};
/* user space parameter to limit usb current */
@@ -4034,6 +4035,7 @@
int err;
u8 temp;
+ msm_xo_mode_vote(chip->voter, MSM_XO_MODE_ON);
temp = 0xD1;
err = pm_chg_write(chip, CHG_TEST, temp);
if (err) {
@@ -4092,6 +4094,8 @@
pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
return;
}
+
+ msm_xo_mode_vote(chip->voter, MSM_XO_MODE_OFF);
}
static void pm8921_chg_set_hw_clk_switching(struct pm8921_chg_chip *chip)
@@ -4099,6 +4103,7 @@
int err;
u8 temp;
+ msm_xo_mode_vote(chip->voter, MSM_XO_MODE_ON);
temp = 0xD1;
err = pm_chg_write(chip, CHG_TEST, temp);
if (err) {
@@ -4112,6 +4117,7 @@
pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
return;
}
+ msm_xo_mode_vote(chip->voter, MSM_XO_MODE_OFF);
}
#define VREF_BATT_THERM_FORCE_ON BIT(7)
@@ -4800,6 +4806,7 @@
chip->ibatmax_max_adj_ma = find_ibat_max_adj_ma(
chip->max_bat_chg_current);
+ chip->voter = msm_xo_get(MSM_XO_TCXO_D0, "pm8921_charger");
rc = pm8921_chg_hw_init(chip);
if (rc) {
pr_err("couldn't init hardware rc=%d\n", rc);
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 11e6cc1..982c30b 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -147,7 +147,9 @@
POWER_SUPPLY_ATTR(voltage_min_design),
POWER_SUPPLY_ATTR(voltage_now),
POWER_SUPPLY_ATTR(voltage_avg),
+ POWER_SUPPLY_ATTR(input_voltage_regulation),
POWER_SUPPLY_ATTR(current_max),
+ POWER_SUPPLY_ATTR(input_current_max),
POWER_SUPPLY_ATTR(current_now),
POWER_SUPPLY_ATTR(current_avg),
POWER_SUPPLY_ATTR(power_now),
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 0a8b2a6..cf20a81 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -25,11 +25,11 @@
#include <linux/delay.h>
#include <linux/qpnp/qpnp-adc.h>
#include <linux/qpnp/power-on.h>
-#include <linux/batterydata-lib.h>
+#include <linux/of_batterydata.h>
/* BMS Register Offsets */
-#define BMS1_REVISION1 0x0
-#define BMS1_REVISION2 0x1
+#define REVISION1 0x0
+#define REVISION2 0x1
#define BMS1_STATUS1 0x8
#define BMS1_MODE_CTL 0X40
/* Coulomb counter clear registers */
@@ -47,6 +47,7 @@
#define BMS1_S1_DELAY_CTL 0x5A
/* OCV interrupt threshold */
#define BMS1_OCV_THR0 0x50
+#define BMS1_S2_SAMP_AVG_CTL 0x61
/* SW CC interrupt threshold */
#define BMS1_SW_CC_THR0 0xA0
/* OCV for r registers */
@@ -71,7 +72,10 @@
#define CHARGE_CYCLE_STORAGE_LSB 0xBE /* LSB=0xBE, MSB=0xBF */
/* IADC Channel Select */
+#define IADC1_BMS_REVISION2 0x01
#define IADC1_BMS_ADC_CH_SEL_CTL 0x48
+#define IADC1_BMS_ADC_INT_RSNSN_CTL 0x49
+#define IADC1_BMS_FAST_AVG_EN 0x5B
/* Configuration for saving of shutdown soc/iavg */
#define IGNORE_SOC_TEMP_DECIDEG 50
@@ -144,8 +148,13 @@
u8 revision1;
u8 revision2;
+
+ u8 iadc_bms_revision1;
+ u8 iadc_bms_revision2;
+
int battery_present;
int battery_status;
+ bool batfet_closed;
bool new_battery;
bool done_charging;
bool last_soc_invalid;
@@ -170,6 +179,7 @@
struct delayed_work calculate_soc_delayed_work;
struct work_struct recalc_work;
+ struct work_struct batfet_open_work;
struct mutex bms_output_lock;
struct mutex last_ocv_uv_mutex;
@@ -739,6 +749,11 @@
return get_battery_status(chip) == POWER_SUPPLY_STATUS_CHARGING;
}
+static bool is_battery_full(struct qpnp_bms_chip *chip)
+{
+ return get_battery_status(chip) == POWER_SUPPLY_STATUS_FULL;
+}
+
static bool is_battery_present(struct qpnp_bms_chip *chip)
{
union power_supply_propval ret = {0,};
@@ -757,9 +772,22 @@
return false;
}
-static bool is_battery_full(struct qpnp_bms_chip *chip)
+static bool is_batfet_closed(struct qpnp_bms_chip *chip)
{
- return get_battery_status(chip) == POWER_SUPPLY_STATUS_FULL;
+ union power_supply_propval ret = {0,};
+
+ if (chip->batt_psy == NULL)
+ chip->batt_psy = power_supply_get_by_name("battery");
+ if (chip->batt_psy) {
+ /* if battery has been registered, use the online property */
+ chip->batt_psy->get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_ONLINE, &ret);
+ return !!ret.intval;
+ }
+
+ /* Default to true if the battery power supply is not registered. */
+ pr_debug("battery power supply is not registered\n");
+ return true;
}
static int get_simultaneous_batt_v_and_i(struct qpnp_bms_chip *chip,
@@ -2322,12 +2350,15 @@
bms_stay_awake(&chip->soc_wake_source);
mutex_lock(&chip->vbat_monitor_mutex);
- qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+ if (chip->vbat_monitor_params.state_request !=
+ ADC_TM_HIGH_LOW_THR_DISABLE)
+ qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
mutex_unlock(&chip->vbat_monitor_mutex);
if (chip->use_voltage_soc) {
soc = calculate_soc_from_voltage(chip);
} else {
- qpnp_iadc_calibrate_for_trim();
+ if (!chip->batfet_closed)
+ qpnp_iadc_calibrate_for_trim(true);
rc = qpnp_vadc_read(LR_MUX1_BATT_THERM, &result);
if (rc) {
pr_err("error reading vadc LR_MUX1_BATT_THERM = %d, rc = %d\n",
@@ -2519,9 +2550,10 @@
int rc;
chip->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_DISABLE;
- rc = qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+
+ rc = qpnp_adc_tm_disable_chan_meas(&chip->vbat_monitor_params);
if (rc) {
- pr_err("tm measure failed: %d\n", rc);
+ pr_err("tm disable failed: %d\n", rc);
return rc;
}
if (wake_lock_active(&chip->low_voltage_wake_lock)) {
@@ -2545,11 +2577,6 @@
return -EPROBE_DEFER;
}
- if (!is_battery_present(chip)) {
- pr_debug("no battery inserted, do not setup vbat monitoring\n");
- return 0;
- }
-
chip->vbat_monitor_params.low_thr = chip->low_voltage_threshold;
chip->vbat_monitor_params.high_thr = chip->max_voltage_uv
- VBATT_ERROR_MARGIN;
@@ -2561,10 +2588,17 @@
pr_debug("set low thr to %d and high to %d\n",
chip->vbat_monitor_params.low_thr,
chip->vbat_monitor_params.high_thr);
- rc = qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
- if (rc) {
- pr_err("tm setup failed: %d\n", rc);
- return rc;
+
+ if (!is_battery_present(chip)) {
+ pr_debug("no battery inserted, do not enable vbat monitoring\n");
+ chip->vbat_monitor_params.state_request =
+ ADC_TM_HIGH_LOW_THR_DISABLE;
+ } else {
+ rc = qpnp_adc_tm_channel_measure(&chip->vbat_monitor_params);
+ if (rc) {
+ pr_err("tm setup failed: %d\n", rc);
+ return rc;
+ }
}
pr_debug("setup complete\n");
return 0;
@@ -2898,9 +2932,55 @@
}
}
+#define MAX_CAL_TRIES 200
+#define MIN_CAL_UA 3000
+static void batfet_open_work(struct work_struct *work)
+{
+ int i;
+ int rc;
+ int result_ua;
+ u8 orig_delay, sample_delay;
+ struct qpnp_bms_chip *chip = container_of(work,
+ struct qpnp_bms_chip,
+ batfet_open_work);
+
+ rc = qpnp_read_wrapper(chip, &orig_delay,
+ chip->base + BMS1_S1_DELAY_CTL, 1);
+
+ sample_delay = 0x0;
+ rc = qpnp_write_wrapper(chip, &sample_delay,
+ chip->base + BMS1_S1_DELAY_CTL, 1);
+
+ /*
+ * In certain PMICs a coupling issue causes a bad calibration
+ * value that results in a huge battery current reading even when
+ * the BATFET is open. Keep calibrating until we hit reasonable
+ * calibration values that result in a low battery current.
+ */
+
+ for (i = 0; (!chip->batfet_closed) && i < MAX_CAL_TRIES; i++) {
+ rc = qpnp_iadc_calibrate_for_trim(false);
+ /*
+ * Wait 20 ms after calibration and before reading battery
+ * current. The BMS h/w uses calibration values in the
+ * next sampling of vsense.
+ */
+ msleep(20);
+ rc |= get_battery_current(chip, &result_ua);
+ if (rc == 0 && abs(result_ua) <= MIN_CAL_UA) {
+ pr_debug("good cal at %d attempt\n", i);
+ break;
+ }
+ }
+ pr_debug("batfet_closed = %d i = %d result_ua = %d\n",
+ chip->batfet_closed, i, result_ua);
+
+ rc = qpnp_write_wrapper(chip, &orig_delay,
+ chip->base + BMS1_S1_DELAY_CTL, 1);
+}
+
static void charging_began(struct qpnp_bms_chip *chip)
{
-
mutex_lock(&chip->last_soc_mutex);
chip->charge_start_tm_sec = 0;
chip->catch_up_time_sec = 0;
@@ -2990,6 +3070,29 @@
}
}
+#define CALIB_WRKARND_DIG_MAJOR_MAX 0x03
+static void batfet_status_check(struct qpnp_bms_chip *chip)
+{
+ bool batfet_closed;
+
+ if (chip->iadc_bms_revision2 > CALIB_WRKARND_DIG_MAJOR_MAX)
+ return;
+
+ batfet_closed = is_batfet_closed(chip);
+ if (chip->batfet_closed != batfet_closed) {
+ chip->batfet_closed = batfet_closed;
+ if (batfet_closed == false) {
+ /* batfet opened */
+ schedule_work(&chip->batfet_open_work);
+ qpnp_iadc_skip_calibration();
+ } else {
+ /* batfet closed */
+ qpnp_iadc_calibrate_for_trim(true);
+ qpnp_iadc_resume_calibration();
+ }
+ }
+}
+
static void battery_insertion_check(struct qpnp_bms_chip *chip)
{
bool present = is_battery_present(chip);
@@ -3024,6 +3127,7 @@
bms_psy);
battery_insertion_check(chip);
+ batfet_status_check(chip);
battery_status_check(chip);
}
@@ -3183,11 +3287,7 @@
return IRQ_HANDLED;
}
-#define PALLADIUM_ID_MIN 0x7F40
-#define PALLADIUM_ID_MAX 0x7F5A
-#define DESAY_5200_ID_MIN 0x7F7F
-#define DESAY_5200_ID_MAX 0x802F
-static int32_t read_battery_id(struct qpnp_bms_chip *chip)
+static int64_t read_battery_id(struct qpnp_bms_chip *chip)
{
int rc;
struct qpnp_vadc_result result;
@@ -3198,16 +3298,16 @@
LR_MUX2_BAT_ID, rc);
return rc;
}
- pr_debug("batt_id phy = %lld meas = 0x%llx\n", result.physical,
- result.measurement);
- pr_debug("raw_code = 0x%x\n", result.adc_code);
- return result.adc_code;
+
+ return result.physical;
}
static int set_battery_data(struct qpnp_bms_chip *chip)
{
int64_t battery_id;
+ int rc;
struct bms_battery_data *batt_data;
+ struct device_node *node;
if (chip->batt_type == BATT_DESAY) {
batt_data = &desay_5200_data;
@@ -3227,12 +3327,30 @@
return battery_id;
}
- if (is_between(PALLADIUM_ID_MIN, PALLADIUM_ID_MAX,
- battery_id)) {
- batt_data = &palladium_1500_data;
- } else if (is_between(DESAY_5200_ID_MIN, DESAY_5200_ID_MAX,
- battery_id)) {
- batt_data = &desay_5200_data;
+ node = of_find_node_by_name(chip->spmi->dev.of_node,
+ "qcom,battery-data");
+ if (node) {
+ batt_data = kzalloc(sizeof(struct bms_battery_data),
+ GFP_KERNEL);
+ batt_data->fcc_temp_lut = kzalloc(
+ sizeof(struct single_row_lut),
+ GFP_KERNEL);
+ batt_data->pc_temp_ocv_lut = kzalloc(
+ sizeof(struct pc_temp_ocv_lut),
+ GFP_KERNEL);
+ batt_data->rbatt_sf_lut = kzalloc(
+ sizeof(struct sf_lut), GFP_KERNEL);
+
+ rc = of_batterydata_read_data(node,
+ batt_data, battery_id);
+ if (rc) {
+ pr_err("battery data load failed, using palladium 1500\n");
+ kfree(batt_data->fcc_temp_lut);
+ kfree(batt_data->pc_temp_ocv_lut);
+ kfree(batt_data->rbatt_sf_lut);
+ kfree(batt_data);
+ batt_data = &palladium_1500_data;
+ }
} else {
pr_warn("invalid battid, palladium 1500 assumed batt_id %llx\n",
battery_id);
@@ -3250,6 +3368,14 @@
chip->rbatt_capacitive_mohm = batt_data->rbatt_capacitive_mohm;
chip->flat_ocv_threshold_uv = batt_data->flat_ocv_threshold_uv;
+ /* Override battery properties if specified in the battery profile */
+ if (batt_data->max_voltage_uv >= 0)
+ chip->max_voltage_uv = batt_data->max_voltage_uv;
+ if (batt_data->cutoff_uv >= 0)
+ chip->v_cutoff_uv = batt_data->cutoff_uv;
+ if (batt_data->iterm_ua >= 0)
+ chip->chg_term_ua = batt_data->iterm_ua;
+
if (chip->pc_temp_ocv_lut == NULL) {
pr_err("temp ocv lut table is NULL\n");
return -EINVAL;
@@ -3259,13 +3385,14 @@
#define SPMI_PROP_READ(chip_prop, qpnp_spmi_property, retval) \
do { \
+ if (retval) \
+ break; \
retval = of_property_read_u32(chip->spmi->dev.of_node, \
"qcom," qpnp_spmi_property, \
&chip->chip_prop); \
if (retval) { \
pr_err("Error reading " #qpnp_spmi_property \
" property %d\n", rc); \
- return -EINVAL; \
} \
} while (0)
@@ -3277,7 +3404,7 @@
static inline int bms_read_properties(struct qpnp_bms_chip *chip)
{
- int rc;
+ int rc = 0;
SPMI_PROP_READ(r_sense_uohm, "r-sense-uohm", rc);
SPMI_PROP_READ(v_cutoff_uv, "v-cutoff-uv", rc);
@@ -3294,17 +3421,6 @@
SPMI_PROP_READ(low_soc_calculate_soc_ms,
"low-soc-calculate-soc-ms", rc);
SPMI_PROP_READ(calculate_soc_ms, "calculate-soc-ms", rc);
- chip->use_external_rsense = of_property_read_bool(
- chip->spmi->dev.of_node,
- "qcom,use-external-rsense");
- chip->ignore_shutdown_soc = of_property_read_bool(
- chip->spmi->dev.of_node,
- "qcom,ignore-shutdown-soc");
- chip->use_voltage_soc = of_property_read_bool(chip->spmi->dev.of_node,
- "qcom,use-voltage-soc");
- chip->use_ocv_thresholds = of_property_read_bool(
- chip->spmi->dev.of_node,
- "qcom,use-ocv-thresholds");
SPMI_PROP_READ(high_ocv_correction_limit_uv,
"high-ocv-correction-limit-uv", rc);
SPMI_PROP_READ(low_ocv_correction_limit_uv,
@@ -3318,6 +3434,18 @@
SPMI_PROP_READ(low_voltage_threshold, "low-voltage-threshold", rc);
SPMI_PROP_READ(temperature_margin, "tm-temp-margin", rc);
+ chip->use_external_rsense = of_property_read_bool(
+ chip->spmi->dev.of_node,
+ "qcom,use-external-rsense");
+ chip->ignore_shutdown_soc = of_property_read_bool(
+ chip->spmi->dev.of_node,
+ "qcom,ignore-shutdown-soc");
+ chip->use_voltage_soc = of_property_read_bool(chip->spmi->dev.of_node,
+ "qcom,use-voltage-soc");
+ chip->use_ocv_thresholds = of_property_read_bool(
+ chip->spmi->dev.of_node,
+ "qcom,use-ocv-thresholds");
+
if (chip->adjust_soc_low_threshold >= 45)
chip->adjust_soc_low_threshold = 45;
@@ -3341,6 +3469,11 @@
chip->min_fcc_learning_samples);
}
+ if (rc) {
+ pr_err("Missing required properties.\n");
+ return rc;
+ }
+
pr_debug("dts data: r_sense_uohm:%d, v_cutoff_uv:%d, max_v:%d\n",
chip->r_sense_uohm, chip->v_cutoff_uv,
chip->max_voltage_uv);
@@ -3486,7 +3619,11 @@
return 0;
}
-#define ADC_CH_SEL_MASK 0x7
+#define ADC_CH_SEL_MASK 0x7
+#define ADC_INT_RSNSN_CTL_MASK 0x3
+#define ADC_INT_RSNSN_CTL_VALUE_EXT_RENSE 0x2
+#define FAST_AVG_EN_MASK 0x80
+#define FAST_AVG_EN_VALUE_EXT_RSENSE 0x80
static int read_iadc_channel_select(struct qpnp_bms_chip *chip)
{
u8 iadc_channel_select;
@@ -3554,6 +3691,34 @@
pr_debug("rds_rsense = %d nOhm, saved as %d uOhm\n",
rds_rsense_nohm, chip->r_sense_uohm);
}
+ /* prevent shorting of leads by IADC_BMS when external Rsense is used */
+ if (chip->use_external_rsense) {
+ if (chip->iadc_bms_revision2 > CALIB_WRKARND_DIG_MAJOR_MAX) {
+ rc = qpnp_masked_write_iadc(chip,
+ IADC1_BMS_ADC_INT_RSNSN_CTL,
+ ADC_INT_RSNSN_CTL_MASK,
+ ADC_INT_RSNSN_CTL_VALUE_EXT_RENSE);
+ if (rc) {
+ pr_err("Unable to set batfet config %x to %x: %d\n",
+ IADC1_BMS_ADC_INT_RSNSN_CTL,
+ ADC_INT_RSNSN_CTL_VALUE_EXT_RENSE, rc);
+ return rc;
+ }
+ } else {
+ /* In older PMICs use FAST_AVG_EN register bit 7 */
+ rc = qpnp_masked_write_iadc(chip,
+ IADC1_BMS_FAST_AVG_EN,
+ FAST_AVG_EN_MASK,
+ FAST_AVG_EN_VALUE_EXT_RSENSE);
+ if (rc) {
+ pr_err("Unable to set batfet config %x to %x: %d\n",
+ IADC1_BMS_FAST_AVG_EN,
+ FAST_AVG_EN_VALUE_EXT_RSENSE, rc);
+ return rc;
+ }
+ }
+ }
+
return 0;
}
@@ -3660,20 +3825,36 @@
}
rc = qpnp_read_wrapper(chip, &chip->revision1,
- chip->base + BMS1_REVISION1, 1);
+ chip->base + REVISION1, 1);
if (rc) {
pr_err("error reading version register %d\n", rc);
goto error_read;
}
rc = qpnp_read_wrapper(chip, &chip->revision2,
- chip->base + BMS1_REVISION2, 1);
+ chip->base + REVISION2, 1);
if (rc) {
pr_err("Error reading version register %d\n", rc);
goto error_read;
}
pr_debug("BMS version: %hhu.%hhu\n", chip->revision2, chip->revision1);
+ rc = qpnp_read_wrapper(chip, &chip->iadc_bms_revision2,
+ chip->iadc_base + REVISION2, 1);
+ if (rc) {
+ pr_err("Error reading version register %d\n", rc);
+ goto error_read;
+ }
+
+ rc = qpnp_read_wrapper(chip, &chip->iadc_bms_revision1,
+ chip->iadc_base + REVISION1, 1);
+ if (rc) {
+ pr_err("Error reading version register %d\n", rc);
+ goto error_read;
+ }
+ pr_debug("IADC_BMS version: %hhu.%hhu\n",
+ chip->iadc_bms_revision2, chip->iadc_bms_revision1);
+
rc = bms_read_properties(chip);
if (rc) {
pr_err("Unable to read all bms properties, rc = %d\n", rc);
@@ -3713,6 +3894,7 @@
INIT_DELAYED_WORK(&chip->calculate_soc_delayed_work,
calculate_soc_work);
INIT_WORK(&chip->recalc_work, recalculate_work);
+ INIT_WORK(&chip->batfet_open_work, batfet_open_work);
read_shutdown_soc_and_iavg(chip);
@@ -3750,6 +3932,7 @@
}
battery_insertion_check(chip);
+ batfet_status_check(chip);
battery_status_check(chip);
calculate_soc_work(&(chip->calculate_soc_delayed_work.work));
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 3679aa9..950b88a 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -28,6 +28,7 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
+#include <linux/of_batterydata.h>
/* Interrupt offsets */
#define INT_RT_STS(base) (base + 0x10)
@@ -273,6 +274,8 @@
struct qpnp_chg_irq chg_failed;
struct qpnp_chg_irq chg_vbatdet_lo;
struct qpnp_chg_irq batt_pres;
+ struct qpnp_chg_irq vchg_loop;
+ struct qpnp_chg_irq batt_temp_ok;
bool bat_is_cool;
bool bat_is_warm;
bool chg_done;
@@ -290,6 +293,7 @@
unsigned int safe_voltage_mv;
unsigned int max_voltage_mv;
unsigned int min_voltage_mv;
+ int prev_usb_max_ma;
int set_vddmax_mv;
int delta_vddmax_mv;
unsigned int warm_bat_mv;
@@ -302,8 +306,8 @@
unsigned int maxinput_dc_ma;
unsigned int hot_batt_p;
unsigned int cold_batt_p;
- unsigned int warm_bat_decidegc;
- unsigned int cool_bat_decidegc;
+ int warm_bat_decidegc;
+ int cool_bat_decidegc;
unsigned int safe_current;
unsigned int revision;
unsigned int type;
@@ -500,6 +504,23 @@
}
static int
+qpnp_chg_is_batt_temp_ok(struct qpnp_chg_chip *chip)
+{
+ u8 batt_rt_sts;
+ int rc;
+
+ rc = qpnp_chg_read(chip, &batt_rt_sts,
+ INT_RT_STS(chip->bat_if_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->bat_if_base), rc);
+ return rc;
+ }
+
+ return (batt_rt_sts & BAT_TEMP_OK_IRQ) ? 1 : 0;
+}
+
+static int
qpnp_chg_is_batt_present(struct qpnp_chg_chip *chip)
{
u8 batt_pres_rt_sts;
@@ -516,6 +537,23 @@
return (batt_pres_rt_sts & BATT_PRES_IRQ) ? 1 : 0;
}
+static int
+qpnp_chg_is_batfet_closed(struct qpnp_chg_chip *chip)
+{
+ u8 batfet_closed_rt_sts;
+ int rc;
+
+ rc = qpnp_chg_read(chip, &batfet_closed_rt_sts,
+ INT_RT_STS(chip->bat_if_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->bat_if_base), rc);
+ return rc;
+ }
+
+ return (batfet_closed_rt_sts & BAT_FET_ON_IRQ) ? 1 : 0;
+}
+
#define USB_VALID_BIT BIT(7)
static int
qpnp_chg_is_usb_chg_plugged_in(struct qpnp_chg_chip *chip)
@@ -650,6 +688,92 @@
return rc;
}
+#define QPNP_CHG_VINMIN_MIN_MV 4200
+#define QPNP_CHG_VINMIN_HIGH_MIN_MV 5600
+#define QPNP_CHG_VINMIN_HIGH_MIN_VAL 0x2B
+#define QPNP_CHG_VINMIN_MAX_MV 9600
+#define QPNP_CHG_VINMIN_STEP_MV 50
+#define QPNP_CHG_VINMIN_STEP_HIGH_MV 200
+#define QPNP_CHG_VINMIN_MASK 0x1F
+#define QPNP_CHG_VINMIN_MIN_VAL 0x10
+static int
+qpnp_chg_vinmin_set(struct qpnp_chg_chip *chip, int voltage)
+{
+ u8 temp;
+
+ if (voltage < QPNP_CHG_VINMIN_MIN_MV
+ || voltage > QPNP_CHG_VINMIN_MAX_MV) {
+ pr_err("bad mV=%d asked to set\n", voltage);
+ return -EINVAL;
+ }
+ if (voltage >= QPNP_CHG_VINMIN_HIGH_MIN_MV) {
+ temp = QPNP_CHG_VINMIN_HIGH_MIN_VAL;
+ temp += (voltage - QPNP_CHG_VINMIN_MIN_MV)
+ / QPNP_CHG_VINMIN_STEP_HIGH_MV;
+ } else {
+ temp = QPNP_CHG_VINMIN_MIN_VAL;
+ temp += (voltage - QPNP_CHG_VINMIN_MIN_MV)
+ / QPNP_CHG_VINMIN_STEP_MV;
+ }
+
+ pr_debug("voltage=%d setting %02x\n", voltage, temp);
+ return qpnp_chg_masked_write(chip,
+ chip->chgr_base + CHGR_VIN_MIN,
+ QPNP_CHG_VINMIN_MASK, temp, 1);
+}
+
+static int
+qpnp_chg_vinmin_get(struct qpnp_chg_chip *chip)
+{
+ int rc, vin_min_mv;
+ u8 vin_min;
+
+ rc = qpnp_chg_read(chip, &vin_min, chip->chgr_base + CHGR_VIN_MIN, 1);
+ if (rc) {
+ pr_err("failed to read VIN_MIN rc=%d\n", rc);
+ return 0;
+ }
+
+ if (vin_min == 0)
+ vin_min_mv = QPNP_CHG_I_MAX_MIN_100;
+ else if (vin_min > QPNP_CHG_VINMIN_HIGH_MIN_VAL)
+ vin_min_mv = QPNP_CHG_VINMIN_HIGH_MIN_MV +
+ (vin_min - QPNP_CHG_VINMIN_HIGH_MIN_VAL)
+ * QPNP_CHG_VINMIN_STEP_HIGH_MV;
+ else
+ vin_min_mv = QPNP_CHG_VINMIN_MIN_MV +
+ (vin_min - QPNP_CHG_VINMIN_MIN_VAL)
+ * QPNP_CHG_VINMIN_STEP_MV;
+ pr_debug("vin_min= 0x%02x, ma = %d\n", vin_min, vin_min_mv);
+
+ return vin_min_mv;
+}
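A stand-alone arithmetic check (illustrative only) of the low-range VIN_MIN encoding used by the two helpers above: 50 mV steps starting at 4200 mV and register value 0x10.

#include <stdio.h>

#define VINMIN_MIN_MV	4200
#define VINMIN_STEP_MV	50
#define VINMIN_MIN_VAL	0x10

int main(void)
{
	int mv = 4800;	/* example request, below the 5600 mV high range */
	int reg = VINMIN_MIN_VAL + (mv - VINMIN_MIN_MV) / VINMIN_STEP_MV;
	int back = VINMIN_MIN_MV + (reg - VINMIN_MIN_VAL) * VINMIN_STEP_MV;

	printf("%d mV -> 0x%02x -> %d mV\n", mv, reg, back);	/* 0x1c, 4800 */
	return 0;
}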
+
+static int
+qpnp_chg_usb_iusbmax_get(struct qpnp_chg_chip *chip)
+{
+ int rc, iusbmax_ma;
+ u8 iusbmax;
+
+ rc = qpnp_chg_read(chip, &iusbmax,
+ chip->usb_chgpth_base + CHGR_I_MAX_REG, 1);
+ if (rc) {
+ pr_err("failed to read IUSB_MAX rc=%d\n", rc);
+ return 0;
+ }
+
+ if (iusbmax == 0)
+ iusbmax_ma = QPNP_CHG_I_MAX_MIN_100;
+ else if (iusbmax == 0x01)
+ iusbmax_ma = QPNP_CHG_I_MAX_MIN_150;
+ else
+ iusbmax_ma = iusbmax * QPNP_CHG_I_MAXSTEP_MA;
+
+ pr_debug("iusbmax = 0x%02x, ma = %d\n", iusbmax, iusbmax_ma);
+
+ return iusbmax_ma;
+}
+
#define USB_SUSPEND_BIT BIT(0)
static int
qpnp_chg_usb_suspend_enable(struct qpnp_chg_chip *chip, int enable)
@@ -797,6 +921,17 @@
pr_err("request ADC error\n");
}
+static irqreturn_t
+qpnp_chg_buck_vchg_loop_irq_handler(int irq, void *_chip)
+{
+ struct qpnp_chg_chip *chip = _chip;
+
+ if (chip->bat_if_base)
+ power_supply_changed(&chip->batt_psy);
+
+ return IRQ_HANDLED;
+}
+
#define EOC_CHECK_PERIOD_MS 10000
static irqreturn_t
qpnp_chg_vbatdet_lo_irq_handler(int irq, void *_chip)
@@ -867,6 +1002,7 @@
if (!usb_present) {
qpnp_chg_usb_suspend_enable(chip, 1);
chip->chg_done = false;
+ chip->prev_usb_max_ma = -EINVAL;
} else {
schedule_delayed_work(&chip->eoc_work,
msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
@@ -879,6 +1015,19 @@
}
static irqreturn_t
+qpnp_chg_bat_if_batt_temp_irq_handler(int irq, void *_chip)
+{
+ struct qpnp_chg_chip *chip = _chip;
+ int batt_temp_good;
+
+ batt_temp_good = qpnp_chg_is_batt_temp_ok(chip);
+ pr_debug("batt-temp triggered: %d\n", batt_temp_good);
+
+ power_supply_changed(&chip->batt_psy);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
qpnp_chg_bat_if_batt_pres_irq_handler(int irq, void *_chip)
{
struct qpnp_chg_chip *chip = _chip;
@@ -966,6 +1115,12 @@
qpnp_chg_chgr_chg_fastchg_irq_handler(int irq, void *_chip)
{
struct qpnp_chg_chip *chip = _chip;
+ u8 chgr_sts;
+ int rc;
+
+ rc = qpnp_chg_read(chip, &chgr_sts, INT_RT_STS(chip->chgr_base), 1);
+ if (rc)
+ pr_err("failed to read interrupt sts %d\n", rc);
pr_debug("FAST_CHG IRQ triggered\n");
chip->chg_done = false;
@@ -978,7 +1133,12 @@
chip->resuming_charging = false;
qpnp_chg_set_appropriate_vbatdet(chip);
}
+
qpnp_chg_enable_irq(&chip->chg_vbatdet_lo);
+ if (chgr_sts & FAST_CHG_ON_IRQ)
+ qpnp_chg_enable_irq(&chip->vchg_loop);
+ else
+ qpnp_chg_disable_irq(&chip->vchg_loop);
return IRQ_HANDLED;
}
@@ -1004,6 +1164,8 @@
switch (psp) {
case POWER_SUPPLY_PROP_CHARGING_ENABLED:
case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
return 1;
default:
break;
@@ -1103,12 +1265,16 @@
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_TEMP,
@@ -1124,6 +1290,9 @@
"bms",
};
+static int charger_monitor;
+module_param(charger_monitor, int, 0644);
+
#define USB_WALL_THRESHOLD_MA 500
static int
qpnp_power_get_property_mains(struct power_supply *psy,
@@ -1375,6 +1544,28 @@
return ret.intval;
}
+static int get_prop_vchg_loop(struct qpnp_chg_chip *chip)
+{
+ u8 buck_sts;
+ int rc;
+
+ rc = qpnp_chg_read(chip, &buck_sts, INT_RT_STS(chip->buck_base), 1);
+
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->buck_base), rc);
+ return rc;
+ }
+ pr_debug("buck usb sts 0x%x\n", buck_sts);
+
+ return (buck_sts & VCHG_LOOP_IRQ) ? 1 : 0;
+}
+
+static int get_prop_online(struct qpnp_chg_chip *chip)
+{
+ return qpnp_chg_is_batfet_closed(chip);
+}
+
static void
qpnp_batt_external_power_changed(struct power_supply *psy)
{
@@ -1392,16 +1583,28 @@
if (qpnp_chg_is_usb_chg_plugged_in(chip)) {
chip->usb_psy->get_property(chip->usb_psy,
POWER_SUPPLY_PROP_CURRENT_MAX, &ret);
+
+ if (chip->prev_usb_max_ma == ret.intval)
+ goto skip_set_iusb_max;
+
if (ret.intval <= 2 && !chip->use_default_batt_values &&
get_prop_batt_present(chip)) {
qpnp_chg_usb_suspend_enable(chip, 1);
qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
} else {
qpnp_chg_usb_suspend_enable(chip, 0);
- qpnp_chg_iusbmax_set(chip, ret.intval / 1000);
+ if (((ret.intval / 1000) > USB_WALL_THRESHOLD_MA)
+ && (charger_monitor)) {
+ qpnp_chg_iusbmax_set(chip,
+ USB_WALL_THRESHOLD_MA);
+ } else {
+ qpnp_chg_iusbmax_set(chip, ret.intval / 1000);
+ }
}
+ chip->prev_usb_max_ma = ret.intval;
}
+skip_set_iusb_max:
pr_debug("end of power supply changed\n");
power_supply_changed(&chip->batt_psy);
}
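
The hunk above caches the last USB current request in prev_usb_max_ma so an unchanged request skips the IUSB_MAX write, and it clamps anything above USB_WALL_THRESHOLD_MA while the charger_monitor module parameter is set. A stripped-down sketch of that decision path; the 500 mA threshold mirrors the code above, everything else (names, helper) is illustrative only:

#define WALL_THRESHOLD_MA       500

struct my_chg {
        int prev_usb_max_ma;            /* -EINVAL until the first request */
};

static int charger_monitor;             /* nonzero while user space monitors charging */

static void my_set_iusb_max(int ma)
{
        /* stub: the real driver programs the IUSB_MAX register here */
}

static void my_usb_current_request(struct my_chg *chip, int ua)
{
        int ma = ua / 1000;

        if (chip->prev_usb_max_ma == ua)
                return;                         /* unchanged request, skip the write */

        if (ma > WALL_THRESHOLD_MA && charger_monitor)
                ma = WALL_THRESHOLD_MA;         /* let the monitor ramp it up later */

        my_set_iusb_max(ma);
        chip->prev_usb_max_ma = ua;
}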
@@ -1463,6 +1666,18 @@
case POWER_SUPPLY_PROP_CYCLE_COUNT:
val->intval = get_prop_cycle_count(chip);
break;
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
+ val->intval = get_prop_vchg_loop(chip);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+ val->intval = qpnp_chg_usb_iusbmax_get(chip) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ val->intval = qpnp_chg_vinmin_get(chip) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = get_prop_online(chip);
+ break;
default:
return -EINVAL;
}
@@ -1502,38 +1717,6 @@
mask, btc_cfg, 1);
}
-#define QPNP_CHG_VINMIN_MIN_MV 4200
-#define QPNP_CHG_VINMIN_HIGH_MIN_MV 5600
-#define QPNP_CHG_VINMIN_HIGH_MIN_VAL 0x2B
-#define QPNP_CHG_VINMIN_MAX_MV 9600
-#define QPNP_CHG_VINMIN_STEP_MV 50
-#define QPNP_CHG_VINMIN_STEP_HIGH_MV 200
-#define QPNP_CHG_VINMIN_MASK 0x1F
-static int
-qpnp_chg_vinmin_set(struct qpnp_chg_chip *chip, int voltage)
-{
- u8 temp;
-
- if (voltage < QPNP_CHG_VINMIN_MIN_MV
- || voltage > QPNP_CHG_VINMIN_MAX_MV) {
- pr_err("bad mV=%d asked to set\n", voltage);
- return -EINVAL;
- }
- if (voltage >= QPNP_CHG_VINMIN_HIGH_MIN_MV) {
- temp = QPNP_CHG_VINMIN_HIGH_MIN_VAL;
- temp += (voltage - QPNP_CHG_VINMIN_MIN_MV)
- / QPNP_CHG_VINMIN_STEP_HIGH_MV;
- } else {
- temp = (voltage - QPNP_CHG_VINMIN_MIN_MV)
- / QPNP_CHG_VINMIN_STEP_MV;
- }
-
- pr_debug("voltage=%d setting %02x\n", voltage, temp);
- return qpnp_chg_masked_write(chip,
- chip->chgr_base + CHGR_VIN_MIN,
- QPNP_CHG_VINMIN_MASK, temp, 1);
-}
-
#define QPNP_CHG_IBATSAFE_MIN_MA 100
#define QPNP_CHG_IBATSAFE_MAX_MA 3250
#define QPNP_CHG_I_STEP_MA 50
@@ -2165,6 +2348,8 @@
if (qpnp_adc_tm_channel_measure(&chip->adc_param))
pr_err("request ADC error\n");
+
+ power_supply_changed(&chip->batt_psy);
}
static int
@@ -2214,6 +2399,12 @@
case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
qpnp_batt_system_temp_level_set(chip, val->intval);
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+ qpnp_chg_iusbmax_set(chip, val->intval / 1000);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ qpnp_chg_vinmin_set(chip, val->intval / 1000);
+ break;
default:
return -EINVAL;
}
@@ -2362,7 +2553,49 @@
}
enable_irq_wake(chip->batt_pres.irq);
+
+ chip->batt_temp_ok.irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "bat-temp-ok");
+ if (chip->batt_temp_ok.irq < 0) {
+ pr_err("Unable to get bat-temp-ok irq\n");
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev, chip->batt_temp_ok.irq,
+ qpnp_chg_bat_if_batt_temp_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "bat-temp-ok", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d bat-temp-ok irq: %d\n",
+ chip->batt_temp_ok.irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->batt_temp_ok.irq);
+
break;
+ case SMBB_BUCK_SUBTYPE:
+ case SMBBP_BUCK_SUBTYPE:
+ case SMBCL_BUCK_SUBTYPE:
+ chip->vchg_loop.irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "vchg-loop");
+ if (chip->vchg_loop.irq < 0) {
+ pr_err("Unable to get vchg-loop irq\n");
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev, chip->vchg_loop.irq,
+ qpnp_chg_buck_vchg_loop_irq_handler,
+ IRQF_TRIGGER_RISING,
+ "vchg-loop", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d vchg-loop irq: %d\n",
+ chip->vchg_loop.irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->vchg_loop.irq);
+ qpnp_chg_disable_irq(&chip->vchg_loop);
+ break;
+
case SMBB_USB_CHGPTH_SUBTYPE:
case SMBBP_USB_CHGPTH_SUBTYPE:
case SMBCL_USB_CHGPTH_SUBTYPE:
@@ -2426,6 +2659,41 @@
return rc;
}
+static int
+qpnp_chg_load_battery_data(struct qpnp_chg_chip *chip)
+{
+ struct bms_battery_data batt_data;
+ struct device_node *node;
+ struct qpnp_vadc_result result;
+ int rc;
+
+ node = of_find_node_by_name(chip->spmi->dev.of_node,
+ "qcom,battery-data");
+ if (node) {
+ memset(&batt_data, 0, sizeof(struct bms_battery_data));
+ rc = qpnp_vadc_read(LR_MUX2_BAT_ID, &result);
+ if (rc) {
+ pr_err("error reading batt id channel = %d, rc = %d\n",
+ LR_MUX2_BAT_ID, rc);
+ return rc;
+ }
+
+ rc = of_batterydata_read_data(node,
+ &batt_data, result.physical);
+ if (rc) {
+ pr_err("failed to read battery data: %d\n", rc);
+ return rc;
+ }
+
+ if (batt_data.max_voltage_uv >= 0)
+ chip->max_voltage_mv = batt_data.max_voltage_uv / 1000;
+ if (batt_data.iterm_ua >= 0)
+ chip->term_current = batt_data.iterm_ua / 1000;
+ }
+
+ return 0;
+}
+
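
qpnp_chg_load_battery_data() above only overrides max_voltage_mv and term_current when the matched profile actually provides them (the >= 0 checks), so platform defaults survive a sparse profile. A generic sketch of the same optional-override pattern using of_property_read_u32(), which leaves its output untouched on failure; the property names here are hypothetical:

#include <linux/of.h>

struct my_chip {
        struct device_node *node;
        int max_voltage_mv;             /* preset to a platform default */
        int term_current_ma;            /* preset to a platform default */
};

static void my_load_profile(struct my_chip *chip)
{
        u32 val;

        /* of_property_read_u32() returns 0 only when the property exists */
        if (!of_property_read_u32(chip->node, "example,max-voltage-mv", &val))
                chip->max_voltage_mv = val;

        if (!of_property_read_u32(chip->node, "example,term-current-ma", &val))
                chip->term_current_ma = val;
}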
#define WDOG_EN_BIT BIT(7)
static int
qpnp_chg_hwinit(struct qpnp_chg_chip *chip, u8 subtype,
@@ -2818,6 +3086,7 @@
return -ENOMEM;
}
+ chip->prev_usb_max_ma = -EINVAL;
chip->dev = &(spmi->dev);
chip->spmi = spmi;
@@ -2833,7 +3102,10 @@
if (rc)
goto fail_chg_enable;
- /* Check if bat_if is set in DT and make sure VADC is present */
+ /*
+ * Check if bat_if is set in DT and make sure VADC is present
+ * Also try loading the battery data profile if bat_if exists
+ */
spmi_for_each_container_dev(spmi_resource, spmi) {
if (!spmi_resource) {
pr_err("qpnp_chg: spmi resource absent\n");
@@ -2863,6 +3135,10 @@
rc = qpnp_vadc_is_ready();
if (rc)
goto fail_chg_enable;
+
+ rc = qpnp_chg_load_battery_data(chip);
+ if (rc)
+ goto fail_chg_enable;
}
}
@@ -3186,6 +3462,66 @@
return rc;
}
+static int
+qpnp_chg_ops_set(const char *val, const struct kernel_param *kp)
+{
+ return -EINVAL;
+}
+
+#define MAX_LEN_VADC 10
+static int
+qpnp_chg_usb_in_get(char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct qpnp_vadc_result results;
+
+ rc = qpnp_vadc_is_ready();
+ if (rc)
+ return rc;
+
+ rc = qpnp_vadc_read(USBIN, &results);
+ if (rc) {
+ pr_err("Unable to read vchg rc=%d\n", rc);
+ return 0;
+ }
+ rc = snprintf(val, MAX_LEN_VADC, "%lld\n", results.physical);
+
+ return rc;
+}
+
+static int
+qpnp_chg_vchg_get(char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct qpnp_vadc_result results;
+
+ rc = qpnp_vadc_is_ready();
+ if (rc)
+ return rc;
+
+ rc = qpnp_vadc_read(VCHG_SNS, &results);
+ if (rc) {
+ pr_err("Unable to read vchg rc=%d\n", rc);
+ return 0;
+ }
+ rc = snprintf(val, MAX_LEN_VADC, "%lld\n", results.physical);
+
+ return rc;
+}
+
+static struct kernel_param_ops usb_in_uv_param_ops = {
+ .set = qpnp_chg_ops_set,
+ .get = qpnp_chg_usb_in_get,
+};
+
+static struct kernel_param_ops vchg_uv_param_ops = {
+ .set = qpnp_chg_ops_set,
+ .get = qpnp_chg_vchg_get,
+};
+
+module_param_cb(usb_in_uv, &usb_in_uv_param_ops, NULL, 0644);
+module_param_cb(vchg_uv, &vchg_uv_param_ops, NULL, 0644);
+
static const struct dev_pm_ops qpnp_chg_pm_ops = {
.resume = qpnp_chg_resume,
.suspend = qpnp_chg_suspend,
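
The usb_in_uv and vchg_uv hooks added above use the standard module_param_cb mechanism: a kernel_param_ops whose get formats a live ADC reading and whose set rejects writes. A minimal sketch of the same idiom, with a stubbed read_sensor_uv() standing in for the VADC call:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>

/* hypothetical helper standing in for the driver's ADC read */
static int read_sensor_uv(s64 *uv)
{
        *uv = 5000000;          /* stub value; real code queries the VADC */
        return 0;
}

static int sensor_uv_set(const char *val, const struct kernel_param *kp)
{
        return -EINVAL;         /* read-only parameter: refuse writes */
}

static int sensor_uv_get(char *buf, const struct kernel_param *kp)
{
        s64 uv;
        int rc;

        rc = read_sensor_uv(&uv);
        if (rc)
                return rc;

        /* .get writes into a page-sized buffer; keep the output short */
        return scnprintf(buf, 16, "%lld\n", uv);
}

static struct kernel_param_ops sensor_uv_ops = {
        .set = sensor_uv_set,
        .get = sensor_uv_get,
};
module_param_cb(sensor_uv, &sensor_uv_ops, NULL, 0444);

Reading /sys/module/<module>/parameters/sensor_uv then returns the latest conversion; the patch keeps 0644 permissions and relies on the set callback returning -EINVAL to keep the values effectively read-only.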
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 863339b..25b4b5e 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -2719,7 +2719,7 @@
pdata->use_bam = false;
}
- if (pdata->bam_producer_pipe_index) {
+ if (!pdata->bam_producer_pipe_index) {
dev_warn(&pdev->dev,
"missing qcom,bam-producer-pipe-index entry in device-tree\n");
pdata->use_bam = false;
@@ -2919,15 +2919,6 @@
goto err_probe_reqmem;
}
- if (pdata && pdata->ver_reg_exists) {
- enum msm_spi_qup_version ver =
- msm_spi_get_qup_hw_ver(&pdev->dev, dd);
- if (dd->qup_ver != ver)
- dev_warn(&pdev->dev,
- "%s: HW version different then initially assumed by probe",
- __func__);
- }
-
if (pdata && pdata->rsl_id) {
struct remote_mutex_id rmid;
rmid.r_spinlock_id = pdata->rsl_id;
@@ -2986,6 +2977,16 @@
}
pclk_enabled = 1;
+
+ if (pdata && pdata->ver_reg_exists) {
+ enum msm_spi_qup_version ver =
+ msm_spi_get_qup_hw_ver(&pdev->dev, dd);
+ if (dd->qup_ver != ver)
+ dev_warn(&pdev->dev,
+ "%s: HW version different then initially assumed by probe",
+ __func__);
+ }
+
/* GSBI dose not exists on B-family MSM-chips */
if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
rc = msm_spi_configure_gsbi(dd, pdev);
@@ -3200,6 +3201,13 @@
if (!dd)
goto suspend_exit;
msm_spi_pm_suspend_runtime(device);
+
+ /*
+ * set the device's runtime PM status to 'suspended'
+ */
+ pm_runtime_disable(device);
+ pm_runtime_set_suspended(device);
+ pm_runtime_enable(device);
}
suspend_exit:
return 0;
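
The reordered spi_qsd suspend path now forces the runtime-PM core to treat the controller as suspended after the system-suspend handler has powered it down; disable/set_suspended/enable is the accepted way to change the status without racing runtime callbacks. A hedged sketch of that idiom in a generic system-suspend handler (driver and device names are placeholders):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int mydrv_suspend(struct device *dev)
{
        /* run the normal runtime-suspend path to power the block down */
        pm_runtime_suspend(dev);

        /*
         * Record the new state with the runtime-PM core. The status may
         * only be changed while runtime PM is disabled, hence the
         * disable/set/enable bracket.
         */
        pm_runtime_disable(dev);
        pm_runtime_set_suspended(dev);
        pm_runtime_enable(dev);

        return 0;
}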
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 03c58a2..974cadc 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -1514,7 +1514,7 @@
goto read_node_fail;
}
- rails[i].curr_level = 0;
+ rails[i].curr_level = -1;
rails[i].reg = NULL;
i++;
}
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index f3b29c9..3b0ef8c 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -1315,7 +1315,7 @@
return -ENODEV;
if (param->threshold_notification == NULL) {
- pr_err("No notification for high/low temp??\n");
+ pr_debug("No notification for high/low temp??\n");
return -EINVAL;
}
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index c8e5163..e36806f 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -1186,6 +1186,7 @@
int ret;
struct smux_lch_t *ch;
int enable_powerdown = 0;
+ int tx_ready = 0;
lcid = pkt->hdr.lcid;
ch = &smux_lch[lcid];
@@ -1203,7 +1204,7 @@
if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
schedule_notify(lcid, SMUX_CONNECTED, NULL);
if (!(list_empty(&ch->tx_queue)))
- list_channel(ch);
+ tx_ready = 1;
}
ret = 0;
} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
@@ -1226,6 +1227,9 @@
spin_unlock(&smux.tx_lock_lha2);
}
+ if (tx_ready)
+ list_channel(ch);
+
return ret;
}
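
The n_smux fix defers list_channel() with a local tx_ready flag so it runs only after the per-channel spinlock is dropped, since list_channel() takes smux.tx_lock_lha2 itself. The shape of that defer-past-the-lock pattern, reduced to essentials (lock and helper names are made up):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ch_lock);

static void schedule_tx(void *ch)
{
        /* stub: the real code queues the channel under its own lock */
}

static void handle_open_ack(void *ch, int have_data)
{
        unsigned long flags;
        int tx_ready = 0;

        spin_lock_irqsave(&ch_lock, flags);
        if (have_data)
                tx_ready = 1;           /* remember the decision under the lock */
        spin_unlock_irqrestore(&ch_lock, flags);

        if (tx_ready)
                schedule_tx(ch);        /* act on it after the lock is gone */
}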
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 9e22afd..a243a05 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -219,8 +219,67 @@
struct msm_bus_scale_pdata *bus_scale_table;
bool rx_discard_flush_issued;
int rx_count_callback;
+ unsigned int *reg_ptr;
};
+unsigned int regmap_nonblsp[UART_DM_LAST] = {
+ [UART_DM_MR1] = UARTDM_MR1_ADDR,
+ [UART_DM_MR2] = UARTDM_MR2_ADDR,
+ [UART_DM_IMR] = UARTDM_IMR_ADDR,
+ [UART_DM_SR] = UARTDM_SR_ADDR,
+ [UART_DM_CR] = UARTDM_CR_ADDR,
+ [UART_DM_CSR] = UARTDM_CSR_ADDR,
+ [UART_DM_IPR] = UARTDM_IPR_ADDR,
+ [UART_DM_ISR] = UARTDM_ISR_ADDR,
+ [UART_DM_RX_TOTAL_SNAP] = UARTDM_RX_TOTAL_SNAP_ADDR,
+ [UART_DM_TFWR] = UARTDM_TFWR_ADDR,
+ [UART_DM_RFWR] = UARTDM_RFWR_ADDR,
+ [UART_DM_RF] = UARTDM_RF_ADDR,
+ [UART_DM_TF] = UARTDM_TF_ADDR,
+ [UART_DM_MISR] = UARTDM_MISR_ADDR,
+ [UART_DM_DMRX] = UARTDM_DMRX_ADDR,
+ [UART_DM_NCF_TX] = UARTDM_NCF_TX_ADDR,
+ [UART_DM_DMEN] = UARTDM_DMEN_ADDR,
+ [UART_DM_TXFS] = UARTDM_TXFS_ADDR,
+ [UART_DM_RXFS] = UARTDM_RXFS_ADDR,
+ [UART_DM_RX_TRANS_CTRL] = UARTDM_RX_TRANS_CTRL_ADDR,
+};
+
+unsigned int regmap_blsp[UART_DM_LAST] = {
+ [UART_DM_MR1] = 0x0,
+ [UART_DM_MR2] = 0x4,
+ [UART_DM_IMR] = 0xb0,
+ [UART_DM_SR] = 0xa4,
+ [UART_DM_CR] = 0xa8,
+ [UART_DM_CSR] = 0xa0,
+ [UART_DM_IPR] = 0x18,
+ [UART_DM_ISR] = 0xb4,
+ [UART_DM_RX_TOTAL_SNAP] = 0xbc,
+ [UART_DM_TFWR] = 0x1c,
+ [UART_DM_RFWR] = 0x20,
+ [UART_DM_RF] = 0x140,
+ [UART_DM_TF] = 0x100,
+ [UART_DM_MISR] = 0xac,
+ [UART_DM_DMRX] = 0x34,
+ [UART_DM_NCF_TX] = 0x40,
+ [UART_DM_DMEN] = 0x3c,
+ [UART_DM_TXFS] = 0x4c,
+ [UART_DM_RXFS] = 0x50,
+ [UART_DM_RX_TRANS_CTRL] = 0xcc,
+};
+
+static struct of_device_id msm_hs_match_table[] = {
+ { .compatible = "qcom,msm-hsuart-v14",
+ .data = regmap_blsp
+ },
+ {
+ .compatible = "qcom,msm-hsuart-v13",
+ .data = regmap_nonblsp
+ },
+ {}
+};
+
+
#define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */
#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
#define UARTDM_RX_BUF_SIZE 512
@@ -421,14 +480,23 @@
}
static inline unsigned int msm_hs_read(struct uart_port *uport,
- unsigned int offset)
+ unsigned int index)
{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ unsigned int offset;
+
+ offset = *(msm_uport->reg_ptr + index);
+
return readl_relaxed(uport->membase + offset);
}
-static inline void msm_hs_write(struct uart_port *uport, unsigned int offset,
+static inline void msm_hs_write(struct uart_port *uport, unsigned int index,
unsigned int value)
{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ unsigned int offset;
+
+ offset = *(msm_uport->reg_ptr + index);
writel_relaxed(value, uport->membase + offset);
}
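
From here on every UARTDM access goes through a logical register index: reg_ptr points at either regmap_blsp or regmap_nonblsp (chosen from the of_device_id .data in probe, or from is_gsbi_uart() on legacy targets), and the read/write wrappers translate index to offset. A compact sketch of that indirection with hypothetical names:

#include <linux/io.h>

enum my_regs { REG_CTRL, REG_STATUS, REG_LAST };

/* one offset table per hardware revision */
static const unsigned int regmap_v1[REG_LAST] = {
        [REG_CTRL]      = 0x10,
        [REG_STATUS]    = 0x08,
};

static const unsigned int regmap_v2[REG_LAST] = {
        [REG_CTRL]      = 0xa8,
        [REG_STATUS]    = 0xa4,
};

struct my_port {
        void __iomem *base;
        const unsigned int *regmap;     /* regmap_v1 or regmap_v2, set at probe */
};

static inline unsigned int my_read(struct my_port *p, enum my_regs r)
{
        return readl_relaxed(p->base + p->regmap[r]);
}

static inline void my_write(struct my_port *p, enum my_regs r, unsigned int v)
{
        writel_relaxed(v, p->base + p->regmap[r]);
}

Selecting the table once at probe keeps the fast path to a single array lookup and removes the compile-time #ifdef on the register layout.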
@@ -492,23 +560,23 @@
if (val) {
spin_lock_irqsave(&uport->lock, flags);
- ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
+ ret = msm_hs_read(uport, UART_DM_MR2);
if (is_blsp_uart(msm_uport))
ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
else
ret |= UARTDM_MR2_LOOP_MODE_BMSK;
- msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
+ msm_hs_write(uport, UART_DM_MR2, ret);
spin_unlock_irqrestore(&uport->lock, flags);
} else {
spin_lock_irqsave(&uport->lock, flags);
- ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
+ ret = msm_hs_read(uport, UART_DM_MR2);
if (is_blsp_uart(msm_uport))
ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
else
ret &= ~UARTDM_MR2_LOOP_MODE_BMSK;
- msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
+ msm_hs_write(uport, UART_DM_MR2, ret);
spin_unlock_irqrestore(&uport->lock, flags);
}
/* Calling CLOCK API. Hence mb() requires here. */
@@ -528,7 +596,7 @@
msm_hs_clock_vote(msm_uport);
spin_lock_irqsave(&uport->lock, flags);
- ret = msm_hs_read(&msm_uport->uport, UARTDM_MR2_ADDR);
+ ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
spin_unlock_irqrestore(&uport->lock, flags);
msm_hs_clock_unvote(msm_uport);
@@ -737,63 +805,63 @@
switch (bps) {
case 300:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x00);
+ msm_hs_write(uport, UART_DM_CSR, 0x00);
rxstale = 1;
break;
case 600:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x11);
+ msm_hs_write(uport, UART_DM_CSR, 0x11);
rxstale = 1;
break;
case 1200:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x22);
+ msm_hs_write(uport, UART_DM_CSR, 0x22);
rxstale = 1;
break;
case 2400:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x33);
+ msm_hs_write(uport, UART_DM_CSR, 0x33);
rxstale = 1;
break;
case 4800:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x44);
+ msm_hs_write(uport, UART_DM_CSR, 0x44);
rxstale = 1;
break;
case 9600:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x55);
+ msm_hs_write(uport, UART_DM_CSR, 0x55);
rxstale = 2;
break;
case 14400:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x66);
+ msm_hs_write(uport, UART_DM_CSR, 0x66);
rxstale = 3;
break;
case 19200:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x77);
+ msm_hs_write(uport, UART_DM_CSR, 0x77);
rxstale = 4;
break;
case 28800:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x88);
+ msm_hs_write(uport, UART_DM_CSR, 0x88);
rxstale = 6;
break;
case 38400:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
+ msm_hs_write(uport, UART_DM_CSR, 0x99);
rxstale = 8;
break;
case 57600:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
+ msm_hs_write(uport, UART_DM_CSR, 0xaa);
rxstale = 16;
break;
case 76800:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
+ msm_hs_write(uport, UART_DM_CSR, 0xbb);
rxstale = 16;
break;
case 115200:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
+ msm_hs_write(uport, UART_DM_CSR, 0xcc);
rxstale = 31;
break;
case 230400:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
+ msm_hs_write(uport, UART_DM_CSR, 0xee);
rxstale = 31;
break;
case 460800:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
+ msm_hs_write(uport, UART_DM_CSR, 0xff);
rxstale = 31;
break;
case 4000000:
@@ -806,11 +874,11 @@
case 1152000:
case 1000000:
case 921600:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
+ msm_hs_write(uport, UART_DM_CSR, 0xff);
rxstale = 31;
break;
default:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
+ msm_hs_write(uport, UART_DM_CSR, 0xff);
/* default to 9600 */
bps = 9600;
rxstale = 2;
@@ -848,14 +916,14 @@
data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
- msm_hs_write(uport, UARTDM_IPR_ADDR, data);
+ msm_hs_write(uport, UART_DM_IPR, data);
/*
* It is suggested to do reset of transmitter and receiver after
* changing any protocol configuration. Here Baud rate and stale
* timeout are getting updated. Hence reset transmitter and receiver.
*/
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
+ msm_hs_write(uport, UART_DM_CR, RESET_TX);
+ msm_hs_write(uport, UART_DM_CR, RESET_RX);
}
@@ -867,35 +935,35 @@
switch (bps) {
case 9600:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
+ msm_hs_write(uport, UART_DM_CSR, 0x99);
rxstale = 2;
break;
case 14400:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
+ msm_hs_write(uport, UART_DM_CSR, 0xaa);
rxstale = 3;
break;
case 19200:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
+ msm_hs_write(uport, UART_DM_CSR, 0xbb);
rxstale = 4;
break;
case 28800:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
+ msm_hs_write(uport, UART_DM_CSR, 0xcc);
rxstale = 6;
break;
case 38400:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xdd);
+ msm_hs_write(uport, UART_DM_CSR, 0xdd);
rxstale = 8;
break;
case 57600:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
+ msm_hs_write(uport, UART_DM_CSR, 0xee);
rxstale = 16;
break;
case 115200:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
+ msm_hs_write(uport, UART_DM_CSR, 0xff);
rxstale = 31;
break;
default:
- msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
+ msm_hs_write(uport, UART_DM_CSR, 0x99);
/* default to 9600 */
bps = 9600;
rxstale = 2;
@@ -905,7 +973,7 @@
data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
- msm_hs_write(uport, UARTDM_IPR_ADDR, data);
+ msm_hs_write(uport, UART_DM_IPR, data);
}
@@ -928,7 +996,7 @@
struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
mutex_lock(&msm_uport->clk_mutex);
- msm_hs_write(uport, UARTDM_IMR_ADDR, 0);
+ msm_hs_write(uport, UART_DM_IMR, 0);
/*
* Disable Rx channel of UARTDM
@@ -939,7 +1007,7 @@
* Note: should not reset the receiver here immediately as it is not
* suggested to do disable/reset or reset/disable at the same time.
*/
- data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
+ data = msm_hs_read(uport, UART_DM_DMEN);
if (is_blsp_uart(msm_uport)) {
/* Disable UARTDM RX BAM Interface */
data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
@@ -947,7 +1015,7 @@
data &= ~UARTDM_RX_DM_EN_BMSK;
}
- msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
+ msm_hs_write(uport, UART_DM_DMEN, data);
/* 300 is the minimum baud support by the driver */
bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);
@@ -962,7 +1030,7 @@
else
msm_hs_set_bps_locked(uport, bps);
- data = msm_hs_read(uport, UARTDM_MR2_ADDR);
+ data = msm_hs_read(uport, UART_DM_MR2);
data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
/* set parity */
if (PARENB == (c_cflag & PARENB)) {
@@ -1001,10 +1069,10 @@
}
data |= UARTDM_MR2_ERROR_MODE_BMSK;
/* write parity/bits per char/stop bit configuration */
- msm_hs_write(uport, UARTDM_MR2_ADDR, data);
+ msm_hs_write(uport, UART_DM_MR2, data);
/* Configure HW flow control */
- data = msm_hs_read(uport, UARTDM_MR1_ADDR);
+ data = msm_hs_read(uport, UART_DM_MR1);
data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
@@ -1013,7 +1081,7 @@
data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
}
- msm_hs_write(uport, UARTDM_MR1_ADDR, data);
+ msm_hs_write(uport, UART_DM_MR1, data);
uport->ignore_status_mask = termios->c_iflag & INPCK;
uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
@@ -1025,8 +1093,8 @@
/* Set Transmit software time out */
uart_update_timeout(uport, c_cflag, bps);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
+ msm_hs_write(uport, UART_DM_CR, RESET_RX);
+ msm_hs_write(uport, UART_DM_CR, RESET_TX);
if (msm_uport->rx.flush == FLUSH_NONE) {
wake_lock(&msm_uport->rx.wake_lock);
@@ -1059,7 +1127,7 @@
}
}
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
mb();
mutex_unlock(&msm_uport->clk_mutex);
}
@@ -1129,12 +1197,12 @@
unsigned int data;
/* disable dlink */
- data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
+ data = msm_hs_read(uport, UART_DM_DMEN);
if (is_blsp_uart(msm_uport))
data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
else
data &= ~UARTDM_RX_DM_EN_BMSK;
- msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
+ msm_hs_write(uport, UART_DM_DMEN, data);
/* calling DMOV or CLOCK API. Hence mb() */
mb();
@@ -1198,7 +1266,7 @@
if (is_blsp_uart(msm_uport)) {
/* Issue TX BAM Start IFC command */
- msm_hs_write(uport, UARTDM_CR_ADDR, START_TX_BAM_IFC);
+ msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
} else {
tx->command_ptr->num_rows =
(((tx_count + 15) >> 4) << 16) |
@@ -1214,11 +1282,11 @@
/* Save tx_count to use in Callback */
tx->tx_count = tx_count;
- msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count);
+ msm_hs_write(uport, UART_DM_NCF_TX, tx_count);
/* Disable the tx_ready interrupt */
msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
/* Calling next DMOV API. Hence mb() here. */
mb();
@@ -1252,16 +1320,16 @@
printk(KERN_ERR "Error: rx started in buffer state = %x",
buffer_pending);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
- msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
- msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
+ msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+ msm_hs_write(uport, UART_DM_DMRX, UARTDM_RX_BUF_SIZE);
+ msm_hs_write(uport, UART_DM_CR, STALE_EVENT_ENABLE);
msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;
/*
* Enable UARTDM Rx Interface as previously it has been
* disable in set_termios before configuring baud rate.
*/
- data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
+ data = msm_hs_read(uport, UART_DM_DMEN);
if (is_blsp_uart(msm_uport)) {
/* Enable UARTDM Rx BAM Interface */
data |= UARTDM_RX_BAM_ENABLE_BMSK;
@@ -1269,8 +1337,8 @@
data |= UARTDM_RX_DM_EN_BMSK;
}
- msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ msm_hs_write(uport, UART_DM_DMEN, data);
+ msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
/* Calling next DMOV API. Hence mb() here. */
mb();
@@ -1281,9 +1349,9 @@
*/
data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
RX_DMRX_CYCLIC_EN);
- msm_hs_write(uport, UARTDM_RX_TRANS_CTRL_ADDR, data);
+ msm_hs_write(uport, UART_DM_RX_TRANS_CTRL, data);
/* Issue RX BAM Start IFC command */
- msm_hs_write(uport, UARTDM_CR_ADDR, START_RX_BAM_IFC);
+ msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
mb();
}
@@ -1378,12 +1446,12 @@
notify = &msm_uport->notify;
rx = &msm_uport->rx;
- status = msm_hs_read(uport, UARTDM_SR_ADDR);
+ status = msm_hs_read(uport, UART_DM_SR);
spin_lock_irqsave(&uport->lock, flags);
if (!is_blsp_uart(msm_uport))
- msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
+ msm_hs_write(uport, UART_DM_CR, STALE_EVENT_DISABLE);
/* overflow is not connect to data in a FIFO */
if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
@@ -1424,7 +1492,7 @@
}
if (error_f)
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
+ msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;
@@ -1443,7 +1511,7 @@
if (is_blsp_uart(msm_uport)) {
rx_count = msm_uport->rx_count_callback;
} else {
- rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
+ rx_count = msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP);
/* order the read of rx.buffer */
rmb();
}
@@ -1566,7 +1634,7 @@
}
msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
- msm_hs_write(&(msm_uport->uport), UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ msm_hs_write(&(msm_uport->uport), UART_DM_IMR, msm_uport->imr_reg);
/* Calling clk API. Hence mb() requires. */
mb();
@@ -1675,17 +1743,17 @@
/* RTS is active low */
set_rts = TIOCM_RTS & mctrl ? 0 : 1;
- data = msm_hs_read(uport, UARTDM_MR1_ADDR);
+ data = msm_hs_read(uport, UART_DM_MR1);
if (set_rts) {
/*disable auto ready-for-receiving */
data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
- msm_hs_write(uport, UARTDM_MR1_ADDR, data);
+ msm_hs_write(uport, UART_DM_MR1, data);
/* set RFR_N to high */
- msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH);
+ msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
} else {
/* Enable auto ready-for-receiving */
data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
- msm_hs_write(uport, UARTDM_MR1_ADDR, data);
+ msm_hs_write(uport, UART_DM_MR1, data);
}
mb();
}
@@ -1711,7 +1779,7 @@
/* Enable DELTA_CTS Interrupt */
msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
mb();
}
@@ -1735,7 +1803,7 @@
unsigned long flags;
spin_lock_irqsave(&uport->lock, flags);
- msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? START_BREAK : STOP_BREAK);
+ msm_hs_write(uport, UART_DM_CR, ctl ? START_BREAK : STOP_BREAK);
mb();
spin_unlock_irqrestore(&uport->lock, flags);
}
@@ -1766,7 +1834,7 @@
static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
{
/* clear interrupt */
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
+ msm_hs_write(uport, UART_DM_CR, RESET_CTS);
/* Calling CLOCK API. Hence mb() requires here. */
mb();
uport->icount.cts++;
@@ -1802,7 +1870,7 @@
}
/* Make sure the uart is finished with the last byte */
- sr_status = msm_hs_read(uport, UARTDM_SR_ADDR);
+ sr_status = msm_hs_read(uport, UART_DM_SR);
if (!(sr_status & UARTDM_SR_TXEMT_BMSK)) {
spin_unlock_irqrestore(&uport->lock, flags);
mutex_unlock(&msm_uport->clk_mutex);
@@ -1815,7 +1883,7 @@
msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
if (!is_blsp_uart(msm_uport)) {
- msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
+ msm_hs_write(uport, UART_DM_CR, FORCE_STALE_EVENT);
/*
* Before returning make sure that device writel
* completed. Hence mb() requires here.
@@ -1916,20 +1984,20 @@
spin_lock_irqsave(&uport->lock, flags);
- isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR);
+ isr_status = msm_hs_read(uport, UART_DM_MISR);
/* Uart RX starting */
if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
wake_lock(&rx->wake_lock); /* hold wakelock while rx dma */
msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
/* Complete device write for IMR. Hence mb() requires. */
mb();
}
/* Stale rx interrupt */
if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
- msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
+ msm_hs_write(uport, UART_DM_CR, STALE_EVENT_DISABLE);
+ msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
/*
* Complete device write before calling DMOV API. Hence
* mb() requires here.
@@ -1949,12 +2017,11 @@
/* tx ready interrupt */
if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
/* Clear TX Ready */
- msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY);
+ msm_hs_write(uport, UART_DM_CR, CLEAR_TX_READY);
if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR,
- msm_uport->imr_reg);
+ msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
}
/*
* Complete both writes before starting new TX.
@@ -1983,7 +2050,7 @@
if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
/* TX FIFO is empty */
msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
/*
* Complete device write before starting clock_off request.
* Hence mb() requires here.
@@ -2027,7 +2094,7 @@
msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
msm_uport->clk_req_off_state = CLK_REQ_OFF_START;
msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
/*
* Complete device write before retuning back.
* Hence mb() requires here.
@@ -2067,13 +2134,13 @@
case MSM_HS_CLK_REQUEST_OFF:
if (msm_uport->rx.flush == FLUSH_STOP ||
msm_uport->rx.flush == FLUSH_SHUTDOWN) {
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
- data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
+ msm_hs_write(uport, UART_DM_CR, RESET_RX);
+ data = msm_hs_read(uport, UART_DM_DMEN);
if (is_blsp_uart(msm_uport))
data |= UARTDM_RX_BAM_ENABLE_BMSK;
else
data |= UARTDM_RX_DM_EN_BMSK;
- msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
+ msm_hs_write(uport, UART_DM_DMEN, data);
/* Complete above device write. Hence mb() here. */
mb();
}
@@ -2302,18 +2369,18 @@
}
/* Set auto RFR Level */
- data = msm_hs_read(uport, UARTDM_MR1_ADDR);
+ data = msm_hs_read(uport, UART_DM_MR1);
data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
- msm_hs_write(uport, UARTDM_MR1_ADDR, data);
+ msm_hs_write(uport, UART_DM_MR1, data);
/* Make sure RXSTALE count is non-zero */
- data = msm_hs_read(uport, UARTDM_IPR_ADDR);
+ data = msm_hs_read(uport, UART_DM_IPR);
if (!data) {
data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
- msm_hs_write(uport, UARTDM_IPR_ADDR, data);
+ msm_hs_write(uport, UART_DM_IPR, data);
}
if (is_blsp_uart(msm_uport)) {
@@ -2323,21 +2390,21 @@
/* Enable Data Mover Mode */
data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK;
}
- msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
+ msm_hs_write(uport, UART_DM_DMEN, data);
/* Reset TX */
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
- msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW);
+ msm_hs_write(uport, UART_DM_CR, RESET_TX);
+ msm_hs_write(uport, UART_DM_CR, RESET_RX);
+ msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
+ msm_hs_write(uport, UART_DM_CR, RESET_BREAK_INT);
+ msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+ msm_hs_write(uport, UART_DM_CR, RESET_CTS);
+ msm_hs_write(uport, UART_DM_CR, RFR_LOW);
/* Turn on Uart Receiver */
- msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK);
+ msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
/* Turn on Uart Transmitter */
- msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK);
+ msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
/* Initialize the tx */
tx->tx_ready_int_en = 0;
@@ -2363,7 +2430,8 @@
/* Enable reading the current CTS, no harm even if CTS is ignored */
msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
- msm_hs_write(uport, UARTDM_TFWR_ADDR, 0); /* TXLEV on empty TX fifo */
+ /* TXLEV on empty TX fifo */
+ msm_hs_write(uport, UART_DM_TFWR, 0);
/*
* Complete all device write related configuration before
* queuing RX request. Hence mb() requires here.
@@ -2469,9 +2537,9 @@
/* Set up Uart Receive */
if (is_blsp_uart(msm_uport))
- msm_hs_write(uport, UARTDM_RFWR_ADDR, 32);
+ msm_hs_write(uport, UART_DM_RFWR, 32);
else
- msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
+ msm_hs_write(uport, UART_DM_RFWR, 0);
INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
@@ -2859,6 +2927,7 @@
struct resource *resource;
int core_irqres, bam_irqres, wakeup_irqres;
struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+ const struct of_device_id *match;
if (pdev->dev.of_node) {
dev_dbg(&pdev->dev, "device tree enabled\n");
@@ -2905,6 +2974,12 @@
uport = &msm_uport->uport;
uport->dev = &pdev->dev;
+ match = of_match_device(msm_hs_match_table, &pdev->dev);
+ if (match)
+ msm_uport->reg_ptr = (unsigned int *)match->data;
+ else if (is_gsbi_uart(msm_uport))
+ msm_uport->reg_ptr = regmap_nonblsp;
+
if (pdev->dev.of_node)
msm_uport->uart_type = BLSP_HSUART;
@@ -3099,8 +3174,7 @@
}
/* configure the CR Protection to Enable */
- msm_hs_write(uport, UARTDM_CR_ADDR, CR_PROTECTION_EN);
-
+ msm_hs_write(uport, UART_DM_CR, CR_PROTECTION_EN);
/*
* Enable Command register protection before going ahead as this hw
@@ -3207,16 +3281,16 @@
if (!is_blsp_uart(msm_uport)) {
spin_lock_irqsave(&uport->lock, flags);
/* disable UART TX interface to DM */
- data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
+ data = msm_hs_read(uport, UART_DM_DMEN);
data &= ~UARTDM_TX_DM_EN_BMSK;
- msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
+ msm_hs_write(uport, UART_DM_DMEN, data);
/* turn OFF UART Transmitter */
- msm_hs_write(uport, UARTDM_CR_ADDR,
- UARTDM_CR_TX_DISABLE_BMSK);
+ msm_hs_write(uport, UART_DM_CR,
+ UARTDM_CR_TX_DISABLE_BMSK);
/* reset UART TX */
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
+ msm_hs_write(uport, UART_DM_CR, RESET_TX);
/* reset UART TX Error */
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX_ERROR);
+ msm_hs_write(uport, UART_DM_CR, RESET_TX_ERROR);
msm_uport->tx.flush = FLUSH_STOP;
spin_unlock_irqrestore(&uport->lock, flags);
/* discard flush */
@@ -3242,12 +3316,12 @@
pm_runtime_disable(uport->dev);
/* Disable the transmitter */
- msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
+ msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_DISABLE_BMSK);
/* Disable the receiver */
- msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK);
+ msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
msm_uport->imr_reg = 0;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
/*
* Complete all device write before actually disabling uartclk.
* Hence mb() requires here.
@@ -3323,10 +3397,6 @@
.runtime_idle = msm_hs_runtime_idle,
};
-static struct of_device_id msm_hs_match_table[] = {
- { .compatible = "qcom,msm-hsuart-v14" },
- {}
-};
static struct platform_driver msm_serial_hs_platform_driver = {
.probe = msm_hs_probe,
diff --git a/drivers/tty/serial/msm_serial_hs_hwreg.h b/drivers/tty/serial/msm_serial_hs_hwreg.h
index cd24f23..d912b9f 100644
--- a/drivers/tty/serial/msm_serial_hs_hwreg.h
+++ b/drivers/tty/serial/msm_serial_hs_hwreg.h
@@ -60,6 +60,30 @@
UARTDM_LAST,
};
+enum msm_hs_regs {
+ UART_DM_MR1,
+ UART_DM_MR2,
+ UART_DM_IMR,
+ UART_DM_SR,
+ UART_DM_CR,
+ UART_DM_CSR,
+ UART_DM_IPR,
+ UART_DM_ISR,
+ UART_DM_RX_TOTAL_SNAP,
+ UART_DM_RFWR,
+ UART_DM_TFWR,
+ UART_DM_RF,
+ UART_DM_TF,
+ UART_DM_MISR,
+ UART_DM_DMRX,
+ UART_DM_NCF_TX,
+ UART_DM_DMEN,
+ UART_DM_TXFS,
+ UART_DM_RXFS,
+ UART_DM_RX_TRANS_CTRL,
+ UART_DM_LAST,
+};
+
#define UARTDM_MR1_ADDR 0x0
#define UARTDM_MR2_ADDR 0x4
@@ -219,70 +243,6 @@
#define UARTDM_TX_BAM_ENABLE_BMSK 0x4
#define UARTDM_RX_BAM_ENABLE_BMSK 0x8
-/*
- * Some of the BLSP Based UART Core(v14) existing register offsets
- * are different compare to GSBI based UART Core(v13)
- * Hence add the changed register offsets for UART Core v14
- */
-#ifdef CONFIG_MSM_UARTDM_Core_v14
-
-/* write only register */
-#define UARTDM_CSR_ADDR 0xa0
-
-/* write only register */
-#define UARTDM_TF_ADDR 0x100
-#define UARTDM_TF2_ADDR 0x104
-#define UARTDM_TF3_ADDR 0x108
-#define UARTDM_TF4_ADDR 0x10c
-#define UARTDM_TF5_ADDR 0x110
-#define UARTDM_TF6_ADDR 0x114
-#define UARTDM_TF7_ADDR 0x118
-#define UARTDM_TF8_ADDR 0x11c
-#define UARTDM_TF9_ADDR 0x120
-#define UARTDM_TF10_ADDR 0x124
-#define UARTDM_TF11_ADDR 0x128
-#define UARTDM_TF12_ADDR 0x12c
-#define UARTDM_TF13_ADDR 0x130
-#define UARTDM_TF14_ADDR 0x134
-#define UARTDM_TF15_ADDR 0x138
-#define UARTDM_TF16_ADDR 0x13c
-
-/* write only register */
-#define UARTDM_CR_ADDR 0xa8
-/* write only register */
-#define UARTDM_IMR_ADDR 0xb0
-#define UARTDM_IRDA_ADDR 0xb8
-
-/* Read Only register */
-#define UARTDM_SR_ADDR 0xa4
-
-/* Read Only register */
-#define UARTDM_RF_ADDR 0x140
-#define UARTDM_RF2_ADDR 0x144
-#define UARTDM_RF3_ADDR 0x148
-#define UARTDM_RF4_ADDR 0x14c
-#define UARTDM_RF5_ADDR 0x150
-#define UARTDM_RF6_ADDR 0x154
-#define UARTDM_RF7_ADDR 0x158
-#define UARTDM_RF8_ADDR 0x15c
-#define UARTDM_RF9_ADDR 0x160
-#define UARTDM_RF10_ADDR 0x164
-#define UARTDM_RF11_ADDR 0x168
-#define UARTDM_RF12_ADDR 0x16c
-#define UARTDM_RF13_ADDR 0x170
-#define UARTDM_RF14_ADDR 0x174
-#define UARTDM_RF15_ADDR 0x178
-#define UARTDM_RF16_ADDR 0x17c
-
-/* Read Only register */
-#define UARTDM_MISR_ADDR 0xac
-
-/* Read Only register */
-#define UARTDM_ISR_ADDR 0xb4
-#define UARTDM_RX_TOTAL_SNAP_ADDR 0xbc
-
-#else
-
/* Register offsets for UART Core v13 */
/* write only register */
@@ -316,6 +276,4 @@
#define UARTDM_ISR_ADDR 0x14
#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38
-#endif
-
#endif /* MSM_SERIAL_HS_HWREG_H */
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 0f8a7f0..c89f6d8 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -213,6 +213,7 @@
struct power_supply *ext_vbus_psy;
unsigned int online;
unsigned int host_mode;
+ unsigned int voltage_max;
unsigned int current_max;
unsigned int vdd_no_vol_level;
unsigned int vdd_low_vol_level;
@@ -2119,6 +2120,9 @@
case POWER_SUPPLY_PROP_SCOPE:
val->intval = mdwc->host_mode;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = mdwc->voltage_max;
+ break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
val->intval = mdwc->current_max;
break;
@@ -2166,6 +2170,9 @@
case POWER_SUPPLY_PROP_ONLINE:
mdwc->online = val->intval;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ mdwc->voltage_max = val->intval;
+ break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
mdwc->current_max = val->intval;
break;
@@ -2206,6 +2213,20 @@
power_supply_changed(&mdwc->usb_psy);
}
+static int
+dwc3_msm_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
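
dwc3_msm_property_is_writeable() is what lets user space write VOLTAGE_MAX through the usb power_supply node; set_property then caches the value in mdwc->voltage_max. A sketch of the registration side under the power_supply API used in this tree, where the driver embeds struct power_supply directly; all names here are hypothetical:

#include <linux/kernel.h>
#include <linux/power_supply.h>

struct my_usb {
        struct power_supply psy;
        int voltage_max;
};

static enum power_supply_property my_props[] = {
        POWER_SUPPLY_PROP_VOLTAGE_MAX,
};

static int my_get_property(struct power_supply *psy,
                enum power_supply_property psp,
                union power_supply_propval *val)
{
        struct my_usb *mu = container_of(psy, struct my_usb, psy);

        if (psp != POWER_SUPPLY_PROP_VOLTAGE_MAX)
                return -EINVAL;
        val->intval = mu->voltage_max;
        return 0;
}

static int my_set_property(struct power_supply *psy,
                enum power_supply_property psp,
                const union power_supply_propval *val)
{
        struct my_usb *mu = container_of(psy, struct my_usb, psy);

        if (psp != POWER_SUPPLY_PROP_VOLTAGE_MAX)
                return -EINVAL;
        mu->voltage_max = val->intval;
        return 0;
}

static int my_is_writeable(struct power_supply *psy,
                enum power_supply_property psp)
{
        return psp == POWER_SUPPLY_PROP_VOLTAGE_MAX;
}

static int my_register(struct device *dev, struct my_usb *mu)
{
        mu->psy.name                    = "my-usb";
        mu->psy.type                    = POWER_SUPPLY_TYPE_USB;
        mu->psy.properties              = my_props;
        mu->psy.num_properties          = ARRAY_SIZE(my_props);
        mu->psy.get_property            = my_get_property;
        mu->psy.set_property            = my_set_property;
        mu->psy.property_is_writeable   = my_is_writeable;

        return power_supply_register(dev, &mu->psy);
}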
static char *dwc3_msm_pm_power_supplied_to[] = {
"battery",
@@ -2214,6 +2235,7 @@
static enum power_supply_property dwc3_msm_pm_power_props_usb[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_TYPE,
POWER_SUPPLY_PROP_SCOPE,
@@ -2878,6 +2900,8 @@
mdwc->usb_psy.set_property = dwc3_msm_power_set_property_usb;
mdwc->usb_psy.external_power_changed =
dwc3_msm_external_power_changed;
+ mdwc->usb_psy.property_is_writeable =
+ dwc3_msm_property_is_writeable;
ret = power_supply_register(&pdev->dev, &mdwc->usb_psy);
if (ret < 0) {
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index a7bea63..0d4d580 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -804,7 +804,7 @@
*/
dev_dbg(phy->dev, "enter lpm as\n"
"unable to start A-device\n");
- phy->state = OTG_STATE_UNDEFINED;
+ phy->state = OTG_STATE_A_IDLE;
pm_runtime_put_sync(phy->dev);
return;
}
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index a775459..b0b2f56 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -127,6 +127,8 @@
struct work_struct connect_w;
struct work_struct disconnect_w;
+ struct work_struct suspend_w;
+ struct work_struct resume_w;
};
static struct bam_portmaster {
@@ -542,7 +544,9 @@
struct bam_ch_info *d = &port->data_ch;
int status;
+ spin_lock(&port->port_lock_ul);
if (!port->port_usb) {
+ spin_unlock(&port->port_lock_ul);
pr_err("%s: port->port_usb is NULL", __func__);
return;
}
@@ -551,6 +555,7 @@
status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
if (status)
pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
+ spin_unlock(&port->port_lock_ul);
}
static void gbam_start_endless_tx(struct gbam_port *port)
@@ -558,7 +563,9 @@
struct bam_ch_info *d = &port->data_ch;
int status;
+ spin_lock(&port->port_lock_dl);
if (!port->port_usb) {
+ spin_unlock(&port->port_lock_dl);
pr_err("%s: port->port_usb is NULL", __func__);
return;
}
@@ -567,6 +574,8 @@
status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
if (status)
pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
+ spin_unlock(&port->port_lock_dl);
+
}
static void gbam_stop_endless_rx(struct gbam_port *port)
@@ -574,7 +583,9 @@
struct bam_ch_info *d = &port->data_ch;
int status;
+ spin_lock(&port->port_lock_ul);
if (!port->port_usb) {
+ spin_unlock(&port->port_lock_ul);
pr_err("%s: port->port_usb is NULL", __func__);
return;
}
@@ -583,14 +594,17 @@
status = usb_ep_dequeue(port->port_usb->out, d->rx_req);
if (status)
pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
-
+ spin_unlock(&port->port_lock_ul);
}
+
static void gbam_stop_endless_tx(struct gbam_port *port)
{
struct bam_ch_info *d = &port->data_ch;
int status;
+ spin_lock(&port->port_lock_dl);
if (!port->port_usb) {
+ spin_unlock(&port->port_lock_dl);
pr_err("%s: port->port_usb is NULL", __func__);
return;
}
@@ -599,6 +613,7 @@
status = usb_ep_dequeue(port->port_usb->in, d->tx_req);
if (status)
pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+ spin_unlock(&port->port_lock_dl);
}
static void gbam_start(void *param, enum usb_bam_pipe_dir dir)
@@ -893,6 +908,46 @@
pr_debug("%s: done\n", __func__);
}
+static int gbam_wake_cb(void *param)
+{
+ struct gbam_port *port = (struct gbam_port *)param;
+ struct bam_ch_info *d;
+ struct f_rmnet *dev;
+
+ dev = port_to_rmnet(port->gr);
+ d = &port->data_ch;
+
+ pr_debug("%s: woken up by peer\n", __func__);
+
+ return usb_gadget_wakeup(dev->cdev->gadget);
+}
+
+static void gbam2bam_suspend_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, suspend_w);
+ struct bam_ch_info *d = &port->data_ch;
+
+ pr_debug("%s: suspend work started\n", __func__);
+
+ usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port);
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port);
+ usb_bam_suspend(&d->ipa_params);
+ }
+}
+
+static void gbam2bam_resume_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, resume_w);
+ struct bam_ch_info *d = &port->data_ch;
+
+ pr_debug("%s: resume work started\n", __func__);
+
+ usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+ usb_bam_resume(&d->ipa_params);
+}
+
static int gbam_peer_reset_cb(void *param)
{
struct gbam_port *port = (struct gbam_port *)param;
@@ -1122,6 +1177,8 @@
INIT_WORK(&port->connect_w, gbam2bam_connect_work);
INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
+ INIT_WORK(&port->suspend_w, gbam2bam_suspend_work);
+ INIT_WORK(&port->resume_w, gbam2bam_resume_work);
/* data ch */
d = &port->data_ch;
@@ -1446,20 +1503,6 @@
return ret;
}
-static int gbam_wake_cb(void *param)
-{
- struct gbam_port *port = (struct gbam_port *)param;
- struct bam_ch_info *d;
- struct f_rmnet *dev;
-
- dev = port_to_rmnet(port->gr);
- d = &port->data_ch;
-
- pr_debug("%s: woken up by peer\n", __func__);
-
- return usb_gadget_wakeup(dev->cdev->gadget);
-}
-
void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
struct gbam_port *port;
@@ -1474,11 +1517,7 @@
pr_debug("%s: suspended port %d\n", __func__, port_num);
- usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port);
- if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
- usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port);
- usb_bam_suspend(&d->ipa_params);
- }
+ queue_work(gbam_wq, &port->suspend_w);
}
void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
@@ -1495,7 +1534,5 @@
pr_debug("%s: resumed port %d\n", __func__, port_num);
- usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
- if (trans == USB_GADGET_XPORT_BAM2BAM_IPA)
- usb_bam_resume(&d->ipa_params);
+ queue_work(gbam_wq, &port->resume_w);
}
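
Both BAM gadget ports now push the usb_bam suspend/resume calls onto the port workqueue, since gbam_suspend()/gbam_resume() can be invoked from atomic context while usb_bam_suspend() and friends may sleep. A bare-bones sketch of that deferral (workqueue, field, and helper names are illustrative):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct my_port {
        struct work_struct suspend_w;
};

static struct workqueue_struct *my_wq;

static void my_do_suspend(struct my_port *port)
{
        /* stub: the real work item calls usb_bam_suspend() and friends */
}

static void my_suspend_work(struct work_struct *w)
{
        struct my_port *port = container_of(w, struct my_port, suspend_w);

        my_do_suspend(port);            /* safe to sleep here */
}

static int my_port_init(struct my_port *port)
{
        my_wq = create_singlethread_workqueue("my_port_wq");
        if (!my_wq)
                return -ENOMEM;

        INIT_WORK(&port->suspend_w, my_suspend_work);
        return 0;
}

/* may be called from atomic context */
static void my_suspend(struct my_port *port)
{
        queue_work(my_wq, &port->suspend_w);
}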
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 577a4fe..b315605 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -62,10 +62,15 @@
struct work_struct connect_w;
struct work_struct disconnect_w;
+ struct work_struct suspend_w;
+ struct work_struct resume_w;
};
struct bam_data_port *bam2bam_data_ports[BAM2BAM_DATA_N_PORTS];
+static void bam2bam_data_suspend_work(struct work_struct *w);
+static void bam2bam_data_resume_work(struct work_struct *w);
+
/*------------data_path----------------------------*/
static void bam_data_endless_rx_complete(struct usb_ep *ep,
@@ -351,6 +356,8 @@
INIT_WORK(&port->connect_w, bam2bam_data_connect_work);
INIT_WORK(&port->disconnect_w, bam2bam_data_disconnect_work);
+ INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+ INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
/* data ch */
d = &port->data_ch;
@@ -578,12 +585,8 @@
d = &port->data_ch;
pr_debug("%s: suspended port %d\n", __func__, port_num);
- usb_bam_register_wake_cb(d->dst_connection_idx, bam_data_wake_cb, port);
- if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
- usb_bam_register_start_stop_cbs(bam_data_start, bam_data_stop,
- port);
- usb_bam_suspend(&d->ipa_params);
- }
+
+ queue_work(bam_data_wq, &port->suspend_w);
}
void bam_data_resume(u8 port_num)
@@ -596,6 +599,34 @@
d = &port->data_ch;
pr_debug("%s: resumed port %d\n", __func__, port_num);
+
+ queue_work(bam_data_wq, &port->resume_w);
+}
+
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+ struct bam_data_port *port =
+ container_of(w, struct bam_data_port, suspend_w);
+ struct bam_data_ch_info *d = &port->data_ch;
+
+ pr_debug("%s: suspend work started\n", __func__);
+
+ usb_bam_register_wake_cb(d->dst_connection_idx, bam_data_wake_cb, port);
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ usb_bam_register_start_stop_cbs(bam_data_start, bam_data_stop,
+ port);
+ usb_bam_suspend(&d->ipa_params);
+ }
+}
+
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+ struct bam_data_port *port =
+ container_of(w, struct bam_data_port, resume_w);
+ struct bam_data_ch_info *d = &port->data_ch;
+
+ pr_debug("%s: resume work started\n", __func__);
+
usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
usb_bam_resume(&d->ipa_params);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index a09b1ab..20425e2d 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -1229,6 +1229,12 @@
} else {
ehci_writel(ehci, temp, status_reg);
}
+
+ if (ehci->reset_delay) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ msleep(ehci->reset_delay);
+ spin_lock_irqsave(&ehci->lock, flags);
+ }
break;
/* For downstream facing ports (these): one hub port is put
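
The reset_delay handling added above has to drop ehci->lock around the sleep, because hub_control() holds it with interrupts disabled and msleep() schedules. The unlock/sleep/relock step in isolation, with placeholder names:

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(hc_lock);

static void port_reset_step(unsigned int reset_delay_ms)
{
        unsigned long flags;

        spin_lock_irqsave(&hc_lock, flags);
        /* ... program the port reset/status register here ... */

        if (reset_delay_ms) {
                /* never sleep under a spinlock: release it for the delay */
                spin_unlock_irqrestore(&hc_lock, flags);
                msleep(reset_delay_ms);
                spin_lock_irqsave(&hc_lock, flags);
        }

        /* ... continue with the port state machine ... */
        spin_unlock_irqrestore(&hc_lock, flags);
}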
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index ef45d49..cfc5961 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -1122,10 +1122,16 @@
u32 cmd;
unsigned long flags;
int retries = 0, ret, cnt = RESET_SIGNAL_TIME_USEC;
+ s32 next_latency = 0;
- if (pdata && pdata->swfi_latency)
- pm_qos_update_request(&mehci->pm_qos_req_dma,
- pdata->swfi_latency + 1);
+ if (pdata && pdata->swfi_latency) {
+ next_latency = pdata->swfi_latency + 1;
+ pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
+ if (pdata->standalone_latency)
+ next_latency = pdata->standalone_latency + 1;
+ else
+ next_latency = PM_QOS_DEFAULT_VALUE;
+ }
mehci->bus_reset = 1;
@@ -1196,9 +1202,8 @@
pr_debug("reset completed\n");
fail:
mehci->bus_reset = 0;
- if (pdata && pdata->swfi_latency)
- pm_qos_update_request(&mehci->pm_qos_req_dma,
- PM_QOS_DEFAULT_VALUE);
+ if (next_latency)
+ pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
}
static int ehci_hsic_bus_suspend(struct usb_hcd *hcd)
@@ -1229,19 +1234,27 @@
int retry_cnt = 0;
int tight_resume = 0;
struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
+ s32 next_latency = 0;
dbg_log_event(NULL, "Resume RH", 0);
+ if (pdata && pdata->swfi_latency) {
+ next_latency = pdata->swfi_latency + 1;
+ pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
+ if (pdata->standalone_latency)
+ next_latency = pdata->standalone_latency + 1;
+ else
+ next_latency = PM_QOS_DEFAULT_VALUE;
+ }
+
/* keep delay between bus states */
if (time_before(jiffies, ehci->next_statechange))
usleep_range(5000, 5000);
spin_lock_irq(&ehci->lock);
if (!HCD_HW_ACCESSIBLE(hcd)) {
- spin_unlock_irq(&ehci->lock);
mehci->resume_status = -ESHUTDOWN;
- complete(&mehci->rt_completion);
- return 0;
+ goto exit;
}
if (unlikely(ehci->debug)) {
@@ -1313,13 +1326,7 @@
&mehci->timer->gptimer1_ctrl);
spin_unlock_irq(&ehci->lock);
- if (pdata && pdata->swfi_latency)
- pm_qos_update_request(&mehci->pm_qos_req_dma,
- pdata->swfi_latency + 1);
wait_for_completion(&mehci->gpt0_completion);
- if (pdata && pdata->standalone_latency)
- pm_qos_update_request(&mehci->pm_qos_req_dma,
- pdata->standalone_latency + 1);
spin_lock_irq(&ehci->lock);
} else {
dbg_log_event(NULL, "FPR: Tightloop", 0);
@@ -1357,9 +1364,11 @@
dbg_log_event(NULL, "FPR: RT-Done", 0);
mehci->resume_status = 1;
+exit:
spin_unlock_irq(&ehci->lock);
-
complete(&mehci->rt_completion);
+ if (next_latency)
+ pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
return 0;
}
@@ -1878,6 +1887,8 @@
&pdata->strobe_pad_offset);
of_property_read_u32(node, "hsic,data-pad-offset",
&pdata->data_pad_offset);
+ of_property_read_u32(node, "hsic,reset-delay",
+ &pdata->reset_delay);
of_property_read_u32(node, "hsic,log2-itc",
&pdata->log2_irq_thresh);
if (pdata->log2_irq_thresh > 6)
@@ -1981,6 +1992,9 @@
mehci->ehci.resume_sof_bug = 1;
}
+ if (pdata->reset_delay)
+ mehci->ehci.reset_delay = pdata->reset_delay;
+
mehci->ehci.pool_64_bit_align = pdata->pool_64_bit_align;
mehci->enable_hbm = pdata->enable_hbm;
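
The HSIC bus reset and resume paths now compute next_latency once: they vote the tight swfi_latency bound into the DMA-latency pm_qos class while the hardware sequence runs, then relax to the standalone vote or PM_QOS_DEFAULT_VALUE. A minimal sketch of that voting pattern; the latency fields and names are hypothetical, and pm_qos_req_dma is assumed to have been set up with pm_qos_add_request() at probe:

#include <linux/pm_qos.h>

struct my_hsic {
        struct pm_qos_request pm_qos_req_dma;   /* added at probe with
                                                 * pm_qos_add_request(...,
                                                 * PM_QOS_CPU_DMA_LATENCY,
                                                 * PM_QOS_DEFAULT_VALUE) */
        u32 swfi_latency;               /* tight bound for the sequence, usec */
        u32 standalone_latency;         /* relaxed bound afterwards, 0 if unused */
};

static void my_bus_resume(struct my_hsic *hsic)
{
        s32 next_latency = 0;

        if (hsic->swfi_latency) {
                /* keep CPU idle states shallow for the duration */
                pm_qos_update_request(&hsic->pm_qos_req_dma,
                                      hsic->swfi_latency + 1);
                next_latency = hsic->standalone_latency ?
                                hsic->standalone_latency + 1 :
                                PM_QOS_DEFAULT_VALUE;
        }

        /* ... latency-sensitive resume sequence runs here ... */

        if (next_latency)
                pm_qos_update_request(&hsic->pm_qos_req_dma, next_latency);
}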
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 0498a6a..7cd945a 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -155,6 +155,7 @@
unsigned resume_sof_bug:1;/*Chip Idea HC*/
unsigned reset_sof_bug:1; /*Chip Idea HC*/
bool disable_cerr;
+ u32 reset_delay;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 104ee5f..06e3a1b 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -863,7 +863,8 @@
struct msm_otg_platform_data *pdata = motg->pdata;
/* Check if target allows min_vote to be same as no_vote */
- if (vote >= pdata->bus_scale_table->num_usecases)
+ if (pdata->bus_scale_table &&
+ vote >= pdata->bus_scale_table->num_usecases)
vote = USB_NO_PERF_VOTE;
if (motg->bus_perf_client) {
@@ -3605,8 +3606,11 @@
else
val->intval = POWER_SUPPLY_SCOPE_DEVICE;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = motg->voltage_max;
+ break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
- val->intval = motg->current_max;
+ val->intval = motg->current_max;
break;
/* Reflect USB enumeration */
case POWER_SUPPLY_PROP_PRESENT:
@@ -3637,6 +3641,9 @@
case POWER_SUPPLY_PROP_ONLINE:
motg->online = val->intval;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ motg->voltage_max = val->intval;
+ break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
motg->current_max = val->intval;
break;
@@ -3657,6 +3664,7 @@
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_ONLINE:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
case POWER_SUPPLY_PROP_CURRENT_MAX:
return 1;
default:
@@ -3673,6 +3681,7 @@
static enum power_supply_property otg_pm_power_props_usb[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_TYPE,
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 43eda51..017fa8e 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -24,6 +24,7 @@
mdss-dsi-objs += msm_mdss_io_8974.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss-dsi.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_edp.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_edp_aux.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_io_util.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
diff --git a/drivers/video/msm/mdss/dsi_host_v2.c b/drivers/video/msm/mdss/dsi_host_v2.c
index 96f0f8c..f2de17d 100644
--- a/drivers/video/msm/mdss/dsi_host_v2.c
+++ b/drivers/video/msm/mdss/dsi_host_v2.c
@@ -857,6 +857,26 @@
return ret;
}
+static int msm_dsi_cont_on(struct mdss_panel_data *pdata)
+{
+ struct mdss_panel_info *pinfo;
+ int ret = 0;
+
+ pr_debug("%s:\n", __func__);
+
+ pinfo = &pdata->panel_info;
+ ret = msm_dsi_regulator_enable();
+ if (ret) {
+ pr_err("%s: DSI power on failed\n", __func__);
+ return ret;
+ }
+
+ msm_dsi_ahb_ctrl(1);
+ msm_dsi_prepare_clocks();
+ msm_dsi_clk_enable();
+ return 0;
+}
+
static int __devinit msm_dsi_probe(struct platform_device *pdev)
{
struct dsi_interface intf;
@@ -925,6 +945,7 @@
dsi_host_private->dis_dev = pdev->dev;
intf.on = msm_dsi_on;
intf.off = msm_dsi_off;
+ intf.cont_on = msm_dsi_cont_on;
intf.op_mode_config = msm_dsi_op_mode_config;
intf.tx = msm_dsi_cmds_tx;
intf.rx = msm_dsi_cmds_rx;
diff --git a/drivers/video/msm/mdss/dsi_panel_v2.c b/drivers/video/msm/mdss/dsi_panel_v2.c
index 5c164e4..022d911 100644
--- a/drivers/video/msm/mdss/dsi_panel_v2.c
+++ b/drivers/video/msm/mdss/dsi_panel_v2.c
@@ -163,7 +163,20 @@
pr_debug("%s: enable = %d\n", __func__, enable);
- if (enable) {
+ if (enable == 2) {
+ dsi_panel_power(1);
+ gpio_request(panel_private->rst_gpio, "panel_reset");
+ if (gpio_is_valid(panel_private->disp_en_gpio)) {
+ gpio_request(panel_private->disp_en_gpio,
+ "panel_enable");
+ }
+ if (gpio_is_valid(panel_private->video_mode_gpio)) {
+ gpio_request(panel_private->video_mode_gpio,
+ "panel_video_mdoe");
+ }
+ if (gpio_is_valid(panel_private->te_gpio))
+ gpio_request(panel_private->te_gpio, "panel_te");
+ } else if (enable == 1) {
dsi_panel_power(1);
gpio_request(panel_private->rst_gpio, "panel_reset");
gpio_set_value(panel_private->rst_gpio, 1);
diff --git a/drivers/video/msm/mdss/dsi_v2.c b/drivers/video/msm/mdss/dsi_v2.c
index 1a9059c..686ec01 100644
--- a/drivers/video/msm/mdss/dsi_v2.c
+++ b/drivers/video/msm/mdss/dsi_v2.c
@@ -82,6 +82,25 @@
return rc;
}
+static int dsi_splash_on(struct mdss_panel_data *pdata)
+{
+ int rc = 0;
+
+ pr_debug("%s:\n", __func__);
+
+ if (panel_common_data->reset)
+ panel_common_data->reset(pdata, 2);
+
+ if (dsi_intf.cont_on)
+ rc = dsi_intf.cont_on(pdata);
+
+ if (rc) {
+ pr_err("mdss_dsi_on DSI failed %d\n", rc);
+ return rc;
+ }
+ return rc;
+}
+
static int dsi_event_handler(struct mdss_panel_data *pdata,
int event, void *arg)
{
@@ -105,6 +124,9 @@
case MDSS_EVENT_PANEL_OFF:
rc = dsi_panel_handler(pdata, 0);
break;
+ case MDSS_EVENT_CONT_SPLASH_BEGIN:
+ rc = dsi_splash_on(pdata);
+ break;
default:
pr_debug("%s: unhandled event=%d\n", __func__, event);
break;
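
dsi_splash_on() only dispatches to the new cont_on hook when the host driver filled it in, so hosts without continuous-splash support keep working unchanged. The guarded optional-callback shape, reduced to essentials (struct, enum, and event names are placeholders):

#include <linux/errno.h>

struct panel_ops {
        int (*on)(void *pdata);
        int (*cont_on)(void *pdata);    /* optional: may be NULL */
};

enum panel_event { EVENT_PANEL_ON, EVENT_SPLASH_BEGIN };

static int splash_begin(struct panel_ops *ops, void *pdata)
{
        if (ops->cont_on)               /* dispatch only when implemented */
                return ops->cont_on(pdata);
        return 0;
}

static int panel_event_handler(struct panel_ops *ops, void *pdata,
                               enum panel_event event)
{
        switch (event) {
        case EVENT_PANEL_ON:
                return ops->on(pdata);
        case EVENT_SPLASH_BEGIN:
                return splash_begin(ops, pdata);
        }
        return -EINVAL;
}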
diff --git a/drivers/video/msm/mdss/dsi_v2.h b/drivers/video/msm/mdss/dsi_v2.h
index 54b772b..96dd390 100644
--- a/drivers/video/msm/mdss/dsi_v2.h
+++ b/drivers/video/msm/mdss/dsi_v2.h
@@ -198,6 +198,7 @@
struct dsi_interface {
int (*on)(struct mdss_panel_data *pdata);
int (*off)(struct mdss_panel_data *pdata);
+ int (*cont_on)(struct mdss_panel_data *pdata);
void (*op_mode_config)(int mode, struct mdss_panel_data *pdata);
int (*tx)(struct mdss_panel_data *pdata,
struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt);
diff --git a/drivers/video/msm/mdss/mdp3.c b/drivers/video/msm/mdss/mdp3.c
index 66418db..f6f722e 100644
--- a/drivers/video/msm/mdss/mdp3.c
+++ b/drivers/video/msm/mdss/mdp3.c
@@ -959,6 +959,21 @@
return rc;
}
+int mdp3_iommu_is_attached(int client)
+{
+ struct mdp3_iommu_ctx_map *context_map;
+ int context = MDP3_IOMMU_CTX_DMA_0;
+
+ if (!mdp3_res->iommu_contexts)
+ return 0;
+
+ if (client == MDP3_CLIENT_PPP)
+ context = MDP3_IOMMU_CTX_PPP_0;
+
+ context_map = mdp3_res->iommu_contexts + context;
+ return context_map->attached;
+}
+
static int mdp3_init(struct msm_fb_data_type *mfd)
{
int rc;
@@ -982,6 +997,66 @@
return xres * bpp;
}
+void mdp3_fbmem_clear(void)
+{
+ if (mdp3_res->ion_handle && mdp3_res->virt) {
+ pr_debug("mdp3_fbmem_clear\n");
+ memset(mdp3_res->virt, 0, mdp3_res->size);
+ }
+}
+
+static int mdp3_alloc(size_t size, void **virt, unsigned long *phys)
+{
+ int ret = 0;
+
+ if (mdp3_res->ion_handle) {
+ pr_debug("memory already alloc\n");
+ *virt = mdp3_res->virt;
+ *phys = mdp3_res->phys;
+ return 0;
+ }
+
+ mdp3_res->ion_handle = ion_alloc(mdp3_res->ion_client, size,
+ SZ_1M,
+ ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+
+ if (!IS_ERR_OR_NULL(mdp3_res->ion_handle)) {
+ *virt = ion_map_kernel(mdp3_res->ion_client,
+ mdp3_res->ion_handle);
+ if (IS_ERR(*virt)) {
+ pr_err("map kernel error\n");
+ goto ion_map_kernel_err;
+ }
+
+ ret = ion_phys(mdp3_res->ion_client, mdp3_res->ion_handle,
+ phys, &size);
+ if (ret) {
+ pr_err("%s ion_phys error\n", __func__);
+ goto ion_map_phys_err;
+ }
+
+ mdp3_res->virt = *virt;
+ mdp3_res->phys = *phys;
+ mdp3_res->size = size;
+ } else {
+ pr_err("%s ion alloc fail\n", __func__);
+ mdp3_res->ion_handle = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+
+ion_map_phys_err:
+ ion_unmap_kernel(mdp3_res->ion_client, mdp3_res->ion_handle);
+ion_map_kernel_err:
+ ion_free(mdp3_res->ion_client, mdp3_res->ion_handle);
+ mdp3_res->ion_handle = NULL;
+ mdp3_res->virt = NULL;
+ mdp3_res->phys = 0;
+ mdp3_res->size = 0;
+ return -ENOMEM;
+}
+
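
mdp3_alloc() above unwinds a partial setup with goto labels in reverse order of acquisition (unmap the kernel mapping, then free the ION handle) and clears the cached handle/virt/phys fields on every failure path. The same staged-cleanup idiom with generic resources; sizes, names, and the hw-init stage are made up:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>

struct my_buf {
        void *cpu;                      /* kmalloc'd shadow buffer */
        void __iomem *regs;             /* mapped register window */
};

static int my_buf_hw_init(struct my_buf *b)
{
        return 0;                       /* stub final stage */
}

static int my_buf_setup(struct my_buf *b, size_t size,
                        phys_addr_t reg_base, size_t reg_len)
{
        int rc;

        b->cpu = kzalloc(size, GFP_KERNEL);
        if (!b->cpu)
                return -ENOMEM;

        b->regs = ioremap(reg_base, reg_len);
        if (!b->regs) {
                rc = -ENOMEM;
                goto err_free;
        }

        rc = my_buf_hw_init(b);
        if (rc)
                goto err_unmap;

        return 0;

err_unmap:                              /* undo in reverse order */
        iounmap(b->regs);
        b->regs = NULL;
err_free:
        kfree(b->cpu);
        b->cpu = NULL;
        return rc;
}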
static int mdp3_fbmem_alloc(struct msm_fb_data_type *mfd)
{
int ret = -ENOMEM, dom;
@@ -999,28 +1074,10 @@
return 0;
}
- mdp3_res->ion_handle = ion_alloc(mdp3_res->ion_client, size,
- SZ_1M,
- ION_HEAP(ION_QSECOM_HEAP_ID), 0);
-
- if (!IS_ERR_OR_NULL(mdp3_res->ion_handle)) {
- virt = ion_map_kernel(mdp3_res->ion_client,
- mdp3_res->ion_handle);
- if (IS_ERR(virt)) {
- pr_err("%s map kernel error\n", __func__);
- goto ion_map_kernel_err;
- }
-
- ret = ion_phys(mdp3_res->ion_client, mdp3_res->ion_handle,
- &phys, &size);
- if (ret) {
- pr_err("%s ion_phys error\n", __func__);
- goto ion_map_phys_err;
- }
- } else {
- pr_err("%s ion alloc fail\n", __func__);
- mdp3_res->ion_handle = NULL;
- return -ENOMEM;
+ ret = mdp3_alloc(size, &virt, &phys);
+ if (ret) {
+ pr_err("fail to allocate fb memory\n");
+ return ret;
}
dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN)->domain_idx;
@@ -1031,10 +1088,10 @@
if (ret) {
pr_err("%s map IOMMU error\n", __func__);
- goto ion_map_phys_err;
+ goto ion_map_iommu_err;
}
- pr_info("allocating %u bytes at %p (%lx phys) for fb %d\n",
+ pr_debug("allocating %u bytes at %p (%lx phys) for fb %d\n",
size, virt, phys, mfd->index);
mfd->fbi->screen_base = virt;
@@ -1042,17 +1099,19 @@
mfd->fbi->fix.smem_len = size;
return 0;
-ion_map_phys_err:
+ion_map_iommu_err:
ion_unmap_kernel(mdp3_res->ion_client, mdp3_res->ion_handle);
-ion_map_kernel_err:
ion_free(mdp3_res->ion_client, mdp3_res->ion_handle);
mdp3_res->ion_handle = NULL;
+ mdp3_res->virt = NULL;
+ mdp3_res->phys = 0;
+ mdp3_res->size = 0;
return -ENOMEM;
}
void mdp3_fbmem_free(struct msm_fb_data_type *mfd)
{
- pr_info("mdp3_fbmem_free\n");
+ pr_debug("mdp3_fbmem_free\n");
if (mdp3_res->ion_handle) {
int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN)->domain_idx;
@@ -1061,6 +1120,9 @@
dom, 0);
ion_free(mdp3_res->ion_client, mdp3_res->ion_handle);
mdp3_res->ion_handle = NULL;
+ mdp3_res->virt = NULL;
+ mdp3_res->phys = 0;
+ mdp3_res->size = 0;
mfd->fbi->screen_base = 0;
mfd->fbi->fix.smem_start = 0;
mfd->fbi->fix.smem_len = 0;
@@ -1103,6 +1165,130 @@
return mdp3_res->domains[MDP3_IOMMU_DOMAIN].domain_idx;
}
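+/*
+ * Read back the DMA_P size, stride and input address programmed by the
+ * bootloader, copy that splash frame into a kernel-owned ION buffer, and
+ * repoint DMA_P at the new buffer so the bootloader memory can be reused.
+ */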
+int mdp3_continuous_splash_copy(struct mdss_panel_data *pdata)
+{
+ unsigned long splash_phys, phys;
+ void *splash_virt, *virt;
+ u32 height, width, rgb_size, stride;
+ size_t size;
+ int rc;
+
+ rgb_size = MDP3_REG_READ(MDP3_REG_DMA_P_SIZE);
+ stride = MDP3_REG_READ(MDP3_REG_DMA_P_IBUF_Y_STRIDE);
+ stride = stride & 0x3FFF;
+ splash_phys = MDP3_REG_READ(MDP3_REG_DMA_P_IBUF_ADDR);
+
+ height = (rgb_size >> 16) & 0xffff;
+ width = rgb_size & 0xffff;
+ size = PAGE_ALIGN(height * stride * 2);
+	pr_debug("splash_height=%d splash_width=%d Buffer size=%zu\n",
+ height, width, size);
+
+ rc = mdp3_alloc(size, &virt, &phys);
+ if (rc) {
+ pr_err("fail to allocate memory for continuous splash image\n");
+ return rc;
+ }
+
+	splash_virt = ioremap(splash_phys, stride * height);
+	if (!splash_virt) {
+		pr_err("fail to map bootloader splash buffer\n");
+		return -ENOMEM;
+	}
+	memcpy(virt, splash_virt, stride * height);
+	iounmap(splash_virt);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, phys);
+
+ return 0;
+}
+
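+/*
+ * Check the hardware state (DSI video engine enable or DMA_P start bit)
+ * to confirm that the bootloader really left the display running.
+ */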
+static int mdp3_is_display_on(struct mdss_panel_data *pdata)
+{
+ int rc = 0;
+ u32 status;
+
+ mdp3_clk_update(MDP3_CLK_AHB, 1);
+ mdp3_clk_update(MDP3_CLK_CORE, 1);
+
+ if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+ status = MDP3_REG_READ(MDP3_REG_DSI_VIDEO_EN);
+ rc = status & 0x1;
+ } else {
+ status = MDP3_REG_READ(MDP3_REG_DMA_P_START);
+		rc = status & 0x1;
+ }
+
+ mdp3_clk_update(MDP3_CLK_AHB, 0);
+ mdp3_clk_update(MDP3_CLK_CORE, 0);
+ return rc;
+}
+
+static int mdp3_continuous_splash_on(struct mdss_panel_data *pdata)
+{
+ struct mdss_panel_info *panel_info = &pdata->panel_info;
+ int ab, ib, rc;
+
+	pr_debug("mdp3_continuous_splash_on\n");
+
+ rc = mdp3_clk_enable(1);
+ if (rc) {
+ pr_err("fail to enable clk\n");
+ return rc;
+ }
+
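+	/*
+	 * Vote enough bus bandwidth to keep the splash frame refreshing:
+	 * ab = bytes per frame (xres * yres * 4 Bpp) * frame rate,
+	 * ib = 1.5 * ab for the instantaneous requirement.
+	 */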
+ ab = panel_info->xres * panel_info->yres * 4;
+ ab *= panel_info->mipi.frame_rate;
+ ib = (ab * 3) / 2;
+ rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
+ if (rc) {
+ pr_err("fail to request bus bandwidth\n");
+ goto splash_on_err;
+ }
+
+ rc = mdp3_ppp_init();
+ if (rc) {
+ pr_err("ppp init failed\n");
+ goto splash_on_err;
+ }
+
+ rc = mdp3_continuous_splash_copy(pdata);
+ if (rc) {
+ pr_err("fail to copy continuous splash image\n");
+ goto splash_on_err;
+ }
+
+ mdp3_irq_register();
+
+ if (pdata->event_handler) {
+ rc = pdata->event_handler(pdata, MDSS_EVENT_CONT_SPLASH_BEGIN,
+ NULL);
+ if (rc) {
+ pr_err("MDSS_EVENT_CONT_SPLASH_BEGIN event fail\n");
+ goto splash_on_err;
+ }
+ }
+
+ if (panel_info->type == MIPI_VIDEO_PANEL)
+ mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_VIDEO].active = 1;
+ else
+ mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_CMD].active = 1;
+ return 0;
+
+splash_on_err:
+ mdp3_clk_enable(0);
+ return rc;
+}
+
+static int mdp3_panel_register_done(struct mdss_panel_data *pdata)
+{
+ int rc = 0;
+
+ if (pdata->panel_info.cont_splash_enabled) {
+ if (!mdp3_is_display_on(pdata)) {
+			pr_err("continuous splash enabled, but display is not on\n");
+ return 0;
+ }
+ rc = mdp3_continuous_splash_on(pdata);
+ }
+ return rc;
+}
+
static int mdp3_probe(struct platform_device *pdev)
{
int rc;
@@ -1110,6 +1296,7 @@
.init_fnc = mdp3_init,
.fb_mem_get_iommu_domain = mdp3_fb_mem_get_iommu_domain,
.fb_mem_alloc_fnc = mdp3_fbmem_alloc,
+ .panel_register_done = mdp3_panel_register_done,
.fb_stride = mdp3_fb_stride,
};
diff --git a/drivers/video/msm/mdss/mdp3.h b/drivers/video/msm/mdss/mdp3.h
index d29e5b6..03416c7 100644
--- a/drivers/video/msm/mdss/mdp3.h
+++ b/drivers/video/msm/mdss/mdp3.h
@@ -111,6 +111,9 @@
struct mdp3_iommu_domain_map *domains;
struct mdp3_iommu_ctx_map *iommu_contexts;
struct ion_handle *ion_handle;
+ void *virt;
+ unsigned long phys;
+ size_t size;
struct mdp3_dma dma[MDP3_DMA_MAX];
struct mdp3_intf intf[MDP3_DMA_OUTPUT_SEL_MAX];
@@ -151,7 +154,10 @@
int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data);
int mdp3_iommu_enable(int client);
int mdp3_iommu_disable(int client);
+int mdp3_iommu_is_attached(int client);
void mdp3_fbmem_free(struct msm_fb_data_type *mfd);
+void mdp3_fbmem_clear(void);
+
#define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr)
#define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr)
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index 7edf3d2..f77a2b3 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -351,7 +351,11 @@
cfg.dsi_cmd.dsi_cmd_tg_intf_sel = 0;
} else
return -EINVAL;
- rc = mdp3_intf_init(intf, &cfg);
+
+ if (intf->config)
+ rc = intf->config(intf, &cfg);
+ else
+ rc = -EINVAL;
return rc;
}
@@ -390,7 +394,10 @@
(MDP3_DMA_OUTPUT_COMP_BITS_8 << 2)|
MDP3_DMA_OUTPUT_COMP_BITS_8;
- rc = mdp3_dma_init(dma, &sourceConfig, &outputConfig);
+ if (dma->dma_config)
+ rc = dma->dma_config(dma, &sourceConfig, &outputConfig);
+ else
+ rc = -EINVAL;
return rc;
}
@@ -413,6 +420,11 @@
goto on_error;
}
+ if (mdp3_session->intf->active) {
+		pr_debug("continuous splash screen, already initialized\n");
+ goto on_error;
+ }
+
rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
if (rc) {
pr_err("fail to attach MDP DMA SMMU\n");
@@ -461,6 +473,8 @@
goto on_error;
}
+ mdp3_fbmem_clear();
+
if (panel->set_backlight)
panel->set_backlight(panel, panel->panel_info.bl_max);
@@ -671,6 +685,12 @@
return -EPERM;
}
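+	/*
+	 * First update after a continuous splash boot: the pipe is still
+	 * running on bootloader mappings, so cycle it off/on to attach the
+	 * IOMMU before queueing buffers.
+	 */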
+ if (!mdp3_iommu_is_attached(MDP3_CLIENT_DMA_P)) {
+ pr_debug("continuous splash screen, IOMMU not attached\n");
+ mdp3_ctrl_off(mfd);
+ mdp3_ctrl_on(mfd);
+ }
+
mutex_lock(&mdp3_session->lock);
data = mdp3_bufq_pop(&mdp3_session->bufq_in);
@@ -715,6 +735,12 @@
return;
}
+ if (!mdp3_iommu_is_attached(MDP3_CLIENT_DMA_P)) {
+ pr_debug("continuous splash screen, IOMMU not attached\n");
+ mdp3_ctrl_off(mfd);
+ mdp3_ctrl_on(mfd);
+ }
+
mutex_lock(&mdp3_session->lock);
fbi = mfd->fbi;
@@ -1217,12 +1243,23 @@
goto init_done;
}
+ rc = mdp3_dma_init(mdp3_session->dma);
+ if (rc) {
+ pr_err("fail to init dma\n");
+ goto init_done;
+ }
+
intf_type = mdp3_ctrl_get_intf_type(mfd);
mdp3_session->intf = mdp3_get_display_intf(intf_type);
if (!mdp3_session->intf) {
rc = -ENODEV;
goto init_done;
}
+ rc = mdp3_intf_init(mdp3_session->intf);
+ if (rc) {
+ pr_err("fail to init interface\n");
+ goto init_done;
+ }
mdp3_session->mfd = mfd;
mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev);
diff --git a/drivers/video/msm/mdss/mdp3_dma.c b/drivers/video/msm/mdss/mdp3_dma.c
index 2e9c787..88eedb9 100644
--- a/drivers/video/msm/mdss/mdp3_dma.c
+++ b/drivers/video/msm/mdss/mdp3_dma.c
@@ -829,19 +829,14 @@
return ret;
}
-int mdp3_dma_init(struct mdp3_dma *dma,
- struct mdp3_dma_source *source_config,
- struct mdp3_dma_output_config *output_config)
+int mdp3_dma_init(struct mdp3_dma *dma)
{
int ret = 0;
pr_debug("mdp3_dma_init\n");
switch (dma->dma_sel) {
case MDP3_DMA_P:
- ret = mdp3_dmap_config(dma, source_config, output_config);
- if (ret < 0)
- return ret;
-
+ dma->dma_config = mdp3_dmap_config;
dma->config_cursor = mdp3_dmap_cursor_config;
dma->config_ccs = mdp3_dmap_ccs_config;
dma->config_histo = mdp3_dmap_histo_config;
@@ -855,10 +850,7 @@
dma->stop = mdp3_dma_stop;
break;
case MDP3_DMA_S:
- ret = mdp3_dmas_config(dma, source_config, output_config);
- if (ret < 0)
- return ret;
-
+ dma->dma_config = mdp3_dmas_config;
dma->config_cursor = NULL;
dma->config_ccs = NULL;
dma->config_histo = NULL;
@@ -1029,10 +1021,9 @@
return 0;
}
-int mdp3_intf_init(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+int mdp3_intf_init(struct mdp3_intf *intf)
{
- int ret = 0;
- switch (cfg->type) {
+ switch (intf->cfg.type) {
case MDP3_DMA_OUTPUT_SEL_LCDC:
intf->config = lcdc_config;
intf->start = lcdc_start;
@@ -1052,16 +1043,5 @@
default:
return -EINVAL;
}
-
- intf->active = false;
- if (intf->config)
- ret = intf->config(intf, cfg);
-
- if (ret) {
- pr_err("MDP interface initialization failed\n");
- return ret;
- }
-
- intf->cfg = *cfg;
return 0;
}
diff --git a/drivers/video/msm/mdss/mdp3_dma.h b/drivers/video/msm/mdss/mdp3_dma.h
index c652818..e4a28dc 100644
--- a/drivers/video/msm/mdss/mdp3_dma.h
+++ b/drivers/video/msm/mdss/mdp3_dma.h
@@ -250,6 +250,10 @@
int histo_state;
struct mdp3_dma_histogram_data histo_data;
+ int (*dma_config)(struct mdp3_dma *dma,
+ struct mdp3_dma_source *source_config,
+ struct mdp3_dma_output_config *output_config);
+
int (*start)(struct mdp3_dma *dma, struct mdp3_intf *intf);
int (*stop)(struct mdp3_dma *dma, struct mdp3_intf *intf);
@@ -323,11 +327,9 @@
int (*stop)(struct mdp3_intf *intf);
};
-int mdp3_dma_init(struct mdp3_dma *dma,
- struct mdp3_dma_source *source_config,
- struct mdp3_dma_output_config *output_config);
+int mdp3_dma_init(struct mdp3_dma *dma);
-int mdp3_intf_init(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg);
+int mdp3_intf_init(struct mdp3_intf *intf);
void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type);
diff --git a/drivers/video/msm/mdss/mdp3_ppp.c b/drivers/video/msm/mdss/mdp3_ppp.c
index afb2eb4..924ec5a 100644
--- a/drivers/video/msm/mdss/mdp3_ppp.c
+++ b/drivers/video/msm/mdss/mdp3_ppp.c
@@ -23,6 +23,7 @@
#include <linux/sync.h>
#include <linux/sw_sync.h>
#include "linux/proc_fs.h"
+#include <linux/delay.h>
#include "mdss_fb.h"
#include "mdp3_ppp.h"
@@ -30,6 +31,7 @@
#include "mdp3.h"
#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT)
+#define MDP_RELEASE_BW_TIMEOUT 50
#define MDP_BLIT_CLK_RATE 200000000
#define MDP_PPP_MAX_BPP 4
#define MDP_PPP_DYNAMIC_FACTOR 3
@@ -48,6 +50,7 @@
[MDP_Y_CRCB_H2V2] = true,
[MDP_Y_CBCR_H2V2] = true,
[MDP_Y_CBCR_H2V2_ADRENO] = true,
+ [MDP_Y_CBCR_H2V2_VENUS] = true,
[MDP_YCRYCB_H2V1] = true,
[MDP_Y_CBCR_H2V1] = true,
[MDP_Y_CRCB_H2V1] = true,
@@ -92,6 +95,9 @@
struct sw_sync_timeline *timeline;
int timeline_value;
+ struct timer_list free_bw_timer;
+ struct work_struct free_bw_work;
+ bool bw_on;
};
static struct ppp_status *ppp_stat;
@@ -353,6 +359,7 @@
mdp3_clk_set_rate(MDP3_CLK_CORE, rate, MDP3_CLIENT_PPP);
mdp3_clk_enable(on_off);
mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP, ab, ib);
+ ppp_stat->bw_on = on_off;
return 0;
}
@@ -403,6 +410,11 @@
(void *) ((uint32_t) blit_op->src.p0 +
ALIGN((ALIGN(req->src.width, 32) *
ALIGN(req->src.height, 32)), 4096));
+ else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS)
+ blit_op->src.p1 =
+ (void *) ((uint32_t) blit_op->src.p0 +
+ ALIGN((ALIGN(req->src.width, 128) *
+ ALIGN(req->src.height, 32)), 4096));
else
blit_op->src.p1 = (void *) ((uint32_t) blit_op->src.p0 +
req->src.width * req->src.height);
@@ -754,9 +766,6 @@
src_data, dst_data);
else
ret = mdp3_ppp_blit(mfd, req, src_data, dst_data);
-
- mdp3_put_img(src_data);
- mdp3_put_img(dst_data);
return ret;
}
@@ -892,28 +901,52 @@
req_q->pop_idx = (req_q->pop_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
}
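+/*
+ * Deferred bandwidth release: instead of dropping clocks, the bus vote and
+ * the PPP IOMMU attachment after every blit, a 50 ms timer schedules the
+ * release from a workqueue, so back-to-back blits skip the setup cost.
+ */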
+static void mdp3_free_bw_timer_func(unsigned long arg)
+{
+ schedule_work(&ppp_stat->free_bw_work);
+}
+
+static void mdp3_free_bw_wq_handler(struct work_struct *work)
+{
+ struct msm_fb_data_type *mfd = ppp_stat->mfd;
+ mutex_lock(&ppp_stat->config_ppp_mutex);
+ if (ppp_stat->bw_on) {
+ mdp3_ppp_turnon(mfd, 0);
+ mdp3_iommu_disable(MDP3_CLIENT_PPP);
+ }
+ mutex_unlock(&ppp_stat->config_ppp_mutex);
+}
+
static void mdp3_ppp_blit_wq_handler(struct work_struct *work)
{
struct msm_fb_data_type *mfd = ppp_stat->mfd;
struct blit_req_list *req;
- int i, rc;
+ int i, rc = 0;
mutex_lock(&ppp_stat->config_ppp_mutex);
req = mdp3_ppp_next_req(&ppp_stat->req_q);
+ if (!req) {
+ mutex_unlock(&ppp_stat->config_ppp_mutex);
+ return;
+ }
- mdp3_iommu_enable(MDP3_CLIENT_PPP);
- mdp3_ppp_turnon(mfd, 1);
+ if (!ppp_stat->bw_on) {
+ mdp3_iommu_enable(MDP3_CLIENT_PPP);
+ mdp3_ppp_turnon(mfd, 1);
+ }
while (req) {
mdp3_ppp_wait_for_fence(req);
for (i = 0; i < req->count; i++) {
if (!(req->req_list[i].flags & MDP_NO_BLIT)) {
/* Do the actual blit. */
- rc = mdp3_ppp_start_blit(mfd,
+ if (!rc) {
+ rc = mdp3_ppp_start_blit(mfd,
&(req->req_list[i]),
&req->src_data[i],
&req->dst_data[i]);
- if (rc)
- break;
+ }
+ mdp3_put_img(&req->src_data[i]);
+ mdp3_put_img(&req->dst_data[i]);
}
}
/* Signal to release fence */
@@ -925,8 +958,8 @@
complete(&ppp_stat->pop_q_comp);
mutex_unlock(&ppp_stat->req_mutex);
}
- mdp3_ppp_turnon(mfd, 0);
- mdp3_iommu_disable(MDP3_CLIENT_PPP);
+ mod_timer(&ppp_stat->free_bw_timer, jiffies +
+ msecs_to_jiffies(MDP_RELEASE_BW_TIMEOUT));
mutex_unlock(&ppp_stat->config_ppp_mutex);
}
@@ -995,14 +1028,14 @@
if (req->cur_rel_fen_fd < 0) {
pr_err("%s: get_unused_fd_flags failed\n", __func__);
rc = -ENOMEM;
- goto parse_err_2;
+ goto parse_err_1;
}
sync_fence_install(req->cur_rel_fence, req->cur_rel_fen_fd);
rc = copy_to_user(req_list_header->sync.rel_fen_fd,
&req->cur_rel_fen_fd, sizeof(int));
if (rc) {
pr_err("%s:copy_to_user failed\n", __func__);
- goto parse_err_3;
+ goto parse_err_2;
}
} else {
fence = req->cur_rel_fence;
@@ -1023,12 +1056,8 @@
}
return 0;
-parse_err_3:
- put_unused_fd(req->cur_rel_fen_fd);
parse_err_2:
- sync_fence_put(req->cur_rel_fence);
- req->cur_rel_fence = NULL;
- req->cur_rel_fen_fd = 0;
+ put_unused_fd(req->cur_rel_fen_fd);
parse_err_1:
for (i--; i >= 0; i--) {
mdp3_put_img(&req->src_data[i]);
@@ -1058,10 +1087,14 @@
}
INIT_WORK(&ppp_stat->blit_work, mdp3_ppp_blit_wq_handler);
+ INIT_WORK(&ppp_stat->free_bw_work, mdp3_free_bw_wq_handler);
init_completion(&ppp_stat->pop_q_comp);
spin_lock_init(&ppp_stat->ppp_lock);
mutex_init(&ppp_stat->req_mutex);
mutex_init(&ppp_stat->config_ppp_mutex);
+ init_timer(&ppp_stat->free_bw_timer);
+	ppp_stat->free_bw_timer.function = mdp3_free_bw_timer_func;
+ ppp_stat->free_bw_timer.data = 0;
ppp_stat->busy = false;
ppp_stat->mfd = mfd;
mdp3_ppp_callback_setup();
diff --git a/drivers/video/msm/mdss/mdp3_ppp_data.c b/drivers/video/msm/mdss/mdp3_ppp_data.c
index d68faad..e1c0f27 100644
--- a/drivers/video/msm/mdss/mdp3_ppp_data.c
+++ b/drivers/video/msm/mdss/mdp3_ppp_data.c
@@ -30,6 +30,7 @@
[MDP_Y_CRCB_H2V2] = MDP_Y_CBCR_H2V2_SRC_REG,
[MDP_Y_CBCR_H2V2] = MDP_Y_CBCR_H2V2_SRC_REG,
[MDP_Y_CBCR_H2V2_ADRENO] = MDP_Y_CBCR_H2V2_SRC_REG,
+ [MDP_Y_CBCR_H2V2_VENUS] = MDP_Y_CBCR_H2V2_SRC_REG,
[MDP_YCRYCB_H2V1] = MDP_YCRYCB_H2V1_SRC_REG,
[MDP_Y_CBCR_H2V1] = MDP_Y_CRCB_H2V1_SRC_REG,
[MDP_Y_CRCB_H2V1] = MDP_Y_CRCB_H2V1_SRC_REG,
@@ -48,6 +49,7 @@
[MDP_Y_CRCB_H2V2] = MDP_Y_CBCR_H2V2_DST_REG,
[MDP_Y_CBCR_H2V2] = MDP_Y_CBCR_H2V2_DST_REG,
[MDP_Y_CBCR_H2V2_ADRENO] = MDP_Y_CBCR_H2V2_DST_REG,
+ [MDP_Y_CBCR_H2V2_VENUS] = MDP_Y_CBCR_H2V2_DST_REG,
[MDP_YCRYCB_H2V1] = MDP_YCRYCB_H2V1_DST_REG,
[MDP_Y_CBCR_H2V1] = MDP_Y_CRCB_H2V1_DST_REG,
[MDP_Y_CRCB_H2V1] = MDP_Y_CRCB_H2V1_DST_REG,
@@ -72,6 +74,8 @@
[MDP_Y_CBCR_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
[MDP_Y_CBCR_H2V2_ADRENO] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB,
CLR_CR, 8),
+ [MDP_Y_CBCR_H2V2_VENUS] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB,
+ CLR_CR, 8),
[MDP_YCRYCB_H2V1] = PPP_GET_PACK_PATTERN(CLR_Y,
CLR_CR, CLR_Y, CLR_CB, 8),
[MDP_Y_CBCR_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
@@ -91,6 +95,8 @@
[MDP_Y_CBCR_H2V2] = PPP_OP_SRC_CHROMA_420 | PPP_OP_COLOR_SPACE_YCBCR,
[MDP_Y_CBCR_H2V2_ADRENO] = PPP_OP_SRC_CHROMA_420 |
PPP_OP_COLOR_SPACE_YCBCR,
+ [MDP_Y_CBCR_H2V2_VENUS] = PPP_OP_SRC_CHROMA_420 |
+ PPP_OP_COLOR_SPACE_YCBCR,
[MDP_Y_CBCR_H2V1] = PPP_OP_SRC_CHROMA_H2V1,
[MDP_Y_CRCB_H2V1] = PPP_OP_SRC_CHROMA_H2V1,
[MDP_YCRYCB_H2V1] = PPP_OP_SRC_CHROMA_H2V1,
@@ -109,6 +115,7 @@
[MDP_Y_CBCR_H2V1] = 1,
[MDP_Y_CBCR_H2V2] = 1,
[MDP_Y_CBCR_H2V2_ADRENO] = 1,
+ [MDP_Y_CBCR_H2V2_VENUS] = 1,
[MDP_Y_CRCB_H2V1] = 1,
[MDP_Y_CRCB_H2V2] = 1,
[MDP_YCRYCB_H2V1] = 2,
diff --git a/drivers/video/msm/mdss/mdp3_ppp_hwio.c b/drivers/video/msm/mdss/mdp3_ppp_hwio.c
index 8dd3d55..199387f 100644
--- a/drivers/video/msm/mdss/mdp3_ppp_hwio.c
+++ b/drivers/video/msm/mdss/mdp3_ppp_hwio.c
@@ -430,6 +430,8 @@
if (img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO && layer == 0)
img->p0 += (x + y * ALIGN(width, 32)) * bpp;
+ else if (img->color_fmt == MDP_Y_CBCR_H2V2_VENUS && layer == 0)
+ img->p0 += (x + y * ALIGN(width, 128)) * bpp;
else
img->p0 += (x + y * width) * bpp;
if (layer == 1)
@@ -442,7 +444,8 @@
* MDP_Y_CBCR_H2V2/MDP_Y_CRCB_H2V2 cosite for now
* we need to shift x direction same as y dir for offsite
*/
- if (img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO
+ if ((img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO ||
+ img->color_fmt == MDP_Y_CBCR_H2V2_VENUS)
&& layer == 0)
img->p1 += ((x / h_slice) * h_slice + ((y == 0) ? 0 :
(((y + 1) / v_slice - 1) * (ALIGN(width/2, 32) * 2))))
@@ -740,6 +743,7 @@
case MDP_Y_CBCR_H2V2:
case MDP_Y_CBCR_H2V2_ADRENO:
+ case MDP_Y_CBCR_H2V2_VENUS:
case MDP_Y_CRCB_H2V2:
er->chroma_interp_point_left = er->luma_interp_point_left >> 1;
er->chroma_interp_point_right =
@@ -778,6 +782,7 @@
break;
case MDP_Y_CBCR_H2V2:
case MDP_Y_CBCR_H2V2_ADRENO:
+ case MDP_Y_CBCR_H2V2_VENUS:
case MDP_Y_CRCB_H2V2:
/*
* cosite in horizontal dir, and offsite in vertical dir
@@ -1168,6 +1173,7 @@
switch (blit_op->src.color_fmt) {
case MDP_Y_CBCR_H2V2:
case MDP_Y_CBCR_H2V2_ADRENO:
+ case MDP_Y_CBCR_H2V2_VENUS:
case MDP_Y_CRCB_H2V2:
sh_slice = sv_slice = 2;
break;
@@ -1195,6 +1201,10 @@
blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 32) *
ppp_bpp(blit_op->src.color_fmt);
blit_op->src.stride1 = 2 * ALIGN(blit_op->src.prop.width/2, 32);
+ } else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS) {
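+		/* Venus NV12: luma stride aligned to 128 bytes, chroma reuses it */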
+ blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 128) *
+ ppp_bpp(blit_op->src.color_fmt);
+ blit_op->src.stride1 = blit_op->src.stride0;
} else {
blit_op->src.stride0 = blit_op->src.prop.width *
ppp_bpp(blit_op->src.color_fmt);
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 3b0cd20..a575d6d 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -477,7 +477,6 @@
}
ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_INIT;
}
- mdss_dsi_op_mode_config(mipi->mode, pdata);
if (pdata->panel_info.type == MIPI_CMD_PANEL) {
if (mipi->vsync_enable && mipi->hw_vsync_mode
@@ -555,9 +554,9 @@
mdss_dsi_sw_reset(pdata);
mdss_dsi_host_init(mipi, pdata);
+ mdss_dsi_op_mode_config(mipi->mode, pdata);
if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE) {
- mdss_dsi_op_mode_config(DSI_CMD_MODE, pdata);
ret = mdss_dsi_unblank(pdata);
if (ret) {
pr_err("%s: unblank failed\n", __func__);
@@ -586,6 +585,8 @@
switch (event) {
case MDSS_EVENT_UNBLANK:
rc = mdss_dsi_on(pdata);
+ mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode,
+ pdata);
if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE)
rc = mdss_dsi_unblank(pdata);
break;
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
index 2603648..a8c34f3 100644
--- a/drivers/video/msm/mdss/mdss_dsi.h
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -394,9 +394,7 @@
void mdss_dsi_cmd_mdp_start(struct mdss_dsi_ctrl_pdata *ctrl);
void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata);
void mdss_dsi_ack_err_status(unsigned char *dsi_base);
-void mdss_dsi_clk_enable(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_clk_disable(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable);
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable);
void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
int enable);
void mdss_dsi_controller_cfg(int enable,
@@ -412,8 +410,6 @@
int mdss_dsi_clk_init(struct platform_device *pdev,
struct mdss_dsi_ctrl_pdata *ctrl_pdata);
void mdss_dsi_clk_deinit(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
-void mdss_dsi_prepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
-void mdss_dsi_unprepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
int mdss_dsi_enable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
void mdss_dsi_disable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
void mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index e682e69..055f233 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -44,6 +44,14 @@
void mdss_dsi_ctrl_init(struct mdss_dsi_ctrl_pdata *ctrl)
{
+	if (ctrl->shared_pdata.broadcast_enable &&
+			ctrl->panel_data.panel_info.pdest == DISPLAY_1) {
+		pr_debug("%s: Broadcast mode enabled.\n", __func__);
+		left_ctrl_pdata = ctrl;
+	}
+
if (ctrl->panel_data.panel_info.pdest == DISPLAY_1) {
mdss_dsi0_hw.ptr = (void *)(ctrl);
ctrl->dsi_hw = &mdss_dsi0_hw;
@@ -72,34 +80,6 @@
mdss_dsi_buf_alloc(&ctrl->rx_buf, SZ_4K);
}
-/*
- * acquire ctrl->mutex first
- */
-void mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
-{
- mutex_lock(&ctrl->mutex);
- if (enable) {
- if (ctrl->clk_cnt == 0) {
- mdss_dsi_enable_bus_clocks(ctrl);
- mdss_dsi_prepare_clocks(ctrl);
- mdss_dsi_clk_enable(ctrl);
- }
- ctrl->clk_cnt++;
- } else {
- if (ctrl->clk_cnt) {
- ctrl->clk_cnt--;
- if (ctrl->clk_cnt == 0) {
- mdss_dsi_clk_disable(ctrl);
- mdss_dsi_unprepare_clocks(ctrl);
- mdss_dsi_disable_bus_clocks(ctrl);
- }
- }
- }
- pr_debug("%s: ctrl ndx=%d enabled=%d clk_cnt=%d\n",
- __func__, ctrl->ndx, enable, ctrl->clk_cnt);
- mutex_unlock(&ctrl->mutex);
-}
-
void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
{
if (enable == 0) {
@@ -861,13 +841,6 @@
else
MIPI_OUTP(ctrl_pdata->ctrl_base + 0x3C, 0x14000000);
- if (ctrl_pdata->shared_pdata.broadcast_enable)
- if (pdata->panel_info.pdest == DISPLAY_1) {
- pr_debug("%s: Broadcast mode enabled.\n",
- __func__);
- left_ctrl_pdata = ctrl_pdata;
- }
-
data = 0;
if (pinfo->te_sel)
data |= BIT(31);
diff --git a/drivers/video/msm/mdss/mdss_edp.c b/drivers/video/msm/mdss/mdss_edp.c
index aea2de0..3e0bc6d 100644
--- a/drivers/video/msm/mdss/mdss_edp.c
+++ b/drivers/video/msm/mdss/mdss_edp.c
@@ -24,14 +24,19 @@
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/pwm.h>
-
+#include <linux/clk.h>
+#include <linux/spinlock_types.h>
+#include <linux/kthread.h>
#include <asm/system.h>
#include <asm/mach-types.h>
-
#include <mach/hardware.h>
#include <mach/dma.h>
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_mdp.h"
#include "mdss_edp.h"
+#include "mdss_debug.h"
#define RGB_COMPONENTS 3
#define VDDA_MIN_UV 1800000 /* uV units */
@@ -39,26 +44,7 @@
#define VDDA_UA_ON_LOAD 100000 /* uA units */
#define VDDA_UA_OFF_LOAD 100 /* uA units */
-static int mdss_edp_get_base_address(struct mdss_edp_drv_pdata *edp_drv);
-static int mdss_edp_get_mmss_cc_base_address(struct mdss_edp_drv_pdata
- *edp_drv);
-static int mdss_edp_regulator_init(struct mdss_edp_drv_pdata *edp_drv);
static int mdss_edp_regulator_on(struct mdss_edp_drv_pdata *edp_drv);
-static int mdss_edp_regulator_off(struct mdss_edp_drv_pdata *edp_drv);
-static int mdss_edp_gpio_panel_en(struct mdss_edp_drv_pdata *edp_drv);
-static int mdss_edp_pwm_config(struct mdss_edp_drv_pdata *edp_drv);
-
-static void mdss_edp_edid2pinfo(struct mdss_edp_drv_pdata *edp_drv);
-static void mdss_edp_fill_edid_data(struct mdss_edp_drv_pdata *edp_drv);
-static void mdss_edp_fill_dpcd_data(struct mdss_edp_drv_pdata *edp_drv);
-
-static int mdss_edp_device_register(struct mdss_edp_drv_pdata *edp_drv);
-
-static void mdss_edp_config_sync(unsigned char *edp_base);
-static void mdss_edp_config_sw_div(unsigned char *edp_base);
-static void mdss_edp_config_static_mdiv(unsigned char *edp_base);
-static void mdss_edp_enable(unsigned char *edp_base, int enable);
-
/*
* Init regulator needed for edp, 8974_l12
*/
@@ -256,79 +242,130 @@
}
}
-void mdss_edp_config_sync(unsigned char *edp_base)
+void mdss_edp_config_sync(unsigned char *base)
{
int ret = 0;
- ret = edp_read(edp_base + 0xc); /* EDP_CONFIGURATION_CTRL */
+ ret = edp_read(base + 0xc); /* EDP_CONFIGURATION_CTRL */
ret &= ~0x733;
ret |= (0x55 & 0x733);
- edp_write(edp_base + 0xc, ret);
- edp_write(edp_base + 0xc, 0x55); /* EDP_CONFIGURATION_CTRL */
+ edp_write(base + 0xc, ret);
+ edp_write(base + 0xc, 0x55); /* EDP_CONFIGURATION_CTRL */
}
-static void mdss_edp_config_sw_div(unsigned char *edp_base)
+static void mdss_edp_config_sw_div(unsigned char *base)
{
- edp_write(edp_base + 0x14, 0x13b); /* EDP_SOFTWARE_MVID */
- edp_write(edp_base + 0x18, 0x266); /* EDP_SOFTWARE_NVID */
+ edp_write(base + 0x14, 0x13b); /* EDP_SOFTWARE_MVID */
+ edp_write(base + 0x18, 0x266); /* EDP_SOFTWARE_NVID */
}
-static void mdss_edp_config_static_mdiv(unsigned char *edp_base)
+static void mdss_edp_config_static_mdiv(unsigned char *base)
{
int ret = 0;
- ret = edp_read(edp_base + 0xc); /* EDP_CONFIGURATION_CTRL */
- edp_write(edp_base + 0xc, ret | 0x2); /* EDP_CONFIGURATION_CTRL */
- edp_write(edp_base + 0xc, 0x57); /* EDP_CONFIGURATION_CTRL */
+ ret = edp_read(base + 0xc); /* EDP_CONFIGURATION_CTRL */
+ edp_write(base + 0xc, ret | 0x2); /* EDP_CONFIGURATION_CTRL */
+ edp_write(base + 0xc, 0x57); /* EDP_CONFIGURATION_CTRL */
}
-static void mdss_edp_enable(unsigned char *edp_base, int enable)
+static void mdss_edp_enable(unsigned char *base, int enable)
{
- edp_write(edp_base + 0x8, 0x0); /* EDP_STATE_CTRL */
- edp_write(edp_base + 0x8, 0x40); /* EDP_STATE_CTRL */
- edp_write(edp_base + 0x94, enable); /* EDP_TIMING_ENGINE_EN */
- edp_write(edp_base + 0x4, enable); /* EDP_MAINLINK_CTRL */
+ edp_write(base + 0x8, 0x0); /* EDP_STATE_CTRL */
+ edp_write(base + 0x8, 0x40); /* EDP_STATE_CTRL */
+ edp_write(base + 0x94, enable); /* EDP_TIMING_ENGINE_EN */
+ edp_write(base + 0x4, enable); /* EDP_MAINLINK_CTRL */
}
+static void mdss_edp_irq_enable(struct mdss_edp_drv_pdata *edp_drv);
+static void mdss_edp_irq_disable(struct mdss_edp_drv_pdata *edp_drv);
+
int mdss_edp_on(struct mdss_panel_data *pdata)
{
struct mdss_edp_drv_pdata *edp_drv = NULL;
- int i;
+ int ret = 0;
- edp_drv = container_of(pdata, struct mdss_edp_drv_pdata,
- panel_data);
- if (!edp_drv) {
+ if (!pdata) {
pr_err("%s: Invalid input data\n", __func__);
return -EINVAL;
}
- mdss_edp_prepare_clocks(edp_drv);
- mdss_edp_phy_sw_reset(edp_drv->edp_base);
- mdss_edp_hw_powerup(edp_drv->edp_base, 1);
- mdss_edp_pll_configure(edp_drv->edp_base, edp_drv->edid.timing[0].pclk);
- mdss_edp_clk_enable(edp_drv);
+ edp_drv = container_of(pdata, struct mdss_edp_drv_pdata,
+ panel_data);
- for (i = 0; i < edp_drv->dpcd.max_lane_count; ++i)
- mdss_edp_enable_lane_bist(edp_drv->edp_base, i, 1);
+ pr_debug("%s:+\n", __func__);
+ if (edp_drv->train_start == 0)
+ edp_drv->train_start++;
- mdss_edp_enable_mainlink(edp_drv->edp_base, 1);
- mdss_edp_config_clk(edp_drv->edp_base, edp_drv->mmss_cc_base);
+ mdss_edp_phy_pll_reset(edp_drv->base);
+ mdss_edp_aux_reset(edp_drv->base);
+ mdss_edp_mainlink_reset(edp_drv->base);
- mdss_edp_phy_misc_cfg(edp_drv->edp_base);
- mdss_edp_config_sync(edp_drv->edp_base);
- mdss_edp_config_sw_div(edp_drv->edp_base);
- mdss_edp_config_static_mdiv(edp_drv->edp_base);
- mdss_edp_enable(edp_drv->edp_base, 1);
+ ret = mdss_edp_prepare_clocks(edp_drv);
+ if (ret)
+ return ret;
+ mdss_edp_phy_powerup(edp_drv->base, 1);
+
+ mdss_edp_pll_configure(edp_drv->base, edp_drv->edid.timing[0].pclk);
+ mdss_edp_phy_pll_ready(edp_drv->base);
+
+ ret = mdss_edp_clk_enable(edp_drv);
+ if (ret) {
+ mdss_edp_unprepare_clocks(edp_drv);
+ return ret;
+ }
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+ mdss_edp_aux_ctrl(edp_drv->base, 1);
+
+ mdss_edp_lane_power_ctrl(edp_drv->base,
+ edp_drv->dpcd.max_lane_count, 1);
+ mdss_edp_enable_mainlink(edp_drv->base, 1);
+ mdss_edp_config_clk(edp_drv->base, edp_drv->mmss_cc_base);
+
+ mdss_edp_clock_synchrous(edp_drv->base, 1);
+ mdss_edp_phy_vm_pe_init(edp_drv->base);
+ mdss_edp_config_sync(edp_drv->base);
+ mdss_edp_config_sw_div(edp_drv->base);
+ mdss_edp_config_static_mdiv(edp_drv->base);
gpio_set_value(edp_drv->gpio_panel_en, 1);
+ mdss_edp_irq_enable(edp_drv);
+ pr_debug("%s:-\n", __func__);
return 0;
}
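+/*
+ * Block until the link-training completion (run from the HPD event thread)
+ * is signalled, then turn on the timing engine and main link.  Called from
+ * the MDSS_EVENT_PANEL_ON handler after mdss_edp_on().
+ */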
+int mdss_edp_wait4train(struct mdss_panel_data *pdata)
+{
+ struct mdss_edp_drv_pdata *edp_drv = NULL;
+ int ret = 0;
+
+ if (!pdata) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ edp_drv = container_of(pdata, struct mdss_edp_drv_pdata,
+ panel_data);
+
+ ret = wait_for_completion_timeout(&edp_drv->train_comp, 100);
+		pr_err("%s: Link Train timed out\n", __func__);
+ pr_err("%s: Link Train timedout\n", __func__);
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ }
+
+ mdss_edp_enable(edp_drv->base, 1);
+
+ pr_debug("%s:\n", __func__);
+
+ return ret;
+}
+
int mdss_edp_off(struct mdss_panel_data *pdata)
{
struct mdss_edp_drv_pdata *edp_drv = NULL;
int ret = 0;
- int i;
edp_drv = container_of(pdata, struct mdss_edp_drv_pdata,
panel_data);
@@ -336,20 +373,26 @@
pr_err("%s: Invalid input data\n", __func__);
return -EINVAL;
}
+ pr_debug("%s:+\n", __func__);
+
+ mdss_edp_irq_disable(edp_drv);
gpio_set_value(edp_drv->gpio_panel_en, 0);
pwm_disable(edp_drv->bl_pwm);
- mdss_edp_enable(edp_drv->edp_base, 0);
- mdss_edp_unconfig_clk(edp_drv->edp_base, edp_drv->mmss_cc_base);
- mdss_edp_enable_mainlink(edp_drv->edp_base, 0);
+ mdss_edp_enable(edp_drv->base, 0);
+ mdss_edp_unconfig_clk(edp_drv->base, edp_drv->mmss_cc_base);
+ mdss_edp_enable_mainlink(edp_drv->base, 0);
- for (i = 0; i < edp_drv->dpcd.max_lane_count; ++i)
- mdss_edp_enable_lane_bist(edp_drv->edp_base, i, 0);
-
+ mdss_edp_lane_power_ctrl(edp_drv->base,
+ edp_drv->dpcd.max_lane_count, 0);
mdss_edp_clk_disable(edp_drv);
- mdss_edp_hw_powerup(edp_drv->edp_base, 0);
+ mdss_edp_phy_powerup(edp_drv->base, 0);
mdss_edp_unprepare_clocks(edp_drv);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ mdss_edp_aux_ctrl(edp_drv->base, 0);
+
+ pr_debug("%s:-\n", __func__);
return ret;
}
@@ -363,6 +406,9 @@
case MDSS_EVENT_UNBLANK:
rc = mdss_edp_on(pdata);
break;
+ case MDSS_EVENT_PANEL_ON:
+ rc = mdss_edp_wait4train(pdata);
+ break;
case MDSS_EVENT_PANEL_OFF:
rc = mdss_edp_off(pdata);
break;
@@ -382,20 +428,31 @@
pinfo = &edp_drv->panel_data.panel_info;
pinfo->clk_rate = dp->pclk;
+ pr_debug("%s: pclk=%d\n", __func__, pinfo->clk_rate);
pinfo->xres = dp->h_addressable + dp->h_border * 2;
pinfo->yres = dp->v_addressable + dp->v_border * 2;
+ pr_debug("%s: x=%d y=%d\n", __func__, pinfo->xres, pinfo->yres);
+
pinfo->lcdc.h_back_porch = dp->h_blank - dp->h_fporch \
- dp->h_sync_pulse;
pinfo->lcdc.h_front_porch = dp->h_fporch;
pinfo->lcdc.h_pulse_width = dp->h_sync_pulse;
+ pr_debug("%s: hporch= %d %d %d\n", __func__,
+ pinfo->lcdc.h_back_porch, pinfo->lcdc.h_front_porch,
+ pinfo->lcdc.h_pulse_width);
+
pinfo->lcdc.v_back_porch = dp->v_blank - dp->v_fporch \
- dp->v_sync_pulse;
pinfo->lcdc.v_front_porch = dp->v_fporch;
pinfo->lcdc.v_pulse_width = dp->v_sync_pulse;
+ pr_debug("%s: vporch= %d %d %d\n", __func__,
+ pinfo->lcdc.v_back_porch, pinfo->lcdc.v_front_porch,
+ pinfo->lcdc.v_pulse_width);
+
pinfo->type = EDP_PANEL;
pinfo->pdest = DISPLAY_1;
pinfo->wait_cycle = 0;
@@ -415,9 +472,9 @@
gpio_free(edp_drv->gpio_panel_en);
mdss_edp_regulator_off(edp_drv);
- iounmap(edp_drv->edp_base);
+ iounmap(edp_drv->base);
iounmap(edp_drv->mmss_cc_base);
- edp_drv->edp_base = NULL;
+ edp_drv->base = NULL;
return 0;
}
@@ -458,12 +515,19 @@
return -ENOMEM;
}
- edp_drv->edp_base = ioremap(res->start, resource_size(res));
- if (!edp_drv->edp_base) {
+ edp_drv->base_size = resource_size(res);
+ edp_drv->base = ioremap(res->start, resource_size(res));
+ if (!edp_drv->base) {
pr_err("%s: Unable to remap EDP resources", __func__);
return -ENOMEM;
}
+	pr_debug("%s: drv=%p base=%p size=%x\n", __func__,
+			edp_drv, edp_drv->base, edp_drv->base_size);
+
+ mdss_debug_register_base("edp",
+ edp_drv->base, edp_drv->base_size);
+
return 0;
}
@@ -488,52 +552,202 @@
return 0;
}
-static void mdss_edp_fill_edid_data(struct mdss_edp_drv_pdata *edp_drv)
+static void mdss_edp_video_ready(struct mdss_edp_drv_pdata *edp_drv)
{
- struct edp_edid *edid = &edp_drv->edid;
-
- edid->id_name[0] = 'A';
- edid->id_name[0] = 'U';
- edid->id_name[0] = 'O';
- edid->id_name[0] = 0;
- edid->id_product = 0x305D;
- edid->version = 1;
- edid->revision = 4;
- edid->ext_block_cnt = 0;
- edid->video_digital = 0x5;
- edid->color_depth = 6;
- edid->dpm = 0;
- edid->color_format = 0;
- edid->timing[0].pclk = 138500000;
- edid->timing[0].h_addressable = 1920;
- edid->timing[0].h_blank = 160;
- edid->timing[0].v_addressable = 1080;
- edid->timing[0].v_blank = 30;
- edid->timing[0].h_fporch = 48;
- edid->timing[0].h_sync_pulse = 32;
- edid->timing[0].v_sync_pulse = 14;
- edid->timing[0].v_fporch = 8;
- edid->timing[0].width_mm = 256;
- edid->timing[0].height_mm = 144;
- edid->timing[0].h_border = 0;
- edid->timing[0].v_border = 0;
- edid->timing[0].interlaced = 0;
- edid->timing[0].stereo = 0;
- edid->timing[0].sync_type = 1;
- edid->timing[0].sync_separate = 1;
- edid->timing[0].vsync_pol = 0;
- edid->timing[0].hsync_pol = 0;
+ pr_debug("%s: edp_video_ready\n", __func__);
}
-static void mdss_edp_fill_dpcd_data(struct mdss_edp_drv_pdata *edp_drv)
+static int edp_event_thread(void *data)
{
- struct dpcd_cap *cap = &edp_drv->dpcd;
+ struct mdss_edp_drv_pdata *ep;
+ unsigned long flag;
+ u32 todo = 0;
- cap->max_lane_count = 2;
- cap->max_link_clk = 270;
+ ep = (struct mdss_edp_drv_pdata *)data;
+
+ while (1) {
+ wait_event(ep->event_q, (ep->event_pndx != ep->event_gndx));
+ spin_lock_irqsave(&ep->event_lock, flag);
+ if (ep->event_pndx == ep->event_gndx) {
+ spin_unlock_irqrestore(&ep->event_lock, flag);
+ break;
+ }
+ todo = ep->event_todo_list[ep->event_gndx];
+ ep->event_todo_list[ep->event_gndx++] = 0;
+ ep->event_gndx %= HPD_EVENT_MAX;
+ spin_unlock_irqrestore(&ep->event_lock, flag);
+
+ pr_debug("%s: todo=%x\n", __func__, todo);
+
+ if (todo == 0)
+ continue;
+
+ if (todo & EV_EDID_READ)
+ mdss_edp_edid_read(ep, 0);
+
+ if (todo & EV_DPCD_CAP_READ)
+ mdss_edp_dpcd_cap_read(ep);
+
+ if (todo & EV_DPCD_STATUS_READ)
+ mdss_edp_dpcd_status_read(ep);
+
+ if (todo & EV_LINK_TRAIN) {
+ INIT_COMPLETION(ep->train_comp);
+ mdss_edp_link_train(ep);
+ }
+
+ if (todo & EV_VIDEO_READY)
+ mdss_edp_video_ready(ep);
+ }
+
+ return 0;
}
+static void edp_send_events(struct mdss_edp_drv_pdata *ep, u32 events)
+{
+ spin_lock(&ep->event_lock);
+ ep->event_todo_list[ep->event_pndx++] = events;
+ ep->event_pndx %= HPD_EVENT_MAX;
+ wake_up(&ep->event_q);
+ spin_unlock(&ep->event_lock);
+}
+
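+/*
+ * Top-level EDP ISR: read both interrupt status registers, ack the asserted
+ * status bits (each ack bit sits one position above its status bit), kick
+ * link training on HPD, queue the video-ready event, and hand AUX-channel
+ * interrupts to the native/i2c completion handlers.
+ */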
+irqreturn_t edp_isr(int irq, void *ptr)
+{
+ struct mdss_edp_drv_pdata *ep = (struct mdss_edp_drv_pdata *)ptr;
+ unsigned char *base = ep->base;
+ u32 isr1, isr2, mask1, mask2;
+ u32 ack;
+
+ isr1 = edp_read(base + 0x308);
+ isr2 = edp_read(base + 0x30c);
+
+ mask1 = isr1 & EDP_INTR_MASK1;
+ mask2 = isr2 & EDP_INTR_MASK2;
+
+	isr1 &= ~mask1;	/* strip the mask bits, keep only status */
+ isr2 &= ~mask2;
+
+ pr_debug("%s: isr=%x mask=%x isr2=%x mask2=%x\n",
+ __func__, isr1, mask1, isr2, mask2);
+
+ ack = isr1 & EDP_INTR_STATUS1;
+ ack <<= 1; /* ack bits */
+ ack |= mask1;
+ edp_write(base + 0x308, ack);
+
+ ack = isr2 & EDP_INTR_STATUS2;
+ ack <<= 1; /* ack bits */
+ ack |= mask2;
+ edp_write(base + 0x30c, ack);
+
+ if (isr1 & EDP_INTR_HPD) {
+ isr1 &= ~EDP_INTR_HPD; /* clear */
+ if (ep->train_start)
+ edp_send_events(ep, EV_LINK_TRAIN);
+ }
+
+ if (isr2 & EDP_INTR_READY_FOR_VIDEO)
+ edp_send_events(ep, EV_VIDEO_READY);
+
+ if (isr1 && ep->aux_cmd_busy) {
+ /* clear EDP_AUX_TRANS_CTRL */
+ edp_write(base + 0x318, 0);
+ /* read EDP_INTERRUPT_TRANS_NUM */
+ ep->aux_trans_num = edp_read(base + 0x310);
+
+ if (ep->aux_cmd_i2c)
+ edp_aux_i2c_handler(ep, isr1);
+ else
+ edp_aux_native_handler(ep, isr1);
+ }
+
+ return IRQ_HANDLED;
+}
+
+struct mdss_hw mdss_edp_hw = {
+ .hw_ndx = MDSS_HW_EDP,
+ .ptr = NULL,
+ .irq_handler = edp_isr,
+};
+
+static void mdss_edp_irq_enable(struct mdss_edp_drv_pdata *edp_drv)
+{
+ edp_write(edp_drv->base + 0x308, EDP_INTR_MASK1);
+ edp_write(edp_drv->base + 0x30c, EDP_INTR_MASK2);
+
+ mdss_enable_irq(&mdss_edp_hw);
+}
+
+static void mdss_edp_irq_disable(struct mdss_edp_drv_pdata *edp_drv)
+{
+ edp_write(edp_drv->base + 0x308, 0x0);
+ edp_write(edp_drv->base + 0x30c, 0x0);
+
+ mdss_disable_irq(&mdss_edp_hw);
+}
+
+static int mdss_edp_irq_setup(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret = 0;
+
+ edp_drv->gpio_panel_hpd = of_get_named_gpio_flags(
+ edp_drv->pdev->dev.of_node, "gpio-panel-hpd", 0,
+ &edp_drv->hpd_flags);
+
+ if (!gpio_is_valid(edp_drv->gpio_panel_hpd)) {
+		pr_err("%s: gpio_panel_hpd %d is not valid\n", __func__,
+				edp_drv->gpio_panel_hpd);
+ return -ENODEV;
+ }
+
+ ret = gpio_request(edp_drv->gpio_panel_hpd, "edp_hpd_irq_gpio");
+ if (ret) {
+		pr_err("%s: unable to request gpio_panel_hpd %d\n", __func__,
+ edp_drv->gpio_panel_hpd);
+ return -ENODEV;
+ }
+
+ ret = gpio_tlmm_config(GPIO_CFG(
+ edp_drv->gpio_panel_hpd,
+ 1,
+ GPIO_CFG_INPUT,
+ GPIO_CFG_NO_PULL,
+ GPIO_CFG_2MA),
+ GPIO_CFG_ENABLE);
+ if (ret) {
+ pr_err("%s: unable to config tlmm = %d\n", __func__,
+ edp_drv->gpio_panel_hpd);
+ gpio_free(edp_drv->gpio_panel_hpd);
+ return -ENODEV;
+ }
+
+ ret = gpio_direction_input(edp_drv->gpio_panel_hpd);
+ if (ret) {
+		pr_err("%s: unable to set direction for gpio_panel_hpd %d\n",
+ __func__, edp_drv->gpio_panel_hpd);
+ return -ENODEV;
+ }
+
+ mdss_edp_hw.ptr = (void *)(edp_drv);
+
+ if (mdss_register_irq(&mdss_edp_hw))
+ pr_err("%s: mdss_register_irq failed.\n", __func__);
+
+ return 0;
+}
+
+static void mdss_edp_event_setup(struct mdss_edp_drv_pdata *ep)
+{
+ init_waitqueue_head(&ep->event_q);
+ spin_lock_init(&ep->event_lock);
+
+ kthread_run(edp_event_thread, (void *)ep, "mdss_edp_hpd");
+}
static int __devinit mdss_edp_probe(struct platform_device *pdev)
{
@@ -554,6 +768,7 @@
edp_drv->pdev = pdev;
edp_drv->pdev->id = 1;
edp_drv->clk_on = 0;
+ edp_drv->train_start = 0; /* no link train yet */
ret = mdss_edp_get_base_address(edp_drv);
if (ret)
@@ -579,8 +794,38 @@
if (ret)
goto edp_free_gpio_panel_en;
- mdss_edp_fill_edid_data(edp_drv);
- mdss_edp_fill_dpcd_data(edp_drv);
+ mdss_edp_irq_setup(edp_drv);
+
+ mdss_edp_aux_init(edp_drv);
+
+ mdss_edp_event_setup(edp_drv);
+
+ /* need mdss clock to receive irq */
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+ /* only need aux and ahb clock for aux channel */
+ mdss_edp_prepare_aux_clocks(edp_drv);
+ mdss_edp_aux_clk_enable(edp_drv);
+ mdss_edp_phy_pll_reset(edp_drv->base);
+ mdss_edp_aux_reset(edp_drv->base);
+ mdss_edp_mainlink_reset(edp_drv->base);
+ mdss_edp_phy_powerup(edp_drv->base, 1);
+ mdss_edp_aux_ctrl(edp_drv->base, 1);
+
+ mdss_edp_irq_enable(edp_drv);
+
+ mdss_edp_edid_read(edp_drv, 0);
+ mdss_edp_dpcd_cap_read(edp_drv);
+
+ mdss_edp_irq_disable(edp_drv);
+
+ mdss_edp_aux_ctrl(edp_drv->base, 0);
+ mdss_edp_aux_clk_disable(edp_drv);
+ mdss_edp_phy_powerup(edp_drv->base, 0);
+ mdss_edp_unprepare_aux_clocks(edp_drv);
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
mdss_edp_device_register(edp_drv);
return 0;
@@ -594,7 +839,7 @@
mmss_cc_base_unmap:
iounmap(edp_drv->mmss_cc_base);
edp_base_unmap:
- iounmap(edp_drv->edp_base);
+ iounmap(edp_drv->base);
probe_err:
return ret;
diff --git a/drivers/video/msm/mdss/mdss_edp.h b/drivers/video/msm/mdss/mdss_edp.h
index 00ef206..c3f7d0d 100644
--- a/drivers/video/msm/mdss/mdss_edp.h
+++ b/drivers/video/msm/mdss/mdss_edp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,15 +14,157 @@
#ifndef MDSS_EDP_H
#define MDSS_EDP_H
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/clk.h>
-
-#include "mdss_panel.h"
+#include <linux/of_gpio.h>
#define edp_read(offset) readl_relaxed((offset))
#define edp_write(offset, data) writel_relaxed((data), (offset))
+#define AUX_CMD_FIFO_LEN 144
+#define AUX_CMD_MAX 16
+#define AUX_CMD_I2C_MAX 128
+
+#define EDP_PORT_MAX 1
+#define EDP_SINK_CAP_LEN 16
+
+#define EDP_AUX_ERR_NONE 0
+#define EDP_AUX_ERR_ADDR -1
+#define EDP_AUX_ERR_TOUT -2
+#define EDP_AUX_ERR_NACK -3
+
+/* 4 bits of aux command */
+#define EDP_CMD_AUX_WRITE 0x8
+#define EDP_CMD_AUX_READ 0x9
+
+/* 4 bits of i2c command */
+#define EDP_CMD_I2C_MOT 0x4 /* i2c middle of transaction */
+#define EDP_CMD_I2C_WRITE 0x0
+#define EDP_CMD_I2C_READ 0x1
+#define EDP_CMD_I2C_STATUS 0x2 /* i2c write status request */
+
+/* cmd reply: bit 0, 1 for aux */
+#define EDP_AUX_ACK 0x0
+#define EDP_AUX_NACK 0x1
+#define EDP_AUX_DEFER 0x2
+
+/* cmd reply: bit 2, 3 for i2c */
+#define EDP_I2C_ACK 0x0
+#define EDP_I2C_NACK 0x4
+#define EDP_I2C_DEFER 0x8
+
+#define EDP_CMD_TIMEOUT 400 /* us */
+#define EDP_CMD_LEN 16
+
+#define EDP_INTR_ACK_SHIFT 1
+#define EDP_INTR_MASK_SHIFT 2
+
+/* isr */
+#define EDP_INTR_HPD BIT(0)
+#define EDP_INTR_AUX_I2C_DONE BIT(3)
+#define EDP_INTR_WRONG_ADDR BIT(6)
+#define EDP_INTR_TIMEOUT BIT(9)
+#define EDP_INTR_NACK_DEFER BIT(12)
+#define EDP_INTR_WRONG_DATA_CNT BIT(15)
+#define EDP_INTR_I2C_NACK BIT(18)
+#define EDP_INTR_I2C_DEFER BIT(21)
+#define EDP_INTR_PLL_UNLOCKED BIT(24)
+#define EDP_INTR_AUX_ERROR BIT(27)
+
+
+#define EDP_INTR_STATUS1 \
+ (EDP_INTR_HPD | EDP_INTR_AUX_I2C_DONE| \
+ EDP_INTR_WRONG_ADDR | EDP_INTR_TIMEOUT | \
+ EDP_INTR_NACK_DEFER | EDP_INTR_WRONG_DATA_CNT | \
+ EDP_INTR_I2C_NACK | EDP_INTR_I2C_DEFER | \
+ EDP_INTR_PLL_UNLOCKED | EDP_INTR_AUX_ERROR)
+
+#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2)
+
+
+#define EDP_INTR_READY_FOR_VIDEO BIT(0)
+#define EDP_INTR_IDLE_PATTERNs_SENT BIT(3)
+#define EDP_INTR_FRAME_END BIT(6)
+#define EDP_INTR_CRC_UPDATED BIT(9)
+
+#define EDP_INTR_STATUS2 \
+ (EDP_INTR_READY_FOR_VIDEO | EDP_INTR_IDLE_PATTERNs_SENT | \
+ EDP_INTR_FRAME_END | EDP_INTR_CRC_UPDATED)
+
+#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2)
+
+
+#define EDP_MAINLINK_CTRL 0x004
+#define EDP_STATE_CTRL 0x008
+#define EDP_MAINLINK_READY 0x084
+
+#define EDP_AUX_CTRL 0x300
+#define EDP_INTERRUPT_STATUS 0x308
+#define EDP_INTERRUPT_STATUS_2 0x30c
+#define EDP_AUX_DATA 0x314
+#define EDP_AUX_TRANS_CTRL 0x318
+#define EDP_AUX_STATUS 0x324
+
+#define EDP_PHY_EDPPHY_GLB_VM_CFG0 0x510
+#define EDP_PHY_EDPPHY_GLB_VM_CFG1 0x514
+
+struct edp_cmd {
+ char read; /* 1 == read, 0 == write */
+ char i2c; /* 1 == i2c cmd, 0 == native cmd */
+ u32 addr; /* 20 bits */
+ char *datap;
+ int len; /* len to be tx OR len to be rx for read */
+ char next; /* next command */
+};
+
+struct edp_buf {
+ char *start; /* buffer start addr */
+ char *end; /* buffer end addr */
+ int size; /* size of buffer */
+ char *data; /* data pointer */
+	int len;	/* data length */
+ char trans_num; /* transaction number */
+ char i2c; /* 1 == i2c cmd, 0 == native cmd */
+};
+
+#define DPCD_ENHANCED_FRAME BIT(0)
+#define DPCD_TPS3 BIT(1)
+#define DPCD_MAX_DOWNSPREAD_0_5 BIT(2)
+#define DPCD_NO_AUX_HANDSHAKE BIT(3)
+#define DPCD_PORT_0_EDID_PRESENTED BIT(4)
+
+/* event */
+#define EV_EDP_AUX_SETUP BIT(0)
+#define EV_EDID_READ BIT(1)
+#define EV_DPCD_CAP_READ BIT(2)
+#define EV_DPCD_STATUS_READ BIT(3)
+#define EV_LINK_TRAIN BIT(4)
+#define EV_VIDEO_READY BIT(31)
+
+struct dpcd_cap {
+ char major;
+ char minor;
+ char max_lane_count;
+ char num_rx_port;
+ char i2c_speed_ctrl;
+ char scrambler_reset;
+ char enhanced_frame;
+ u32 max_link_rate; /* 162, 270 and 540 Mb, divided by 10 */
+ u32 flags;
+ u32 rx_port0_buf_size;
+ u32 training_read_interval;/* us */
+};
+
+struct dpcd_link_status {
+ char lane_01_status;
+ char lane_23_status;
+ char interlane_align_done;
+ char downstream_port_status_changed;
+ char link_status_updated;
+ char port_0_in_sync;
+ char port_1_in_sync;
+ char req_voltage_swing[4];
+ char req_pre_emphasis[4];
+};
+
struct display_timing_desc {
u32 pclk;
u32 h_addressable; /* addressable + boder = active */
@@ -45,12 +187,14 @@
u32 hsync_pol;
};
+#define EDID_DISPLAY_PORT_SUPPORT 0x05
+
struct edp_edid {
char id_name[4];
short id_product;
char version;
char revision;
- char video_digital;
+ char video_intf; /* edp == 0x5 */
char color_depth; /* 6, 8, 10, 12 and 14 bits */
char color_format; /* RGB 4:4:4, YCrCb 4:4:4, Ycrcb 4:2:2 */
char dpm; /* display power management */
@@ -62,11 +206,32 @@
struct display_timing_desc timing[4];
};
-struct dpcd_cap {
- char max_lane_count;
- u32 max_link_clk; /* 162, 270 and 540 Mb, divided by 10 */
+struct edp_statistic {
+ u32 intr_hpd;
+ u32 intr_aux_i2c_done;
+ u32 intr_wrong_addr;
+ u32 intr_tout;
+ u32 intr_nack_defer;
+ u32 intr_wrong_data_cnt;
+ u32 intr_i2c_nack;
+ u32 intr_i2c_defer;
+ u32 intr_pll_unlock;
+ u32 intr_crc_update;
+ u32 intr_frame_end;
+ u32 intr_idle_pattern_sent;
+ u32 intr_ready_for_video;
+ u32 aux_i2c_tx;
+ u32 aux_i2c_rx;
+ u32 aux_native_tx;
+ u32 aux_native_rx;
};
+
+#define DPCD_LINK_VOLTAGE_MAX 4
+#define DPCD_LINK_PRE_EMPHASIS_MAX 4
+
+#define HPD_EVENT_MAX 8
+
struct mdss_edp_drv_pdata {
/* device driver */
int (*on) (struct mdss_panel_data *pdata);
@@ -74,11 +239,15 @@
struct platform_device *pdev;
/* edp specific */
- struct mdss_panel_data panel_data;
- unsigned char *edp_base;
+ unsigned char *base;
+ int base_size;
unsigned char *mmss_cc_base;
+
+ struct mdss_panel_data panel_data;
+
struct edp_edid edid;
struct dpcd_cap dpcd;
+ int train_start;
/* regulators */
struct regulator *vdda_vreg;
@@ -98,22 +267,82 @@
struct pwm_device *bl_pwm;
int lpg_channel;
int pwm_period;
+
+ /* hpd */
+ int gpio_panel_hpd;
+ enum of_gpio_flags hpd_flags;
+ int hpd_irq;
+
+ /* aux */
+ struct completion aux_comp;
+ struct completion train_comp;
+ struct mutex aux_mutex;
+ u32 aux_cmd_busy;
+ u32 aux_cmd_i2c;
+ int aux_trans_num;
+ int aux_error_num;
+ u32 aux_ctrl_reg;
+ struct edp_buf txp;
+ struct edp_buf rxp;
+ char txbuf[256];
+ char rxbuf[256];
+ struct dpcd_link_status link_status;
+ char link_rate;
+ char lane_cnt;
+ char v_level;
+ char p_level;
+ /* transfer unit */
+ char tu_desired;
+ char valid_boundary;
+ char delay_start;
+ u32 bpp;
+ struct edp_statistic edp_stat;
+
+ /* event */
+ wait_queue_head_t event_q;
+ u32 event_pndx;
+ u32 event_gndx;
+ u32 event_todo_list[HPD_EVENT_MAX];
+ spinlock_t event_lock;
};
-void mdss_edp_phy_sw_reset(unsigned char *edp_base);
-void mdss_edp_pll_configure(unsigned char *edp_base, int rate);
-void mdss_edp_enable_lane_bist(unsigned char *edp_base, int lane, int enable);
-void mdss_edp_enable_mainlink(unsigned char *edp_base, int enable);
-void mdss_edp_hw_powerup(unsigned char *edp_base, int enable);
-void mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_phy_sw_reset(unsigned char *base);
+void mdss_edp_pll_configure(unsigned char *base, int rate);
+void mdss_edp_enable_mainlink(unsigned char *base, int enable);
+void mdss_edp_phy_powerup(unsigned char *base, int enable);
+int mdss_edp_aux_clk_enable(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_aux_clk_disable(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv);
void mdss_edp_clk_disable(struct mdss_edp_drv_pdata *edp_drv);
int mdss_edp_clk_init(struct mdss_edp_drv_pdata *edp_drv);
void mdss_edp_clk_deinit(struct mdss_edp_drv_pdata *edp_drv);
-void mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_prepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_unprepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv);
void mdss_edp_unprepare_clocks(struct mdss_edp_drv_pdata *edp_drv);
-void mdss_edp_config_clk(unsigned char *edp_base, unsigned char *mmss_cc_base);
-void mdss_edp_unconfig_clk(unsigned char *edp_base,
+void mdss_edp_config_clk(unsigned char *base, unsigned char *mmss_cc_base);
+void mdss_edp_unconfig_clk(unsigned char *base,
unsigned char *mmss_cc_base);
-void mdss_edp_phy_misc_cfg(unsigned char *edp_base);
+
+void mdss_edp_dpcd_cap_read(struct mdss_edp_drv_pdata *edp);
+void mdss_edp_dpcd_status_read(struct mdss_edp_drv_pdata *edp);
+void mdss_edp_edid_read(struct mdss_edp_drv_pdata *edp, int block);
+int mdss_edp_link_train(struct mdss_edp_drv_pdata *edp);
+void edp_aux_i2c_handler(struct mdss_edp_drv_pdata *edp, u32 isr);
+void edp_aux_native_handler(struct mdss_edp_drv_pdata *edp, u32 isr);
+void mdss_edp_aux_init(struct mdss_edp_drv_pdata *ep);
+void mdss_edp_enable_aux(unsigned char *edp_base, int enable);
+
+void mdss_edp_timing_engine_ctrl(unsigned char *edp_base, int enable);
+void mdss_edp_mainlink_ctrl(unsigned char *edp_base, int enable);
+void mdss_edp_mainlink_reset(unsigned char *edp_base);
+void mdss_edp_aux_reset(unsigned char *edp_base);
+void mdss_edp_aux_ctrl(unsigned char *edp_base, int enable);
+void mdss_edp_phy_pll_reset(unsigned char *edp_base);
+int mdss_edp_phy_pll_ready(unsigned char *edp_base);
+int mdss_edp_phy_ready(unsigned char *edp_base);
+void mdss_edp_lane_power_ctrl(unsigned char *edp_base, int max_lane, int up);
+void mdss_edp_phy_vm_pe_init(unsigned char *edp_base);
+void mdss_edp_clock_synchrous(unsigned char *edp_base, int sync);
#endif /* MDSS_EDP_H */
diff --git a/drivers/video/msm/mdss/mdss_edp_aux.c b/drivers/video/msm/mdss/mdss_edp_aux.c
new file mode 100644
index 0000000..6d8e2c2
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_edp_aux.c
@@ -0,0 +1,1260 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/bug.h>
+#include <linux/of_gpio.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+#include <mach/hardware.h>
+#include <mach/gpio.h>
+#include <mach/clk.h>
+#include <mach/dma.h>
+
+#include "mdss_panel.h"
+#include "mdss_edp.h"
+
+/*
+ * edp buffer operation
+ */
+static char *edp_buf_init(struct edp_buf *eb, char *buf, int size)
+{
+ eb->start = buf;
+ eb->size = size;
+ eb->data = eb->start;
+ eb->end = eb->start + eb->size;
+ eb->len = 0;
+ eb->trans_num = 0;
+ eb->i2c = 0;
+ return eb->data;
+}
+
+static char *edp_buf_reset(struct edp_buf *eb)
+{
+ eb->data = eb->start;
+ eb->len = 0;
+ eb->trans_num = 0;
+ eb->i2c = 0;
+ return eb->data;
+}
+
+static char *edp_buf_push(struct edp_buf *eb, int len)
+{
+ eb->data += len;
+ eb->len += len;
+ return eb->data;
+}
+
+static int edp_buf_trailing(struct edp_buf *eb)
+{
+ return (int)(eb->end - eb->data);
+}
+
+/*
+ * edp aux edp_buf_add_cmd:
+ * NO native and i2c command mix allowed
+ */
+static int edp_buf_add_cmd(struct edp_buf *eb, struct edp_cmd *cmd)
+{
+ char data;
+ char *bp, *cp;
+ int i, len;
+
+ if (cmd->read) /* read */
+ len = 4;
+ else
+ len = cmd->len + 4;
+
+ if (edp_buf_trailing(eb) < len)
+ return 0;
+
+ /*
+ * cmd fifo only has depth of 144 bytes
+ * limit buf length to 128 bytes here
+ */
+ if ((eb->len + len) > 128)
+ return 0;
+
+ bp = eb->data;
+ data = cmd->addr >> 16;
+ data &= 0x0f; /* 4 addr bits */
+ if (cmd->read)
+ data |= BIT(4);
+ *bp++ = data;
+ *bp++ = cmd->addr >> 8;
+ *bp++ = cmd->addr;
+ *bp++ = cmd->len - 1;
+
+ if (!cmd->read) { /* write */
+ cp = cmd->datap;
+ for (i = 0; i < cmd->len; i++)
+ *bp++ = *cp++;
+ }
+ edp_buf_push(eb, len);
+
+ if (cmd->i2c)
+ eb->i2c++;
+
+ eb->trans_num++; /* Increase transaction number */
+
+ return cmd->len - 1;
+}
+
+static int edp_cmd_fifo_tx(struct edp_buf *tp, unsigned char *base)
+{
+ u32 data;
+ char *dp;
+ int len, cnt;
+
+ len = tp->len; /* total byte to cmd fifo */
+ if (len == 0)
+ return 0;
+
+ cnt = 0;
+ dp = tp->start;
+
+ while (cnt < len) {
+ data = *dp; /* data byte */
+ data <<= 8;
+ data &= 0x00ff00; /* index = 0, write */
+ if (cnt == 0)
+ data |= BIT(31); /* INDEX_WRITE */
+ pr_debug("%s: data=%x\n", __func__, data);
+ edp_write(base + EDP_AUX_DATA, data);
+ cnt++;
+ dp++;
+ }
+
+ data = (tp->trans_num - 1);
+ if (tp->i2c)
+ data |= BIT(8); /* I2C */
+
+ data |= BIT(9); /* GO */
+ pr_debug("%s: data=%x\n", __func__, data);
+ edp_write(base + EDP_AUX_TRANS_CTRL, data);
+
+ return tp->len;
+}
+
+static int edp_cmd_fifo_rx(struct edp_buf *rp, int len, unsigned char *base)
+{
+ u32 data;
+ char *dp;
+ int i;
+
+ data = 0; /* index = 0 */
+ data |= BIT(31); /* INDEX_WRITE */
+ data |= BIT(0); /* read */
+ edp_write(base + EDP_AUX_DATA, data);
+
+ dp = rp->data;
+
+ /* discard first byte */
+ data = edp_read(base + EDP_AUX_DATA);
+ for (i = 0; i < len; i++) {
+ data = edp_read(base + EDP_AUX_DATA);
+ pr_debug("%s: data=%x\n", __func__, data);
+ *dp++ = (char)((data >> 8) & 0xff);
+ }
+
+ rp->len = len;
+ return len;
+}
+
+static int edp_aux_write_cmds(struct mdss_edp_drv_pdata *ep,
+ struct edp_cmd *cmd)
+{
+ struct edp_cmd *cm;
+ struct edp_buf *tp;
+ int len, ret;
+
+ mutex_lock(&ep->aux_mutex);
+ ep->aux_cmd_busy = 1;
+
+ tp = &ep->txp;
+ edp_buf_reset(tp);
+
+ cm = cmd;
+ while (cm) {
+ pr_debug("%s: i2c=%d read=%d addr=%x len=%d next=%d\n",
+ __func__, cm->i2c, cm->read, cm->addr, cm->len,
+ cm->next);
+ ret = edp_buf_add_cmd(tp, cm);
+ if (ret <= 0)
+ break;
+ if (cm->next == 0)
+ break;
+ cm++;
+ }
+
+ if (tp->i2c)
+ ep->aux_cmd_i2c = 1;
+ else
+ ep->aux_cmd_i2c = 0;
+
+ INIT_COMPLETION(ep->aux_comp);
+
+ len = edp_cmd_fifo_tx(&ep->txp, ep->base);
+
+ wait_for_completion(&ep->aux_comp);
+
+ if (ep->aux_error_num == EDP_AUX_ERR_NONE)
+ ret = len;
+ else
+ ret = ep->aux_error_num;
+
+ ep->aux_cmd_busy = 0;
+ mutex_unlock(&ep->aux_mutex);
+ return ret;
+}
+
+static int edp_aux_read_cmds(struct mdss_edp_drv_pdata *ep,
+ struct edp_cmd *cmds)
+{
+ struct edp_cmd *cm;
+ struct edp_buf *tp;
+ struct edp_buf *rp;
+ int len, ret;
+
+ mutex_lock(&ep->aux_mutex);
+ ep->aux_cmd_busy = 1;
+
+ tp = &ep->txp;
+ rp = &ep->rxp;
+ edp_buf_reset(tp);
+ edp_buf_reset(rp);
+
+ cm = cmds;
+ len = 0;
+ while (cm) {
+ pr_debug("%s: i2c=%d read=%d addr=%x len=%d next=%d\n",
+ __func__, cm->i2c, cm->read, cm->addr, cm->len,
+ cm->next);
+ ret = edp_buf_add_cmd(tp, cm);
+ len += cm->len;
+ if (ret <= 0)
+ break;
+ if (cm->next == 0)
+ break;
+ cm++;
+ }
+
+ if (tp->i2c)
+ ep->aux_cmd_i2c = 1;
+ else
+ ep->aux_cmd_i2c = 0;
+
+ INIT_COMPLETION(ep->aux_comp);
+
+ edp_cmd_fifo_tx(tp, ep->base);
+
+ wait_for_completion(&ep->aux_comp);
+
+ if (ep->aux_error_num == EDP_AUX_ERR_NONE)
+ ret = edp_cmd_fifo_rx(rp, len, ep->base);
+ else
+ ret = ep->aux_error_num;
+
+ ep->aux_cmd_busy = 0;
+ mutex_unlock(&ep->aux_mutex);
+
+ return ret;
+}
+
+void edp_aux_native_handler(struct mdss_edp_drv_pdata *ep, u32 isr)
+{
+
+ pr_debug("%s: isr=%x\n", __func__, isr);
+
+ if (isr & EDP_INTR_AUX_I2C_DONE)
+ ep->aux_error_num = EDP_AUX_ERR_NONE;
+ else if (isr & EDP_INTR_WRONG_ADDR)
+ ep->aux_error_num = EDP_AUX_ERR_ADDR;
+ else if (isr & EDP_INTR_TIMEOUT)
+ ep->aux_error_num = EDP_AUX_ERR_TOUT;
+ if (isr & EDP_INTR_NACK_DEFER)
+ ep->aux_error_num = EDP_AUX_ERR_NACK;
+
+ complete(&ep->aux_comp);
+}
+
+void edp_aux_i2c_handler(struct mdss_edp_drv_pdata *ep, u32 isr)
+{
+
+ pr_debug("%s: isr=%x\n", __func__, isr);
+
+ if (isr & EDP_INTR_AUX_I2C_DONE) {
+ if (isr & (EDP_INTR_I2C_NACK | EDP_INTR_I2C_DEFER))
+ ep->aux_error_num = EDP_AUX_ERR_NACK;
+ else
+ ep->aux_error_num = EDP_AUX_ERR_NONE;
+ } else {
+ if (isr & EDP_INTR_WRONG_ADDR)
+ ep->aux_error_num = EDP_AUX_ERR_ADDR;
+ else if (isr & EDP_INTR_TIMEOUT)
+ ep->aux_error_num = EDP_AUX_ERR_TOUT;
+ if (isr & EDP_INTR_NACK_DEFER)
+ ep->aux_error_num = EDP_AUX_ERR_NACK;
+ if (isr & EDP_INTR_I2C_NACK)
+ ep->aux_error_num = EDP_AUX_ERR_NACK;
+ if (isr & EDP_INTR_I2C_DEFER)
+ ep->aux_error_num = EDP_AUX_ERR_NACK;
+ }
+
+ complete(&ep->aux_comp);
+}
+
+static int edp_aux_write_buf(struct mdss_edp_drv_pdata *ep, u32 addr,
+ char *buf, int len, int i2c)
+{
+ struct edp_cmd cmd;
+
+ cmd.read = 0;
+ cmd.i2c = i2c;
+ cmd.addr = addr;
+ cmd.datap = buf;
+ cmd.len = len & 0x0ff;
+ cmd.next = 0;
+
+ return edp_aux_write_cmds(ep, &cmd);
+}
+
+static int edp_aux_read_buf(struct mdss_edp_drv_pdata *ep, u32 addr,
+ int len, int i2c)
+{
+ struct edp_cmd cmd;
+
+ cmd.read = 1;
+ cmd.i2c = i2c;
+ cmd.addr = addr;
+ cmd.datap = NULL;
+ cmd.len = len & 0x0ff;
+ cmd.next = 0;
+
+ return edp_aux_read_cmds(ep, &cmd);
+}
+
+/*
+ * edid standard header bytes
+ */
+static char edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+
+int edp_edid_buf_error(char *buf, int len)
+{
+ char *bp;
+ int i;
+ char csum = 0;
+ int ret = 0;
+
+ bp = buf;
+ if (len < 128) {
+ pr_err("%s: Error: len=%x\n", __func__, len);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 128; i++)
+ csum += *bp++;
+
+ if (csum != 0) {
+ pr_err("%s: Error: csum=%x\n", __func__, csum);
+ return -EINVAL;
+ }
+
+	if (memcmp(buf, edid_hdr, sizeof(edid_hdr))) {
+ pr_err("%s: Error: header\n", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
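
Both checks above operate on a full 128-byte base block: the bytes must sum to 0 modulo 256, and the block must begin with the fixed 8-byte header. Because that header starts and ends with 0x00 it is not a C string and has to be compared over its whole length. A small standalone sketch, not part of the patch, that builds a dummy block passing both tests:

/*
 * Standalone sketch: the two EDID block checks above, applied to a dummy
 * block.  Byte 127 is the checksum byte that makes the 128-byte sum zero.
 */
#include <stdio.h>
#include <string.h>

static const unsigned char hdr[8] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

int main(void)
{
	unsigned char blk[128] = { 0 };
	unsigned char sum = 0;
	int i;

	memcpy(blk, hdr, sizeof(hdr));
	for (i = 0; i < 127; i++)
		sum += blk[i];
	blk[127] = (unsigned char)(256 - sum);	/* byte 127 is the checksum */

	sum = 0;
	for (i = 0; i < 128; i++)
		sum += blk[i];

	printf("header ok: %d, checksum ok: %d\n",
	       memcmp(blk, hdr, sizeof(hdr)) == 0, sum == 0);
	return 0;
}
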
+
+
+void edp_extract_edid_manufacturer(struct edp_edid *edid, char *buf)
+{
+ char *bp;
+ char data;
+
+ bp = &buf[8];
+ data = *bp & 0x7f;
+ data >>= 2;
+ edid->id_name[0] = 'A' + data - 1;
+ data = *bp & 0x03;
+ data <<= 3;
+ bp++;
+ data |= (*bp >> 5);
+ edid->id_name[1] = 'A' + data - 1;
+ data = *bp & 0x1f;
+ edid->id_name[2] = 'A' + data - 1;
+ edid->id_name[3] = 0;
+
+ pr_debug("%s: edid manufacturer = %s", __func__, edid->id_name);
+}
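
The three-letter manufacturer (PNP) ID is packed five bits per letter across EDID bytes 8 and 9, with code 1 mapping to 'A'. A standalone sketch, not part of the patch, reproducing the decode for bytes 0x06 0xAF, the values used in the sample EDID quoted later in this file (the result is "AUO"):

/*
 * Standalone sketch: decodes the 3-letter EDID manufacturer (PNP) ID
 * exactly as edp_extract_edid_manufacturer() does.  Bytes 8..9 come
 * from the sample EDID further below.
 */
#include <stdio.h>

int main(void)
{
	unsigned char b8 = 0x06, b9 = 0xAF;	/* EDID bytes 8 and 9 */
	char name[4];

	name[0] = 'A' + ((b8 & 0x7f) >> 2) - 1;			/* bits 14..10 */
	name[1] = 'A' + (((b8 & 0x03) << 3) | (b9 >> 5)) - 1;	/* bits 9..5 */
	name[2] = 'A' + (b9 & 0x1f) - 1;			/* bits 4..0 */
	name[3] = 0;

	printf("manufacturer = %s\n", name);	/* prints AUO */
	return 0;
}
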
+
+void edp_extract_edid_product(struct edp_edid *edid, char *buf)
+{
+ char *bp;
+ u32 data;
+
+ bp = &buf[0x0a];
+ data = *bp;
+ edid->id_product = *bp++;
+ edid->id_product &= 0x0ff;
+ data = *bp & 0x0ff;
+ data <<= 8;
+ edid->id_product |= data;
+
+ pr_debug("%s: edid product = 0x%x", __func__, edid->id_product);
+};
+
+void edp_extract_edid_version(struct edp_edid *edid, char *buf)
+{
+ edid->version = buf[0x12];
+ edid->revision = buf[0x13];
+ pr_debug("%s: edid version = %d.%d", __func__, edid->version,
+ edid->revision);
+};
+
+void edp_extract_edid_ext_block_cnt(struct edp_edid *edid, char *buf)
+{
+ edid->ext_block_cnt = buf[0x7e];
+ pr_debug("%s: edid extension = %d", __func__,
+ edid->ext_block_cnt);
+};
+
+void edp_extract_edid_video_support(struct edp_edid *edid, char *buf)
+{
+ char *bp;
+
+ bp = &buf[0x14];
+ if (*bp & 0x80) {
+ edid->video_intf = *bp & 0x0f;
+ /* 6, 8, 10, 12, 14 and 16 bit per component */
+ edid->color_depth = ((*bp & 0x70) >> 4); /* color bit depth */
+ if (edid->color_depth) {
+ edid->color_depth *= 2;
+ edid->color_depth += 4;
+ }
+ pr_debug("%s: Digital Video intf=%d color_depth=%d\n",
+ __func__, edid->video_intf, edid->color_depth);
+ } else {
+ pr_err("%s: Error, Analog video interface", __func__);
+ }
+};
+
+void edp_extract_edid_feature(struct edp_edid *edid, char *buf)
+{
+ char *bp;
+ char data;
+
+ bp = &buf[0x18];
+ data = *bp;
+ data &= 0xe0;
+ data >>= 5;
+ if (data == 0x01)
+ edid->dpm = 1; /* display power management */
+
+ if (edid->video_intf) {
+ if (*bp & 0x80) {
+ /* RGB 4:4:4, YcrCb 4:4:4 and YCrCb 4:2:2 */
+ edid->color_format = *bp & 0x18;
+ edid->color_format >>= 3;
+ }
+ }
+
+ pr_debug("%s: edid dpm=%d color_format=%d", __func__,
+ edid->dpm, edid->color_format);
+};
+
+void edp_extract_edid_detailed_timing_description(struct edp_edid *edid,
+ char *buf)
+{
+ char *bp;
+ u32 data;
+ struct display_timing_desc *dp;
+
+ dp = &edid->timing[0];
+
+ bp = &buf[0x36];
+ dp->pclk = 0;
+ dp->pclk = *bp++; /* byte 0x36 */
+ dp->pclk |= (*bp++ << 8); /* byte 0x37 */
+
+ dp->h_addressable = *bp++; /* byte 0x38 */
+
+ if (dp->pclk == 0 && dp->h_addressable == 0)
+ return; /* Not detailed timing definition */
+
+ dp->pclk *= 10000;
+
+ dp->h_blank = *bp++;/* byte 0x39 */
+ data = *bp & 0xf0; /* byte 0x3A */
+ data <<= 4;
+ dp->h_addressable |= data;
+
+ data = *bp++ & 0x0f;
+ data <<= 8;
+ dp->h_blank |= data;
+
+ dp->v_addressable = *bp++; /* byte 0x3B */
+ dp->v_blank = *bp++; /* byte 0x3C */
+ data = *bp & 0xf0; /* byte 0x3D */
+ data <<= 4;
+ dp->v_addressable |= data;
+
+ data = *bp++ & 0x0f;
+ data <<= 8;
+ dp->v_blank |= data;
+
+ dp->h_fporch = *bp++; /* byte 0x3E */
+ dp->h_sync_pulse = *bp++; /* byte 0x3F */
+
+ dp->v_fporch = *bp & 0x0f0; /* byte 0x40 */
+ dp->v_fporch >>= 4;
+ dp->v_sync_pulse = *bp & 0x0f;
+
+ bp++;
+ data = *bp & 0xc0; /* byte 0x41 */
+ data <<= 2;
+ dp->h_fporch |= data;
+
+ data = *bp & 0x30;
+ data <<= 4;
+ dp->h_sync_pulse |= data;
+
+ data = *bp & 0x0c;
+ data <<= 2;
+ dp->v_fporch |= data;
+
+ data = *bp & 0x03;
+ data <<= 4;
+ dp->v_sync_pulse |= data;
+
+ bp++;
+ dp->width_mm = *bp++; /* byte 0x42 */
+ dp->height_mm = *bp++; /* byte 0x43 */
+ data = *bp & 0x0f0; /* byte 0x44 */
+ data <<= 4;
+ dp->width_mm |= data;
+ data = *bp & 0x0f;
+ data <<= 8;
+ dp->height_mm |= data;
+
+ bp++;
+ dp->h_border = *bp++; /* byte 0x45 */
+ dp->v_border = *bp++; /* byte 0x46 */
+
+ dp->interlaced = *bp & 0x80; /* byte 0x47 */
+
+ dp->stereo = *bp & 0x60;
+ dp->stereo >>= 5;
+
+	data = *bp & 0x1e;	/* bits 4, 3, 2, 1 */
+ data >>= 1;
+ dp->sync_type = data & 0x08;
+ dp->sync_type >>= 3; /* analog or digital */
+ if (dp->sync_type) {
+ dp->sync_separate = data & 0x04;
+ dp->sync_separate >>= 2;
+ if (dp->sync_separate) {
+ if (data & 0x02)
+ dp->vsync_pol = 1; /* positive */
+ else
+ dp->vsync_pol = 0;/* negative */
+
+ if (data & 0x01)
+ dp->hsync_pol = 1; /* positive */
+ else
+ dp->hsync_pol = 0; /* negative */
+ }
+ }
+
+ pr_debug("%s: pixel_clock = %d\n", __func__, dp->pclk);
+
+ pr_debug("%s: horizontal=%d, blank=%d, porch=%d, sync=%d\n"
+ , __func__, dp->h_addressable, dp->h_blank,
+ dp->h_fporch, dp->h_sync_pulse);
+ pr_debug("%s: vertical=%d, blank=%d, porch=%d, vsync=%d\n"
+ , __func__, dp->v_addressable, dp->v_blank,
+ dp->v_fporch, dp->v_sync_pulse);
+ pr_debug("%s: panel size in mm, width=%d height=%d\n", __func__,
+ dp->width_mm, dp->height_mm);
+ pr_debug("%s: panel border horizontal=%d vertical=%d\n", __func__,
+ dp->h_border, dp->v_border);
+ pr_debug("%s: flags: interlaced=%d stereo=%d sync_type=%d sync_sep=%d\n"
+ , __func__, dp->interlaced, dp->stereo,
+ dp->sync_type, dp->sync_separate);
+ pr_debug("%s: polarity vsync=%d, hsync=%d", __func__,
+ dp->vsync_pol, dp->hsync_pol);
+}
+
+
+/*
+ * The EDID structure is defined in the VESA E-EDID standard, see:
+ * http://read.pudn.com/downloads110/ebook/456020/E-EDID%20Standard.pdf
+ *
+ * The following table contains a default EDID:
+ * static char edid_raw_data[128] = {
+ * 0, 255, 255, 255, 255, 255, 255, 0,
+ * 6, 175, 93, 48, 0, 0, 0, 0, 0, 22,
+ * 1, 4,
+ * 149, 26, 14, 120, 2,
+ * 164, 21,158, 85, 78, 155, 38, 15, 80, 84,
+ * 0, 0, 0,
+ * 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ * 29, 54, 128, 160, 112, 56, 30, 64, 48, 32, 142, 0, 0, 144, 16,0,0,24,
+ * 19, 36, 128, 160, 112, 56, 30, 64, 48, 32, 142, 0, 0, 144, 16,0,0,24,
+ * 0, 0, 0, 254, 0, 65, 85, 79, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+ * 0, 0, 0, 254, 0, 66, 49, 49, 54, 72, 65, 78, 48, 51, 46, 48, 32, 10,
+ * 0, 75 };
+ */
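
Running the first detailed timing descriptor of this sample EDID (bytes 0x36 to 0x47) through the parser above gives a useful sanity check: a 1920x1080 panel with a 138.53 MHz pixel clock and roughly a 60 Hz refresh, which matches the 138530000 PLL rate corrected later in this patch. A standalone sketch of just that arithmetic, not part of the patch:

/*
 * Standalone sketch: runs the byte arithmetic of
 * edp_extract_edid_detailed_timing_description() on the first detailed
 * timing descriptor (bytes 0x36..0x47) of the sample EDID above.
 */
#include <stdio.h>

int main(void)
{
	/* bytes 0x36..0x47 of the sample EDID quoted above */
	unsigned char d[18] = { 29, 54, 128, 160, 112, 56, 30, 64,
				48, 32, 142, 0, 0, 144, 16, 0, 0, 24 };
	unsigned int pclk, h_active, h_blank, v_active, v_blank;

	pclk = (d[0] | (d[1] << 8)) * 10000;		/* 138530000 Hz */
	h_active = d[2] | ((d[4] & 0xf0) << 4);		/* 1920 */
	h_blank  = d[3] | ((d[4] & 0x0f) << 8);		/* 160 */
	v_active = d[5] | ((d[7] & 0xf0) << 4);		/* 1080 */
	v_blank  = d[6] | ((d[7] & 0x0f) << 8);		/* 30 */

	printf("%ux%u, pclk=%u Hz, refresh ~%u Hz\n",
	       h_active, v_active, pclk,
	       pclk / ((h_active + h_blank) * (v_active + v_blank)));
	return 0;
}
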
+
+static int edp_aux_chan_ready(struct mdss_edp_drv_pdata *ep)
+{
+ int cnt, ret;
+ char data = 0;
+
+ for (cnt = 5; cnt; cnt--) {
+ ret = edp_aux_write_buf(ep, 0x50, &data, 1, 1);
+ pr_debug("%s: ret=%d\n", __func__, ret);
+ if (ret >= 0)
+ break;
+ pr_debug("%s: failed in write\n", __func__);
+ msleep(100);
+ }
+
+ if (cnt == 0)
+ return 0;
+
+ return 1;
+}
+
+static int edp_sink_edid_read(struct mdss_edp_drv_pdata *ep, int block)
+{
+ struct edp_buf *rp;
+ int cnt, rlen;
+ int ret = 0;
+
+ ret = edp_aux_chan_ready(ep);
+ if (ret == 0) {
+ pr_err("%s: aux chan NOT ready\n", __func__);
+ return ret;
+ }
+
+ for (cnt = 5; cnt; cnt--) {
+ rlen = edp_aux_read_buf(ep, 0x50, 128, 1);
+ if (rlen > 0) {
+ pr_debug("%s: rlen=%d\n", __func__, rlen);
+
+ rp = &ep->rxp;
+ if (!edp_edid_buf_error(rp->data, rp->len))
+ break;
+ }
+ }
+
+ if (cnt <= 0) {
+ pr_err("%s: Failed\n", __func__);
+ return -EINVAL;
+ }
+
+ edp_extract_edid_manufacturer(&ep->edid, rp->data);
+ edp_extract_edid_product(&ep->edid, rp->data);
+ edp_extract_edid_version(&ep->edid, rp->data);
+ edp_extract_edid_ext_block_cnt(&ep->edid, rp->data);
+ edp_extract_edid_video_support(&ep->edid, rp->data);
+ edp_extract_edid_feature(&ep->edid, rp->data);
+ edp_extract_edid_detailed_timing_description(&ep->edid, rp->data);
+
+ return 128;
+}
+
+static void edp_sink_capability_read(struct mdss_edp_drv_pdata *ep,
+ int len)
+{
+ char *bp;
+ char data;
+ struct dpcd_cap *cap;
+ struct edp_buf *rp;
+ int rlen;
+
+ rlen = edp_aux_read_buf(ep, 0, len, 0);
+ if (rlen <= 0) {
+ pr_err("%s: edp aux read failed\n", __func__);
+ return;
+ }
+ rp = &ep->rxp;
+ cap = &ep->dpcd;
+ bp = rp->data;
+
+ data = *bp++; /* byte 0 */
+ cap->major = (data >> 4) & 0x0f;
+ cap->minor = data & 0x0f;
+ if (--rlen <= 0)
+ return;
+ pr_debug("%s: version: %d.%d\n", __func__, cap->major, cap->minor);
+
+ data = *bp++; /* byte 1 */
+	/* 162, 270 and 540 MHz link symbol rate, NOT the bit rate */
+ cap->max_link_rate = data * 27;
+ if (--rlen <= 0)
+ return;
+ pr_debug("%s: link_rate=%d\n", __func__, cap->max_link_rate);
+
+ data = *bp++; /* byte 2 */
+ if (data & BIT(7))
+ cap->flags |= DPCD_ENHANCED_FRAME;
+ if (data & 0x40)
+ cap->flags |= DPCD_TPS3;
+ data &= 0x0f;
+ cap->max_lane_count = data;
+ if (--rlen <= 0)
+ return;
+ pr_debug("%s: lane_count=%d\n", __func__, cap->max_lane_count);
+
+ data = *bp++; /* byte 3 */
+ if (data & BIT(0)) {
+ cap->flags |= DPCD_MAX_DOWNSPREAD_0_5;
+ pr_debug("%s: max_downspread\n", __func__);
+ }
+
+ if (data & BIT(6)) {
+ cap->flags |= DPCD_NO_AUX_HANDSHAKE;
+ pr_debug("%s: NO Link Training\n", __func__);
+ }
+ if (--rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 4 */
+ cap->num_rx_port = (data & BIT(0)) + 1;
+ pr_debug("%s: rx_ports=%d", __func__, cap->num_rx_port);
+ if (--rlen <= 0)
+ return;
+
+ bp += 3; /* skip 5, 6 and 7 */
+ rlen -= 3;
+ if (rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 8 */
+ if (data & BIT(1)) {
+ cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
+ pr_debug("%s: edid presented\n", __func__);
+ }
+ if (--rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 9 */
+ cap->rx_port0_buf_size = (data + 1) * 32;
+ pr_debug("%s: lane_buf_size=%d", __func__, cap->rx_port0_buf_size);
+ if (--rlen <= 0)
+ return;
+
+ bp += 2; /* skip 10, 11 port1 capability */
+ rlen -= 2;
+ if (rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 12 */
+ cap->i2c_speed_ctrl = data;
+ if (cap->i2c_speed_ctrl > 0)
+ pr_debug("%s: i2c_rate=%d", __func__, cap->i2c_speed_ctrl);
+ if (--rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 13 */
+ cap->scrambler_reset = data & BIT(0);
+ pr_debug("%s: scrambler_reset=%d\n", __func__,
+ cap->scrambler_reset);
+
+ cap->enhanced_frame = data & BIT(1);
+ pr_debug("%s: enhanced_framing=%d\n", __func__,
+ cap->enhanced_frame);
+ if (--rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 14 */
+ if (data == 0)
+ cap->training_read_interval = 4000; /* us */
+ else
+ cap->training_read_interval = 4000 * data; /* us */
+ pr_debug("%s: training_interval=%d\n", __func__,
+ cap->training_read_interval);
+}
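
The DPCD parser above applies a few small conversions: byte 1 times 27 gives the link symbol clock in MHz (0x06, 0x0a and 0x14 map to 162, 270 and 540), the lane count is the low nibble of byte 2, the port 0 buffer size is (byte 9 + 1) * 32 bytes, and the training read interval is byte 14 times 4000 us with 0 meaning 4000. A standalone sketch, not part of the patch, using hypothetical raw values chosen to look like a 2-lane HBR panel:

/*
 * Standalone sketch: the small conversions the DPCD parser above applies.
 * The raw byte values are hypothetical examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned char max_link_rate_raw = 0x0a;		/* DPCD 0x001 */
	unsigned char max_lane_count_raw = 0x82;	/* DPCD 0x002 */
	unsigned char rx_buf_raw = 0x01;		/* DPCD 0x009 */
	unsigned char train_interval_raw = 0x00;	/* DPCD 0x00e */

	printf("max_link_rate   = %d\n", max_link_rate_raw * 27);	/* 270 */
	printf("max_lane_count  = %d\n", max_lane_count_raw & 0x0f);	/* 2 */
	printf("enhanced_frame  = %d\n", !!(max_lane_count_raw & 0x80));
	printf("rx buffer size  = %d bytes\n", (rx_buf_raw + 1) * 32);	/* 64 */
	printf("train interval  = %d us\n",
	       train_interval_raw ? train_interval_raw * 4000 : 4000);
	return 0;
}
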
+
+static void edp_link_status_read(struct mdss_edp_drv_pdata *ep, int len)
+{
+ char *bp;
+ char data;
+ struct dpcd_link_status *sp;
+ struct edp_buf *rp;
+ int rlen;
+
+ pr_debug("%s: len=%d", __func__, len);
+ /* skip byte 0x200 and 0x201 */
+ rlen = edp_aux_read_buf(ep, 0x202, len, 0);
+ if (rlen <= 0) {
+ pr_err("%s: edp aux read failed\n", __func__);
+ return;
+ }
+ rp = &ep->rxp;
+ bp = rp->data;
+ sp = &ep->link_status;
+
+ data = *bp++; /* byte 0x202 */
+ sp->lane_01_status = data; /* lane 0, 1 */
+ if (--rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 0x203 */
+ sp->lane_23_status = data; /* lane 2, 3 */
+ if (--rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 0x204 */
+ sp->interlane_align_done = (data & BIT(0));
+ sp->downstream_port_status_changed = (data & BIT(6));
+ sp->link_status_updated = (data & BIT(7));
+ if (--rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 0x205 */
+ sp->port_0_in_sync = (data & BIT(0));
+ sp->port_1_in_sync = (data & BIT(1));
+ if (--rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 0x206 */
+ sp->req_voltage_swing[0] = data & 0x03;
+ data >>= 2;
+ sp->req_pre_emphasis[0] = data & 0x03;
+ data >>= 2;
+ sp->req_voltage_swing[1] = data & 0x03;
+ data >>= 2;
+ sp->req_pre_emphasis[1] = data & 0x03;
+ if (--rlen <= 0)
+ return;
+
+ data = *bp++; /* byte 0x207 */
+ sp->req_voltage_swing[2] = data & 0x03;
+ data >>= 2;
+ sp->req_pre_emphasis[2] = data & 0x03;
+ data >>= 2;
+ sp->req_voltage_swing[3] = data & 0x03;
+ data >>= 2;
+ sp->req_pre_emphasis[3] = data & 0x03;
+}
+
+static int edp_cap_lane_rate_set(struct mdss_edp_drv_pdata *ep)
+{
+ char buf[4];
+ int len = 0;
+
+ pr_debug("%s: bw=%x lane=%d\n", __func__, ep->link_rate, ep->lane_cnt);
+ buf[0] = ep->link_rate;
+ buf[1] = ep->lane_cnt;
+ len = edp_aux_write_buf(ep, 0x100, buf, 2, 0);
+
+ return len;
+}
+
+static int edp_lane_set_write(struct mdss_edp_drv_pdata *ep, int voltage_level,
+ int pre_emphasis_level)
+{
+ int i;
+ char buf[4];
+
+ if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
+ voltage_level |= 0x04;
+
+ if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
+ pre_emphasis_level |= 0x04;
+
+ pre_emphasis_level <<= 3;
+
+ for (i = 0; i < 4; i++)
+ buf[i] = voltage_level | pre_emphasis_level;
+
+ pr_debug("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level);
+ return edp_aux_write_buf(ep, 0x103, buf, 4, 0);
+}
+
+static int edp_powerstate_write(struct mdss_edp_drv_pdata *ep,
+ char powerstate)
+{
+ pr_debug("%s: state=%d\n", __func__, powerstate);
+ return edp_aux_write_buf(ep, 0x600, &powerstate, 1, 0);
+}
+
+static int edp_train_pattern_set_write(struct mdss_edp_drv_pdata *ep,
+ int pattern)
+{
+ char buf[4];
+
+ pr_debug("%s: pattern=%x\n", __func__, pattern);
+ buf[0] = pattern;
+ return edp_aux_write_buf(ep, 0x102, buf, 1, 0);
+}
+
+static int edp_sink_clock_recovery_done(struct mdss_edp_drv_pdata *ep)
+{
+ u32 mask;
+ u32 data;
+
+ pr_debug("%s:\n", __func__);
+
+ if (ep->lane_cnt == 1) {
+ mask = 0x01; /* lane 0 */
+ data = ep->link_status.lane_01_status;
+ } else if (ep->lane_cnt == 2) {
+		mask = 0x011; /* lane 0, 1 */
+ data = ep->link_status.lane_01_status;
+ } else {
+		mask = 0x01111; /* lane 0, 1, 2, 3 */
+ data = ep->link_status.lane_23_status;
+ data <<= 8;
+ data |= ep->link_status.lane_01_status;
+ }
+
+ pr_debug("%s: data=%x mask=%x\n", __func__, data, mask);
+ data &= mask;
+ if (data == mask) /* all done */
+ return 1;
+
+ return 0;
+}
+
+static int edp_sink_channel_eq_done(struct mdss_edp_drv_pdata *ep)
+{
+ u32 mask;
+ u32 data;
+
+ pr_debug("%s:\n", __func__);
+
+ if (!ep->link_status.interlane_align_done) /* not align */
+ return 0;
+
+ if (ep->lane_cnt == 1) {
+ mask = 0x7;
+ data = ep->link_status.lane_01_status;
+ } else if (ep->lane_cnt == 2) {
+ mask = 0x77;
+ data = ep->link_status.lane_01_status;
+ } else {
+ mask = 0x7777;
+ data = ep->link_status.lane_23_status;
+ data <<= 8;
+ data |= ep->link_status.lane_01_status;
+ }
+
+ pr_debug("%s: data=%x mask=%x\n", __func__, data, mask);
+
+ data &= mask;
+ if (data == mask)/* all done */
+ return 1;
+
+ return 0;
+}
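
Both helpers above rely on the DPCD lane status layout: each lane owns one nibble of the combined 16-bit word (lanes 0 and 1 in the low byte, lanes 2 and 3 shifted up by 8), with bit 0 for clock recovery done, bit 1 for channel equalization done and bit 2 for symbol lock, which is where the repeated 0x1 and 0x7 nibble masks come from. A standalone sketch, not part of the patch, with made-up status bytes:

/*
 * Standalone sketch: why the two helpers above use masks built from
 * repeated 0x1 and 0x7 nibbles.  The status bytes here are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned char lane_01_status = 0x77;	/* lanes 0 and 1 fully done */
	unsigned char lane_23_status = 0x17;	/* lane 3 has only CR done */
	unsigned int data = (lane_23_status << 8) | lane_01_status;

	printf("clock recovery done: %s\n",
	       (data & 0x1111) == 0x1111 ? "yes" : "no");	/* yes */
	printf("channel eq done:     %s\n",
	       (data & 0x7777) == 0x7777 ? "yes" : "no");	/* no */
	return 0;
}
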
+
+void edp_sink_train_set_adjust(struct mdss_edp_drv_pdata *ep)
+{
+ int i;
+ int max = 0;
+
+
+ /* use the max level across lanes */
+ for (i = 0; i < ep->lane_cnt; i++) {
+ pr_debug("%s: lane=%d req_voltage_swing=%d",
+ __func__, i, ep->link_status.req_voltage_swing[i]);
+ if (max < ep->link_status.req_voltage_swing[i])
+ max = ep->link_status.req_voltage_swing[i];
+ }
+
+ ep->v_level = max;
+
+ /* use the max level across lanes */
+ max = 0;
+ for (i = 0; i < ep->lane_cnt; i++) {
+ pr_debug(" %s: lane=%d req_pre_emphasis=%d",
+ __func__, i, ep->link_status.req_pre_emphasis[i]);
+ if (max < ep->link_status.req_pre_emphasis[i])
+ max = ep->link_status.req_pre_emphasis[i];
+ }
+
+ ep->p_level = max;
+ pr_debug("%s: v_level=%d, p_level=%d", __func__,
+ ep->v_level, ep->p_level);
+}
+
+static void edp_host_train_set(struct mdss_edp_drv_pdata *ep, int train)
+{
+ int bit, cnt;
+ u32 data;
+
+
+ bit = 1;
+ bit <<= (train - 1);
+ pr_debug("%s: bit=%d train=%d\n", __func__, bit, train);
+ edp_write(ep->base + EDP_STATE_CTRL, bit);
+
+ bit = 8;
+ bit <<= (train - 1);
+ cnt = 10;
+ while (cnt--) {
+ data = edp_read(ep->base + EDP_MAINLINK_READY);
+ if (data & bit)
+ break;
+ }
+
+	if (!(data & bit))
+		pr_err("%s: set link_train=%d failed\n", __func__, train);
+}
+
+char vm_pre_emphasis[4][4] = {
+ {0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
+ {0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
+ {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
+ {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
+};
+
+/* voltage swing, 0.2v and 1.0v are not supported */
+char vm_voltage_swing[4][4] = {
+ {0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
+ {0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
+	{0x1A, 0x1E, 0xFF, 0xFF}, /* sw2, 0.8 v */
+	{0x1E, 0xFF, 0xFF, 0xFF}  /* sw3, 1.2 v, optional */
+};
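
The two tables are indexed as [v_level][p_level] by edp_voltage_pre_emphasise_set() below, and 0xFF marks a swing/pre-emphasis combination the PHY does not support, which the caller skips. A standalone sketch of that lookup, not part of the patch (the requested levels are arbitrary):

/*
 * Standalone sketch: how the two tables above are indexed.  The tables
 * are copied here only to keep the sketch self-contained; 0xFF marks an
 * unsupported swing/pre-emphasis combination.
 */
#include <stdio.h>

static const unsigned char vm_pre_emphasis[4][4] = {
	{0x03, 0x06, 0x09, 0x0C},
	{0x03, 0x06, 0x09, 0xFF},
	{0x03, 0x06, 0xFF, 0xFF},
	{0x03, 0xFF, 0xFF, 0xFF},
};

static const unsigned char vm_voltage_swing[4][4] = {
	{0x14, 0x18, 0x1A, 0x1E},
	{0x18, 0x1A, 0x1E, 0xFF},
	{0x1A, 0x1E, 0xFF, 0xFF},
	{0x1E, 0xFF, 0xFF, 0xFF},
};

int main(void)
{
	int v = 1, p = 2;	/* hypothetical requested levels */
	unsigned char pe = vm_pre_emphasis[v][p];
	unsigned char sw = vm_voltage_swing[v][p];

	if (pe != 0xFF && sw != 0xFF)
		printf("program CFG0=0x%02x CFG1=0x%02x\n", pe, sw);
	else
		printf("v=%d p=%d is not a supported combination\n", v, p);
	return 0;
}
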
+
+static void edp_voltage_pre_emphasise_set(struct mdss_edp_drv_pdata *ep)
+{
+ u32 value0 = 0;
+ u32 value1 = 0;
+
+ pr_debug("%s: v=%d p=%d\n", __func__, ep->v_level, ep->p_level);
+
+ value0 = vm_pre_emphasis[(int)(ep->v_level)][(int)(ep->p_level)];
+ value1 = vm_voltage_swing[(int)(ep->v_level)][(int)(ep->p_level)];
+
+ /* Configure host and panel only if both values are allowed */
+ if (value0 != 0xFF && value1 != 0xFF) {
+ edp_write(ep->base + EDP_PHY_EDPPHY_GLB_VM_CFG0, value0);
+ edp_write(ep->base + EDP_PHY_EDPPHY_GLB_VM_CFG1, value1);
+ pr_debug("%s: value0=0x%x value1=0x%x", __func__,
+ value0, value1);
+ edp_lane_set_write(ep, ep->v_level, ep->p_level);
+ }
+
+}
+
+static int edp_start_link_train_1(struct mdss_edp_drv_pdata *ep)
+{
+ int tries, old_v_level;
+ int ret = 0;
+
+ pr_debug("%s:", __func__);
+
+ edp_host_train_set(ep, 0x01); /* train_1 */
+ edp_voltage_pre_emphasise_set(ep);
+ edp_train_pattern_set_write(ep, 0x21); /* train_1 */
+
+ tries = 0;
+ old_v_level = ep->v_level;
+ while (1) {
+ usleep(ep->dpcd.training_read_interval);
+
+ edp_link_status_read(ep, 6);
+ if (edp_sink_clock_recovery_done(ep)) {
+ ret = 0;
+ break;
+ }
+
+ if (ep->v_level == DPCD_LINK_VOLTAGE_MAX) {
+ ret = -1;
+ break; /* quit */
+ }
+
+ if (old_v_level == ep->v_level) {
+ tries++;
+ if (tries >= 5) {
+ ret = -1;
+ break; /* quit */
+ }
+ } else {
+ tries = 0;
+ old_v_level = ep->v_level;
+ }
+
+ edp_sink_train_set_adjust(ep);
+ edp_voltage_pre_emphasise_set(ep);
+ }
+
+ return ret;
+}
+
+static int edp_start_link_train_2(struct mdss_edp_drv_pdata *ep)
+{
+ int tries;
+ int ret = 0;
+ char pattern;
+
+ pr_debug("%s:", __func__);
+
+ if (ep->dpcd.flags & DPCD_TPS3)
+ pattern = 0x03;
+ else
+ pattern = 0x02;
+
+ edp_host_train_set(ep, pattern); /* train_2 */
+ edp_voltage_pre_emphasise_set(ep);
+ edp_train_pattern_set_write(ep, pattern | 0x20);/* train_2 */
+
+ tries = 0;
+ while (1) {
+ usleep(ep->dpcd.training_read_interval);
+
+ edp_link_status_read(ep, 6);
+
+ if (edp_sink_channel_eq_done(ep)) {
+ ret = 0;
+ break;
+ }
+
+ tries++;
+ if (tries > 5) {
+ ret = -1;
+ break;
+ }
+
+ edp_sink_train_set_adjust(ep);
+ edp_voltage_pre_emphasise_set(ep);
+ }
+
+ return ret;
+}
+
+static int edp_link_rate_shift(struct mdss_edp_drv_pdata *ep)
+{
+ /* add calculation later */
+ return -EINVAL;
+}
+
+static void edp_clear_training_pattern(struct mdss_edp_drv_pdata *ep)
+{
+ pr_debug("%s:\n", __func__);
+ edp_write(ep->base + EDP_STATE_CTRL, 0);
+ edp_train_pattern_set_write(ep, 0);
+ usleep(ep->dpcd.training_read_interval);
+}
+
+static int edp_aux_link_train(struct mdss_edp_drv_pdata *ep)
+{
+ int ret = 0;
+
+ pr_debug("%s", __func__);
+ ret = edp_aux_chan_ready(ep);
+ if (ret == 0) {
+ pr_err("%s: LINK Train failed: aux chan NOT ready\n", __func__);
+ complete(&ep->train_comp);
+ return ret;
+ }
+
+ /* start with max rate and lane */
+ ep->lane_cnt = ep->dpcd.max_lane_count;
+ ep->link_rate = ep->dpcd.max_link_rate;
+ edp_write(ep->base + EDP_MAINLINK_CTRL, 0x1);
+
+train_start:
+ ep->v_level = 0; /* start from default level */
+ ep->p_level = 0;
+ edp_cap_lane_rate_set(ep);
+
+ edp_clear_training_pattern(ep);
+ usleep(ep->dpcd.training_read_interval);
+ edp_powerstate_write(ep, 1);
+
+ ret = edp_start_link_train_1(ep);
+ if (ret < 0) {
+ if (edp_link_rate_shift(ep) == 0) {
+ goto train_start;
+ } else {
+ pr_err("%s: Training 1 failed", __func__);
+ ret = -1;
+ goto clear;
+ }
+ }
+
+ pr_debug("%s: Training 1 completed successfully", __func__);
+
+ edp_clear_training_pattern(ep);
+ ret = edp_start_link_train_2(ep);
+ if (ret < 0) {
+ if (edp_link_rate_shift(ep) == 0) {
+ goto train_start;
+ } else {
+ pr_err("%s: Training 2 failed", __func__);
+ ret = -1;
+ goto clear;
+ }
+ }
+
+ pr_debug("%s: Training 2 completed successfully", __func__);
+
+clear:
+ edp_clear_training_pattern(ep);
+
+ complete(&ep->train_comp);
+ return ret;
+}
+
+void mdss_edp_dpcd_cap_read(struct mdss_edp_drv_pdata *ep)
+{
+ edp_sink_capability_read(ep, 16);
+}
+
+void mdss_edp_dpcd_status_read(struct mdss_edp_drv_pdata *ep)
+{
+ edp_link_status_read(ep, 6);
+}
+
+void mdss_edp_edid_read(struct mdss_edp_drv_pdata *ep, int block)
+{
+ edp_sink_edid_read(ep, block);
+}
+
+int mdss_edp_link_train(struct mdss_edp_drv_pdata *ep)
+{
+ return edp_aux_link_train(ep);
+}
+
+void mdss_edp_aux_init(struct mdss_edp_drv_pdata *ep)
+{
+ mutex_init(&ep->aux_mutex);
+ init_completion(&ep->aux_comp);
+ init_completion(&ep->train_comp);
+ complete(&ep->train_comp); /* make non block at first time */
+
+ edp_buf_init(&ep->txp, ep->txbuf, sizeof(ep->txbuf));
+ edp_buf_init(&ep->rxp, ep->rxbuf, sizeof(ep->rxbuf));
+}
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index ce6b805..ac87cbd 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -111,7 +111,7 @@
return ret;
}
- if (notify > NOTIFY_UPDATE_STOP)
+ if (notify > NOTIFY_UPDATE_POWER_OFF)
return -EINVAL;
if (notify == NOTIFY_UPDATE_START) {
@@ -119,12 +119,19 @@
ret = wait_for_completion_interruptible_timeout(
&mfd->update.comp, 4 * HZ);
to_user = mfd->update.value;
- } else {
+ } else if (notify == NOTIFY_UPDATE_STOP) {
INIT_COMPLETION(mfd->no_update.comp);
ret = wait_for_completion_interruptible_timeout(
&mfd->no_update.comp, 4 * HZ);
to_user = mfd->no_update.value;
+ } else {
+ if (mfd->panel_power_on) {
+ INIT_COMPLETION(mfd->power_off_comp);
+ ret = wait_for_completion_interruptible_timeout(
+ &mfd->power_off_comp, 1 * HZ);
+ }
}
+
if (ret == 0)
ret = -ETIMEDOUT;
else if (ret > 0)
@@ -649,6 +656,9 @@
if (!op_enable)
return -EPERM;
+ if (mfd->dcm_state == DCM_ENTER)
+ return -EPERM;
+
switch (blank_mode) {
case FB_BLANK_UNBLANK:
if (!mfd->panel_power_on && mfd->mdp.on_fnc) {
@@ -690,6 +700,7 @@
else
mdss_fb_release_fences(mfd);
mfd->op_enable = true;
+ complete(&mfd->power_off_comp);
}
break;
}
@@ -1019,6 +1030,7 @@
mfd->ref_cnt = 0;
mfd->panel_power_on = false;
+ mfd->dcm_state = DCM_UNINIT;
mdss_fb_parse_dt_split(mfd);
@@ -1037,6 +1049,7 @@
mfd->no_update.timer.data = (unsigned long)mfd;
init_completion(&mfd->update.comp);
init_completion(&mfd->no_update.comp);
+ init_completion(&mfd->power_off_comp);
init_completion(&mfd->commit_comp);
init_completion(&mfd->power_set_comp);
INIT_WORK(&mfd->commit_work, mdss_fb_commit_wq_handler);
@@ -1522,6 +1535,58 @@
return 0;
}
+int mdss_fb_dcm(struct msm_fb_data_type *mfd, int req_state)
+{
+ int ret = -EINVAL;
+
+ if (req_state == mfd->dcm_state) {
+ pr_warn("Already in correct DCM state");
+ ret = 0;
+ }
+
+ switch (req_state) {
+ case DCM_UNBLANK:
+ if (mfd->dcm_state == DCM_UNINIT &&
+ !mfd->panel_power_on && mfd->mdp.on_fnc) {
+ ret = mfd->mdp.on_fnc(mfd);
+ if (ret == 0) {
+ mfd->panel_power_on = true;
+ mfd->dcm_state = DCM_UNBLANK;
+ }
+ }
+ break;
+ case DCM_ENTER:
+ if (mfd->dcm_state == DCM_UNBLANK) {
+			/* Keep the unblank path available
+			 * only for DCM operation */
+ mfd->panel_power_on = false;
+ mfd->dcm_state = DCM_ENTER;
+ ret = 0;
+ }
+ break;
+ case DCM_EXIT:
+ if (mfd->dcm_state == DCM_ENTER) {
+ /* Release the unblank path for exit */
+ mfd->panel_power_on = true;
+ mfd->dcm_state = DCM_EXIT;
+ ret = 0;
+ }
+ break;
+ case DCM_BLANK:
+ if ((mfd->dcm_state == DCM_EXIT ||
+ mfd->dcm_state == DCM_UNBLANK) &&
+ mfd->panel_power_on && mfd->mdp.off_fnc) {
+ ret = mfd->mdp.off_fnc(mfd);
+ if (ret == 0) {
+ mfd->panel_power_on = false;
+ mfd->dcm_state = DCM_UNINIT;
+ }
+ }
+ break;
+ }
+ return ret;
+}
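
mdss_fb_dcm() above is a small state machine: UNINIT accepts only UNBLANK (with the panel off), UNBLANK accepts ENTER or BLANK, ENTER accepts EXIT, EXIT accepts BLANK, and a successful BLANK request drops the driver back to UNINIT rather than leaving a persistent BLANK state. A standalone model of those transitions, not part of the patch (panel power and the mdp on/off hooks are left out):

/*
 * Standalone sketch: a compact model of the transitions mdss_fb_dcm()
 * above accepts.  The table is derived from its switch statement; a
 * successful BLANK request returns the state to UNINIT.
 */
#include <stdio.h>

enum { DCM_UNINIT, DCM_UNBLANK, DCM_ENTER, DCM_EXIT, DCM_BLANK };

static const char * const name[] = {
	"UNINIT", "UNBLANK", "ENTER", "EXIT", "BLANK"
};

/* allowed[current][requested] */
static const int allowed[5][5] = {
	/*              UNINIT UNBLANK ENTER EXIT BLANK */
	/* UNINIT  */ {   0,     1,     0,    0,   0 },
	/* UNBLANK */ {   0,     0,     1,    0,   1 },
	/* ENTER   */ {   0,     0,     0,    1,   0 },
	/* EXIT    */ {   0,     0,     0,    0,   1 },
	/* BLANK   */ {   0,     0,     0,    0,   0 },
};

int main(void)
{
	int seq[] = { DCM_UNBLANK, DCM_ENTER, DCM_EXIT, DCM_BLANK };
	int state = DCM_UNINIT;
	unsigned int i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++) {
		printf("%-7s -> %-7s : %s\n", name[state], name[seq[i]],
		       allowed[state][seq[i]] ? "ok" : "rejected");
		if (allowed[state][seq[i]])
			state = (seq[i] == DCM_BLANK) ? DCM_UNINIT : seq[i];
	}
	return 0;
}
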
+
static int mdss_fb_cursor(struct fb_info *info, void __user *p)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 12f3d68..030fd67 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -124,6 +124,7 @@
struct disp_info_notify update;
struct disp_info_notify no_update;
+ struct completion power_off_comp;
struct msm_mdp_interface mdp;
@@ -145,6 +146,8 @@
void *msm_fb_backup;
struct completion power_set_comp;
u32 is_power_setting;
+
+ u32 dcm_state;
};
struct msm_fb_backup_type {
@@ -177,4 +180,5 @@
void mdss_fb_wait_for_fence(struct msm_fb_data_type *mfd);
void mdss_fb_signal_timeline(struct msm_fb_data_type *mfd);
int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp);
+int mdss_fb_dcm(struct msm_fb_data_type *mfd, int req_state);
#endif /* MDSS_FB_H */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_edid.c b/drivers/video/msm/mdss/mdss_hdmi_edid.c
index 9a5b20b..65dc19c 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_edid.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_edid.c
@@ -366,8 +366,9 @@
{
const u8 *b = NULL;
u32 ndx, check_sum, print_len;
- int block_size = 0x80;
+ int block_size;
int i, status;
+ int retry_cnt = 0;
struct hdmi_tx_ddc_data ddc_data;
b = edid_buf;
@@ -376,6 +377,9 @@
return -EINVAL;
}
+read_retry:
+ block_size = 0x80;
+ status = 0;
do {
DEV_DBG("EDID: reading block(%d) with block-size=%d\n",
block, block_size);
@@ -422,6 +426,10 @@
ndx, ndx+3,
b[ndx+0], b[ndx+1], b[ndx+2], b[ndx+3]);
status = -EPROTO;
+ if (retry_cnt++ < 3) {
+ DEV_DBG("Retrying reading EDID %d time\n", retry_cnt);
+ goto read_retry;
+ }
goto error;
}
diff --git a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
index 1f0efd3..bcd5f28 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
@@ -140,6 +140,48 @@
__func__, HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
} /* reset_hdcp_ddc_failures */
+static void hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct dss_io_data *io = NULL;
+ u32 hdcp_ddc_status, ddc_hw_status;
+ u32 ddc_xfer_done, ddc_xfer_req, ddc_hw_done;
+ u32 ddc_hw_not_ready;
+ u32 timeout_count;
+
+ if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ io = hdcp_ctrl->init_data.core_io;
+ if (!io->base) {
+		DEV_ERR("%s: core io not initialized\n", __func__);
+ return;
+ }
+
+ if (DSS_REG_R(io, HDMI_DDC_HW_STATUS) != 0) {
+ /* Wait to be clean on DDC HW engine */
+ timeout_count = 100;
+ do {
+ hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+ ddc_hw_status = DSS_REG_R(io, HDMI_DDC_HW_STATUS);
+			ddc_xfer_done = (hdcp_ddc_status & BIT(10));
+			ddc_xfer_req = (hdcp_ddc_status & BIT(4));
+			ddc_hw_done = (ddc_hw_status & BIT(3));
+			ddc_hw_not_ready = (!ddc_xfer_done ||
+				ddc_xfer_req || !ddc_hw_done);
+
+ DEV_DBG("%s: %s: timeout count(%d):ddc hw%sready\n",
+ __func__, HDCP_STATE_NAME, timeout_count,
+ ddc_hw_not_ready ? " not " : " ");
+ DEV_DBG("hdcp_ddc_status[0x%x], ddc_hw_status[0x%x]\n",
+ hdcp_ddc_status, ddc_hw_status);
+ if (ddc_hw_not_ready)
+ msleep(20);
+ } while (ddc_hw_not_ready && --timeout_count);
+ }
+} /* hdmi_hdcp_hw_ddc_clean */
+
static int hdmi_hdcp_authentication_part1(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
@@ -853,6 +895,7 @@
struct delayed_work *dw = to_delayed_work(work);
struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(dw,
struct hdmi_hdcp_ctrl, hdcp_auth_work);
+ struct dss_io_data *io;
if (!hdcp_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
@@ -865,6 +908,11 @@
return;
}
+ io = hdcp_ctrl->init_data.core_io;
+ /* Enabling Software DDC */
+ DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION , DSS_REG_R(io,
+ HDMI_DDC_ARBITRATION) & ~(BIT(4)));
+
rc = hdmi_hdcp_authentication_part1(hdcp_ctrl);
if (rc) {
DEV_DBG("%s: %s: HDCP Auth Part I failed\n", __func__,
@@ -878,6 +926,10 @@
HDCP_STATE_NAME);
goto error;
}
+	/* Disable software DDC before going into part3 to make sure
+	 * there is no arbitration between software and hardware for DDC */
+ DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION , DSS_REG_R(io,
+ HDMI_DDC_ARBITRATION) | (BIT(4)));
error:
/*
@@ -969,6 +1021,9 @@
DSS_REG_W(io, HDMI_HDCP_RESET, BIT(0));
+ /* Wait to be clean on DDC HW engine */
+ hdmi_hdcp_hw_ddc_clean(hdcp_ctrl);
+
/* Disable encryption and disable the HDCP block */
DSS_REG_W(io, HDMI_HDCP_CTRL, 0);
@@ -1036,6 +1091,9 @@
DSS_REG_W(io, HDMI_HDCP_RESET, BIT(0));
+ /* Wait to be clean on DDC HW engine */
+ hdmi_hdcp_hw_ddc_clean(hdcp_ctrl);
+
/* Disable encryption and disable the HDCP block */
DSS_REG_W(io, HDMI_HDCP_CTRL, 0);
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index e4a6b86..1fef395 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -866,13 +866,14 @@
static void hdmi_tx_hpd_int_work(struct work_struct *work)
{
struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ struct dss_io_data *io;
hdmi_ctrl = container_of(work, struct hdmi_tx_ctrl, hpd_int_work);
if (!hdmi_ctrl || !hdmi_ctrl->hpd_initialized) {
DEV_DBG("%s: invalid input\n", __func__);
return;
}
-
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
DEV_DBG("%s: Got HPD interrupt\n", __func__);
if (hdmi_ctrl->hpd_state) {
@@ -880,6 +881,9 @@
DEV_ERR("%s: Failed to enable ddc power\n", __func__);
return;
}
+ /* Enable SW DDC before EDID read */
+ DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION ,
+ DSS_REG_R(io, HDMI_DDC_ARBITRATION) & ~(BIT(4)));
hdmi_tx_read_sink_info(hdmi_ctrl);
hdmi_tx_send_cable_notification(hdmi_ctrl, 1);
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 950fd27..aba77e3 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -579,6 +579,10 @@
struct mdss_mdp_ctl *mdss_mdp_ctl_mixer_switch(struct mdss_mdp_ctl *ctl,
u32 return_type);
+int mdss_mdp_wb_set_format(struct msm_fb_data_type *mfd, int dst_format);
+int mdss_mdp_wb_get_format(struct msm_fb_data_type *mfd,
+ struct mdp_mixer_cfg *mixer_cfg);
+
#define mfd_to_mdp5_data(mfd) (mfd->mdp.private1)
#define mfd_to_mdata(mfd) (((struct mdss_overlay_private *)\
(mfd->mdp.private1))->mdata)
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index b5a5383..f44ebaf 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -1337,7 +1337,7 @@
* writeback block
*/
head[len] = head[len - 1];
- head[len].num = -1;
+ head[len].num = head[len - 1].num;
}
mdata->ctl_off = head;
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index d0c1818..cb4c1f2 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -224,7 +224,6 @@
mdss_mdp_ctl_intf_event
(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL, (void *)0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
- complete(&ctx->stop_comp);
}
mutex_unlock(&ctx->clk_mtx);
}
@@ -255,6 +254,7 @@
if (ctx->rdptr_enabled == 0) {
mdss_mdp_irq_disable_nosync
(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num);
+ complete(&ctx->stop_comp);
schedule_work(&ctx->clk_work);
}
@@ -456,7 +456,7 @@
struct mdss_mdp_cmd_ctx *ctx;
unsigned long flags;
int need_wait = 0;
- int ret;
+ int ret = 0;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
if (!ctx) {
@@ -474,13 +474,15 @@
}
spin_unlock_irqrestore(&ctx->clk_lock, flags);
- if (need_wait) {
+ if (need_wait)
if (wait_for_completion_timeout(&ctx->stop_comp, STOP_TIMEOUT)
- <= 0) {
+ <= 0)
WARN(1, "stop cmd time out\n");
- mdss_mdp_cmd_clk_off(ctx);
- }
- }
+
+ if (cancel_work_sync(&ctx->clk_work))
+ pr_debug("no pending clk work\n");
+
+ mdss_mdp_cmd_clk_off(ctx);
ctx->panel_on = 0;
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 8aee5d6..4032b91 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -1601,6 +1601,13 @@
if (ret)
return ret;
+	/* Support only MDP register read/write and
+	 * exit_dcm in DCM state */
+ if (mfd->dcm_state == DCM_ENTER &&
+ (mdp_pp.op != mdp_op_calib_buffer &&
+ mdp_pp.op != mdp_op_calib_dcm_state))
+ return -EPERM;
+
switch (mdp_pp.op) {
case mdp_op_pa_cfg:
ret = mdss_mdp_pa_config(mdp5_data->ctl,
@@ -1681,6 +1688,9 @@
(struct mdp_calib_config_buffer *)
&mdp_pp.data.calib_buffer, ©back);
break;
+ case mdp_op_calib_dcm_state:
+ ret = mdss_fb_dcm(mfd, mdp_pp.data.calib_dcm.dcm_state);
+ break;
default:
pr_err("Unsupported request to MDP_PP IOCTL. %d = op\n",
mdp_pp.op);
@@ -1757,6 +1767,10 @@
return -EPERM;
ret = mdss_misr_crc_set(mdata, &metadata->data.misr_request);
break;
+ case metadata_op_wb_format:
+ ret = mdss_mdp_wb_set_format(mfd,
+ metadata->data.mixer_cfg.writeback_format);
+ break;
default:
pr_warn("unsupported request to MDP META IOCTL\n");
ret = -EINVAL;
@@ -1798,6 +1812,9 @@
return -EPERM;
ret = mdss_misr_crc_get(mdata, &metadata->data.misr_request);
break;
+ case metadata_op_wb_format:
+ ret = mdss_mdp_wb_get_format(mfd, &metadata->data.mixer_cfg);
+ break;
default:
pr_warn("Unsupported request to MDP META IOCTL.\n");
ret = -EINVAL;
@@ -1979,7 +1996,8 @@
if (!mfd->panel_info->cont_splash_enabled) {
rc = mdss_mdp_overlay_start(mfd);
- if (!IS_ERR_VALUE(rc) && (mfd->panel_info->type != DTV_PANEL))
+ if (!IS_ERR_VALUE(rc) && (mfd->panel_info->type != DTV_PANEL) &&
+ (mfd->panel_info->type != WRITEBACK_PANEL))
rc = mdss_mdp_overlay_kickoff(mfd);
} else {
rc = mdss_mdp_ctl_setup(mdp5_data->ctl);
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 624046d..3f75053 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -162,6 +162,9 @@
int i;
int rc = 0, rot_mode = 0;
u32 nlines;
+ u16 width;
+
+ width = pipe->src.w >> pipe->horz_deci;
if (pipe->bwc_mode) {
rc = mdss_mdp_get_rau_strides(pipe->src.w, pipe->src.h,
@@ -172,11 +175,11 @@
ps.ystride[0], ps.ystride[1]);
} else if (mdata->has_decimation && pipe->src_fmt->is_yuv) {
ps.num_planes = 2;
- ps.ystride[0] = pipe->src.w >> pipe->horz_deci;
+ ps.ystride[0] = width;
ps.ystride[1] = ps.ystride[0];
} else {
rc = mdss_mdp_get_plane_sizes(pipe->src_fmt->format,
- pipe->src.w, pipe->src.h, &ps, 0);
+ width, pipe->src.h, &ps, 0);
if (rc)
return rc;
@@ -184,7 +187,7 @@
rot_mode = 1;
else if (ps.num_planes == 1)
ps.ystride[0] = MAX_BPP *
- max(pipe->mixer->width, pipe->src.w);
+ max(pipe->mixer->width, width);
}
nlines = pipe->bwc_mode ? 1 : 2;
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index ff54067..0c74137 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -577,6 +577,86 @@
}
}
+int mdss_mdp_wb_get_format(struct msm_fb_data_type *mfd,
+ struct mdp_mixer_cfg *mixer_cfg)
+{
+ int dst_format;
+ struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+ if (!ctl) {
+ pr_err("No panel data!\n");
+ return -EINVAL;
+ }
+
+ switch (ctl->dst_format) {
+ case MDP_RGB_888:
+ dst_format = WB_FORMAT_RGB_888;
+ break;
+ case MDP_RGB_565:
+ dst_format = WB_FORMAT_RGB_565;
+ break;
+ case MDP_XRGB_8888:
+ dst_format = WB_FORMAT_xRGB_8888;
+ break;
+ case MDP_ARGB_8888:
+ dst_format = WB_FORMAT_ARGB_8888;
+ break;
+ case MDP_BGRA_8888:
+ dst_format = WB_FORMAT_BGRA_8888;
+ break;
+ case MDP_BGRX_8888:
+ dst_format = WB_FORMAT_BGRX_8888;
+ break;
+ case MDP_Y_CBCR_H2V2_VENUS:
+ dst_format = WB_FORMAT_NV12;
+ break;
+ default:
+ return -EINVAL;
+ }
+ mixer_cfg->writeback_format = dst_format;
+ return 0;
+}
+
+int mdss_mdp_wb_set_format(struct msm_fb_data_type *mfd, int dst_format)
+{
+ struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+ if (!ctl) {
+ pr_err("No panel data!\n");
+ return -EINVAL;
+ }
+
+ switch (dst_format) {
+ case WB_FORMAT_RGB_888:
+ ctl->dst_format = MDP_RGB_888;
+ break;
+ case WB_FORMAT_RGB_565:
+ ctl->dst_format = MDP_RGB_565;
+ break;
+ case WB_FORMAT_xRGB_8888:
+ ctl->dst_format = MDP_XRGB_8888;
+ break;
+ case WB_FORMAT_ARGB_8888:
+ ctl->dst_format = MDP_ARGB_8888;
+ break;
+ case WB_FORMAT_BGRA_8888:
+ ctl->dst_format = MDP_BGRA_8888;
+ break;
+ case WB_FORMAT_BGRX_8888:
+ ctl->dst_format = MDP_BGRX_8888;
+ break;
+ case WB_FORMAT_NV12:
+ ctl->dst_format = MDP_Y_CBCR_H2V2_VENUS;
+ break;
+ default:
+ pr_err("wfd format not supported\n");
+ return -EINVAL;
+ }
+
+ pr_debug("wfd format %d\n", ctl->dst_format);
+ return 0;
+}
+
int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd,
void *arg)
{
diff --git a/drivers/video/msm/mdss/mhl_msc.c b/drivers/video/msm/mdss/mhl_msc.c
index 15811bb..e7cd1be 100644
--- a/drivers/video/msm/mdss/mhl_msc.c
+++ b/drivers/video/msm/mdss/mhl_msc.c
@@ -74,6 +74,22 @@
}
}
+static bool mhl_qualify_path_enable(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ int rc = false;
+
+ if (!mhl_ctrl)
+ return rc;
+
+ if (mhl_ctrl->tmds_en_state ||
+ /* Identify sink with non-standard INT STAT SIZE */
+ (mhl_ctrl->devcap[DEVCAP_OFFSET_MHL_VERSION] == 0x10 &&
+ mhl_ctrl->devcap[DEVCAP_OFFSET_INT_STAT_SIZE] == 0x44))
+ rc = true;
+
+ return rc;
+}
+
void mhl_register_msc(struct mhl_tx_ctrl *ctrl)
{
if (ctrl)
@@ -224,12 +240,16 @@
case MHL_WRITE_STAT:
if (req->offset == MHL_STATUS_REG_LINK_MODE) {
if (req->payload.data[0]
- & MHL_STATUS_PATH_ENABLED)
+ & MHL_STATUS_PATH_ENABLED) {
/* Enable TMDS output */
mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
- else
+ if (mhl_ctrl->devcap_state == MHL_DEVCAP_ALL)
+ mhl_drive_hpd(mhl_ctrl, HPD_UP);
+ } else {
/* Disable TMDS output */
mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
+ mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+ }
}
break;
case MHL_READ_DEVCAP:
@@ -245,8 +265,14 @@
pr_debug("%s: devcap pow bit unset\n",
__func__);
break;
+ case DEVCAP_OFFSET_RESERVED:
+ mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+ mhl_drive_hpd(mhl_ctrl, HPD_UP);
+ break;
case DEVCAP_OFFSET_MHL_VERSION:
case DEVCAP_OFFSET_INT_STAT_SIZE:
+ if (mhl_qualify_path_enable(mhl_ctrl))
+ mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
break;
}
break;
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
index 82b56e3..c66d50d 100644
--- a/drivers/video/msm/mdss/mhl_sii8334.c
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -794,11 +794,13 @@
void mhl_tmds_ctrl(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
{
struct i2c_client *client = mhl_ctrl->i2c_handle;
+
if (on) {
MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
- mhl_drive_hpd(mhl_ctrl, HPD_UP);
+ mhl_ctrl->tmds_en_state = true;
} else {
MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
+ mhl_ctrl->tmds_en_state = false;
}
}
diff --git a/drivers/video/msm/mdss/msm_mdss_io_8974.c b/drivers/video/msm/mdss/msm_mdss_io_8974.c
index 6c70cc4..7b89eff 100644
--- a/drivers/video/msm/mdss/msm_mdss_io_8974.c
+++ b/drivers/video/msm/mdss/msm_mdss_io_8974.c
@@ -209,61 +209,136 @@
clk_disable_unprepare(ctrl_pdata->ahb_clk);
}
-void mdss_dsi_prepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+static int mdss_dsi_clk_prepare(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
- clk_prepare(ctrl_pdata->byte_clk);
- clk_prepare(ctrl_pdata->esc_clk);
- clk_prepare(ctrl_pdata->pixel_clk);
-}
+ int rc = 0;
-void mdss_dsi_unprepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
-{
- clk_unprepare(ctrl_pdata->esc_clk);
- clk_unprepare(ctrl_pdata->pixel_clk);
+ rc = clk_prepare(ctrl_pdata->esc_clk);
+ if (rc) {
+ pr_err("%s: Failed to prepare dsi esc clk\n", __func__);
+ goto esc_clk_err;
+ }
+
+ rc = clk_prepare(ctrl_pdata->byte_clk);
+ if (rc) {
+ pr_err("%s: Failed to prepare dsi byte clk\n", __func__);
+ goto byte_clk_err;
+ }
+
+ rc = clk_prepare(ctrl_pdata->pixel_clk);
+ if (rc) {
+ pr_err("%s: Failed to prepare dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ return rc;
+
+pixel_clk_err:
clk_unprepare(ctrl_pdata->byte_clk);
+byte_clk_err:
+ clk_unprepare(ctrl_pdata->esc_clk);
+esc_clk_err:
+ return rc;
}
-void mdss_dsi_clk_enable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+static void mdss_dsi_clk_unprepare(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
- u32 esc_clk_rate = 19200000;
-
if (!ctrl_pdata) {
pr_err("%s: Invalid input data\n", __func__);
return;
}
- if (ctrl_pdata->mdss_dsi_clk_on) {
- pr_info("%s: mdss_dsi_clks already ON\n", __func__);
- return;
+ clk_unprepare(ctrl_pdata->pixel_clk);
+ clk_unprepare(ctrl_pdata->byte_clk);
+ clk_unprepare(ctrl_pdata->esc_clk);
+}
+
+static int mdss_dsi_clk_set_rate(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ u32 esc_clk_rate = 19200000;
+ int rc = 0;
+
+ if (!ctrl_pdata) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
}
if (!ctrl_pdata->panel_data.panel_info.cont_splash_enabled) {
pr_debug("%s: Set clk rates: pclk=%d, byteclk=%d escclk=%d\n",
__func__, ctrl_pdata->pclk_rate,
ctrl_pdata->byte_clk_rate, esc_clk_rate);
- if (clk_set_rate(ctrl_pdata->esc_clk, esc_clk_rate) < 0)
+ rc = clk_set_rate(ctrl_pdata->esc_clk, esc_clk_rate);
+ if (rc) {
pr_err("%s: dsi_esc_clk - clk_set_rate failed\n",
__func__);
+ goto error;
+ }
- if (clk_set_rate(ctrl_pdata->byte_clk,
- ctrl_pdata->byte_clk_rate) < 0)
+ rc = clk_set_rate(ctrl_pdata->byte_clk,
+ ctrl_pdata->byte_clk_rate);
+ if (rc) {
pr_err("%s: dsi_byte_clk - clk_set_rate failed\n",
__func__);
+ goto error;
+ }
- if (clk_set_rate(ctrl_pdata->pixel_clk,
- ctrl_pdata->pclk_rate) < 0)
+ rc = clk_set_rate(ctrl_pdata->pixel_clk, ctrl_pdata->pclk_rate);
+ if (rc) {
pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n",
__func__);
+ goto error;
+ }
}
- clk_enable(ctrl_pdata->esc_clk);
- clk_enable(ctrl_pdata->byte_clk);
- clk_enable(ctrl_pdata->pixel_clk);
-
- ctrl_pdata->mdss_dsi_clk_on = 1;
+error:
+ return rc;
}
-void mdss_dsi_clk_disable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+static int mdss_dsi_clk_enable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ int rc = 0;
+
+ if (!ctrl_pdata) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ctrl_pdata->mdss_dsi_clk_on) {
+ pr_info("%s: mdss_dsi_clks already ON\n", __func__);
+ return 0;
+ }
+
+ rc = clk_enable(ctrl_pdata->esc_clk);
+ if (rc) {
+ pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+ goto esc_clk_err;
+ }
+
+ rc = clk_enable(ctrl_pdata->byte_clk);
+ if (rc) {
+ pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+ goto byte_clk_err;
+ }
+
+ rc = clk_enable(ctrl_pdata->pixel_clk);
+ if (rc) {
+ pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ ctrl_pdata->mdss_dsi_clk_on = 1;
+
+ return rc;
+
+pixel_clk_err:
+ clk_disable(ctrl_pdata->byte_clk);
+byte_clk_err:
+ clk_disable(ctrl_pdata->esc_clk);
+esc_clk_err:
+ return rc;
+}
+
+static void mdss_dsi_clk_disable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
if (!ctrl_pdata) {
pr_err("%s: Invalid input data\n", __func__);
@@ -275,13 +350,71 @@
return;
}
+ clk_disable(ctrl_pdata->esc_clk);
clk_disable(ctrl_pdata->pixel_clk);
clk_disable(ctrl_pdata->byte_clk);
- clk_disable(ctrl_pdata->esc_clk);
ctrl_pdata->mdss_dsi_clk_on = 0;
}
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
+{
+ int rc = 0;
+
+ mutex_lock(&ctrl->mutex);
+ if (enable) {
+ if (ctrl->clk_cnt == 0) {
+ rc = mdss_dsi_enable_bus_clocks(ctrl);
+ if (rc) {
+ pr_err("%s: failed to enable bus clks. rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+
+ rc = mdss_dsi_clk_set_rate(ctrl);
+ if (rc) {
+ pr_err("%s: failed to set clk rates. rc=%d\n",
+ __func__, rc);
+ mdss_dsi_disable_bus_clocks(ctrl);
+ goto error;
+ }
+
+ rc = mdss_dsi_clk_prepare(ctrl);
+ if (rc) {
+ pr_err("%s: failed to prepare clks. rc=%d\n",
+ __func__, rc);
+ mdss_dsi_disable_bus_clocks(ctrl);
+ goto error;
+ }
+
+ rc = mdss_dsi_clk_enable(ctrl);
+ if (rc) {
+ pr_err("%s: failed to enable clks. rc=%d\n",
+ __func__, rc);
+ mdss_dsi_clk_unprepare(ctrl);
+ mdss_dsi_disable_bus_clocks(ctrl);
+ goto error;
+ }
+ }
+ ctrl->clk_cnt++;
+ } else {
+ if (ctrl->clk_cnt) {
+ ctrl->clk_cnt--;
+ if (ctrl->clk_cnt == 0) {
+ mdss_dsi_clk_disable(ctrl);
+ mdss_dsi_clk_unprepare(ctrl);
+ mdss_dsi_disable_bus_clocks(ctrl);
+ }
+ }
+ }
+ pr_debug("%s: ctrl ndx=%d enabled=%d clk_cnt=%d\n",
+ __func__, ctrl->ndx, enable, ctrl->clk_cnt);
+
+error:
+ mutex_unlock(&ctrl->mutex);
+ return rc;
+}
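
mdss_dsi_clk_ctrl() above turns the individual prepare/enable helpers into a reference-counted interface: the clocks are physically switched only on the 0 to 1 and 1 to 0 transitions of clk_cnt, so nested callers simply have to balance their enable and disable calls. A standalone model of just the counting, not part of the patch (the bus, rate, prepare and enable steps are replaced with printfs and the mutex is dropped):

/*
 * Standalone sketch: models the reference counting mdss_dsi_clk_ctrl()
 * above adds.  Only the counter logic is real; error handling is omitted.
 */
#include <stdio.h>

static int clk_cnt;

static void dsi_clk_ctrl(int enable)
{
	if (enable) {
		if (clk_cnt == 0)
			printf("physically enabling DSI clocks\n");
		clk_cnt++;
	} else if (clk_cnt) {
		clk_cnt--;
		if (clk_cnt == 0)
			printf("physically disabling DSI clocks\n");
	}
	printf("enable=%d clk_cnt=%d\n", enable, clk_cnt);
}

int main(void)
{
	dsi_clk_ctrl(1);	/* first user: clocks really turn on */
	dsi_clk_ctrl(1);	/* nested user: only the count moves */
	dsi_clk_ctrl(0);	/* still one user left */
	dsi_clk_ctrl(0);	/* last user: clocks really turn off */
	return 0;
}
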
+
void mdss_dsi_phy_sw_reset(unsigned char *ctrl_base)
{
/* start phy sw reset */
@@ -457,30 +590,84 @@
}
-/* EDP phy configuration settings */
-void mdss_edp_phy_sw_reset(unsigned char *edp_base)
+void mdss_edp_timing_engine_ctrl(unsigned char *edp_base, int enable)
{
- /* phy sw reset */
- edp_write(edp_base + 0x74, 0x100); /* EDP_PHY_CTRL */
- wmb();
- usleep(1);
- edp_write(edp_base + 0x74, 0x000); /* EDP_PHY_CTRL */
- wmb();
- usleep(1);
-
- /* phy PLL sw reset */
- edp_write(edp_base + 0x74, 0x001); /* EDP_PHY_CTRL */
- wmb();
- usleep(1);
- edp_write(edp_base + 0x74, 0x000); /* EDP_PHY_CTRL */
- wmb();
- usleep(1);
+	/* should be the last register to program */
+ edp_write(edp_base + 0x94, enable); /* EDP_TIMING_ENGINE_EN */
}
-void mdss_edp_hw_powerup(unsigned char *edp_base, int enable)
+void mdss_edp_mainlink_ctrl(unsigned char *edp_base, int enable)
{
- int ret = 0;
+ edp_write(edp_base + 0x04, enable); /* EDP_MAINLINK_CTRL */
+}
+void mdss_edp_mainlink_reset(unsigned char *edp_base)
+{
+ edp_write(edp_base + 0x04, 0x02); /* EDP_MAINLINK_CTRL */
+ usleep(1000);
+ edp_write(edp_base + 0x04, 0); /* EDP_MAINLINK_CTRL */
+}
+
+void mdss_edp_aux_reset(unsigned char *edp_base)
+{
+	/* reset AUX */
+ edp_write(edp_base + 0x300, BIT(1)); /* EDP_AUX_CTRL */
+ usleep(1000);
+ edp_write(edp_base + 0x300, 0); /* EDP_AUX_CTRL */
+}
+
+void mdss_edp_aux_ctrl(unsigned char *edp_base, int enable)
+{
+ u32 data;
+
+ data = edp_read(edp_base + 0x300);
+ if (enable)
+ data |= 0x01;
+ else
+		data &= ~0x01;
+ edp_write(edp_base + 0x300, data); /* EDP_AUX_CTRL */
+}
+
+void mdss_edp_phy_pll_reset(unsigned char *edp_base)
+{
+ /* EDP_PHY_CTRL */
+ edp_write(edp_base + 0x74, 0x005); /* bit 0, 2 */
+ usleep(1000);
+ edp_write(edp_base + 0x74, 0x000); /* EDP_PHY_CTRL */
+}
+
+int mdss_edp_phy_pll_ready(unsigned char *edp_base)
+{
+ int cnt;
+ u32 status;
+
+ cnt = 10;
+ while (cnt--) {
+ status = edp_read(edp_base + 0x6c0);
+ if (status & 0x01)
+ break;
+ usleep(100);
+ }
+
+	if (status & 0x01)
+		return 1;
+
+	pr_err("%s: PLL NOT ready\n", __func__);
+	return 0;
+}
+
+int mdss_edp_phy_ready(unsigned char *edp_base)
+{
+ u32 status;
+
+ status = edp_read(edp_base + 0x598);
+ status &= 0x01;
+
+ return status;
+}
+
+void mdss_edp_phy_powerup(unsigned char *edp_base, int enable)
+{
if (enable) {
/* EDP_PHY_EDPPHY_GLB_PD_CTL */
edp_write(edp_base + 0x52c, 0x3f);
@@ -488,9 +675,6 @@
edp_write(edp_base + 0x528, 0x1);
/* EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG */
edp_write(edp_base + 0x620, 0xf);
- /* EDP_AUX_CTRL */
- ret = edp_read(edp_base + 0x300);
- edp_write(edp_base + 0x300, ret | 0x1);
} else {
/* EDP_PHY_EDPPHY_GLB_PD_CTL */
edp_write(edp_base + 0x52c, 0xc0);
@@ -527,7 +711,7 @@
edp_write(edp_base + 0x620, 0x7);
edp_write(edp_base + 0x620, 0xf);
- } else if (rate == 138500000) {
+ } else if (rate == 138530000) {
edp_write(edp_base + 0x664, 0x5); /* UNIPHY_PLL_LKDET_CFG2 */
edp_write(edp_base + 0x600, 0x1); /* UNIPHY_PLL_REFCLK_CFG */
edp_write(edp_base + 0x638, 0x36); /* UNIPHY_PLL_SDM_CFG0 */
@@ -558,7 +742,7 @@
edp_write(edp_base + 0x620, 0x7); /* UNIPHY_PLL_GLB_CFG */
edp_write(edp_base + 0x620, 0xf); /* UNIPHY_PLL_GLB_CFG */
} else {
- pr_err("%s: Unknown configuration rate\n", __func__);
+ pr_err("%s: rate=%d is NOT supported\n", __func__, rate);
}
}
@@ -598,22 +782,20 @@
}
}
-void mdss_edp_enable_lane_bist(unsigned char *edp_base, int lane, int enable)
+void mdss_edp_lane_power_ctrl(unsigned char *edp_base, int max_lane, int up)
{
- unsigned char *addr_ln_bist_cfg, *addr_ln_pd_ctrl;
+ int i, off;
+ u32 data;
+
+ if (up)
+ data = 0; /* power up */
+ else
+ data = 0x7; /* power down */
/* EDP_PHY_EDPPHY_LNn_PD_CTL */
- addr_ln_pd_ctrl = edp_base + 0x404 + (0x40 * lane);
- /* EDP_PHY_EDPPHY_LNn_BIST_CFG0 */
- addr_ln_bist_cfg = edp_base + 0x408 + (0x40 * lane);
-
- if (enable) {
- edp_write(addr_ln_pd_ctrl, 0x0);
- edp_write(addr_ln_bist_cfg, 0x10);
-
- } else {
- edp_write(addr_ln_pd_ctrl, 0xf);
- edp_write(addr_ln_bist_cfg, 0x10);
+ for (i = 0; i < max_lane; i++) {
+ off = 0x40 * i;
+ edp_write(edp_base + 0x404 + off , data);
}
}
@@ -668,12 +850,47 @@
return -EPERM;
}
-
-void mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv)
+int mdss_edp_aux_clk_enable(struct mdss_edp_drv_pdata *edp_drv)
{
+ int ret;
+
+ if (clk_set_rate(edp_drv->aux_clk, 19200000) < 0)
+ pr_err("%s: aux_clk - clk_set_rate failed\n",
+ __func__);
+
+ ret = clk_enable(edp_drv->aux_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable aux clk\n", __func__);
+ goto c2;
+ }
+
+ ret = clk_enable(edp_drv->ahb_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable ahb clk\n", __func__);
+ goto c1;
+ }
+
+ return 0;
+c1:
+ clk_disable(edp_drv->aux_clk);
+c2:
+ return ret;
+
+}
+
+void mdss_edp_aux_clk_disable(struct mdss_edp_drv_pdata *edp_drv)
+{
+ clk_disable(edp_drv->aux_clk);
+ clk_disable(edp_drv->ahb_clk);
+}
+
+int mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret;
+
if (edp_drv->clk_on) {
pr_info("%s: edp clks are already ON\n", __func__);
- return;
+ return 0;
}
if (clk_set_rate(edp_drv->aux_clk, 19200000) < 0)
@@ -688,12 +905,39 @@
pr_err("%s: link_clk - clk_set_rate failed\n",
__func__);
- clk_enable(edp_drv->aux_clk);
- clk_enable(edp_drv->pixel_clk);
- clk_enable(edp_drv->ahb_clk);
- clk_enable(edp_drv->link_clk);
+ ret = clk_enable(edp_drv->aux_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable aux clk\n", __func__);
+ goto c4;
+ }
+ ret = clk_enable(edp_drv->pixel_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable pixel clk\n", __func__);
+ goto c3;
+ }
+ ret = clk_enable(edp_drv->ahb_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable ahb clk\n", __func__);
+ goto c2;
+ }
+ ret = clk_enable(edp_drv->link_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable link clk\n", __func__);
+ goto c1;
+ }
edp_drv->clk_on = 1;
+
+ return 0;
+
+c1:
+ clk_disable(edp_drv->ahb_clk);
+c2:
+ clk_disable(edp_drv->pixel_clk);
+c3:
+ clk_disable(edp_drv->aux_clk);
+c4:
+ return ret;
}
void mdss_edp_clk_disable(struct mdss_edp_drv_pdata *edp_drv)
@@ -711,12 +955,69 @@
edp_drv->clk_on = 0;
}
-void mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
+int mdss_edp_prepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv)
{
- clk_prepare(edp_drv->aux_clk);
- clk_prepare(edp_drv->pixel_clk);
- clk_prepare(edp_drv->ahb_clk);
- clk_prepare(edp_drv->link_clk);
+ int ret;
+
+ ret = clk_prepare(edp_drv->aux_clk);
+ if (ret) {
+ pr_err("%s: Failed to prepare aux clk\n", __func__);
+ goto c2;
+ }
+ ret = clk_prepare(edp_drv->ahb_clk);
+ if (ret) {
+ pr_err("%s: Failed to prepare ahb clk\n", __func__);
+ goto c1;
+ }
+
+ return 0;
+c1:
+ clk_unprepare(edp_drv->aux_clk);
+c2:
+ return ret;
+
+}
+
+void mdss_edp_unprepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+ clk_unprepare(edp_drv->aux_clk);
+ clk_unprepare(edp_drv->ahb_clk);
+}
+
+int mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret;
+
+ ret = clk_prepare(edp_drv->aux_clk);
+ if (ret) {
+ pr_err("%s: Failed to prepare aux clk\n", __func__);
+ goto c4;
+ }
+ ret = clk_prepare(edp_drv->pixel_clk);
+ if (ret) {
+ pr_err("%s: Failed to prepare pixel clk\n", __func__);
+ goto c3;
+ }
+ ret = clk_prepare(edp_drv->ahb_clk);
+ if (ret) {
+ pr_err("%s: Failed to prepare ahb clk\n", __func__);
+ goto c2;
+ }
+ ret = clk_prepare(edp_drv->link_clk);
+ if (ret) {
+ pr_err("%s: Failed to prepare link clk\n", __func__);
+ goto c1;
+ }
+
+ return 0;
+c1:
+ clk_unprepare(edp_drv->ahb_clk);
+c2:
+ clk_unprepare(edp_drv->pixel_clk);
+c3:
+ clk_unprepare(edp_drv->aux_clk);
+c4:
+ return ret;
}
void mdss_edp_unprepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
@@ -775,14 +1076,29 @@
mdss_edp_enable_pixel_clk(edp_base, mmss_cc_base, 0);
}
-void mdss_edp_phy_misc_cfg(unsigned char *edp_base)
+void mdss_edp_clock_synchrous(unsigned char *edp_base, int sync)
+{
+ u32 data;
+
+ /* EDP_MISC1_MISC0 */
+ data = edp_read(edp_base + 0x02c);
+
+ if (sync)
+ data |= 0x01;
+ else
+ data &= ~0x01;
+
+ /* EDP_MISC1_MISC0 */
+ edp_write(edp_base + 0x2c, data);
+}
+
+/* voltage mode and pre emphasis cfg */
+void mdss_edp_phy_vm_pe_init(unsigned char *edp_base)
{
/* EDP_PHY_EDPPHY_GLB_VM_CFG0 */
- edp_write(edp_base + 0x510, 0x3);
+ edp_write(edp_base + 0x510, 0x3); /* vm only */
/* EDP_PHY_EDPPHY_GLB_VM_CFG1 */
edp_write(edp_base + 0x514, 0x64);
/* EDP_PHY_EDPPHY_GLB_MISC9 */
edp_write(edp_base + 0x518, 0x6c);
- /* EDP_MISC1_MISC0 */
- edp_write(edp_base + 0x2c, 0x1);
}
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 21d836f..668c397 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -10,6 +10,7 @@
#include <linux/namei.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/kmemleak.h>
#include "internal.h"
static const struct dentry_operations proc_sys_dentry_operations;
@@ -1215,6 +1216,8 @@
if (!header)
return NULL;
+ kmemleak_not_leak(header);
+
node = (struct ctl_node *)(header + 1);
init_header(header, root, set, node, table);
if (sysctl_check_table(path, table))
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 72961c3..cd141a4 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -96,4 +96,10 @@
}
#endif /* CONFIG_GENERIC_BUG */
+
+#ifdef CONFIG_PANIC_ON_DATA_CORRUPTION
+#define PANIC_CORRUPTION 1
+#else
+#define PANIC_CORRUPTION 0
+#endif /* CONFIG_PANIC_ON_DATA_CORRUPTION */
#endif /* _LINUX_BUG_H */
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index dd675f3..02ecb0c 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -152,6 +152,8 @@
#define DMX_IDX_VC1_FIRST_SEQ_FRAME_END 0x00800000
#define DMX_IDX_VC1_FRAME_START 0x01000000
#define DMX_IDX_VC1_FRAME_END 0x02000000
+#define DMX_IDX_H264_ACCESS_UNIT_DEL 0x04000000
+#define DMX_IDX_H264_SEI 0x08000000
struct dmx_pes_filter_params
{
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 1e7c011..4f6fffb 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -36,6 +36,8 @@
__used __section(.builtin_fw) = { name, blob, size }
#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
+int request_firmware_direct(const char *name, struct device *device,
+ phys_addr_t addr, size_t size);
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
int request_firmware_nowait(
@@ -45,6 +47,12 @@
void release_firmware(const struct firmware *fw);
#else
+static inline int request_firmware_direct(const char *name,
+ struct device *device,
+ phys_addr_t addr, size_t size)
+{
+ return -EINVAL;
+}
static inline int request_firmware(const struct firmware **fw,
const char *name,
struct device *device)
diff --git a/include/linux/input/synaptics_dsx.h b/include/linux/input/synaptics_dsx.h
index 73016d6..d121695 100644
--- a/include/linux/input/synaptics_dsx.h
+++ b/include/linux/input/synaptics_dsx.h
@@ -35,8 +35,11 @@
* struct synaptics_rmi4_platform_data - rmi4 platform data
* @x_flip: x flip flag
* @y_flip: y flip flag
+ * @i2c_pull_up: pull up i2c bus with regulator
+ * @power_down_enable: enable complete regulator shutdown in suspend
* @irq_gpio: attention interrupt gpio
* @irq_flags: flags used by the irq
+ * @reset_flags: flags used by reset line
* @reset_gpio: reset gpio
* @panel_x: panel maximum values on the x
* @panel_y: panel maximum values on the y
@@ -47,6 +50,7 @@
bool x_flip;
bool y_flip;
bool i2c_pull_up;
+ bool power_down_enable;
unsigned irq_gpio;
u32 irq_flags;
u32 reset_flags;
diff --git a/include/linux/mhl_8334.h b/include/linux/mhl_8334.h
index 560f75b..d1ee11c 100644
--- a/include/linux/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -99,7 +99,7 @@
int mhl_mode;
struct completion rgnd_done;
struct completion msc_cmd_done;
- uint8_t devcap_state;
+ uint16_t devcap_state;
uint8_t path_en_state;
struct work_struct mhl_msc_send_work;
struct list_head list_cmd;
@@ -146,9 +146,10 @@
int current_val;
struct completion msc_cmd_done;
uint8_t devcap[16];
- uint8_t devcap_state;
+ uint16_t devcap_state;
uint8_t status[2];
uint8_t path_en_state;
+ uint8_t tmds_en_state;
void *hdmi_mhl_ops;
struct work_struct mhl_msc_send_work;
struct list_head list_cmd;
diff --git a/include/linux/mhl_defs.h b/include/linux/mhl_defs.h
index f5dacfd..6177f07 100644
--- a/include/linux/mhl_defs.h
+++ b/include/linux/mhl_defs.h
@@ -132,6 +132,7 @@
#define MHL_SCRATCHPAD_SIZE 16
#define MAX_SCRATCHPAD_TRANSFER_SIZE 64
#define ADOPTER_ID_SIZE 2
+#define MHL_DEVCAP_ALL 0xffff
/* manually define highest number */
#define MHL_MAX_BUFFER_SIZE MHL_SCRATCHPAD_SIZE
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 72e31b2..424b1d9 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -23,6 +23,11 @@
s32 cookie;
};
+enum sdhci_power_policy {
+ SDHCI_PERFORMANCE_MODE,
+ SDHCI_POWER_SAVE_MODE,
+};
+
struct sdhci_host {
/* Data set by hardware interface driver */
const char *hw_name; /* Hardware bus name */
@@ -238,9 +243,13 @@
unsigned int cpu_dma_latency_us;
struct pm_qos_request pm_qos_req_dma;
+ unsigned int pm_qos_timeout_us; /* timeout for PM QoS request */
+ struct device_attribute pm_qos_tout;
struct sdhci_next next_data;
ktime_t data_start_time;
+ struct mutex ios_mutex;
+ enum sdhci_power_policy power_policy;
unsigned long private[0] ____cacheline_aligned;
};
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
index 6a8633b..16a1000 100644
--- a/include/linux/msm_ion.h
+++ b/include/linux/msm_ion.h
@@ -72,6 +72,12 @@
*/
#define ION_FLAG_FORCE_CONTIGUOUS (1 << 30)
+/*
+ * Used in conjunction with heaps that pool memory to force an allocation
+ * to come from the page allocator directly instead of from the pool
+ */
+#define ION_FLAG_POOL_FORCE_ALLOC (1 << 16)
+
/**
* Deprecated! Please use the corresponding ION_FLAG_*
*/
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index f8b78a4..87047d2 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -30,6 +30,7 @@
#define KGSL_CONTEXT_TYPE_CL 2
#define KGSL_CONTEXT_TYPE_C2D 3
#define KGSL_CONTEXT_TYPE_RS 4
+#define KGSL_CONTEXT_TYPE_UNKNOWN 0x1E
#define KGSL_CONTEXT_INVALID 0xffffffff
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index c3ff9de..2455212 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -87,6 +87,7 @@
enum {
NOTIFY_UPDATE_START,
NOTIFY_UPDATE_STOP,
+ NOTIFY_UPDATE_POWER_OFF,
};
enum {
@@ -612,6 +613,19 @@
uint32_t *buffer;
};
+struct mdp_calib_dcm_state {
+ uint32_t ops;
+ uint32_t dcm_state;
+};
+
+enum {
+ DCM_UNINIT,
+ DCM_UNBLANK,
+ DCM_ENTER,
+ DCM_EXIT,
+ DCM_BLANK,
+};
+
#define MDSS_MAX_BL_BRIGHTNESS 255
#define AD_BL_LIN_LEN (MDSS_MAX_BL_BRIGHTNESS + 1)
@@ -704,6 +718,7 @@
mdp_op_ad_input,
mdp_op_calib_mode,
mdp_op_calib_buffer,
+ mdp_op_calib_dcm_state,
mdp_op_max,
};
@@ -713,6 +728,8 @@
WB_FORMAT_RGB_888,
WB_FORMAT_xRGB_8888,
WB_FORMAT_ARGB_8888,
+ WB_FORMAT_BGRA_8888,
+ WB_FORMAT_BGRX_8888,
WB_FORMAT_ARGB_8888_INPUT_ALPHA /* Need to support */
};
@@ -732,6 +749,7 @@
struct mdss_calib_cfg mdss_calib_cfg;
struct mdss_ad_input ad_input;
struct mdp_calib_config_buffer calib_buffer;
+ struct mdp_calib_dcm_state calib_dcm;
} data;
};
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 3d7b1c9..5d6cdac 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -98,7 +98,9 @@
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_POWER_NOW,
@@ -283,6 +285,7 @@
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_CURRENT_AVG:
return 1;
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index dfb156f..041aae7 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -1419,11 +1419,16 @@
enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result);
/**
- * qpnp_iadc_calibrate_for_trim() - Clients can use this API to re-calibrate
- * IADC.
- * @result: 0 on success.
+ * qpnp_iadc_calibrate_for_trim - Clients can use this API to re-calibrate
+ * IADC. The offset and gain values are programmed in the trim
+ * registers. The offset and the gain can be retrieved using
+ * qpnp_iadc_get_gain_and_offset
+ * @batfet_closed: whether the batfet is open or closed. The IADC chooses the
+ * proper channel (internal/external) based on the batfet status
+ * for calibration.
+ * RETURNS: 0 on success.
*/
-int32_t qpnp_iadc_calibrate_for_trim(void);
+int32_t qpnp_iadc_calibrate_for_trim(bool batfet_closed);
int32_t qpnp_iadc_comp_result(int64_t *result);
#else
static inline int32_t qpnp_iadc_read(enum qpnp_iadc_channels channel,
@@ -1440,7 +1445,7 @@
enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
{ return -ENXIO; }
-static inline int32_t qpnp_iadc_calibrate_for_trim(void)
+static inline int32_t qpnp_iadc_calibrate_for_trim(bool batfet_closed)
{ return -ENXIO; }
static inline int32_t qpnp_iadc_comp_result(int64_t *result, int32_t sign)
{ return -ENXIO; }
@@ -1494,6 +1499,20 @@
* has not occured.
*/
int32_t qpnp_adc_tm_is_ready(void);
+/**
+ * qpnp_iadc_skip_calibration() - Clients can use this API to ask the driver
+ * to skip iadc calibrations
+ * @result: 0 on success and -EPROBE_DEFER when probe for the device
+ * has not occurred.
+ */
+int qpnp_iadc_skip_calibration(void);
+/**
+ * qpnp_iadc_resume_calibration() - Clients can use this API to ask the driver
+ * to resume iadc calibrations
+ * @result: 0 on success and -EPROBE_DEFER when probe for the device
+ * has not occurred.
+ */
+int qpnp_iadc_resume_calibration(void);
#else
static inline int32_t qpnp_adc_tm_usbid_configure(
struct qpnp_adc_tm_btm_param *param)
@@ -1507,6 +1526,10 @@
{ return -ENXIO; }
static inline int32_t qpnp_adc_tm_is_ready(void)
{ return -ENXIO; }
+static inline int qpnp_iadc_skip_calibration(void)
+{ return -ENXIO; }
+static inline int qpnp_iadc_resume_calibration(void)
+{ return -ENXIO; }
#endif
#endif
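A minimal sketch (the calling context and error handling are assumptions) of how a client might pair the new skip/resume calibration APIs around a window in which recalibration is undesirable.

    #include <linux/qpnp/qpnp-adc.h>

    static int example_timed_measurement(void)
    {
            int rc;

            rc = qpnp_iadc_skip_calibration();
            if (rc)
                    return rc;

            /* ... latency-sensitive IADC reads go here ... */

            return qpnp_iadc_resume_calibration();
    }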
diff --git a/include/linux/stk3x1x.h b/include/linux/stk3x1x.h
new file mode 100644
index 0000000..c34116a
--- /dev/null
+++ b/include/linux/stk3x1x.h
@@ -0,0 +1,29 @@
+/*
+ *
+ * $Id: stk3x1x.h
+ *
+ * Copyright (C) 2012 Lex Hsieh <lex_hsieh@sitronix.com.tw>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ */
+#ifndef __STK3X1X_H__
+#define __STK3X1X_H__
+
+/* platform data */
+struct stk3x1x_platform_data
+{
+ uint8_t state_reg;
+ uint8_t psctrl_reg;
+ uint8_t alsctrl_reg;
+ uint8_t ledctrl_reg;
+ uint8_t wait_reg;
+ uint16_t ps_thd_h;
+ uint16_t ps_thd_l;
+ int int_pin;
+ uint32_t transmittance;
+};
+
+#endif /* __STK3X1X_H__ */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index cb2162e..8d104c6 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -438,6 +438,7 @@
struct power_supply usb_psy;
unsigned int online;
unsigned int host_mode;
+ unsigned int voltage_max;
unsigned int current_max;
dev_t ext_chg_dev;
@@ -465,6 +466,7 @@
unsigned data;
bool ignore_cal_pad_config;
bool phy_sof_workaround;
+ u32 reset_delay;
int strobe_pad_offset;
int data_pad_offset;
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 4404df5..101325e 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -704,6 +704,7 @@
#define V4L2_QCOM_BUF_FLAG_DECODEONLY 0x40000
#define V4L2_QCOM_BUF_DATA_CORRUPT 0x80000
#define V4L2_QCOM_BUF_DROP_FRAME 0x100000
+#define V4L2_QCOM_BUF_INPUT_UNSUPPORTED 0x200000
/*
* O V E R L A Y P R E V I E W
@@ -767,7 +768,8 @@
#define V4L2_CAP_QCOM_FRAMESKIP 0x2000 /* frame skipping is supported */
struct v4l2_qcom_frameskip {
- __u64 maxframeinterval;
+ __u64 maxframeinterval;
+ __u8 fpsvariance;
};
struct v4l2_outputparm {
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 5ae852a..ec8ec9a 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -98,6 +98,7 @@
struct msm_vfe_camif_cfg camif_cfg;
enum msm_vfe_inputmux input_mux;
enum ISP_START_PIXEL_PATTERN pixel_pattern;
+ uint32_t input_format;
};
struct msm_vfe_rdi_cfg {
diff --git a/include/media/msmb_pproc.h b/include/media/msmb_pproc.h
index 162729a..de42c38 100644
--- a/include/media/msmb_pproc.h
+++ b/include/media/msmb_pproc.h
@@ -13,6 +13,8 @@
#define MAX_NUM_CPP_STRIPS 8
#define MSM_CPP_MAX_NUM_PLANES 3
+#define MSM_CPP_MAX_FRAME_LENGTH 1024
+#define MSM_CPP_MAX_FW_NAME_LEN 32
enum msm_cpp_frame_type {
MSM_CPP_OFFLINE_FRAME,
diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h
index 4cbac7b..419e055 100644
--- a/include/media/radio-iris.h
+++ b/include/media/radio-iris.h
@@ -626,7 +626,8 @@
FM_RECV,
FM_TRANS,
FM_RESET,
- FM_CALIB
+ FM_CALIB,
+ FM_TURNING_OFF
};
enum emphasis_type {
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 364de9a..88fcf61 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -6924,6 +6924,11 @@
/* Dolby DAP topology */
#define DOLBY_ADM_COPP_TOPOLOGY_ID 0x0001033B
+/* RMS value from DSP */
+#define RMS_MODULEID_APPI_PASSTHRU 0x10009011
+#define RMS_PARAM_FIRST_SAMPLE 0x10009012
+#define RMS_PAYLOAD_LEN 4
+
struct afe_svc_cmd_set_clip_bank_selection {
struct apr_hdr hdr;
struct afe_svc_cmd_set_param param;
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
index 795bb99..e07f634 100644
--- a/include/sound/q6adm-v2.h
+++ b/include/sound/q6adm-v2.h
@@ -32,7 +32,7 @@
int adm_open(int port, int path, int rate, int mode, int topology,
bool perf_mode, uint16_t bits_per_sample);
-int adm_dolby_dap_get_params(int port_id, uint32_t module_id, uint32_t param_id,
+int adm_get_params(int port_id, uint32_t module_id, uint32_t param_id,
uint32_t params_length, char *params);
int adm_dolby_dap_send_params(int port_id, char *params,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f1a6e9e..90fd57d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
+#include <linux/bug.h>
#include "workqueue_sched.h"
@@ -1878,6 +1879,7 @@
printk(KERN_ERR " last function: ");
print_symbol("%s\n", (unsigned long)f);
debug_show_held_locks(current);
+ BUG_ON(PANIC_CORRUPTION);
dump_stack();
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 097d70f..b9060a1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1290,6 +1290,13 @@
If unsure, say N.
+config PANIC_ON_DATA_CORRUPTION
+ bool "Cause a Kernel Panic When Data Corruption is detected"
+ help
+ Select this option to upgrade warnings for potentially
+ recoverable data corruption scenarios to system-halting panics,
+ for easier detection and debugging.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 982b850..6a039a6 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/bug.h>
/*
* Insert a new entry between two known consecutive entries.
@@ -30,6 +31,10 @@
"list_add corruption. prev->next should be "
"next (%p), but was %p. (prev=%p).\n",
next, prev->next, prev);
+
+ BUG_ON(((prev->next != next) || (next->prev != prev)) &&
+ PANIC_CORRUPTION);
+
next->prev = new;
new->next = next;
new->prev = prev;
@@ -55,8 +60,10 @@
"but was %p\n", entry, prev->next) ||
WARN(next->prev != entry,
"list_del corruption. next->prev should be %p, "
- "but was %p\n", entry, next->prev))
+ "but was %p\n", entry, next->prev)) {
+ BUG_ON(PANIC_CORRUPTION);
return;
+ }
__list_del(prev, next);
}
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index f2aa5de..ac516f4 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -12,6 +12,7 @@
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/bug.h>
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key)
@@ -64,6 +65,7 @@
owner ? owner->comm : "<none>",
owner ? task_pid_nr(owner) : -1,
lock->owner_cpu);
+ BUG_ON(PANIC_CORRUPTION);
dump_stack();
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c69f5e2..1438de9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2310,6 +2310,19 @@
} while (memcg);
}
+static bool zone_balanced(struct zone *zone, int order,
+ unsigned long balance_gap, int classzone_idx)
+{
+ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+ balance_gap, classzone_idx, 0))
+ return false;
+
+ if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+ return false;
+
+ return true;
+}
+
/*
* pgdat_balanced is used when checking if a node is balanced for high-order
* allocations. Only zones that meet watermarks and are in a zone allowed
@@ -2369,8 +2382,7 @@
continue;
}
- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
- i, 0))
+ if (!zone_balanced(zone, order, 0, i))
all_zones_ok = false;
else
balanced += zone->present_pages;
@@ -2479,8 +2491,7 @@
break;
}
- if (!zone_watermark_ok_safe(zone, order,
- high_wmark_pages(zone), 0, 0)) {
+ if (!zone_balanced(zone, order, 0, 0)) {
end_zone = i;
break;
} else {
@@ -2556,9 +2567,8 @@
testorder = 0;
if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
- !zone_watermark_ok_safe(zone, testorder,
- high_wmark_pages(zone) + balance_gap,
- end_zone, 0)) {
+ !zone_balanced(zone, testorder,
+ balance_gap, end_zone)) {
shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
@@ -2585,8 +2595,7 @@
continue;
}
- if (!zone_watermark_ok_safe(zone, testorder,
- high_wmark_pages(zone), end_zone, 0)) {
+ if (!zone_balanced(zone, testorder, 0, end_zone)) {
all_zones_ok = 0;
/*
* We are still under min water mark. This
@@ -2681,22 +2690,6 @@
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable &&
- sc.priority != DEF_PRIORITY)
- continue;
-
- /* Would compaction fail due to lack of free memory? */
- if (COMPACTION_BUILD &&
- compaction_suitable(zone, order) == COMPACT_SKIPPED)
- goto loop_again;
-
- /* Confirm the zone is balanced for order-0 */
- if (!zone_watermark_ok(zone, 0,
- high_wmark_pages(zone), 0, 0)) {
- order = sc.order = 0;
- goto loop_again;
- }
-
/* Check if the memory needs to be defragmented. */
if (zone_watermark_ok(zone, order,
low_wmark_pages(zone), *classzone_idx, 0))
diff --git a/net/core/flow.c b/net/core/flow.c
index e318c7e..9a517c6 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -423,6 +423,7 @@
if (!fc->percpu)
return -ENOMEM;
+ get_online_cpus();
for_each_online_cpu(i) {
if (flow_cache_cpu_prepare(fc, i))
goto err;
@@ -431,6 +432,7 @@
.notifier_call = flow_cache_cpu,
};
register_hotcpu_notifier(&fc->hotcpu_notifier);
+ put_online_cpus();
setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
(unsigned long) fc);
@@ -440,6 +442,7 @@
return 0;
err:
+ put_online_cpus();
for_each_possible_cpu(i) {
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
kfree(fcp->hash_table);
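The pattern applied above, shown as a standalone sketch (hypothetical helper, not patch code): take the CPU hotplug read lock so no CPU can appear or disappear between the per-CPU initialisation loop and the notifier registration.

    #include <linux/cpu.h>

    static int example_percpu_setup(int (*prepare_one)(int cpu))
    {
            int cpu, ret = 0;

            get_online_cpus();
            for_each_online_cpu(cpu) {
                    ret = prepare_one(cpu);
                    if (ret)
                            break;
            }
            /* register_hotcpu_notifier() would also go here, under the lock */
            put_online_cpus();

            return ret;
    }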
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0c28508..247c69b 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -14,6 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#include <net/ip.h>
#include <net/sock.h>
@@ -256,7 +257,7 @@
{
static struct ctl_table empty[1];
- register_sysctl_paths(net_core_path, empty);
+ kmemleak_not_leak(register_sysctl_paths(net_core_path, empty));
register_net_sysctl_rotable(net_core_path, net_core_table);
return register_pernet_subsys(&sysctl_core_ops);
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 167ea10..d02a8da 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -109,6 +109,7 @@
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
+#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 2b486b1..21d66c2 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1408,12 +1408,15 @@
ifname, uid, sk, direction, proto, bytes);
+ spin_lock_bh(&iface_stat_list_lock);
iface_entry = get_iface_entry(ifname);
if (!iface_entry) {
+ spin_unlock_bh(&iface_stat_list_lock);
pr_err("qtaguid: iface_stat: stat_update() %s not found\n",
ifname);
return;
}
+ spin_unlock_bh(&iface_stat_list_lock);
/* It is ok to process data when an iface_entry is inactive */
MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
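A simplified sketch of the locking rule the fix above enforces (types and list are hypothetical): both the lookup and the test of its result must run under the spinlock that protects the list, and the lock is dropped on every exit path.

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>

    struct example_entry {
            struct list_head list;
            const char *name;
    };

    static LIST_HEAD(example_list);
    static DEFINE_SPINLOCK(example_list_lock);

    static bool example_entry_present(const char *name)
    {
            struct example_entry *e;
            bool found = false;

            spin_lock_bh(&example_list_lock);
            list_for_each_entry(e, &example_list, list) {
                    if (!strcmp(e->name, name)) {
                            found = true;
                            break;
                    }
            }
            spin_unlock_bh(&example_list_lock);

            return found;
    }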
diff --git a/scripts/build-all.py b/scripts/build-all.py
index c585e4a..5a109bb 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -46,8 +46,8 @@
make_env = os.environ
make_env.update({
'ARCH': 'arm',
- 'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
+make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
all_options = {}
def error(msg):
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index de0b4da..46b0a91 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -2891,6 +2891,7 @@
if (client->addr != HELICON_CORE_0_I2C_ADDR)
goto rtn;
+ dev_set_name(&client->dev, "%s", MSM8X10_CODEC_NAME);
dev = &client->dev;
if (client->dev.of_node) {
dev_dbg(&client->dev, "%s:Platform data from device tree\n",
diff --git a/sound/soc/codecs/msm8x10-wcd.h b/sound/soc/codecs/msm8x10-wcd.h
index d8f6ace..8e561cf 100644
--- a/sound/soc/codecs/msm8x10-wcd.h
+++ b/sound/soc/codecs/msm8x10-wcd.h
@@ -32,6 +32,7 @@
#define MSM8X10_DINO_LPASS_DIGCODEC_CBCR 0xFE02C014
#define MSM8X10_DINO_LPASS_DIGCODEC_AHB_CBCR 0xFE02C018
+#define MSM8X10_CODEC_NAME "msm8x10_wcd_codec"
#define MSM8X10_WCD_IS_DINO_REG(reg) \
(((reg >= 0x400) && (reg <= 0x5FF)) ? 1 : 0)
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index 7b896c2..ff190cd 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -38,6 +38,9 @@
#include "wcd9xxx-resmgr.h"
#include "wcd9xxx-common.h"
+#define TAPAN_HPH_PA_SETTLE_COMP_ON 3000
+#define TAPAN_HPH_PA_SETTLE_COMP_OFF 13000
+
static atomic_t kp_tapan_priv;
static int spkr_drv_wrnd_param_set(const char *val,
const struct kernel_param *kp);
@@ -224,8 +227,8 @@
};
static const u32 comp_shift[] = {
- 4, /* Compander 0's clock source is on interpolator 7 */
0,
+ 1,
2,
};
@@ -234,47 +237,44 @@
COMPANDER_1,
COMPANDER_2,
COMPANDER_2,
- COMPANDER_2,
- COMPANDER_2,
- COMPANDER_0,
COMPANDER_MAX,
};
static const struct comp_sample_dependent_params comp_samp_params[] = {
{
/* 8 Khz */
- .peak_det_timeout = 0x02,
+ .peak_det_timeout = 0x06,
.rms_meter_div_fact = 0x09,
.rms_meter_resamp_fact = 0x06,
},
{
/* 16 Khz */
- .peak_det_timeout = 0x03,
+ .peak_det_timeout = 0x07,
.rms_meter_div_fact = 0x0A,
.rms_meter_resamp_fact = 0x0C,
},
{
/* 32 Khz */
- .peak_det_timeout = 0x05,
+ .peak_det_timeout = 0x08,
.rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x1E,
},
{
/* 48 Khz */
- .peak_det_timeout = 0x05,
+ .peak_det_timeout = 0x09,
.rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x28,
},
{
/* 96 Khz */
- .peak_det_timeout = 0x06,
+ .peak_det_timeout = 0x0A,
.rms_meter_div_fact = 0x0C,
.rms_meter_resamp_fact = 0x50,
},
{
/* 192 Khz */
- .peak_det_timeout = 0x07,
- .rms_meter_div_fact = 0xD,
+ .peak_det_timeout = 0x0B,
+ .rms_meter_div_fact = 0xC,
.rms_meter_resamp_fact = 0xA0,
},
};
@@ -673,6 +673,37 @@
dev_dbg(codec->dev, "%s: Compander %d enable current %d, new %d\n",
__func__, comp, tapan->comp_enabled[comp], value);
tapan->comp_enabled[comp] = value;
+
+ if (comp == COMPANDER_1 &&
+ tapan->comp_enabled[comp] == 1) {
+ /* Wavegen to 5 msec */
+ snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_CTL, 0xDA);
+ snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_TIME, 0x15);
+ snd_soc_write(codec, TAPAN_A_RX_HPH_BIAS_WG_OCP, 0x2A);
+
+ /* Enable Chopper */
+ snd_soc_update_bits(codec,
+ TAPAN_A_RX_HPH_CHOP_CTL, 0x80, 0x80);
+
+ snd_soc_write(codec, TAPAN_A_NCP_DTEST, 0x20);
+ pr_debug("%s: Enabled Chopper and set wavegen to 5 msec\n",
+ __func__);
+ } else if (comp == COMPANDER_1 &&
+ tapan->comp_enabled[comp] == 0) {
+ /* Wavegen to 20 msec */
+ snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_CTL, 0xDB);
+ snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_TIME, 0x58);
+ snd_soc_write(codec, TAPAN_A_RX_HPH_BIAS_WG_OCP, 0x1A);
+
+ /* Disable CHOPPER block */
+ snd_soc_update_bits(codec,
+ TAPAN_A_RX_HPH_CHOP_CTL, 0x80, 0x00);
+
+ snd_soc_write(codec, TAPAN_A_NCP_DTEST, 0x10);
+ pr_debug("%s: Disabled Chopper and set wavegen to 20 msec\n",
+ __func__);
+ }
+
return 0;
}
@@ -708,26 +739,52 @@
static void tapan_discharge_comp(struct snd_soc_codec *codec, int comp)
{
- /* Update RSM to 1, DIVF to 5 */
- snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8), 1);
+ /* Level meter DIV Factor to 5 */
snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0,
- 1 << 5);
- /* Wait for 1ms */
- usleep_range(1000, 1000);
+ 0x05 << 4);
+ /* RMS meter Sampling to 0x01 */
+ snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8), 0x01);
+
+ /* Wait out the worst-case compander CnP sleep timeout */
+ usleep_range(3000, 3000);
+}
+
+static enum wcd9xxx_buck_volt tapan_codec_get_buck_mv(
+ struct snd_soc_codec *codec)
+{
+ int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
+ struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
+ struct wcd9xxx_pdata *pdata = tapan->resmgr.pdata;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
+ if (!strncmp(pdata->regulator[i].name,
+ WCD9XXX_SUPPLY_BUCK_NAME,
+ sizeof(WCD9XXX_SUPPLY_BUCK_NAME))) {
+ if ((pdata->regulator[i].min_uV ==
+ WCD9XXX_CDC_BUCK_MV_1P8) ||
+ (pdata->regulator[i].min_uV ==
+ WCD9XXX_CDC_BUCK_MV_2P15))
+ buck_volt = pdata->regulator[i].min_uV;
+ break;
+ }
+ }
+ pr_debug("%s: S4 voltage requested is %d\n", __func__, buck_volt);
+ return buck_volt;
}
static int tapan_config_compander(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- int mask, emask;
- bool timedout;
- unsigned long timeout;
+ int mask, enable_mask;
+ u8 rdac5_mux;
struct snd_soc_codec *codec = w->codec;
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
const int comp = w->shift;
const u32 rate = tapan->comp_fs[comp];
const struct comp_sample_dependent_params *comp_params =
&comp_samp_params[rate];
+ enum wcd9xxx_buck_volt buck_mv;
dev_dbg(codec->dev, "%s: %s event %d compander %d, enabled %d",
__func__, w->name, event, comp, tapan->comp_enabled[comp]);
@@ -737,72 +794,105 @@
/* Compander 0 has single channel */
mask = (comp == COMPANDER_0 ? 0x01 : 0x03);
- emask = (comp == COMPANDER_0 ? 0x02 : 0x03);
+ buck_mv = tapan_codec_get_buck_mv(codec);
+
+ rdac5_mux = snd_soc_read(codec, TAPAN_A_CDC_CONN_MISC);
+ rdac5_mux = (rdac5_mux & 0x04) >> 2;
+
+ if (comp == COMPANDER_0) { /* SPK compander */
+ enable_mask = 0x02;
+ } else if (comp == COMPANDER_1) { /* HPH compander */
+ enable_mask = 0x03;
+ } else if (comp == COMPANDER_2) { /* LO compander */
+
+ if (rdac5_mux == 0) { /* DEM4 */
+
+ /* for LO Stereo SE, enable Compander 2 left
+ * channel on RX3 interpolator Path and Compander 2
+ * right channel on RX4 interpolator Path.
+ */
+ enable_mask = 0x03;
+ } else if (rdac5_mux == 1) { /* DEM3_INV */
+
+ /* for LO mono differential only enable Compander 2
+ * left channel on RX3 interpolator Path.
+ */
+ enable_mask = 0x02;
+ } else {
+ dev_err(codec->dev, "%s: invalid rdac5_mux val %d",
+ __func__, rdac5_mux);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(codec->dev, "%s: invalid compander %d", __func__, comp);
+ return -EINVAL;
+ }
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ /* Set compander Sample rate */
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_FS_CFG + (comp * 8),
+ 0x07, rate);
+ /* Set the static gain offset for HPH Path */
+ if (comp == COMPANDER_1) {
+ if (buck_mv == WCD9XXX_CDC_BUCK_MV_2P15)
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_B4_CTL + (comp * 8),
+ 0x80, 0x00);
+ else
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_B4_CTL + (comp * 8),
+ 0x80, 0x80);
+ }
+ /* Enable RX interpolation path compander clocks */
+ snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
+ 0x01 << comp_shift[comp],
+ 0x01 << comp_shift[comp]);
+
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
+ 0x01 << comp_shift[comp],
+ 0x01 << comp_shift[comp]);
+ snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
+ 0x01 << comp_shift[comp], 0);
+
/* Set gain source to compander */
tapan_config_gain_compander(codec, comp, true);
- /* Enable RX interpolation path clocks */
- snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
- mask << comp_shift[comp],
- mask << comp_shift[comp]);
+
+ /* Compander enable */
+ snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B1_CTL +
+ (comp * 8), enable_mask, enable_mask);
tapan_discharge_comp(codec, comp);
- /* Clear compander halt */
- snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B1_CTL +
- (comp * 8),
- 1 << 2, 0);
+ /* Set sample rate dependent parameter */
+ snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8),
+ comp_params->rms_meter_resamp_fact);
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
+ 0xF0, comp_params->rms_meter_div_fact << 4);
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
+ 0x0F, comp_params->peak_det_timeout);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ /* Disable compander */
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_B1_CTL + (comp * 8),
+ enable_mask, 0x00);
+
/* Toggle compander reset bits */
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
mask << comp_shift[comp],
mask << comp_shift[comp]);
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
mask << comp_shift[comp], 0);
- break;
- case SND_SOC_DAPM_POST_PMU:
- /* Set sample rate dependent paramater */
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_COMP0_FS_CFG + (comp * 8),
- 0x07, rate);
- snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8),
- comp_params->rms_meter_resamp_fact);
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
- 0x0F, comp_params->peak_det_timeout);
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
- 0xF0, comp_params->rms_meter_div_fact << 4);
- /* Compander enable */
- snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B1_CTL +
- (comp * 8), emask, emask);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- /* Halt compander */
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_COMP0_B1_CTL + (comp * 8),
- 1 << 2, 1 << 2);
- /* Wait up to a second for shutdown complete */
- timeout = jiffies + HZ;
- do {
- if ((snd_soc_read(codec,
- TAPAN_A_CDC_COMP0_SHUT_DOWN_STATUS +
- (comp * 8)) & mask) == mask)
- break;
- } while (!(timedout = time_after(jiffies, timeout)));
- dev_dbg(codec->dev, "%s: Compander %d shutdown %s in %dms\n",
- __func__, comp, timedout ? "timedout" : "completed",
- jiffies_to_msecs(timeout - HZ - jiffies));
- break;
- case SND_SOC_DAPM_POST_PMD:
- /* Disable compander */
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_COMP0_B1_CTL + (comp * 8),
- emask, 0x00);
+
/* Turn off the clock for compander in pair */
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
mask << comp_shift[comp], 0);
+
/* Set gain source to register */
tapan_config_gain_compander(codec, comp, false);
break;
@@ -2267,6 +2357,7 @@
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
enum wcd9xxx_notify_event e_pre_on, e_post_off;
u8 req_clsh_state;
+ u32 pa_settle_time = TAPAN_HPH_PA_SETTLE_COMP_OFF;
dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
if (w->shift == 5) {
@@ -2282,23 +2373,32 @@
return -EINVAL;
}
+ if (tapan->comp_enabled[COMPANDER_1])
+ pa_settle_time = TAPAN_HPH_PA_SETTLE_COMP_ON;
+
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
/* Let MBHC module know PA is turning on */
wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_pre_on);
break;
-
case SND_SOC_DAPM_POST_PMU:
+ dev_dbg(codec->dev, "%s: sleep %d ms after %s PA enable.\n",
+ __func__, pa_settle_time / 1000, w->name);
+ /* Time needed for PA to settle */
+ usleep_range(pa_settle_time, pa_settle_time + 1000);
+
wcd9xxx_clsh_fsm(codec, &tapan->clsh_d,
req_clsh_state,
WCD9XXX_CLSH_REQ_ENABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
-
- usleep_range(5000, 5010);
break;
-
case SND_SOC_DAPM_POST_PMD:
+ dev_dbg(codec->dev, "%s: sleep %d ms after %s PA disable.\n",
+ __func__, pa_settle_time / 1000, w->name);
+ /* Time needed for PA to settle */
+ usleep_range(pa_settle_time, pa_settle_time + 1000);
+
/* Let MBHC module know PA turned off */
wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_post_off);
@@ -2306,10 +2406,6 @@
req_clsh_state,
WCD9XXX_CLSH_REQ_DISABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
-
- dev_dbg(codec->dev, "%s: sleep 10 ms after %s PA disable.\n",
- __func__, w->name);
- usleep_range(5000, 5010);
break;
}
return 0;
@@ -2549,6 +2645,7 @@
{"RX1 MIX1", NULL, "COMP1_CLK"},
{"RX2 MIX1", NULL, "COMP1_CLK"},
{"RX3 MIX1", NULL, "COMP2_CLK"},
+ {"RX4 MIX1", NULL, "COMP0_CLK"},
{"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
{"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
@@ -3019,6 +3116,7 @@
u16 rx_mix_1_reg_1, rx_mix_1_reg_2;
u16 rx_fs_reg;
u8 rx_mix_1_reg_1_val, rx_mix_1_reg_2_val;
+ u8 rdac5_mux;
struct snd_soc_codec *codec = dai->codec;
struct wcd9xxx_ch *ch;
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
@@ -3036,6 +3134,9 @@
rx_mix_1_reg_1 = TAPAN_A_CDC_CONN_RX1_B1_CTL;
+ rdac5_mux = snd_soc_read(codec, TAPAN_A_CDC_CONN_MISC);
+ rdac5_mux = (rdac5_mux & 0x04) >> 2;
+
for (j = 0; j < NUM_INTERPOLATORS; j++) {
rx_mix_1_reg_2 = rx_mix_1_reg_1 + 1;
@@ -3060,9 +3161,14 @@
snd_soc_update_bits(codec, rx_fs_reg,
0xE0, rx_fs_rate_reg_val);
- if (comp_rx_path[j] < COMPANDER_MAX)
- tapan->comp_fs[comp_rx_path[j]]
- = compander_fs;
+ if (comp_rx_path[j] < COMPANDER_MAX) {
+ if ((j == 3) && (rdac5_mux == 1))
+ tapan->comp_fs[COMPANDER_0] =
+ compander_fs;
+ else
+ tapan->comp_fs[comp_rx_path[j]]
+ = compander_fs;
+ }
}
if (j <= 1)
rx_mix_1_reg_1 += 3;
@@ -3893,13 +3999,13 @@
SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0,
tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0,
tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0,
tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_INPUT("AMIC1"),
SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 External", TAPAN_A_MICB_1_CTL, 7, 0,
@@ -4239,12 +4345,11 @@
TAPAN_REG_VAL(TAPAN_A_RX_HPH_CHOP_CTL, 0xF4),
TAPAN_REG_VAL(TAPAN_A_BIAS_CURR_CTL_2, 0x08),
TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_1, 0x5B),
- TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_3, 0x60),
+ TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_3, 0x6F),
/* TODO: Check below reg writes conflict with above */
/* PROGRAM_THE_0P85V_VBG_REFERENCE = V_0P858V */
TAPAN_REG_VAL(TAPAN_A_BIAS_CURR_CTL_2, 0x04),
- TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_4, 0x54),
TAPAN_REG_VAL(TAPAN_A_RX_HPH_CHOP_CTL, 0x74),
TAPAN_REG_VAL(TAPAN_A_RX_BUCK_BIAS1, 0x62),
@@ -4414,6 +4519,15 @@
{TAPAN_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F},
{TAPAN_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F},
{TAPAN_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F},
+
+ /*
+ * Setup wavegen timer to 20msec and disable chopper
+ * by default. This corresponds to Compander OFF
+ */
+ {TAPAN_A_RX_HPH_CNP_WG_CTL, 0xFF, 0xDB},
+ {TAPAN_A_RX_HPH_CNP_WG_TIME, 0xFF, 0x58},
+ {TAPAN_A_RX_HPH_BIAS_WG_OCP, 0xFF, 0x1A},
+ {TAPAN_A_RX_HPH_CHOP_CTL, 0xFF, 0x24},
};
static void tapan_codec_init_reg(struct snd_soc_codec *codec)
@@ -4554,7 +4668,6 @@
}
}
- wcd9xxx_resmgr_post_ssr(&tapan->resmgr);
if (spkr_drv_wrnd == 1)
snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80, 0x80);
@@ -4567,6 +4680,8 @@
tapan_slim_interface_init_reg(codec);
+ wcd9xxx_resmgr_post_ssr(&tapan->resmgr);
+
wcd9xxx_mbhc_deinit(&tapan->mbhc);
if (TAPAN_IS_1_0(wcd9xxx->version))
@@ -4575,7 +4690,7 @@
rco_clk_rate = TAPAN_MCLK_CLK_9P6MHZ;
ret = wcd9xxx_mbhc_init(&tapan->mbhc, &tapan->resmgr, codec, NULL,
- &mbhc_cb, rco_clk_rate);
+ &mbhc_cb, rco_clk_rate, false);
if (ret)
pr_err("%s: mbhc init failed %d\n", __func__, ret);
else
@@ -4595,30 +4710,6 @@
return 0;
}
-static enum wcd9xxx_buck_volt tapan_codec_get_buck_mv(
- struct snd_soc_codec *codec)
-{
- int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
- struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_pdata *pdata = tapan->resmgr.pdata;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
- if (!strncmp(pdata->regulator[i].name,
- WCD9XXX_SUPPLY_BUCK_NAME,
- sizeof(WCD9XXX_SUPPLY_BUCK_NAME))) {
- if ((pdata->regulator[i].min_uV ==
- WCD9XXX_CDC_BUCK_MV_1P8) ||
- (pdata->regulator[i].min_uV ==
- WCD9XXX_CDC_BUCK_MV_2P15))
- buck_volt = pdata->regulator[i].min_uV;
- break;
- }
- }
- pr_debug("%s: S4 voltage requested is %d\n", __func__, buck_volt);
- return buck_volt;
-}
-
static int tapan_codec_probe(struct snd_soc_codec *codec)
{
struct wcd9xxx *control;
@@ -4677,7 +4768,7 @@
rco_clk_rate = TAPAN_MCLK_CLK_9P6MHZ;
ret = wcd9xxx_mbhc_init(&tapan->mbhc, &tapan->resmgr, codec, NULL,
- &mbhc_cb, rco_clk_rate);
+ &mbhc_cb, rco_clk_rate, false);
if (ret) {
pr_err("%s: mbhc init failed %d\n", __func__, ret);
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 59354a3..c27e085 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -6230,7 +6230,6 @@
codec->reg_size, GFP_KERNEL);
}
- wcd9xxx_resmgr_post_ssr(&taiko->resmgr);
if (spkr_drv_wrnd == 1)
snd_soc_update_bits(codec, TAIKO_A_SPKR_DRV_EN, 0x80, 0x80);
@@ -6243,6 +6242,8 @@
taiko_init_slim_slave_cfg(codec);
taiko_slim_interface_init_reg(codec);
+ wcd9xxx_resmgr_post_ssr(&taiko->resmgr);
+
if (taiko->mbhc_started) {
wcd9xxx_mbhc_deinit(&taiko->mbhc);
taiko->mbhc_started = false;
@@ -6254,7 +6255,7 @@
ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec,
taiko_enable_mbhc_micbias,
- NULL, rco_clk_rate);
+ NULL, rco_clk_rate, true);
if (ret) {
pr_err("%s: mbhc init failed %d\n", __func__, ret);
} else {
@@ -6436,7 +6437,7 @@
/* init and start mbhc */
ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec,
taiko_enable_mbhc_micbias,
- NULL, rco_clk_rate);
+ NULL, rco_clk_rate, true);
if (ret) {
pr_err("%s: mbhc init failed %d\n", __func__, ret);
goto err_init;
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index d0c00a7..6fc8e13 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -155,6 +155,8 @@
static int wcd9xxx_detect_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
uint32_t *zr);
+static s16 wcd9xxx_get_current_v(struct wcd9xxx_mbhc *mbhc,
+ const enum wcd9xxx_current_v_idx idx);
static bool wcd9xxx_mbhc_polling(struct wcd9xxx_mbhc *mbhc)
{
@@ -185,6 +187,7 @@
/* called under codec_resource_lock acquisition */
static void wcd9xxx_start_hs_polling(struct wcd9xxx_mbhc *mbhc)
{
+ s16 v_brh, v_b1_hu;
struct snd_soc_codec *codec = mbhc->codec;
int mbhc_state = mbhc->mbhc_state;
@@ -212,6 +215,17 @@
/* set to max */
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL, 0x7F);
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, 0xFF);
+
+ v_brh = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_BR_H);
+ snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
+ (v_brh >> 8) & 0xFF);
+ snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL,
+ v_brh & 0xFF);
+ v_b1_hu = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_HU);
+ snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL,
+ v_b1_hu & 0xFF);
+ snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
+ (v_b1_hu >> 8) & 0xFF);
}
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x1);
@@ -799,8 +813,8 @@
pr_debug("%s: Enabling micbias\n", __func__);
mbhc->micbias_enable_cb(mbhc->codec, true);
}
-
- wcd9xxx_detect_impedance(mbhc, &mbhc->zl, &mbhc->zr);
+ if (mbhc->impedance_detect)
+ wcd9xxx_detect_impedance(mbhc, &mbhc->zl, &mbhc->zr);
pr_debug("%s: Reporting insertion %d(%x)\n", __func__,
jack_type, mbhc->hph_status);
wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
@@ -1000,7 +1014,7 @@
* These will be released by wcd9xxx_cleanup_hs_polling
*/
WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
- wcd9xxx_resmgr_get_bandgap(mbhc->resmgr, WCD9XXX_BANDGAP_MBHC_MODE);
+ wcd9xxx_resmgr_get_bandgap(mbhc->resmgr, WCD9XXX_BANDGAP_AUDIO_MODE);
wcd9xxx_resmgr_get_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO);
WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
@@ -1205,7 +1219,7 @@
goto exit;
}
- for (i = 0, d = dt; i < size; i++, d++) {
+ for (i = 0, dprev = NULL, d = dt; i < size; i++, d++) {
if (d->vddio) {
dvddio = d;
continue;
@@ -2410,36 +2424,6 @@
return r;
}
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_codec_drive_v_to_micbias(struct wcd9xxx_mbhc *mbhc,
- int usec)
-{
- int cfilt_k_val;
- bool set = true;
-
- if (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV &&
- mbhc->mbhc_micbias_switched) {
- pr_debug("%s: set mic V to micbias V\n", __func__);
- snd_soc_update_bits(mbhc->codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
- 0x2, 0x2);
- wcd9xxx_turn_onoff_override(mbhc->codec, true);
- while (1) {
- cfilt_k_val =
- wcd9xxx_resmgr_get_k_val(mbhc->resmgr,
- set ? mbhc->mbhc_data.micb_mv :
- VDDIO_MICBIAS_MV);
- snd_soc_update_bits(mbhc->codec,
- mbhc->mbhc_bias_regs.cfilt_val,
- 0xFC, (cfilt_k_val << 2));
- if (!set)
- break;
- usleep_range(usec, usec);
- set = false;
- }
- wcd9xxx_turn_onoff_override(mbhc->codec, false);
- }
-}
-
static int wcd9xxx_is_fake_press(struct wcd9xxx_mbhc *mbhc)
{
int i;
@@ -2564,12 +2548,43 @@
snd_soc_write(codec, mbhc->mbhc_bias_regs.ctl_reg, reg0);
}
+/*
+ * wcd9xxx_update_rel_threshold : update mbhc release upper bound threshold
+ * to ceilmv + buffer
+ */
+static int wcd9xxx_update_rel_threshold(struct wcd9xxx_mbhc *mbhc, int ceilmv)
+{
+ u16 v_brh, v_b1_hu;
+ int mv;
+ struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
+ void *calibration = mbhc->mbhc_cfg->calibration;
+ struct snd_soc_codec *codec = mbhc->codec;
+
+ btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
+ mv = ceilmv + btn_det->v_btn_press_delta_cic;
+ pr_debug("%s: reprogram vb1hu/vbrh to %dmv\n", __func__, mv);
+
+ /* update LSB first so the mbhc hardware block doesn't see too low a value */
+ v_b1_hu = wcd9xxx_codec_v_sta_dce(mbhc, STA, mv);
+ snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, v_b1_hu & 0xFF);
+ snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
+ (v_b1_hu >> 8) & 0xFF);
+ v_brh = wcd9xxx_codec_v_sta_dce(mbhc, DCE, mv);
+ snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, v_brh & 0xFF);
+ snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
+ (v_brh >> 8) & 0xFF);
+ return 0;
+}
+
irqreturn_t wcd9xxx_dce_handler(int irq, void *data)
{
int i, mask;
bool vddio;
u8 mbhc_status;
s16 dce_z, sta_z;
+ s32 stamv, stamv_s;
+ s16 *v_btn_high;
+ struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
int btn = -1, meas = 0;
struct wcd9xxx_mbhc *mbhc = data;
const struct wcd9xxx_mbhc_btn_detect_cfg *d =
@@ -2577,10 +2592,10 @@
short btnmeas[d->n_btn_meas + 1];
short dce[d->n_btn_meas + 1], sta;
s32 mv[d->n_btn_meas + 1], mv_s[d->n_btn_meas + 1];
- s32 stamv, stamv_s;
struct snd_soc_codec *codec = mbhc->codec;
struct wcd9xxx *core = mbhc->resmgr->core;
int n_btn_meas = d->n_btn_meas;
+ void *calibration = mbhc->mbhc_cfg->calibration;
pr_debug("%s: enter\n", __func__);
@@ -2705,6 +2720,13 @@
__func__);
goto done;
}
+ btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
+ v_btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
+ MBHC_BTN_DET_V_BTN_HIGH);
+ WARN_ON(btn >= btn_det->num_btn);
+ /* reprogram release threshold to catch voltage ramp up early */
+ wcd9xxx_update_rel_threshold(mbhc, v_btn_high[btn]);
+
mask = wcd9xxx_get_button_mask(btn);
mbhc->buttons_pressed |= mask;
wcd9xxx_lock_sleep(core);
@@ -2733,8 +2755,6 @@
WCD9XXX_BCL_LOCK(mbhc->resmgr);
mbhc->mbhc_state = MBHC_STATE_RELEASE;
- wcd9xxx_codec_drive_v_to_micbias(mbhc, 10000);
-
if (mbhc->buttons_pressed & WCD9XXX_JACK_BUTTON_MASK) {
ret = wcd9xxx_cancel_btn_work(mbhc);
if (ret == 0) {
@@ -3785,7 +3805,13 @@
mutex_lock(&codec->mutex);
WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
- wcd9xxx_resmgr_get_bandgap(mbhc->resmgr, WCD9XXX_BANDGAP_MBHC_MODE);
+ /*
+ * Fast(mbhc) mode bandgap doesn't need to be enabled explicitly
+ * since fast mode is set by MBHC hardware when override is on.
+ * Enable bandgap mode to avoid unnecessary RCO disable and enable
+ * during clock source change.
+ */
+ wcd9xxx_resmgr_get_bandgap(mbhc->resmgr, WCD9XXX_BANDGAP_AUDIO_MODE);
wcd9xxx_resmgr_get_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO);
WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
@@ -3938,7 +3964,8 @@
int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr,
struct snd_soc_codec *codec,
int (*micbias_enable_cb) (struct snd_soc_codec*, bool),
- const struct wcd9xxx_mbhc_cb *mbhc_cb, int rco_clk_rate)
+ const struct wcd9xxx_mbhc_cb *mbhc_cb, int rco_clk_rate,
+ bool impedance_det_en)
{
int ret;
void *core;
@@ -3964,6 +3991,7 @@
mbhc->micbias_enable_cb = micbias_enable_cb;
mbhc->rco_clk_rate = rco_clk_rate;
mbhc->mbhc_cb = mbhc_cb;
+ mbhc->impedance_detect = impedance_det_en;
if (mbhc->headset_jack.jack == NULL) {
ret = snd_soc_jack_new(codec, "Headset Jack", WCD9XXX_JACK_MASK,
@@ -4056,6 +4084,9 @@
}
wcd9xxx_disable_irq(codec->control_data, WCD9XXX_IRQ_HPH_PA_OCPR_FAULT);
+ wcd9xxx_regmgr_cond_register(resmgr, 1 << WCD9XXX_COND_HPH_MIC |
+ 1 << WCD9XXX_COND_HPH);
+
pr_debug("%s: leave ret %d\n", __func__, ret);
return ret;
@@ -4081,6 +4112,9 @@
{
void *cdata = mbhc->codec->control_data;
+ wcd9xxx_regmgr_cond_deregister(mbhc->resmgr, 1 << WCD9XXX_COND_HPH_MIC |
+ 1 << WCD9XXX_COND_HPH);
+
wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_RELEASE, mbhc);
wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_POTENTIAL, mbhc);
wcd9xxx_free_irq(cdata, WCD9XXX_IRQ_MBHC_REMOVAL, mbhc);
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.h b/sound/soc/codecs/wcd9xxx-mbhc.h
index 1f6502f..0599ccb 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.h
+++ b/sound/soc/codecs/wcd9xxx-mbhc.h
@@ -291,6 +291,7 @@
bool micbias_enable;
int (*micbias_enable_cb) (struct snd_soc_codec*, bool);
+ bool impedance_detect;
/* impedance of hphl and hphr */
uint32_t zl, zr;
@@ -363,7 +364,8 @@
struct snd_soc_codec *codec,
int (*micbias_enable_cb) (struct snd_soc_codec*, bool),
const struct wcd9xxx_mbhc_cb *mbhc_cb,
- int rco_clk_rate);
+ int rco_clk_rate,
+ bool impedance_det_en);
void wcd9xxx_mbhc_deinit(struct wcd9xxx_mbhc *mbhc);
void *wcd9xxx_mbhc_cal_btn_det_mp(
const struct wcd9xxx_mbhc_btn_detect_cfg *btn_det,
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.c b/sound/soc/codecs/wcd9xxx-resmgr.c
index be11e53..9633cc0 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr.c
@@ -652,15 +652,17 @@
return rc;
}
-void wcd9xxx_resmgr_cond_trigger_cond(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_resmgr_cond cond)
+static void wcd9xxx_resmgr_cond_trigger_cond(struct wcd9xxx_resmgr *resmgr,
+ enum wcd9xxx_resmgr_cond cond)
{
struct list_head *l;
struct wcd9xxx_resmgr_cond_entry *e;
bool set;
pr_debug("%s: enter\n", __func__);
- set = !!test_bit(cond, &resmgr->cond_flags);
+ /* update bit if cond isn't available or cond is set */
+ set = !test_bit(cond, &resmgr->cond_avail_flags) ||
+ !!test_bit(cond, &resmgr->cond_flags);
list_for_each(l, &resmgr->update_bit_cond_h) {
e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list);
if (e->cond == cond)
@@ -672,6 +674,44 @@
pr_debug("%s: leave\n", __func__);
}
+/*
+ * wcd9xxx_regmgr_cond_register : notify resmgr that the conditions in the
+ * condbits are available and notified.
+ * condbits : contains bitmask of enum wcd9xxx_resmgr_cond
+ */
+void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
+ unsigned long condbits)
+{
+ unsigned int cond;
+
+ for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
+ mutex_lock(&resmgr->update_bit_cond_lock);
+ WARN(test_bit(cond, &resmgr->cond_avail_flags),
+ "Condition 0x%0x is already registered\n", cond);
+ set_bit(cond, &resmgr->cond_avail_flags);
+ wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
+ mutex_unlock(&resmgr->update_bit_cond_lock);
+ pr_debug("%s: Condition 0x%x is registered\n", __func__, cond);
+ }
+}
+
+void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
+ unsigned long condbits)
+{
+ unsigned int cond;
+
+ for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
+ mutex_lock(&resmgr->update_bit_cond_lock);
+ WARN(!test_bit(cond, &resmgr->cond_avail_flags),
+ "Condition 0x%0x isn't registered\n", cond);
+ clear_bit(cond, &resmgr->cond_avail_flags);
+ wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
+ mutex_unlock(&resmgr->update_bit_cond_lock);
+ pr_debug("%s: Condition 0x%x is deregistered\n", __func__,
+ cond);
+ }
+}
+
void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr,
enum wcd9xxx_resmgr_cond cond, bool set)
{
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.h b/sound/soc/codecs/wcd9xxx-resmgr.h
index aaf7317..e6a8f5d 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr.h
+++ b/sound/soc/codecs/wcd9xxx-resmgr.h
@@ -134,6 +134,7 @@
struct wcd9xxx_mbhc *mbhc;
unsigned long cond_flags;
+ unsigned long cond_avail_flags;
struct list_head update_bit_cond_h;
struct mutex update_bit_cond_lock;
@@ -227,6 +228,10 @@
WCD9XXX_COND_HPH = 0x01, /* Headphone */
WCD9XXX_COND_HPH_MIC = 0x02, /* Microphone on the headset */
};
+void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
+ unsigned long condbits);
+void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
+ unsigned long condbits);
int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
enum wcd9xxx_resmgr_cond cond,
unsigned short reg, int shift,
diff --git a/sound/soc/msm/apq8074.c b/sound/soc/msm/apq8074.c
index cb101bd..3a055e2 100644
--- a/sound/soc/msm/apq8074.c
+++ b/sound/soc/msm/apq8074.c
@@ -123,7 +123,7 @@
.gpio = 0,
.gpio_irq = 0,
.gpio_level_insert = 1,
- .detect_extn_cable = true,
+ .detect_extn_cable = false,
.insert_detect = true,
.swap_gnd_mic = NULL,
};
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index a0ed887..41fe8aa 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -741,6 +741,30 @@
.ops = &msm_fe_Multimedia_dai_ops,
.name = "MultiMedia9",
},
+ {
+ .playback = {
+ .stream_name = "QCHAT Playback",
+ .aif_name = "QCHAT_DL",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .capture = {
+ .stream_name = "QCHAT Capture",
+ .aif_name = "QCHAT_UL",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &msm_fe_dai_ops,
+ .name = "QCHAT",
+ },
};
static __devinit int msm_fe_dai_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 0b7e7f2..b28f0f49 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -2150,6 +2150,22 @@
/* this dainlink has playback support */
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA8,
},
+ {
+ .name = "QCHAT",
+ .stream_name = "QCHAT",
+ .cpu_dai_name = "QCHAT",
+ .platform_name = "msm-pcm-voice",
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ /* this dainlink has playback support */
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .be_id = MSM_FRONTEND_DAI_QCHAT,
+ },
/* HDMI Hostless */
{
.name = "HDMI_RX_HOSTLESS",
diff --git a/sound/soc/msm/msm8x10.c b/sound/soc/msm/msm8x10.c
index 4d9632c..340d3db 100644
--- a/sound/soc/msm/msm8x10.c
+++ b/sound/soc/msm/msm8x10.c
@@ -587,7 +587,7 @@
.stream_name = "Secondary MI2S Playback",
.cpu_dai_name = "msm-dai-q6-mi2s.1",
.platform_name = "msm-pcm-routing",
- .codec_name = "msm8x10-wcd-i2c-core.5-000d",
+ .codec_name = MSM8X10_CODEC_NAME,
.codec_dai_name = "msm8x10_wcd_i2s_rx1",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
@@ -601,7 +601,7 @@
.stream_name = "Primary MI2S Capture",
.cpu_dai_name = "msm-dai-q6-mi2s.0",
.platform_name = "msm-pcm-routing",
- .codec_name = "msm8x10-wcd-i2c-core.5-000d",
+ .codec_name = MSM8X10_CODEC_NAME,
.codec_dai_name = "msm8x10_wcd_i2s_tx1",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_PRI_MI2S_TX,
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
index ca2afaf..cd08b39 100644
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
@@ -557,7 +557,7 @@
return -ENOMEM;
}
if (DOLBY_PARAM_ID_VER == dolby_dap_params_get.param_id) {
- rc = adm_dolby_dap_get_params(dolby_dap_params_get.port_id,
+ rc = adm_get_params(dolby_dap_params_get.port_id,
DOLBY_BUNDLE_MODULE_ID,
DOLBY_PARAM_ID_VER,
params_length +
@@ -575,7 +575,7 @@
params_length = (dolby_dap_params_length[i] +
DOLBY_PARAM_PAYLOAD_SIZE) *
sizeof(uint32_t);
- rc = adm_dolby_dap_get_params(
+ rc = adm_get_params(
dolby_dap_params_get.port_id,
DOLBY_BUNDLE_MODULE_ID,
dolby_dap_params_id[i],
@@ -652,7 +652,7 @@
}
offset = 0;
params_length = length * sizeof(uint32_t);
- rc = adm_dolby_dap_get_params(dolby_dap_params_states.port_id,
+ rc = adm_get_params(dolby_dap_params_states.port_id,
DOLBY_BUNDLE_MODULE_ID,
DOLBY_PARAM_ID_VCBG,
params_length + param_payload_len,
@@ -664,7 +664,7 @@
}
offset = length * sizeof(uint32_t);
- rc = adm_dolby_dap_get_params(dolby_dap_params_states.port_id,
+ rc = adm_get_params(dolby_dap_params_states.port_id,
DOLBY_BUNDLE_MODULE_ID,
DOLBY_PARAM_ID_VCBE,
params_length + param_payload_len,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index d02713a..de60430 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -30,6 +30,7 @@
#include <sound/tlv.h>
#include <sound/asound.h>
#include <sound/pcm_params.h>
+#include <linux/slab.h>
#include "msm-pcm-routing-v2.h"
#include "msm-dolby-dap-config.h"
@@ -62,6 +63,7 @@
static int lsm_mux_slim_port;
static int slim0_rx_aanc_fb_port;
static int msm_route_ec_ref_rx = 3; /* NONE */
+static uint32_t voc_session_id = ALL_SESSION_VSID;
enum {
MADNONE,
@@ -571,7 +573,7 @@
}
if ((msm_bedais[reg].port_id == VOICE_RECORD_RX)
|| (msm_bedais[reg].port_id == VOICE_RECORD_TX))
- voc_start_record(msm_bedais[reg].port_id, set);
+ voc_start_record(msm_bedais[reg].port_id, set, voc_session_id);
mutex_unlock(&routing_lock);
}
@@ -627,6 +629,8 @@
session_id = voc_get_session_id(VOLTE_SESSION_NAME);
else if (val == MSM_FRONTEND_DAI_VOICE2)
session_id = voc_get_session_id(VOICE2_SESSION_NAME);
+ else if (val == MSM_FRONTEND_DAI_QCHAT)
+ session_id = voc_get_session_id(QCHAT_SESSION_NAME);
else
session_id = voc_get_session_id(VOIP_SESSION_NAME);
@@ -1869,6 +1873,9 @@
SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new mmul5_mixer_controls[] = {
@@ -1914,6 +1921,9 @@
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new sec_i2s_rx_voice_mixer_controls[] = {
@@ -1932,6 +1942,9 @@
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new sec_mi2s_rx_voice_mixer_controls[] = {
@@ -1950,6 +1963,9 @@
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new slimbus_rx_voice_mixer_controls[] = {
@@ -1971,6 +1987,9 @@
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new bt_sco_rx_voice_mixer_controls[] = {
@@ -1992,6 +2011,9 @@
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new mi2s_rx_voice_mixer_controls[] = {
@@ -2013,6 +2035,9 @@
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_MI2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new afe_pcm_rx_voice_mixer_controls[] = {
@@ -2034,6 +2059,9 @@
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new aux_pcm_rx_voice_mixer_controls[] = {
@@ -2055,6 +2083,9 @@
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AUXPCM_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new sec_aux_pcm_rx_voice_mixer_controls[] = {
@@ -2073,6 +2104,9 @@
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new hdmi_rx_voice_mixer_controls[] = {
@@ -2094,6 +2128,9 @@
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new stub_rx_mixer_controls[] = {
@@ -2243,6 +2280,33 @@
msm_routing_put_voice_stub_mixer),
};
+static const struct snd_kcontrol_new tx_qchat_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_TX_QCHAT", MSM_BACKEND_DAI_PRI_I2S_TX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("SLIM_0_TX_QCHAT", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_QCHAT",
+ MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0,
+ msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX_QCHAT", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("AUX_PCM_TX_QCHAT", MSM_BACKEND_DAI_AUXPCM_TX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_TX_QCHAT", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("MI2S_TX_QCHAT", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX_QCHAT", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+};
+
static const struct snd_kcontrol_new sbus_0_rx_port_mixer_controls[] = {
SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
@@ -2526,6 +2590,73 @@
msm_routing_put_dolby_dap_endpoint_control),
};
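+/*
+ * Read back the RMS value reported by the ADM passthrough module
+ * (RMS_MODULEID_APPI_PASSTHRU) on SLIMBUS_0_TX via adm_get_params().
+ */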
+int msm_routing_get_rms_value_control(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int rc = 0;
+ char *param_value;
+ int *update_param_value;
+ uint32_t param_length = sizeof(uint32_t);
+ uint32_t param_payload_len = RMS_PAYLOAD_LEN * sizeof(uint32_t);
+ param_value = kzalloc(param_length, GFP_KERNEL);
+ if (!param_value) {
+ pr_err("%s, param memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ rc = adm_get_params(SLIMBUS_0_TX,
+ RMS_MODULEID_APPI_PASSTHRU,
+ RMS_PARAM_FIRST_SAMPLE,
+ param_length + param_payload_len,
+ param_value);
+ if (rc) {
+ pr_err("%s: get parameters failed\n", __func__);
+ kfree(param_value);
+ return -EINVAL;
+ }
+ update_param_value = (int *)param_value;
+ ucontrol->value.integer.value[0] = update_param_value[0];
+
+ pr_debug("%s: FROM DSP value[0] 0x%x\n",
+ __func__, update_param_value[0]);
+ kfree(param_value);
+ return 0;
+}
+
+int msm_routing_put_rms_value_control(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ /* not used */
+ return 0;
+}
+
+static const struct snd_kcontrol_new get_rms_controls[] = {
+ SOC_SINGLE_EXT("Get RMS", SND_SOC_NOPM, 0, 0xFFFFFFFF,
+ 0, msm_routing_get_rms_value_control,
+ msm_routing_put_rms_value_control),
+};
+
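+/* "Voc VSID" control: caches the voice session ID (VSID) most recently set by userspace. */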
+static int msm_voc_session_id_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ voc_session_id = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: voc_session_id=%u\n", __func__, voc_session_id);
+
+ return 0;
+}
+
+static int msm_voc_session_id_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = voc_session_id;
+
+ return 0;
+}
+
+static struct snd_kcontrol_new msm_voc_session_controls[] = {
+ SOC_SINGLE_MULTI_EXT("Voc VSID", SND_SOC_NOPM, 0,
+ 0xFFFFFFFF, 0, 1, msm_voc_session_id_get,
+ msm_voc_session_id_put),
+};
+
static const struct snd_kcontrol_new eq_enable_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1 EQ Enable", SND_SOC_NOPM,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_eq_enable_mixer,
@@ -2831,6 +2962,8 @@
/* LSM */
SND_SOC_DAPM_AIF_OUT("LSM_UL_HL", "Listen Audio Service Capture",
0, 0, 0, 0),
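+ /* QCHAT */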
+ SND_SOC_DAPM_AIF_IN("QCHAT_DL", "QCHAT Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QCHAT_UL", "QCHAT Capture", 0, 0, 0, 0),
/* Backend AIF */
/* Stream name equals to backend dai link stream name
*/
@@ -3065,6 +3198,9 @@
SND_SOC_DAPM_MIXER("PRI_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
primary_mi2s_rx_port_mixer_controls,
ARRAY_SIZE(primary_mi2s_rx_port_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QCHAT_Tx Mixer",
+ SND_SOC_NOPM, 0, 0, tx_qchat_mixer_controls,
+ ARRAY_SIZE(tx_qchat_mixer_controls)),
/* Virtual Pins to force backends ON atm */
SND_SOC_DAPM_OUTPUT("BE_OUT"),
SND_SOC_DAPM_INPUT("BE_IN"),
@@ -3136,6 +3272,7 @@
{"MultiMedia1 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
{"MultiMedia1 Mixer", "SLIM_4_TX", "SLIMBUS_4_TX"},
{"MultiMedia4 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia4 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -3266,6 +3403,7 @@
{"MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
{"MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+ {"MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
{"PRI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
@@ -3273,6 +3411,7 @@
{"PRI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"PRI_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"PRI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+ {"PRI_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"PRI_I2S_RX", NULL, "PRI_RX_Voice Mixer"},
{"SEC_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
@@ -3280,6 +3419,7 @@
{"SEC_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"SEC_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SEC_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+ {"SEC_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"SEC_I2S_RX", NULL, "SEC_RX_Voice Mixer"},
{"SEC_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
@@ -3287,6 +3427,7 @@
{"SEC_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"SEC_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SEC_MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+ {"SEC_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"SEC_MI2S_RX", NULL, "SEC_MI2S_RX_Voice Mixer"},
{"SLIM_0_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
@@ -3295,6 +3436,7 @@
{"SLIM_0_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SLIM_0_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"SLIM_0_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"SLIM_0_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"SLIMBUS_0_RX", NULL, "SLIM_0_RX_Voice Mixer"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
@@ -3302,6 +3444,7 @@
{"INTERNAL_BT_SCO_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+ {"INTERNAL_BT_SCO_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX_Voice Mixer"},
{"AFE_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
@@ -3309,6 +3452,7 @@
{"AFE_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"AFE_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"AFE_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+ {"AFE_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"PCM_RX", NULL, "AFE_PCM_RX_Voice Mixer"},
{"AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
@@ -3317,6 +3461,7 @@
{"AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"AUX_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"AUX_PCM_RX", NULL, "AUX_PCM_RX_Voice Mixer"},
{"SEC_AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
@@ -3324,6 +3469,7 @@
{"SEC_AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SEC_AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"SEC_AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"SEC_AUX_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"SEC_AUX_PCM_RX", NULL, "SEC_AUX_PCM_RX_Voice Mixer"},
{"HDMI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
@@ -3331,6 +3477,7 @@
{"HDMI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"HDMI_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"HDMI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
+ {"HDMI_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"HDMI", NULL, "HDMI_RX_Voice Mixer"},
{"HDMI", NULL, "HDMI_DL_HL"},
@@ -3339,6 +3486,7 @@
{"MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
{"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"},
@@ -3398,6 +3546,16 @@
{"LSM1 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
{"LSM_UL_HL", NULL, "LSM1 MUX"},
+ {"QCHAT_Tx Mixer", "PRI_TX_QCHAT", "PRI_I2S_TX"},
+ {"QCHAT_Tx Mixer", "SLIM_0_TX_QCHAT", "SLIMBUS_0_TX"},
+ {"QCHAT_Tx Mixer", "INTERNAL_BT_SCO_TX_QCHAT", "INT_BT_SCO_TX"},
+ {"QCHAT_Tx Mixer", "AFE_PCM_TX_QCHAT", "PCM_TX"},
+ {"QCHAT_Tx Mixer", "AUX_PCM_TX_QCHAT", "AUX_PCM_TX"},
+ {"QCHAT_Tx Mixer", "SEC_AUX_PCM_TX_QCHAT", "SEC_AUX_PCM_TX"},
+ {"QCHAT_Tx Mixer", "MI2S_TX_QCHAT", "MI2S_TX"},
+ {"QCHAT_Tx Mixer", "PRI_MI2S_TX_QCHAT", "PRI_MI2S_TX"},
+ {"QCHAT_UL", NULL, "QCHAT_Tx Mixer"},
+
{"INT_FM_RX", NULL, "INTFM_DL_HL"},
{"INTFM_UL_HL", NULL, "INT_FM_TX"},
{"AUX_PCM_RX", NULL, "AUXPCM_DL_HL"},
@@ -3773,6 +3931,14 @@
snd_soc_add_platform_controls(platform,
ec_ref_rx_mixer_controls,
ARRAY_SIZE(ec_ref_rx_mixer_controls));
+
+ snd_soc_add_platform_controls(platform,
+ get_rms_controls,
+ ARRAY_SIZE(get_rms_controls));
+
+ snd_soc_add_platform_controls(platform, msm_voc_session_controls,
+ ARRAY_SIZE(msm_voc_session_controls));
+
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index 4ce0db5..10be150 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -81,6 +81,7 @@
MSM_FRONTEND_DAI_DTMF_RX,
MSM_FRONTEND_DAI_LSM1,
MSM_FRONTEND_DAI_VOICE2,
+ MSM_FRONTEND_DAI_QCHAT,
MSM_FRONTEND_DAI_MAX,
};
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
index 053375e..1074d76 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -51,7 +51,7 @@
.fifo_size = 0,
};
-static int is_volte(struct msm_voice *pvolte)
+static bool is_volte(struct msm_voice *pvolte)
{
if (pvolte == &voice_info[VOLTE_SESSION_INDEX])
return true;
@@ -59,7 +59,7 @@
return false;
}
-static int is_voice2(struct msm_voice *pvoice2)
+static bool is_voice2(struct msm_voice *pvoice2)
{
if (pvoice2 == &voice_info[VOICE2_SESSION_INDEX])
return true;
@@ -67,6 +67,14 @@
return false;
}
+static bool is_qchat(struct msm_voice *pqchat)
+{
+ if (pqchat == &voice_info[QCHAT_SESSION_INDEX])
+ return true;
+ else
+ return false;
+}
+
static uint32_t get_session_id(struct msm_voice *pvoc)
{
uint32_t session_id = 0;
@@ -75,6 +83,8 @@
session_id = voc_get_session_id(VOLTE_SESSION_NAME);
else if (is_voice2(pvoc))
session_id = voc_get_session_id(VOICE2_SESSION_NAME);
+ else if (is_qchat(pvoc))
+ session_id = voc_get_session_id(QCHAT_SESSION_NAME);
else
session_id = voc_get_session_id(VOICE_SESSION_NAME);
@@ -120,6 +130,10 @@
voice = &voice_info[VOICE2_SESSION_INDEX];
pr_debug("%s: Open Voice2 Substream Id=%s\n",
__func__, substream->pcm->id);
+ } else if (!strncmp("QCHAT", substream->pcm->id, 5)) {
+ voice = &voice_info[QCHAT_SESSION_INDEX];
+ pr_debug("%s: Open QCHAT Substream Id=%s\n",
+ __func__, substream->pcm->id);
} else {
voice = &voice_info[VOICE_SESSION_INDEX];
pr_debug("%s: Open VOICE Substream Id=%s\n",
@@ -442,26 +456,17 @@
struct snd_ctl_elem_value *ucontrol)
{
int st_enable = ucontrol->value.integer.value[0];
+ uint32_t session_id = ucontrol->value.integer.value[1];
- pr_debug("%s: st enable=%d\n", __func__, st_enable);
+ pr_debug("%s: st enable=%d session_id=%#x\n", __func__, st_enable,
+ session_id);
- voc_set_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
- MODULE_ID_VOICE_MODULE_ST, st_enable);
- voc_set_pp_enable(voc_get_session_id(VOICE2_SESSION_NAME),
+ voc_set_pp_enable(session_id,
MODULE_ID_VOICE_MODULE_ST, st_enable);
return 0;
}
-static int msm_voice_slowtalk_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.integer.value[0] =
- voc_get_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
- MODULE_ID_VOICE_MODULE_ST);
- return 0;
-}
-
static struct snd_kcontrol_new msm_voice_controls[] = {
SOC_SINGLE_MULTI_EXT("Voice Rx Device Mute", SND_SOC_NOPM, 0, VSID_MAX,
0, 3, NULL, msm_voice_rx_device_mute_put),
@@ -471,8 +476,8 @@
NULL, msm_voice_gain_put),
SOC_ENUM_EXT("TTY Mode", msm_tty_mode_enum[0], msm_voice_tty_mode_get,
msm_voice_tty_mode_put),
- SOC_SINGLE_EXT("Slowtalk Enable", SND_SOC_NOPM, 0, 1, 0,
- msm_voice_slowtalk_get, msm_voice_slowtalk_put),
+ SOC_SINGLE_MULTI_EXT("Slowtalk Enable", SND_SOC_NOPM, 0, VSID_MAX, 0, 2,
+ NULL, msm_voice_slowtalk_put),
};
static struct snd_pcm_ops msm_pcm_ops = {
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
index 5425c46..f199be6 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
@@ -17,6 +17,7 @@
VOICE_SESSION_INDEX,
VOLTE_SESSION_INDEX,
VOICE2_SESSION_INDEX,
+ QCHAT_SESSION_INDEX,
VOICE_SESSION_INDEX_MAX,
};
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index a6ae357..df0fa6a 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -69,7 +69,7 @@
{0, 0, 0, 0, 0, 0, 0, 0}
};
-static int adm_dolby_get_parameters[ADM_GET_PARAMETER_LENGTH];
+static int adm_get_parameters[ADM_GET_PARAMETER_LENGTH];
int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
{
@@ -333,10 +333,10 @@
return rc;
}
-int adm_dolby_dap_get_params(int port_id, uint32_t module_id, uint32_t param_id,
- uint32_t params_length, char *params)
+int adm_get_params(int port_id, uint32_t module_id, uint32_t param_id,
+ uint32_t params_length, char *params)
{
- struct adm_cmd_get_pp_params_v5 *adm_params = NULL;
+ struct adm_cmd_get_pp_params_v5 *adm_params = NULL;
int sz, rc = 0, i = 0, index = afe_get_port_index(port_id);
int *params_data = (int *)params;
@@ -345,17 +345,17 @@
__func__, index, port_id);
return -EINVAL;
}
- sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
+ sz = sizeof(struct adm_cmd_get_pp_params_v5) + params_length;
adm_params = kzalloc(sz, GFP_KERNEL);
if (!adm_params) {
pr_err("%s, adm params memory alloc failed", __func__);
return -ENOMEM;
}
- memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
- params, params_length);
+ memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_get_pp_params_v5)),
+ params, params_length);
adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
adm_params->hdr.pkt_size = sz;
adm_params->hdr.src_svc = APR_SVC_ADM;
adm_params->hdr.src_domain = APR_DOMAIN_APPS;
@@ -376,31 +376,33 @@
atomic_set(&this_adm.copp_stat[index], 0);
rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
if (rc < 0) {
- pr_err("%s: Failed to Get DOLBY Params on port %d\n", __func__,
+ pr_err("%s: Failed to Get Params on port %d\n", __func__,
port_id);
rc = -EINVAL;
- goto dolby_dap_get_param_return;
+ goto adm_get_param_return;
}
/* Wait for the callback with copp id */
rc = wait_event_timeout(this_adm.wait[index],
- atomic_read(&this_adm.copp_stat[index]),
- msecs_to_jiffies(TIMEOUT_MS));
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
if (!rc) {
- pr_err("%s: DOLBY get params timed out port = %d\n", __func__,
+ pr_err("%s: get params timed out port = %d\n", __func__,
port_id);
rc = -EINVAL;
- goto dolby_dap_get_param_return;
+ goto adm_get_param_return;
}
if (params_data) {
- for (i = 0; i < adm_dolby_get_parameters[0]; i++)
- params_data[i] = adm_dolby_get_parameters[1+i];
+ for (i = 0; i < adm_get_parameters[0]; i++)
+ params_data[i] = adm_get_parameters[1+i];
}
rc = 0;
-dolby_dap_get_param_return:
+adm_get_param_return:
kfree(adm_params);
+
return rc;
}
+
static void adm_callback_debug_print(struct apr_client_data *data)
{
uint32_t *payload;
@@ -574,11 +576,11 @@
__func__, payload[0]);
rtac_make_adm_callback(payload,
data->payload_size);
- adm_dolby_get_parameters[0] = payload[3];
+ adm_get_parameters[0] = payload[3];
pr_debug("GET_PP PARAM:received parameter length: %x\n",
- adm_dolby_get_parameters[0]);
+ adm_get_parameters[0]);
for (i = 0; i < payload[3]; i++)
- adm_dolby_get_parameters[1+i] = payload[4+i];
+ adm_get_parameters[1+i] = payload[4+i];
atomic_set(&this_adm.copp_stat[index], 1);
wake_up(&this_adm.wait[index]);
break;
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 869d642..6a34470 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -542,6 +542,7 @@
}
apr_deregister(ac->apr);
+ ac->apr = NULL;
ac->mmap_apr = NULL;
q6asm_session_free(ac);
q6asm_mmap_apr_dereg();
@@ -550,6 +551,7 @@
/*done:*/
kfree(ac);
+ ac = NULL;
return;
}
@@ -1327,6 +1329,11 @@
{
pr_debug("%s:pkt_size=%d cmd_flg=%d session=%d\n", __func__, pkt_size,
cmd_flg, ac->session);
+ if (ac->apr == NULL) {
+ pr_err("%s: ac->apr is NULL", __func__);
+ return;
+ }
+
mutex_lock(&ac->cmd_lock);
hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
APR_HDR_LEN(sizeof(struct apr_hdr)),\
@@ -1354,6 +1361,10 @@
hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
APR_HDR_LEN(sizeof(struct apr_hdr)),\
APR_PKT_VER);
+ if (ac->apr == NULL) {
+ pr_err("%s: ac->apr is NULL", __func__);
+ return;
+ }
hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
hdr->src_domain = APR_DOMAIN_APPS;
hdr->dest_svc = APR_SVC_ASM;
@@ -2908,6 +2919,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
sz = sizeof(struct asm_volume_ctrl_lr_chan_gain);
q6asm_add_hdr_async(ac, &lrgain.hdr, sz, TRUE);
lrgain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -2950,6 +2967,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
sz = sizeof(struct asm_volume_ctrl_mute_config);
q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE);
mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -2991,6 +3014,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
sz = sizeof(struct asm_volume_ctrl_master_gain);
q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE);
vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -3034,6 +3063,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
sz = sizeof(struct asm_soft_pause_params);
q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE);
softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -3081,6 +3116,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
sz = sizeof(struct asm_soft_step_volume_params);
q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE);
softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -3127,6 +3168,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
if (eq_p == NULL) {
pr_err("%s[%d]: Invalid Eq param\n", __func__, ac->session);
rc = -EINVAL;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 147530c..056e2dc 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -130,6 +130,28 @@
return ret;
}
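+/* Only VSIDs known to this driver (including ALL_SESSION_VSID) are treated as valid. */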
+static bool voice_is_valid_session_id(uint32_t session_id)
+{
+ bool ret = false;
+
+ switch (session_id) {
+ case VOICE_SESSION_VSID:
+ case VOICE2_SESSION_VSID:
+ case VOLTE_SESSION_VSID:
+ case VOIP_SESSION_VSID:
+ case QCHAT_SESSION_VSID:
+ case ALL_SESSION_VSID:
+ ret = true;
+ break;
+ default:
+ pr_err("%s: Invalid session_id : %x\n", __func__, session_id);
+
+ break;
+ }
+
+ return ret;
+}
+
static u16 voice_get_mvm_handle(struct voice_data *v)
{
if (v == NULL) {
@@ -208,6 +230,9 @@
} else if (session_id ==
common.voice[VOC_PATH_VOLTE_PASSIVE].session_id) {
session_name = VOLTE_SESSION_NAME;
+ } else if (session_id ==
+ common.voice[VOC_PATH_QCHAT_PASSIVE].session_id) {
+ session_name = QCHAT_SESSION_NAME;
} else if (session_id == common.voice[VOC_PATH_FULL].session_id) {
session_name = VOIP_SESSION_NAME;
}
@@ -227,6 +252,9 @@
else if (!strncmp(name, "VoLTE session", 13))
session_id =
common.voice[VOC_PATH_VOLTE_PASSIVE].session_id;
+ else if (!strncmp(name, "QCHAT session", 13))
+ session_id =
+ common.voice[VOC_PATH_QCHAT_PASSIVE].session_id;
else
session_id = common.voice[VOC_PATH_FULL].session_id;
@@ -258,6 +286,10 @@
v = &common.voice[VOC_PATH_FULL];
break;
+ case QCHAT_SESSION_VSID:
+ v = &common.voice[VOC_PATH_QCHAT_PASSIVE];
+ break;
+
case ALL_SESSION_VSID:
break;
@@ -294,6 +326,10 @@
idx = VOC_PATH_FULL;
break;
+ case QCHAT_SESSION_VSID:
+ idx = VOC_PATH_QCHAT_PASSIVE;
+ break;
+
case ALL_SESSION_VSID:
idx = MAX_VOC_SESSIONS - 1;
break;
@@ -313,11 +349,6 @@
NULL : &common.voice[idx]);
}
-static bool is_voice_session(u32 session_id)
-{
- return (session_id == common.voice[VOC_PATH_PASSIVE].session_id);
-}
-
static bool is_voip_session(u32 session_id)
{
return (session_id == common.voice[VOC_PATH_FULL].session_id);
@@ -333,6 +364,11 @@
return (session_id == common.voice[VOC_PATH_VOICE2_PASSIVE].session_id);
}
+static bool is_qchat_session(u32 session_id)
+{
+ return (session_id == common.voice[VOC_PATH_QCHAT_PASSIVE].session_id);
+}
+
static bool is_voc_state_active(int voc_state)
{
if ((voc_state == VOC_RUN) ||
@@ -363,12 +399,19 @@
return ret;
}
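+/* True when the application-type field of the VSID is VSID_APP_CS_VOICE. */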
+static bool is_voice_app_id(u32 session_id)
+{
+ return (((session_id & APP_ID_MASK) >> APP_ID_SHIFT) ==
+ VSID_APP_CS_VOICE);
+}
+
static void init_session_id(void)
{
common.voice[VOC_PATH_PASSIVE].session_id = VOICE_SESSION_VSID;
common.voice[VOC_PATH_VOLTE_PASSIVE].session_id = VOLTE_SESSION_VSID;
common.voice[VOC_PATH_VOICE2_PASSIVE].session_id = VOICE2_SESSION_VSID;
common.voice[VOC_PATH_FULL].session_id = VOIP_SESSION_VSID;
+ common.voice[VOC_PATH_QCHAT_PASSIVE].session_id = QCHAT_SESSION_VSID;
}
static int voice_apr_register(void)
@@ -493,10 +536,8 @@
pr_err("%s: apr_mvm is NULL.\n", __func__);
return -EINVAL;
}
- pr_debug("%s: VoLTE command to MVM\n", __func__);
- if (is_volte_session(v->session_id) ||
- is_voice_session(v->session_id) ||
- is_voice2_session(v->session_id)) {
+ pr_debug("%s: Send Dual Control command to MVM\n", __func__);
+ if (!is_voip_session(v->session_id)) {
mvm_handle = voice_get_mvm_handle(v);
mvm_voice_ctl_cmd.hdr.hdr_field = APR_HDR_FIELD(
APR_MSG_TYPE_SEQ_CMD,
@@ -570,9 +611,7 @@
/* send cmd to create mvm session and wait for response */
if (!mvm_handle) {
- if (is_voice_session(v->session_id) ||
- is_volte_session(v->session_id) ||
- is_voice2_session(v->session_id)) {
+ if (!is_voip_session(v->session_id)) {
mvm_session_cmd.hdr.hdr_field = APR_HDR_FIELD(
APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(APR_HDR_SIZE),
@@ -597,6 +636,10 @@
strlcpy(mvm_session_cmd.mvm_session.name,
VOICE2_SESSION_VSID_STR,
sizeof(mvm_session_cmd.mvm_session.name));
+ } else if (is_qchat_session(v->session_id)) {
+ strlcpy(mvm_session_cmd.mvm_session.name,
+ QCHAT_SESSION_VSID_STR,
+ sizeof(mvm_session_cmd.mvm_session.name));
} else {
strlcpy(mvm_session_cmd.mvm_session.name,
"default modem voice",
@@ -659,9 +702,7 @@
}
/* send cmd to create cvs session */
if (!cvs_handle) {
- if (is_voice_session(v->session_id) ||
- is_volte_session(v->session_id) ||
- is_voice2_session(v->session_id)) {
+ if (!is_voip_session(v->session_id)) {
pr_debug("%s: creating CVS passive session\n",
__func__);
@@ -687,6 +728,10 @@
strlcpy(cvs_session_cmd.cvs_session.name,
VOICE2_SESSION_VSID_STR,
sizeof(cvs_session_cmd.cvs_session.name));
+ } else if (is_qchat_session(v->session_id)) {
+ strlcpy(cvs_session_cmd.cvs_session.name,
+ QCHAT_SESSION_VSID_STR,
+ sizeof(cvs_session_cmd.cvs_session.name));
} else {
strlcpy(cvs_session_cmd.cvs_session.name,
"default modem voice",
@@ -875,7 +920,9 @@
}
}
- if (is_voip_session(v->session_id) || v->voc_state == VOC_ERROR) {
+ if (is_voip_session(v->session_id) ||
+ is_qchat_session(v->session_id) ||
+ v->voc_state == VOC_ERROR) {
/* Destroy CVS. */
pr_debug("%s: CVS destroy session\n", __func__);
@@ -2964,6 +3011,22 @@
v->music_info.force = 1;
voice_cvs_stop_playback(v);
voice_cvs_stop_record(v);
+ /* If a CS voice call is already active when the VoLTE session ends,
+ * an SRVCC handover has occurred.  If recording was started on the
+ * VoLTE session, restart it on the voice session so it continues
+ * after the handover.
+ */
+ if (is_volte_session(v->session_id) &&
+ ((common.voice[VOC_PATH_PASSIVE].voc_state == VOC_RUN) ||
+ (common.voice[VOC_PATH_PASSIVE].voc_state == VOC_CHANGE))) {
+ if (v->rec_info.rec_enable) {
+ voice_cvs_start_record(
+ &common.voice[VOC_PATH_PASSIVE],
+ v->rec_info.rec_mode);
+ common.srvcc_rec_flag = true;
+
+ pr_debug("%s: switch recording, srvcc_rec_flag %d\n",
+ __func__, common.srvcc_rec_flag);
+ }
+ }
/* send stop voice cmd */
voice_send_stop_voice_cmd(v);
@@ -3536,17 +3599,35 @@
return ret;
}
-int voc_start_record(uint32_t port_id, uint32_t set)
+int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id)
{
int ret = 0;
int rec_mode = 0;
u16 cvs_handle;
- int i, rec_set = 0;
+ int rec_set = 0;
+ struct voice_session_itr itr;
+ struct voice_data *v = NULL;
- for (i = 0; i < MAX_VOC_SESSIONS; i++) {
- struct voice_data *v = &common.voice[i];
- pr_debug("%s: i:%d port_id: %d, set: %d\n",
- __func__, i, port_id, set);
+ /* check if session_id is valid */
+ if (!voice_is_valid_session_id(session_id)) {
+ pr_err("%s: Invalid session id:%u\n", __func__,
+ session_id);
+
+ return -EINVAL;
+ }
+
+ voice_itr_init(&itr, session_id);
+ pr_debug("%s: session_id:%u\n", __func__, session_id);
+
+ while (voice_itr_get_next_session(&itr, &v)) {
+ if (v == NULL) {
+ pr_err("%s: v is NULL, sessionid:%u\n", __func__,
+ session_id);
+
+ break;
+ }
+ pr_debug("%s: port_id: %d, set: %d, v: %p\n",
+ __func__, port_id, set, v);
mutex_lock(&v->lock);
rec_mode = v->rec_info.rec_mode;
@@ -3554,13 +3635,11 @@
if (set) {
if ((v->rec_route_state.ul_flag != 0) &&
(v->rec_route_state.dl_flag != 0)) {
- pr_debug("%s: i=%d, rec mode already set.\n",
- __func__, i);
+ pr_debug("%s: rec mode already set.\n",
+ __func__);
+
mutex_unlock(&v->lock);
- if (i < MAX_VOC_SESSIONS)
- continue;
- else
- return 0;
+ continue;
}
if (port_id == VOICE_RECORD_TX) {
@@ -3590,13 +3669,10 @@
} else {
if ((v->rec_route_state.ul_flag == 0) &&
(v->rec_route_state.dl_flag == 0)) {
- pr_debug("%s: i=%d, rec already stops.\n",
- __func__, i);
+ pr_debug("%s: rec already stops.\n",
+ __func__);
mutex_unlock(&v->lock);
- if (i < MAX_VOC_SESSIONS)
- continue;
- else
- return 0;
+ continue;
}
if (port_id == VOICE_RECORD_TX) {
@@ -3625,8 +3701,8 @@
}
}
}
- pr_debug("%s: i=%d, mode =%d, set =%d\n", __func__,
- i, rec_mode, rec_set);
+ pr_debug("%s: mode =%d, set =%d\n", __func__,
+ rec_mode, rec_set);
cvs_handle = voice_get_cvs_handle(v);
if (cvs_handle != 0) {
@@ -3636,6 +3712,18 @@
ret = voice_cvs_stop_record(v);
}
+ /* During SRVCC, recording switches from the VoLTE session to the
+ * voice session, so when recording is stopped it must also be
+ * stopped on the voice session.
+ */
+ if ((!rec_set) && common.srvcc_rec_flag) {
+ pr_debug("%s, srvcc_rec_flag:%d\n", __func__,
+ common.srvcc_rec_flag);
+
+ voice_cvs_stop_record(&common.voice[VOC_PATH_PASSIVE]);
+ common.srvcc_rec_flag = false;
+ }
+
/* Cache the value */
v->rec_info.rec_enable = rec_set;
v->rec_info.rec_mode = rec_mode;
@@ -4090,28 +4178,37 @@
int voc_set_pp_enable(uint32_t session_id, uint32_t module_id, uint32_t enable)
{
- struct voice_data *v = voice_get_session(session_id);
+ struct voice_data *v = NULL;
int ret = 0;
+ struct voice_session_itr itr;
- if (v == NULL) {
- pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+ voice_itr_init(&itr, session_id);
+ while (voice_itr_get_next_session(&itr, &v)) {
+ if (v != NULL) {
+ if (!(is_voice_app_id(v->session_id) ||
+ is_volte_session(v->session_id)))
+ continue;
- return -EINVAL;
- }
+ mutex_lock(&v->lock);
+ if (module_id == MODULE_ID_VOICE_MODULE_ST)
+ v->st_enable = enable;
- mutex_lock(&v->lock);
- if (module_id == MODULE_ID_VOICE_MODULE_ST)
- v->st_enable = enable;
-
- if (v->voc_state == VOC_RUN) {
- if (module_id == MODULE_ID_VOICE_MODULE_ST)
- ret = voice_send_set_pp_enable_cmd(v,
+ if (v->voc_state == VOC_RUN) {
+ if (module_id ==
+ MODULE_ID_VOICE_MODULE_ST)
+ ret = voice_send_set_pp_enable_cmd(v,
MODULE_ID_VOICE_MODULE_ST,
enable);
+ }
+ mutex_unlock(&v->lock);
+ } else {
+ pr_err("%s: invalid session_id 0x%x\n", __func__,
+ session_id);
+ ret = -EINVAL;
+ break;
+ }
}
- mutex_unlock(&v->lock);
-
return ret;
}
@@ -4536,6 +4633,11 @@
v = voice_get_session(session_id);
if (v != NULL)
v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(QCHAT_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
} else {
pr_debug("%s: Reset event received in Voice service\n",
__func__);
@@ -4551,6 +4653,9 @@
c->voice[i].shmem_info.mem_handle = 0;
}
}
+ /* clean up srvcc rec flag */
+ c->srvcc_rec_flag = false;
+
return 0;
}
@@ -4679,6 +4784,11 @@
v = voice_get_session(session_id);
if (v != NULL)
v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(QCHAT_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
} else {
pr_debug("%s: Reset event received in Voice service\n",
__func__);
@@ -4951,6 +5061,11 @@
v = voice_get_session(session_id);
if (v != NULL)
v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(QCHAT_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
} else {
pr_debug("%s: Reset event received in Voice service\n",
__func__);
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index b3a98e2..20f2857 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -1309,7 +1309,7 @@
void *buf;
};
-#define MAX_VOC_SESSIONS 4
+#define MAX_VOC_SESSIONS 5
struct common_data {
/* these default values are for all devices */
@@ -1338,6 +1338,8 @@
struct dtmf_driver_info dtmf_info;
struct voice_data voice[MAX_VOC_SESSIONS];
+
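+ /* Set when in-call recording has been handed over from the VoLTE
+ * session to the CS voice session during SRVCC.
+ */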
+ bool srvcc_rec_flag;
};
struct voice_session_itr {
@@ -1372,21 +1374,36 @@
#define VOC_PATH_FULL 1
#define VOC_PATH_VOLTE_PASSIVE 2
#define VOC_PATH_VOICE2_PASSIVE 3
+#define VOC_PATH_QCHAT_PASSIVE 4
#define MAX_SESSION_NAME_LEN 32
#define VOICE_SESSION_NAME "Voice session"
#define VOIP_SESSION_NAME "VoIP session"
#define VOLTE_SESSION_NAME "VoLTE session"
#define VOICE2_SESSION_NAME "Voice2 session"
+#define QCHAT_SESSION_NAME "QCHAT session"
#define VOICE2_SESSION_VSID_STR "10DC1000"
+#define QCHAT_SESSION_VSID_STR "10803000"
#define VOICE_SESSION_VSID 0x10C01000
#define VOICE2_SESSION_VSID 0x10DC1000
#define VOLTE_SESSION_VSID 0x10C02000
#define VOIP_SESSION_VSID 0x10004000
+#define QCHAT_SESSION_VSID 0x10803000
#define ALL_SESSION_VSID 0xFFFFFFFF
#define VSID_MAX ALL_SESSION_VSID
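+/* Bits [17:12] of a VSID encode the application type (see enum vsid_app_type). */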
+#define APP_ID_MASK 0x3F000
+#define APP_ID_SHIFT 12
+enum vsid_app_type {
+ VSID_APP_NONE = 0,
+ VSID_APP_CS_VOICE = 1,
+ VSID_APP_IMS = 2, /* IMS voice services covering VoLTE etc */
+ VSID_APP_QCHAT = 3,
+ VSID_APP_VOIP = 4, /* VoIP on AP HLOS without modem processor */
+ VSID_APP_MAX,
+};
+
/* called by alsa driver */
int voc_set_pp_enable(uint32_t session_id, uint32_t module_id,
uint32_t enable);
@@ -1423,7 +1440,7 @@
uint32_t voc_get_session_id(char *name);
int voc_start_playback(uint32_t set, uint16_t port_id);
-int voc_start_record(uint32_t port_id, uint32_t set);
+int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id);
int voice_get_idx_for_session(u32 session_id);
#endif