Merge "ASoC: msm: qdsp6v2: Open unique COPP port for concurrent ULL streams"
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index 8a3e704..c3c8212 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -9,7 +9,7 @@
- compatible
Usage: required
Value type: <string>
- Definition: must be "qcom,clk-cpu-osm".
+ Definition: must be "qcom,clk-cpu-osm" or "qcom,clk-cpu-osm-v2".
- reg
Usage: required
@@ -85,24 +85,6 @@
by the OSM hardware for each supported DCVS setpoint
of the Performance cluster.
-- qcom,l3-min-cpr-vc-binX
- Usage: required
- Value type: <u32>
- Definition: First virtual corner which does not use PLL post-divider
- for the L3 clock domain.
-
-- qcom,pwrcl-min-cpr-vc-binX
- Usage: required
- Value type: <u32>
- Definition: First virtual corner which does not use PLL post-divider
- for the power cluster.
-
-- qcom,perfcl-min-cpr-vc-binX
- Usage: required
- Value type: <u32>
- Definition: First virtual corner which does not use PLL post-divider
- for the performance cluster.
-
- qcom,osm-no-tz
Usage: optional
Value type: <empty>
@@ -501,10 +483,6 @@
< 1881600000 0x404c1462 0x00004e4e 0x2 21 >,
< 1958400000 0x404c1566 0x00005252 0x3 22 >;
- qcom,l3-min-cpr-vc-bin0 = <7>;
- qcom,pwrcl-min-cpr-vc-bin0 = <6>;
- qcom,perfcl-min-cpr-vc-bin0 = <7>;
-
qcom,up-timer =
<1000 1000 1000>;
qcom,down-timer =
diff --git a/Documentation/devicetree/bindings/arm/msm/qmp-debugfs-client.txt b/Documentation/devicetree/bindings/arm/msm/qmp-debugfs-client.txt
new file mode 100644
index 0000000..655bf89
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qmp-debugfs-client.txt
@@ -0,0 +1,17 @@
+QMP debugfs client:
+-----------------
+
+QTI Messaging Protocol(QMP) debugfs client is an interface for clients to
+send data to the Always on processor using QMP.
+
+Required properties :
+- compatible : must be "qcom,debugfs-qmp-client"
+- mboxes : list of QMP mailbox phandle and channel identifier tuples.
+- mbox-names : names of the listed mboxes
+
+Example :
+ qcom,qmp-client {
+ compatible = "qcom,debugfs-qmp-client";
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "aop";
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt
index ce2d8bd..1114308 100644
--- a/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt
+++ b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt
@@ -2,12 +2,15 @@
Required properties:
-compatible : Should be one of
- To communicate with modem
+ To communicate with adsp
qcom,smp2pgpio_client_rdbg_2_in (inbound)
qcom,smp2pgpio_client_rdbg_2_out (outbound)
To communicate with modem
qcom,smp2pgpio_client_rdbg_1_in (inbound)
qcom,smp2pgpio_client_rdbg_1_out (outbound)
+ To communicate with cdsp
+ qcom,smp2pgpio_client_rdbg_5_in (inbound)
+ qcom,smp2pgpio_client_rdbg_5_out (outbound)
-gpios : the relevant gpio pins of the entry.
Example:
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
index c8077cb..051b315 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -20,6 +20,8 @@
- qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
- qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
- qcom,bsm-ee : optional, indicate the BAM EE value, changes from target to target. Default value is 1 if not specified.
+ - qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+ - iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
Example:
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 3c8a79a..fa27198 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -40,6 +40,9 @@
required. For other targets such as fsm, they do not perform
bus scaling. It is not required for those targets.
+ - qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+ - iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
+
Example:
qcom,qcrypto@fd444000 {
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index d3222fb..dd668cb 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -52,6 +52,9 @@
The number of offsets defined should reflect the
amount of mixers that can drive data to a panel
interface.
+- qcom,sde-dspp-top-off: Offset address for the dspp top block.
+ The offset is calculated from register "mdp_phys"
+ defined in reg property.
- qcom,sde-dspp-off: Array of offset addresses for the available dspp
blocks. These offsets are calculated from
register "mdp_phys" defined in reg property.
@@ -207,6 +210,7 @@
e.g. qcom,sde-dspp-blocks
-- qcom,sde-dspp-pcc: offset and version of PCC hardware
-- qcom,sde-dspp-gc: offset and version of GC hardware
+ -- qcom,sde-dspp-igc: offset and version of IGC hardware
-- qcom,sde-dspp-hsic: offset and version of global PA adjustment
-- qcom,sde-dspp-memcolor: offset and version of PA memcolor hardware
-- qcom,sde-dspp-sixzone: offset and version of PA sixzone hardware
@@ -424,6 +428,7 @@
0x00002600 0x00002800>;
qcom,sde-mixer-off = <0x00045000 0x00046000
0x00047000 0x0004a000>;
+ qcom,sde-dspp-top-off = <0x1300>;
qcom,sde-dspp-off = <0x00055000 0x00057000>;
qcom,sde-dspp-ad-off = <0x24000 0x22800>;
qcom,sde-dspp-ad-version = <0x00030000>;
@@ -481,6 +486,7 @@
qcom,sde-sspp-src-size = <0x100>;
qcom,sde-mixer-size = <0x100>;
qcom,sde-ctl-size = <0x100>;
+ qcom,sde-dspp-top-size = <0xc>;
qcom,sde-dspp-size = <0x100>;
qcom,sde-intf-size = <0x100>;
qcom,sde-dsc-size = <0x100>;
@@ -599,6 +605,7 @@
};
qcom,sde-dspp-blocks {
+ qcom,sde-dspp-igc = <0x0 0x00010000>;
qcom,sde-dspp-pcc = <0x1700 0x00010000>;
qcom,sde-dspp-gc = <0x17c0 0x00010000>;
qcom,sde-dspp-hsic = <0x0 0x00010000>;
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 669997c..cbe8378 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -144,10 +144,11 @@
0xff = default value.
- qcom,mdss-dsi-border-color: Defines the border color value if border is present.
0 = default value.
-- qcom,mdss-dsi-panel-jitter: An integer value defines the panel jitter timing for rsc
- backoff time. The jitter configurition causes the early
- wakeup if panel needs to adjust before vsync.
- Default jitter value is 5%. Max allowed value is 25%.
+- qcom,mdss-dsi-panel-jitter: Panel jitter value is expressed in terms of numerator
+ and denominator. It contains two u32 values - numerator
+                                        followed by denominator. The jitter configuration causes
+ the early wakeup if panel needs to adjust before vsync.
+ Default jitter value is 2.0%. Max allowed value is 10%.
- qcom,mdss-dsi-panel-prefill-lines: An integer value defines the panel prefill lines required to
calculate the backoff time of rsc.
Default value is 16 lines. Max allowed value is vtotal.
@@ -664,7 +665,7 @@
<40 120 128>,
<128 240 64>;
qcom,mdss-dsi-panel-orientation = "180"
- qcom,mdss-dsi-panel-jitter = <0x8>;
+ qcom,mdss-dsi-panel-jitter = <0x8 0x10>;
qcom,mdss-dsi-panel-prefill-lines = <0x10>;
qcom,mdss-dsi-force-clock-lane-hs;
qcom,compression-mode = "dsc";
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
index 51abe56..21edaa0 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -14,6 +14,10 @@
- #size-cells: Should be <0> as i2c addresses have no size component
- qcom,wrapper-core: Wrapper QUPv3 core containing this I2C controller.
+Optional property:
+ - qcom,clk-freq-out : Desired I2C bus clock frequency in Hz.
+ When missing default to 400000Hz.
+
Child nodes should conform to i2c bus binding.
Example:
@@ -32,4 +36,5 @@
#address-cells = <1>;
#size-cells = <0>;
qcom,wrapper-core = <&qupv3_0>;
+ qcom,clk-freq-out = <400000>;
};
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 375eaf2..196f6f7 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -91,6 +91,11 @@
a four level page table configuration. Set to use a three
level page table instead.
+- qcom,no-asid-retention:
+ Some hardware may lose internal state for asid after
+ retention. No cache invalidation operations involving asid
+ may be used.
+
- clocks : List of clocks to be used during SMMU register access. See
Documentation/devicetree/bindings/clock/clock-bindings.txt
for information about the format. For each clock specified
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
new file mode 100644
index 0000000..933ad85
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
@@ -0,0 +1,438 @@
+* Qualcomm Technologies, Inc. MSM EEPROM
+
+EEPROM is a one-time programmable (OTP) device that stores the calibration data
+used for the camera sensor. It may either be integrated in the sensor module or in
+the sensor itself. As a result, the power, clock and GPIOs may be the same as
+the camera sensor. The following describes the page block map, power supply,
+clock, GPIO and power on sequence properties of the EEPROM device.
+
+=======================================================
+Required Node Structure if probe happens from userspace
+=======================================================
+The EEPROM device is described in one level of the device node.
+
+======================================
+First Level Node - CAM EEPROM device
+======================================
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,eeprom".
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Should specify the hardware index id.
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Register values.
+
+- regulator-names
+ Usage: required
+ Value type: <string>
+ Definition: Name of the regulator resources for EEPROM HW.
+
+- xxxx-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Regulator reference corresponding to the names listed in
+ "regulator-names".
+
+- rgltr-cntrl-support
+ Usage: required
+ Value type: <bool>
+ Definition: This property specifies if the regulator control is supported
+ e.g. rgltr-min-voltage.
+
+- rgltr-min-voltage
+ Usage: required
+ Value type: <u32>
+ Definition: should contain minimum voltage level for regulators
+ mentioned in regulator-names property.
+
+- rgltr-max-voltage
+ Usage: required
+ Value type: <u32>
+ Definition: should contain maximum voltage level for regulators
+ mentioned in regulator-names property.
+
+- rgltr-load-current
+ Usage: required
+ Value type: <u32>
+ Definition: should contain the maximum current in microamps required for
+ the regulators mentioned in regulator-names property.
+
+- gpio-no-mux
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio mux type.
+
+- gpios
+ Usage: required
+ Value type: <phandle>
+ Definition: should specify the gpios to be used for the eeprom.
+
+- gpio-reset
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the reset gpio index.
+
+- gpio-standby
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the standby gpio index.
+
+- gpio-req-tbl-num
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+ Usage: required
+ Value type: <string>
+ Definition: should specify the gpio labels.
+
+- sensor-position
+ Usage: required
+ Value type: <u32>
+ Definition: should contain the mount angle of the camera sensor.
+
+- cci-master
+ Usage: required
+ Value type: <u32>
+ Definition: should contain i2c master id to be used for this camera
+ sensor.
+
+- sensor-mode
+ Usage: required
+ Value type: <u32>
+ Definition: should contain sensor mode supported.
+
+- clock-names
+ Usage: required
+ Value type: <string>
+ Definition: List of clock names required for EEPROM HW.
+
+- clocks
+ Usage: required
+ Value type: <phandle>
+ Definition: List of clocks used for EEPROM HW.
+
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+ Definition: says what all different clock levels eeprom node has.
+
+- clock-rates
+ Usage: required
+ Value type: <u32>
+ Definition: List of clocks rates.
+
+Example:
+
+ eprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,eeprom";
+ cam_vdig-supply = <&pm8998_l5>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ regulator-names = "cam_vdig", "cam_vio";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1200000 0>;
+ rgltr-max-voltage = <1200000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ gpios = <&msmgpio 26 0>,
+ <&msmgpio 37 0>,
+ <&msmgpio 36 0>;
+ gpio-reset = <1>;
+ gpio-standby = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK",
+ "CAM_RESET1",
+ "CAM_STANDBY";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+=======================================================
+Required Node Structure if probe happens from kernel
+=======================================================
+The EEPROM device is described in one level of the device node.
+
+======================================
+First Level Node - CAM EEPROM device
+======================================
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,eeprom".
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Should specify the hardware index id.
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Register values.
+
+- qcom,eeprom-name
+ Usage: required
+ Value type: <string>
+ Definition: Name of the EEPROM HW.
+
+- qcom,slave-addr
+ Usage: required
+ Value type: <u32>
+ Definition: Slave address of the EEPROM HW.
+
+- qcom,num-blocks
+ Usage: required
+ Value type: <u32>
+ Definition: Total block number that eeprom contains.
+
+- qcom,pageX
+ Usage: required
+ Value type: <u32>
+ Definition: List of values specifying page size, start address,
+ address type, data, data type, delay in ms.
+                size 0 stands for non-paged.
+
+- qcom,pollX
+ Usage: required
+ Value type: <u32>
+ Definition: List of values specifying poll size, poll reg address,
+ address type, data, data type, delay in ms.
+                size 0 stands for not used.
+
+- qcom,memX
+ Usage: required
+ Value type: <u32>
+ Definition: List of values specifying memory size, start address,
+ address type, data, data type, delay in ms.
+                size 0 stands for not used.
+
+- qcom,saddrX
+ Usage: required
+ Value type: <u32>
+ Definition: property should specify the slave address for block (%d).
+
+- regulator-names
+ Usage: required
+ Value type: <string>
+ Definition: Name of the regulator resources for EEPROM HW.
+
+- qcom,cmm-data-support
+ Usage: required
+ Value type: <u32>
+  Definition: Camera MultiModule data capability flag.
+
+- qcom,cmm-data-compressed
+ Usage: required
+ Value type: <u32>
+ Definition: Camera MultiModule data compression flag.
+
+- qcom,cmm-data-offset
+ Usage: required
+ Value type: <u32>
+ Definition: Camera MultiModule data start offset.
+
+- qcom,cmm-data-size
+ Usage: required
+ Value type: <u32>
+ Definition: Camera MultiModule data size.
+
+- qcom,cam-power-seq-type
+ Usage: required
+ Value type: <string>
+ Definition: should specify the power on sequence types.
+
+- qcom,cam-power-seq-val
+ Usage: required
+ Value type: <string>
+ Definition: should specify the power on sequence values.
+
+- qcom,cam-power-seq-cfg-val
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the power on sequence config values.
+
+- qcom,cam-power-seq-delay
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the power on sequence delay time in ms.
+
+- xxxx-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Regulator reference corresponding to the names listed in
+ "regulator-names".
+
+- rgltr-cntrl-support
+ Usage: required
+ Value type: <bool>
+ Definition: This property specifies if the regulator control is supported
+ e.g. rgltr-min-voltage.
+
+- rgltr-min-voltage
+ Usage: required
+ Value type: <u32>
+ Definition: should contain minimum voltage level for regulators
+ mentioned in regulator-names property.
+
+- rgltr-max-voltage
+ Usage: required
+ Value type: <u32>
+ Definition: should contain maximum voltage level for regulators
+ mentioned in regulator-names property.
+
+- rgltr-load-current
+ Usage: required
+ Value type: <u32>
+ Definition: should contain the maximum current in microamps required for
+ the regulators mentioned in regulator-names property.
+
+- gpio-no-mux
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio mux type.
+
+- gpios
+ Usage: required
+ Value type: <phandle>
+ Definition: should specify the gpios to be used for the eeprom.
+
+- gpio-reset
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the reset gpio index.
+
+- gpio-standby
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the standby gpio index.
+
+- gpio-req-tbl-num
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+ Usage: required
+ Value type: <string>
+ Definition: should specify the gpio labels.
+
+- sensor-position
+ Usage: required
+ Value type: <u32>
+ Definition: should contain the mount angle of the camera sensor.
+
+- cci-master
+ Usage: required
+ Value type: <u32>
+ Definition: should contain i2c master id to be used for this camera
+ sensor.
+
+- sensor-mode
+ Usage: required
+ Value type: <u32>
+ Definition: should contain sensor mode supported.
+
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+ Definition: says what all different clock levels eeprom node has.
+
+- clock-names
+ Usage: required
+ Value type: <string>
+ Definition: List of clock names required for EEPROM HW.
+
+- clocks
+ Usage: required
+ Value type: <phandle>
+ Definition: List of clocks used for EEPROM HW.
+
+- clock-rates
+ Usage: required
+ Value type: <u32>
+ Definition: List of clocks rates.
+
+Example:
+
+ eeprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ qcom,eeprom-name = "msm_eeprom";
+ compatible = "qcom,eeprom";
+ qcom,slave-addr = <0x60>;
+ qcom,num-blocks = <2>;
+ qcom,page0 = <1 0x100 2 0x01 1 1>;
+ qcom,poll0 = <0 0x0 2 0 1 1>;
+ qcom,mem0 = <0 0x0 2 0 1 0>;
+ qcom,page1 = <1 0x0200 2 0x8 1 1>;
+ qcom,pageen1 = <1 0x0202 2 0x01 1 10>;
+ qcom,poll1 = <0 0x0 2 0 1 1>;
+ qcom,mem1 = <32 0x3000 2 0 1 0>;
+ qcom,saddr1 = <0x62>;
+ qcom,cmm-data-support;
+ qcom,cmm-data-compressed;
+ qcom,cmm-data-offset = <0>;
+ qcom,cmm-data-size = <0>;
+ qcom,cam-power-seq-type = "sensor_vreg",
+ "sensor_vreg", "sensor_clk",
+ "sensor_gpio", "sensor_gpio";
+ qcom,cam-power-seq-val = "cam_vdig",
+ "cam_vio", "sensor_cam_mclk",
+ "sensor_gpio_reset",
+ "sensor_gpio_standby";
+ qcom,cam-power-seq-cfg-val = <1 1 24000000 1 1>;
+ qcom,cam-power-seq-delay = <1 1 5 5 10>;
+ cam_vdig-supply = <&pm8998_l5>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ regulator-names = "cam_vdig", "cam_vio";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1200000 0>;
+ rgltr-max-voltage = <1200000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+               gpio-no-mux = <0>;
+ gpios = <&msmgpio 26 0>,
+ <&msmgpio 37 0>,
+ <&msmgpio 36 0>;
+ gpio-reset = <1>;
+ gpio-standby = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK",
+ "CAM_RESET1",
+ "CAM_STANDBY";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-cntl-level = "turbo";
+ clock-names = "cam_clk";
+ clock-rates = <24000000>;
+ };
diff --git a/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
new file mode 100644
index 0000000..d24314a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
@@ -0,0 +1,126 @@
+* Qualcomm Technologies, Inc. MSM FLASH
+
+The MSM camera Flash driver provides the definitions for
+enabling and disabling LED Torch/Flash by requesting it to
+PMIC/I2C/GPIO based hardware. It provides the functions for
+the Client to control the Flash hardware.
+
+=======================================================
+Required Node Structure
+=======================================================
+The Flash device is described in one level of the device node.
+
+======================================
+First Level Node - CAM FLASH device
+======================================
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,camera-flash".
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Should specify the hardware index id.
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Register values.
+
+- flash-source
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain array of phandles to Flash source nodes.
+
+- torch-source
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain array of phandles to torch source nodes.
+
+- switch-source
+ Usage: Optional
+ Value type: <phandle>
+ Definition: Should contain phandle to switch source nodes.
+
+- slave-id
+ Usage: optional
+ Value type: <u32>
+ Definition: should contain i2c slave address, device id address
+ and expected id read value.
+
+- cci-master
+ Usage: optional
+ Value type: <u32>
+ Definition: should contain i2c master id to be used for this camera
+ flash.
+
+- max-current
+ Usage: optional
+ Value type: <u32>
+ Definition: Max current in mA supported by flash
+
+- max-duration
+ Usage: optional
+ Value type: <u32>
+ Definition: Max duration in ms flash can glow.
+
+- gpios
+ Usage: optional
+ Value type: <u32>
+ Definition: should specify the gpios to be used for the flash.
+
+- gpio-req-tbl-num
+ Usage: optional
+ Value type: <u32>
+ Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+ Usage: optional
+ Value type: <u32>
+ Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+ Usage: optional
+ Value type: <u32>
+ Definition: should specify the gpio labels.
+
+- gpio-flash-reset
+ Usage: optional
+ Value type: <u32>
+ Definition: should contain index to gpio used by flash's "flash reset" pin.
+
+- gpio-flash-en
+ Usage: optional
+ Value type: <u32>
+ Definition: should contain index to gpio used by flash's "flash enable" pin.
+
+- gpio-flash-now
+ Usage: optional
+ Value type: <u32>
+ Definition: should contain index to gpio used by flash's "flash now" pin.
+
+Example:
+
+led_flash_rear: qcom,camera-flash@0 {
+ reg = <0x00 0x00>;
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ switch-source = <&pmi8998_switch0>;
+ qcom,slave-id = <0x00 0x00 0x0011>;
+ qcom,cci-master = <0>;
+ gpios = <&msmgpio 23 0>,
+                       <&msmgpio 24 0>,
+ <&msmgpio 25 0>;
+ qcom,gpio-flash-reset = <0>;
+ qcom,gpio-flash-en = <0>;
+ qcom,gpio-flash-now = <1>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <0 0>;
+ qcom,gpio-req-tbl-label = "FLASH_EN",
+ "FLASH_NOW";
+ qcom,max-current = <1500>;
+ qcom,max-duration = <1200>;
+ };
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index 58bac0b..46649af 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -81,6 +81,7 @@
limits.
- qcom,mdss-rot-vbif-qos-setting: This array is used to program vbif qos remapper register
priority for rotator clients.
+- qcom,mdss-rot-vbif-memtype: Array of u32 vbif memory type settings for each xin port.
- qcom,mdss-rot-cdp-setting: Integer array of size two, to indicate client driven
prefetch is available or not. Index 0 represents
if CDP is enabled for read and index 1, if CDP
@@ -173,6 +174,7 @@
/* VBIF QoS remapper settings*/
qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
+ qcom,mdss-rot-vbif-memtype = <3 3>;
com,mdss-rot-cdp-setting = <1 1>;
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index da9a632..24c75e2 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -16,6 +16,9 @@
Required "interrupt-names" are "hc_irq" and "pwr_irq".
- <supply-name>-supply: phandle to the regulator device tree node
Required "supply-name" are "vdd" and "vdd-io".
+ - qcom,ice-clk-rates: this is an array that specifies supported Inline
+ Crypto Engine (ICE) clock frequencies, Units - Hz.
+ - sdhc-msm-crypto: phandle to SDHC ICE node
Required alias:
- The slot number is specified via an alias with the following format
@@ -77,6 +80,11 @@
register dumps on CRC errors and also downgrade bus speed mode to
SDR50/DDR50 in case of continuous CRC errors. Set this flag to enable
this workaround.
+ - qcom,restore-after-cx-collapse - specifies whether the SDCC registers contents need
+ to be saved and restored by software when the CX Power Collapse feature is enabled.
+ On certain chipsets, coming out of the CX Power Collapse event, the SDCC registers
+ contents will not be retained. It is software responsibility to restore the
+ SDCC registers before resuming to normal operation.
In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
- qcom,<supply>-always-on - specifies whether supply should be kept "on" always.
@@ -116,6 +124,7 @@
reg-names = "hc_mem", "core_mem";
interrupts = <0 123 0>, <0 138 0>;
interrupt-names = "hc_irq", "pwr_irq";
+ sdhc-msm-crypto = <&sdcc1_ice>;
vdd-supply = <&pm8941_l21>;
vdd-io-supply = <&pm8941_l13>;
@@ -138,6 +147,7 @@
qcom,nonremovable;
qcom,large-address-bus;
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,ice-clk-rates = <300000000 150000000>;
qcom,scaling-lower-bus-speed-mode = "DDR52";
diff --git a/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt b/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt
index 1398309..b4ce7cb 100644
--- a/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt
+++ b/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt
@@ -23,10 +23,8 @@
- qcom, msm_bus,num_paths: The paths for source and destination ports
- qcom, msm_bus,vectors: Vectors for bus topology.
- pinctrl-names: Names for the TSIF mode configuration to specify which TSIF interface is active.
-
-Optional properties:
- - qcom,lpass-timer-tts : Indicates to add time stamps to TS packets from LPASS timer.
- bydefault time stamps will be added from TFIS internal counter.
+- qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+- iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
Example:
@@ -79,4 +77,7 @@
&tsif0_sync_active
&tsif1_signals_active
&tsif1_sync_active>; /* dual-tsif-mode2 */
+
+ qcom,smmu-s1-bypass;
+ iommus = <&apps_smmu 0x20 0x0f>;
};
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index c116e42..7b491f3 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -329,7 +329,7 @@
- qcom,gpio-connect Gpio that connects to parent
interrupt controller
-* audio-ext-clk
+* audio-ext-clk-up
Required properties:
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
index 3e59c43..67ffaed 100644
--- a/Documentation/devicetree/bindings/thermal/tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -18,6 +18,7 @@
should be "qcom,sdm660-tsens" for 660 TSENS driver.
should be "qcom,sdm630-tsens" for 630 TSENS driver.
should be "qcom,sdm845-tsens" for SDM845 TSENS driver.
+ should be "qcom,tsens24xx" for 2.4 TSENS controller.
The compatible property is used to identify the respective controller to use
for the corresponding SoC.
- reg : offset and length of the TSENS registers with associated property in reg-names
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 6e027ae..ea40927 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -138,6 +138,7 @@
maps Memory maps to executables and library files (2.4)
mem Memory held by this process
root Link to the root directory of this process
+ reclaim Reclaim pages in this process
stat Process status
statm Process memory status information
status Process status in human readable form
@@ -528,6 +529,25 @@
Any other value written to /proc/PID/clear_refs will have no effect.
+The file /proc/PID/reclaim is used to reclaim pages in this process.
+To reclaim file-backed pages,
+ > echo file > /proc/PID/reclaim
+
+To reclaim anonymous pages,
+ > echo anon > /proc/PID/reclaim
+
+To reclaim all pages,
+ > echo all > /proc/PID/reclaim
+
+Also, you can specify address range of process so part of address space
+will be reclaimed. The format is following as
+ > echo addr size-byte > /proc/PID/reclaim
+
+NOTE: addr should be page-aligned.
+
+Below is an example which tries to reclaim 2M from 0x100000.
+ > echo 0x100000 2M > /proc/PID/reclaim
+
The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags
using /proc/kpageflags and number of times a page is mapped using
/proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.txt.
diff --git a/Documentation/misc-devices/qcom_invoke_driver.txt b/Documentation/misc-devices/qcom_invoke_driver.txt
new file mode 100644
index 0000000..38c976a
--- /dev/null
+++ b/Documentation/misc-devices/qcom_invoke_driver.txt
@@ -0,0 +1,54 @@
+Introduction:
+=============
+Invoke driver is a misc driver which helps communication between non secure
+and secure world. Invoke driver communicates with secure side using SCM
+driver. To use invoke driver, open must be called on invoke device i.e.
+/dev/invoke. Invoke driver exposes only one IOCTL invoke which passes
+userspace request to TZ.
+
+SW Architecture
+===============
+Following is SW stack for Invoke driver.
+
++++++++++++++++++++++++++++++++++++++++++
++ Applications +
++++++++++++++++++++++++++++++++++++++++++
++ System Layer +
++++++++++++++++++++++++++++++++++++++++++
++ Kernel +
++ +++++++++++++++++++ +
++ + Invoke driver + +
++ +++++++++++++++++++ +
++ + SCM Driver + +
++++++++++++++++++++++++++++++++++++++++++
+ ||
+ ||
+ \/
++++++++++++++++++++++++++++++++++++++++++
++ Trust Zone +
++ +++++++++++ +++++++++++ +
++ + TZ App1 + + TZ App2 + +
++++++++++++++++++++++++++++++++++++++++++
+
+
+Interfaces
+==========
+Invoke driver exposes INVOKE_IOCTL_INVOKE_REQ IOCTL for userspace to
+communicate with driver. More details of IOCTL are available in
+corresponding header file.
+
+
+Driver Parameters
+=================
+This driver is built and statically linked into the kernel; therefore,
+there are no module parameters supported by this driver.
+
+There are no kernel command line parameters supported by this driver.
+
+Power Management
+================
+TBD
+
+Dependencies
+============
+Invoke driver depends on SCM driver to communicate with TZ.
diff --git a/Makefile b/Makefile
index 88cab97..c11421f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 36
+SUBLEVEL = 38
EXTRAVERSION =
NAME = Roaring Lionus
@@ -565,7 +565,7 @@
# Objects we will link into vmlinux / subdirs we need to visit
init-y := init/
-drivers-y := drivers/ sound/ firmware/
+drivers-y := drivers/ sound/ firmware/ techpack/
net-y := net/
libs-y := lib/
core-y := usr/
@@ -1152,6 +1152,7 @@
$(error Headers not exportable for the $(SRCARCH) architecture))
$(Q)$(MAKE) $(hdr-inst)=include/uapi
$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/asm $(hdr-dst)
+ $(Q)$(MAKE) $(hdr-inst)=techpack
PHONY += headers_check_all
headers_check_all: headers_install_all
@@ -1161,6 +1162,7 @@
headers_check: headers_install
$(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1
$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/asm $(hdr-dst) HDRCHECK=1
+ $(Q)$(MAKE) $(hdr-inst)=techpack HDRCHECK=1
# ---------------------------------------------------------------------------
# Kernel selftest
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e06ecbb..21c66eb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -117,7 +117,7 @@
config ARM_DMA_IOMMU_ALIGNMENT
int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
range 4 9
- default 8
+ default 9
help
DMA mapping framework by default aligns all buffers to the smallest
PAGE_SIZE order which is greater than or equal to the requested buffer
@@ -1764,6 +1764,29 @@
source "mm/Kconfig"
+choice
+ prompt "Virtual Memory Reclaim"
+ default NO_VM_RECLAIM
+ help
+ Select the method of reclaiming virtual memory
+
+config ENABLE_VMALLOC_SAVING
+ bool "Reclaim memory for each subsystem"
+ help
+ Enable this config to reclaim the virtual space belonging
+ to any subsystem which is expected to have a lifetime of
+ the entire system. This feature allows lowmem to be non-
+ contiguous.
+
+config NO_VM_RECLAIM
+ bool "Do not reclaim memory"
+ help
+ Do not reclaim any memory. This might result in less lowmem
+ and wasting virtual memory space which could otherwise be
+ reclaimed by using any of the other two config options.
+
+endchoice
+
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "12" if SOC_AM33XX
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index a2a3231..f2a4063 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -211,6 +211,89 @@
reg-names = "pshold-base", "tcsr-boot-misc-detect";
};
+ tsens0: tsens@c222000 {
+ compatible = "qcom,tsens24xx";
+ reg = <0xc222000 0x4>,
+ <0xc263000 0x1ff>;
+ reg-names = "tsens_srot_physical",
+ "tsens_tm_physical";
+ interrupts = <0 163 0>, <0 165 0>;
+ interrupt-names = "tsens-upper-lower", "tsens-critical";
+ #thermal-sensor-cells = <1>;
+ };
+
+ thermal_zones: thermal-zones {
+ mpm-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 0>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ q6-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 1>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ ctile-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 2>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 3>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 4>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
};
#include "sdxpoorwills-regulator.dtsi"
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index c2252c0..b531fa5 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -210,6 +210,7 @@
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_SUPPLY=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_TSENS=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_SOUND=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index e8fa052..88bd16c 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -203,6 +203,7 @@
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_SUPPLY=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_TSENS=y
CONFIG_MSM_CDC_PINCTRL=y
CONFIG_MSM_CDC_SUPPLY=y
CONFIG_REGULATOR=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 58c6398..ee4a723 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -182,6 +182,20 @@
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);
+static inline void __dma_inv_area(const void *start, size_t len)
+{
+ dmac_inv_range(start, start + len);
+}
+
+static inline void __dma_clean_area(const void *start, size_t len)
+{
+ dmac_clean_range(start, start + len);
+}
+
+static inline void __dma_flush_area(const void *start, size_t len)
+{
+ dmac_flush_range(start, start + len);
+}
#endif
/*
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 776757d..f23454d 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,6 +148,7 @@
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
+#define TIF_MM_RELEASED 21 /* task MM has been released */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 19f444e..441063f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2170,6 +2170,9 @@
if (!bitmap_size)
return ERR_PTR(-EINVAL);
+ WARN(!IS_ALIGNED(size, SZ_128M),
+ "size is not aligned to 128M, alignment enforced");
+
if (bitmap_size > PAGE_SIZE) {
extensions = bitmap_size / PAGE_SIZE;
bitmap_size = PAGE_SIZE;
@@ -2192,7 +2195,7 @@
mapping->nr_bitmaps = 1;
mapping->extensions = extensions;
mapping->base = base;
- mapping->bits = bits;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
spin_lock_init(&mapping->lock);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 370581a..51496dd 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -469,6 +469,54 @@
#endif
}
+#define MLK(b, t) (b), (t), (((t) - (b)) >> 10)
+#define MLM(b, t) (b), (t), (((t) - (b)) >> 20)
+#define MLK_ROUNDUP(b, t) (b), (t), (DIV_ROUND_UP(((t) - (b)), SZ_1K))
+
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+static void print_vmalloc_lowmem_info(void)
+{
+ struct memblock_region *reg, *prev_reg = NULL;
+
+ pr_notice(
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)high_memory, VMALLOC_END));
+
+ for_each_memblock_rev(memory, reg) {
+ phys_addr_t start_phys = reg->base;
+ phys_addr_t end_phys = reg->base + reg->size;
+
+ if (start_phys > arm_lowmem_limit)
+ continue;
+
+ if (end_phys > arm_lowmem_limit)
+ end_phys = arm_lowmem_limit;
+
+ if (prev_reg == NULL) {
+ prev_reg = reg;
+
+ pr_notice(
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)__va(start_phys),
+ (unsigned long)__va(end_phys)));
+
+ continue;
+ }
+
+ pr_notice(
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)__va(end_phys),
+ (unsigned long)__va(prev_reg->base)));
+
+
+ pr_notice(
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)__va(start_phys),
+ (unsigned long)__va(end_phys)));
+ }
+}
+#endif
+
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
@@ -497,9 +545,6 @@
mem_init_print_info(NULL);
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
pr_notice("Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
@@ -507,29 +552,34 @@
" DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
" ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
#endif
- " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#ifdef CONFIG_HIGHMEM
- " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
-#ifdef CONFIG_MODULES
- " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
- " .text : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .init : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .data : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
-
+ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n",
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
- MLK(FIXADDR_START, FIXADDR_END),
+ MLK(FIXADDR_START, FIXADDR_END));
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ print_vmalloc_lowmem_info();
+#else
+ printk(KERN_NOTICE
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
MLM(VMALLOC_START, VMALLOC_END),
- MLM(PAGE_OFFSET, (unsigned long)high_memory),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory));
+#endif
+ printk(KERN_NOTICE
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
+#ifdef CONFIG_MODULES
+ " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
+ " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
#ifdef CONFIG_HIGHMEM
MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
(PAGE_SIZE)),
@@ -543,10 +593,6 @@
MLK_ROUNDUP(_sdata, _edata),
MLK_ROUNDUP(__bss_start, __bss_stop));
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
-
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
@@ -573,6 +619,10 @@
}
#ifdef CONFIG_DEBUG_RODATA
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
+
struct section_perm {
const char *name;
unsigned long start;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ff0eed2..203728d 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -92,7 +92,8 @@
void *vaddr;
vm = &svm->vm;
- vm_area_add_early(vm);
+ if (!vm_area_check_early(vm))
+ vm_area_add_early(vm);
vaddr = vm->addr;
list_for_each_entry(curr_svm, &static_vmlist, list) {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f7c7413..ddc72dc 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1168,6 +1168,19 @@
*/
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ struct memblock_region *prev_reg = NULL;
+
+ for_each_memblock(memory, reg) {
+ if (prev_reg == NULL) {
+ prev_reg = reg;
+ continue;
+ }
+ vmalloc_limit += reg->base - (prev_reg->base + prev_reg->size);
+ prev_reg = reg;
+ }
+#endif
+
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
@@ -1428,12 +1441,21 @@
phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
#endif
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ struct static_vm *svm;
+ phys_addr_t start;
+ phys_addr_t end;
+ unsigned long vaddr;
+ unsigned long pfn;
+ unsigned long length;
+ unsigned int type;
+ int nr = 0;
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
struct map_desc map;
+ start = reg->base;
+ end = start + reg->size;
+ nr++;
if (memblock_is_nomap(reg))
continue;
@@ -1485,6 +1507,34 @@
}
}
}
+ svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+
+ for_each_memblock(memory, reg) {
+ struct vm_struct *vm;
+
+ start = reg->base;
+ end = start + reg->size;
+
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
+ if (start >= end)
+ break;
+
+ vm = &svm->vm;
+ pfn = __phys_to_pfn(start);
+ vaddr = __phys_to_virt(start);
+ length = end - start;
+ type = MT_MEMORY_RW;
+
+ vm->addr = (void *)(vaddr & PAGE_MASK);
+ vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+ vm->phys_addr = __pfn_to_phys(pfn);
+ vm->flags = VM_LOWMEM;
+ vm->flags |= VM_ARM_MTYPE(type);
+ vm->caller = map_lowmem;
+ add_static_vm_early(svm++);
+ mark_vmalloc_reserved_area(vm->addr, vm->size);
+ }
}
#ifdef CONFIG_ARM_PV_FIXUP
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 92dc1e6..b97f1de 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -28,6 +28,17 @@
DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
endif
+# Add RTIC DTB to the DTB list if RTIC MPGen is enabled
+ifdef RTIC_MPGEN
+DTB_OBJS += rtic_mp.dtb
+endif
+
+rtic_mp.dtb: vmlinux FORCE
+ $(RTIC_MPGEN) --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
+ --binpath="" --vmlinux="vmlinux" --config=${KCONFIG_CONFIG} \
+ --cc="${CC}" --dts=rtic_mp.dts && \
+ $(DTC) -O dtb -o rtic_mp.dtb -b 0 $(DTC_FLAGS) rtic_mp.dts
+
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 9b20651..20288fe 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -2,16 +2,27 @@
dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8096-db820c.dtb
-
ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
dtbo-$(CONFIG_ARCH_SDM845) += \
sdm845-cdp-overlay.dtbo \
sdm845-mtp-overlay.dtbo \
- sdm845-qrd-overlay.dtbo
+ sdm845-qrd-overlay.dtbo \
+ sdm845-v2-cdp-overlay.dtbo \
+ sdm845-v2-mtp-overlay.dtbo \
+ sdm845-v2-qrd-overlay.dtbo \
+ sdm845-4k-panel-mtp-overlay.dtbo \
+ sdm845-4k-panel-cdp-overlay.dtbo \
+ sdm845-4k-panel-qrd-overlay.dtbo
sdm845-cdp-overlay.dtbo-base := sdm845.dtb
sdm845-mtp-overlay.dtbo-base := sdm845.dtb
sdm845-qrd-overlay.dtbo-base := sdm845.dtb
+sdm845-v2-cdp-overlay.dtbo-base := sdm845-v2.dtb
+sdm845-v2-mtp-overlay.dtbo-base := sdm845-v2.dtb
+sdm845-v2-qrd-overlay.dtbo-base := sdm845-v2.dtb
+sdm845-4k-panel-mtp-overlay.dtbo-base := sdm845.dtb
+sdm845-4k-panel-cdp-overlay.dtbo-base := sdm845.dtb
+sdm845-4k-panel-qrd-overlay.dtbo-base := sdm845.dtb
else
dtb-$(CONFIG_ARCH_SDM845) += sdm845-sim.dtb \
sdm845-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
index c52c18b..436a05d 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
@@ -35,6 +35,12 @@
qcom,mdss-dsi-color-order = "rgb_swap_rgb";
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
+ qcom,mdss-dsi-panel-jitter = <0x1 0x1>;
qcom,mdss-dsi-on-command = [
/* CMD2_P0 */
15 01 00 00 00 00 02 ff 20
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
index fe9129c..515949e 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
@@ -30,6 +30,11 @@
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
qcom,mdss-dsi-on-command = [
/* CMD2_P0 */
15 01 00 00 00 00 02 ff 20
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
index e4a0370..64e4d27 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
@@ -35,6 +35,7 @@
qcom,mdss-dsi-color-order = "rgb_swap_rgb";
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-panel-jitter = <0x1 0x1>;
qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
@@ -54,6 +55,11 @@
qcom,mdss-dsi-te-dcs-command = <1>;
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
qcom,mdss-dsi-on-command = [
/* CMD2_P0 */
15 01 00 00 00 00 02 FF 20
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
index d6ef3d8..346a8b4 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
@@ -30,6 +30,11 @@
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0x3ff>;
qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
qcom,mdss-dsi-on-command = [
/* CMD2_P0 */
15 01 00 00 00 00 02 FF 20
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index 6534cdc..744bd2c 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -49,6 +49,7 @@
qcom,mdss-dsi-te-using-te-pin;
qcom,dcs-cmd-by-left;
qcom,mdss-dsi-tx-eot-append;
+ qcom,mdss-dsi-panel-jitter = <0x8 0xa>;
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-on-command = [
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 1a2ca5b..02fedbe 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -65,6 +65,7 @@
#iommu-cells = <2>;
qcom,skip-init;
qcom,use-3-lvl-tables;
+ qcom,no-asid-retention;
#global-interrupts = <1>;
#size-cells = <1>;
#address-cells = <1>;
@@ -339,10 +340,10 @@
apps_iommu_coherent_test_device {
compatible = "iommu-debug-test";
/*
- * This SID belongs to QUP1-DMA. We can't use a fake SID for
+ * This SID belongs to TSIF. We can't use a fake SID for
* the apps_smmu device.
*/
- iommus = <&apps_smmu 0x3 0>;
+ iommus = <&apps_smmu 0x20 0>;
dma-coherent;
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
index 2194a42..dcc646c93b 100644
--- a/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
@@ -196,28 +196,15 @@
hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
qcom,no-status-check-on-disable;
qcom,gds-timeout = <500>;
+ qcom,clk-dis-wait-val = <8>;
status = "disabled";
};
- gpu_gx_domain_addr: syscon@0x5091508 {
- compatible = "syscon";
- reg = <0x5091508 0x4>;
- };
-
- gpu_gx_sw_reset: syscon@0x5091008 {
- compatible = "syscon";
- reg = <0x5091008 0x4>;
- };
-
gpu_gx_gdsc: qcom,gdsc@0x509100c {
compatible = "qcom,gdsc";
regulator-name = "gpu_gx_gdsc";
reg = <0x509100c 0x4>;
- domain-addr = <&gpu_gx_domain_addr>;
- sw-reset = <&gpu_gx_sw_reset>;
- qcom,reset-aon-logic;
qcom,poll-cfg-gdscr;
- qcom,toggle-sw-collapse-in-disable;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi b/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi
index 2b8f22e..d9d1be4 100644
--- a/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi
@@ -72,4 +72,35 @@
compatible = "qcom,smp2pgpio_client_rdbg_1_out";
gpios = <&smp2pgpio_rdbg_1_out 0 0>;
};
+
+ smp2pgpio_rdbg_5_in: qcom,smp2pgpio-rdbg-5-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <5>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_5_in {
+ compatible = "qcom,smp2pgpio_client_rdbg_5_in";
+ gpios = <&smp2pgpio_rdbg_5_in 0 0>;
+ };
+
+ smp2pgpio_rdbg_5_out: qcom,smp2pgpio-rdbg-5-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_5_out {
+ compatible = "qcom,smp2pgpio_client_rdbg_5_out";
+ gpios = <&smp2pgpio_rdbg_5_out 0 0>;
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
index 4abf260..5d71f2d 100644
--- a/arch/arm64/boot/dts/qcom/pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
@@ -488,6 +488,7 @@
qcom,adc_tm-vadc = <&pm660_vadc>;
qcom,decimation = <0>;
qcom,fast-avg-setup = <0>;
+ #thermal-sensor-cells = <1>;
chan@83 {
label = "vph_pwr";
@@ -531,6 +532,28 @@
qcom,btm-channel-number = <0x78>;
qcom,thermal-node;
};
+
+ chan@4e {
+ label = "emmc_therm";
+ reg = <0x4e>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x80>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4f {
+ label = "pa_therm0";
+ reg = <0x4f>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x88>;
+ qcom,vadc-thermal-node;
+ };
};
pm660_rradc: rradc@4500 {
@@ -631,3 +654,80 @@
#size-cells = <0>;
};
};
+
+&thermal_zones {
+ xo-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm660_adc_tm 0x4c>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ msm-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm660_adc_tm 0x4d>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ emmc-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm660_adc_tm 0x4e>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ pa-therm0-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm660_adc_tm 0x4f>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ quiet-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm660_adc_tm 0x51>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm660l.dtsi b/arch/arm64/boot/dts/qcom/pm660l.dtsi
index 0f18ba5..9cd117c 100644
--- a/arch/arm64/boot/dts/qcom/pm660l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660l.dtsi
@@ -250,9 +250,8 @@
<0xd900 0x100>;
reg-names = "qpnp-wled-ctrl-base",
"qpnp-wled-sink-base";
- interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
- <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "ovp-irq", "sc-irq";
+ interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ovp-irq";
linux,name = "wled";
linux,default-trigger = "bkl-trigger";
qcom,fdbk-output = "auto";
@@ -268,7 +267,6 @@
qcom,fs-curr-ua = <25000>;
qcom,cons-sync-write-delay-us = <1000>;
qcom,led-strings-list = [00 01 02];
- qcom,en-ext-pfet-sc-pro;
qcom,loop-auto-gm-en;
qcom,pmic-revid = <&pm660l_revid>;
status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index c8dc1f4..09405ee 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -97,6 +97,7 @@
qcom,boost-threshold-ua = <100000>;
qcom,wipower-max-uw = <5000000>;
+ dpdm-supply = <&qusb_phy0>;
qcom,thermal-mitigation
= <3000000 1500000 1000000 500000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index 6ea92ee..0cf48a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -9,3 +9,27 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+&qupv3_se9_2uart {
+ status = "disabled";
+};
+
+&qupv3_se12_2uart {
+ status = "ok";
+};
+
+&qupv3_se8_spi {
+ status = "disabled";
+};
+
+&qupv3_se3_i2c {
+ status = "disabled";
+};
+
+&qupv3_se10_i2c {
+ status = "disabled";
+};
+
+&qupv3_se6_4uart {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index 6ea92ee..0cf48a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -9,3 +9,27 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+&qupv3_se9_2uart {
+ status = "disabled";
+};
+
+&qupv3_se12_2uart {
+ status = "ok";
+};
+
+&qupv3_se8_spi {
+ status = "disabled";
+};
+
+&qupv3_se3_i2c {
+ status = "disabled";
+};
+
+&qupv3_se10_i2c {
+ status = "disabled";
+};
+
+&qupv3_se6_4uart {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index cce0860..dcc5d1b 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -904,6 +904,34 @@
};
};
+ qupv3_se12_2uart_pins: qupv3_se12_2uart_pins {
+ qupv3_se12_2uart_active: qupv3_se12_2uart_active {
+ mux {
+ pins = "gpio51", "gpio52";
+ function = "qup9";
+ };
+
+ config {
+ pins = "gpio51", "gpio52";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se12_2uart_sleep: qupv3_se12_2uart_sleep {
+ mux {
+ pins = "gpio51", "gpio52";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio51", "gpio52";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
qupv3_se12_spi_pins: qupv3_se12_spi_pins {
qupv3_se12_spi_active: qupv3_se12_spi_active {
mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
index 1fa6e26..657363f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
@@ -407,6 +407,23 @@
status = "disabled";
};
+ /* Debug UART Instance for CDP/MTP platform on SDM670 */
+ qupv3_se12_2uart: qcom,qup_uart@0xa90000 {
+ compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+ reg = <0xa90000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se12_2uart_active>;
+ pinctrl-1 = <&qupv3_se12_2uart_sleep>;
+ interrupts = <GIC_SPI 357 0>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
/* I2C */
qupv3_se8_i2c: i2c@a80000 {
compatible = "qcom,i2c-geni";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
index b790c04..f3e5ddb 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
@@ -25,8 +25,8 @@
compatible = "qcom,smp2p";
reg = <0x1799000c 0x4>;
qcom,remote-pid = <2>;
- qcom,irq-bitmask = <0x200>;
- interrupts = <GIC_SPI 157 IRQ_TYPE_EDGE_RISING>;
+ qcom,irq-bitmask = <0x4000000>;
+ interrupts = <GIC_SPI 172 IRQ_TYPE_EDGE_RISING>;
};
qcom,smp2p-cdsp@1799000c {
@@ -222,4 +222,50 @@
interrupt-controller;
#interrupt-cells = <2>;
};
+
+ /* ssr - inbound entry from mss */
+ smp2pgpio_ssr_smp2p_1_in: qcom,smp2pgpio-ssr-smp2p-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to mss */
+ smp2pgpio_ssr_smp2p_1_out: qcom,smp2pgpio-ssr-smp2p-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - inbound entry from cdsp */
+ smp2pgpio_ssr_smp2p_5_in: qcom,smp2pgpio-ssr-smp2p-5-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <5>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to cdsp */
+ smp2pgpio_ssr_smp2p_5_out: qcom,smp2pgpio-ssr-smp2p-5-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 3eae5f3..7d9702e 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -32,6 +32,14 @@
ufshc1 = &ufshc_mem; /* Embedded UFS slot */
};
+ aliases {
+ serial0 = &qupv3_se12_2uart;
+ spi0 = &qupv3_se8_spi;
+ i2c0 = &qupv3_se10_i2c;
+ i2c1 = &qupv3_se3_i2c;
+ hsuart0 = &qupv3_se6_4uart;
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
@@ -433,6 +441,324 @@
qcom,pipe-attr-ee;
};
+ thermal_zones: thermal-zones {
+ aoss0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 0>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu0-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 1>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu1-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 2>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu2-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 3>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu3-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 4>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu4-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 5>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu5-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 6>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ kryo-l3-0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 7>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ kryo-l3-1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 8>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu0-gold-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 9>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu1-gold-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 10>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ gpu0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 11>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ gpu1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 12>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ aoss1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 0>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-dsp-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 1>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ ddr-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 2>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ wlan-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 3>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ compute-hvx-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 4>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ camera-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 5>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mmss-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 6>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-core-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 7>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
+ tsens0: tsens@c222000 {
+ compatible = "qcom,tsens24xx";
+ reg = <0xc222000 0x4>,
+ <0xc263000 0x1ff>;
+ reg-names = "tsens_srot_physical",
+ "tsens_tm_physical";
+ interrupts = <0 506 0>, <0 508 0>;
+ interrupt-names = "tsens-upper-lower", "tsens-critical";
+ #thermal-sensor-cells = <1>;
+ };
+
+ tsens1: tsens@c223000 {
+ compatible = "qcom,tsens24xx";
+ reg = <0xc223000 0x4>,
+ <0xc265000 0x1ff>;
+ reg-names = "tsens_srot_physical",
+ "tsens_tm_physical";
+ interrupts = <0 507 0>, <0 509 0>;
+ interrupt-names = "tsens-upper-lower", "tsens-critical";
+ #thermal-sensor-cells = <1>;
+ };
+
timer@0x17c90000{
#address-cells = <1>;
#size-cells = <1>;
@@ -499,6 +825,12 @@
reg-names = "pshold-base", "tcsr-boot-misc-detect";
};
+ aop-msg-client {
+ compatible = "qcom,debugfs-qmp-client";
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "aop";
+ };
+
clock_rpmh: qcom,rpmhclk {
compatible = "qcom,dummycc";
clock-output-names = "rpmh_clocks";
@@ -787,8 +1119,8 @@
reg = <0x86000000 0x200000>,
<0x1799000c 0x4>;
reg-names = "smem", "irq-reg-base";
- qcom,irq-mask = <0x100>;
- interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+ qcom,irq-mask = <0x1000000>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_EDGE_RISING>;
label = "lpass";
qcom,qos-config = <&glink_qos_adsp>;
qcom,ramp-time = <0xaf>;
@@ -1042,6 +1374,19 @@
<CONTROL_TCS 1>;
};
+ disp_rsc: mailbox@af20000 {
+ compatible = "qcom,tcs-drv";
+ label = "display_rsc";
+ reg = <0xaf20000 0x100>, <0xaf21c00 0x3000>;
+ interrupts = <0 129 0>;
+ #mbox-cells = <1>;
+ qcom,drv-id = <0>;
+ qcom,tcs-config = <SLEEP_TCS 1>,
+ <WAKE_TCS 1>,
+ <ACTIVE_TCS 0>,
+ <CONTROL_TCS 1>;
+ };
+
system_pm {
compatible = "qcom,system-pm";
mboxes = <&apps_rsc 0>;
@@ -1329,6 +1674,124 @@
qcom,pas-id = <0xf>;
qcom,firmware-name = "ipa_fws";
};
+
+ pil_modem: qcom,mss@4080000 {
+ compatible = "qcom,pil-q6v55-mss";
+ reg = <0x4080000 0x100>,
+ <0x1f63000 0x008>,
+ <0x1f65000 0x008>,
+ <0x1f64000 0x008>,
+ <0x4180000 0x020>,
+ <0xc2b0000 0x004>,
+ <0xb2e0100 0x004>,
+ <0x4180044 0x004>;
+ reg-names = "qdsp6_base", "halt_q6", "halt_modem",
+ "halt_nc", "rmb_base", "restart_reg",
+ "pdc_sync", "alt_reset";
+
+ clocks = <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_MSS_CFG_AHB_CLK>,
+ <&clock_gcc GCC_MSS_Q6_MEMNOC_AXI_CLK>,
+ <&clock_gcc GCC_BOOT_ROM_AHB_CLK>,
+ <&clock_gcc GCC_MSS_GPLL0_DIV_CLK_SRC>,
+ <&clock_gcc GCC_MSS_SNOC_AXI_CLK>,
+ <&clock_gcc GCC_MSS_MFAB_AXIS_CLK>,
+ <&clock_gcc GCC_PRNG_AHB_CLK>;
+ clock-names = "xo", "iface_clk", "bus_clk",
+ "mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
+ "mnoc_axi_clk", "prng_clk";
+ qcom,proxy-clock-names = "xo", "prng_clk";
+ qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk",
+ "gpll0_mss_clk", "snoc_axi_clk",
+ "mnoc_axi_clk";
+
+ interrupts = <0 266 1>;
+ vdd_cx-supply = <&pm660l_s3_level>;
+ vdd_cx-voltage = <RPMH_REGULATOR_LEVEL_TURBO>;
+ vdd_mx-supply = <&pm660l_s1_level>;
+ vdd_mx-uV = <RPMH_REGULATOR_LEVEL_TURBO>;
+ qcom,firmware-name = "modem";
+ qcom,pil-self-auth;
+ qcom,sysmon-id = <0>;
+ qcom,ssctl-instance-id = <0x12>;
+ qcom,override-acc;
+ qcom,qdsp6v65-1-0;
+ status = "ok";
+ memory-region = <&pil_modem_mem>;
+ qcom,mem-protect-id = <0xF>;
+
+ /* GPIO inputs from mss */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
+ qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
+
+ /* GPIO output to mss */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+ qcom,mba-mem@0 {
+ compatible = "qcom,pil-mba-mem";
+ memory-region = <&pil_mba_mem>;
+ };
+ };
+
+ qcom,venus@aae0000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0xaae0000 0x4000>;
+
+ vdd-supply = <&venus_gdsc>;
+ qcom,proxy-reg-names = "vdd";
+
+ clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
+ <&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
+ <&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>;
+ clock-names = "core_clk", "iface_clk", "bus_clk";
+ qcom,proxy-clock-names = "core_clk", "iface_clk", "bus_clk";
+
+ qcom,pas-id = <9>;
+ qcom,msm-bus,name = "pil-venus";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <63 512 0 0>,
+ <63 512 0 304000>;
+ qcom,proxy-timeout-ms = <100>;
+ qcom,firmware-name = "venus";
+ memory-region = <&pil_video_mem>;
+ status = "ok";
+ };
+
+ qcom,turing@8300000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x8300000 0x100000>;
+ interrupts = <0 578 1>;
+
+ vdd_cx-supply = <&pm660l_s3_level>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
+
+ clocks = <&clock_rpmh RPMH_CXO_CLK>;
+ clock-names = "xo";
+ qcom,proxy-clock-names = "xo";
+
+ qcom,pas-id = <18>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <601>;
+ qcom,sysmon-id = <7>;
+ qcom,ssctl-instance-id = <0x17>;
+ qcom,firmware-name = "cdsp";
+ memory-region = <&pil_cdsp_mem>;
+
+ /* GPIO inputs from turing */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_5_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_5_in 2 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_5_in 1 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_5_in 3 0>;
+
+ /* GPIO output to turing */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_5_out 0 0>;
+ status = "ok";
+ };
};
#include "sdm670-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp-overlay.dts
new file mode 100644
index 0000000..0006937
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp-overlay.dts
@@ -0,0 +1,66 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm845-cdp-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel CDP";
+ compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
+ qcom,msm-id = <321 0x0>;
+ qcom,board-id = <1 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+ connectors = <&sde_rscc &sde_wb>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index 94d74e2..faf09c4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-cdp.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp-overlay.dts
new file mode 100644
index 0000000..2675b96
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp-overlay.dts
@@ -0,0 +1,66 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel MTP";
+ compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+ qcom,msm-id = <321 0x0>;
+ qcom,board-id = <8 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+ connectors = <&sde_rscc &sde_wb &sde_dp>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index fca87e1..2ae9345 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-mtp.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
new file mode 100644
index 0000000..39c9d37
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-qrd.dtsi"
+#include "sdm845-qrd-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel QRD";
+ compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+ qcom,msm-id = <321 0x0>;
+ qcom,board-id = <11 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
index 6171c7b..5951f6d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-qrd.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index 5db4c35..9d799cb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -153,6 +153,7 @@
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
clock-rates = <24000000>;
};
@@ -192,6 +193,7 @@
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
clock-rates = <24000000>;
};
@@ -234,6 +236,7 @@
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
clock-rates = <24000000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index 5db4c35..f18137c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -153,6 +153,7 @@
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
clock-rates = <24000000>;
};
@@ -192,6 +193,7 @@
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
clock-rates = <24000000>;
};
@@ -234,6 +236,7 @@
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
clock-rates = <24000000>;
};
@@ -242,7 +245,7 @@
compatible = "qcom,cam-sensor";
reg = <0x0>;
csiphy-sd-index = <0>;
- sensor-position-roll = <90>;
+ sensor-position-roll = <270>;
sensor-position-pitch = <0>;
sensor-position-yaw = <180>;
led-flash-src = <&led_flash_rear>;
@@ -333,7 +336,7 @@
compatible = "qcom,cam-sensor";
reg = <0x02>;
csiphy-sd-index = <2>;
- sensor-position-roll = <90>;
+ sensor-position-roll = <270>;
sensor-position-pitch = <0>;
sensor-position-yaw = <0>;
eeprom-src = <&eeprom_front>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index cbd495a..3f19890 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -36,9 +36,7 @@
<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
<&clock_camcc CAM_CC_CSIPHY0_CLK>,
<&clock_camcc CAM_CC_CSI0PHYTIMER_CLK_SRC>,
- <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>,
- <&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
- <&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>;
+ <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>;
clock-names = "camnoc_axi_clk",
"soc_ahb_clk",
"slow_ahb_src_clk",
@@ -46,12 +44,10 @@
"cphy_rx_clk_src",
"csiphy0_clk",
"csi0phytimer_clk_src",
- "csi0phytimer_clk",
- "ife_0_csid_clk",
- "ife_0_csid_clk_src";
+ "csi0phytimer_clk";
clock-cntl-level = "turbo";
clock-rates =
- <0 0 0 0 320000000 0 269333333 0 0 384000000>;
+ <0 0 0 0 320000000 0 269333333 0>;
status = "ok";
};
@@ -74,9 +70,7 @@
<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
<&clock_camcc CAM_CC_CSIPHY1_CLK>,
<&clock_camcc CAM_CC_CSI1PHYTIMER_CLK_SRC>,
- <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>,
- <&clock_camcc CAM_CC_IFE_1_CSID_CLK>,
- <&clock_camcc CAM_CC_IFE_1_CSID_CLK_SRC>;
+ <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>;
clock-names = "camnoc_axi_clk",
"soc_ahb_clk",
"slow_ahb_src_clk",
@@ -84,12 +78,10 @@
"cphy_rx_clk_src",
"csiphy1_clk",
"csi1phytimer_clk_src",
- "csi1phytimer_clk",
- "ife_1_csid_clk",
- "ife_1_csid_clk_src";
+ "csi1phytimer_clk";
clock-cntl-level = "turbo";
clock-rates =
- <0 0 0 0 320000000 0 269333333 0 0 384000000>;
+ <0 0 0 0 320000000 0 269333333 0>;
status = "ok";
};
@@ -113,9 +105,7 @@
<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
<&clock_camcc CAM_CC_CSIPHY2_CLK>,
<&clock_camcc CAM_CC_CSI2PHYTIMER_CLK_SRC>,
- <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>,
- <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK>,
- <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK_SRC>;
+ <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>;
clock-names = "camnoc_axi_clk",
"soc_ahb_clk",
"slow_ahb_src_clk",
@@ -123,12 +113,10 @@
"cphy_rx_clk_src",
"csiphy2_clk",
"csi2phytimer_clk_src",
- "csi2phytimer_clk",
- "ife_lite_csid_clk",
- "ife_lite_csid_clk_src";
+ "csi2phytimer_clk";
clock-cntl-level = "turbo";
clock-rates =
- <0 0 0 0 320000000 0 269333333 0 0 384000000>;
+ <0 0 0 0 320000000 0 269333333 0>;
status = "ok";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
index 4747c99..efc78e0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
@@ -15,9 +15,11 @@
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "sdm845-sde-display.dtsi"
#include "sdm845-cdp.dtsi"
#include "sdm845-cdp-audio-overlay.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp.dts
index 22e3aea..0a6aa5e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-cdp.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index c4ec012..8fca29c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -169,10 +169,6 @@
pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
- qcom,clk-rates = <400000 20000000 25000000
- 50000000 100000000 200000000>;
- qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
-
extcon = <&extcon_storage_cd>;
status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index 1ce68e1..3f05846 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -195,10 +195,10 @@
qcom,gpu-pwrlevel@3 {
reg = <3>;
- qcom,gpu-freq = <280000000>;
- qcom,bus-freq = <4>;
- qcom,bus-min = <3>;
- qcom,bus-max = <5>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
};
};
@@ -218,7 +218,6 @@
clock-names = "iface_clk", "mem_clk", "mem_iface_clk";
qcom,secure_align_mask = <0xfff>;
- qcom,global_pt;
qcom,hyp_secure_alloc;
gfx3d_user: gfx3d_user {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
index 52c0f05..45941a1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
@@ -15,9 +15,11 @@
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "sdm845-sde-display.dtsi"
#include "sdm845-mtp.dtsi"
#include "sdm845-audio-overlay.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index f7af60c..e74b342 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-mtp.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 1453975..29d80a7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -245,10 +245,6 @@
pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
- qcom,clk-rates = <400000 20000000 25000000
- 50000000 100000000 200000000>;
- qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
-
extcon = <&extcon_storage_cd>;
status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 2a7b6d1..04f67cd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -2667,7 +2667,7 @@
config {
pins = "gpio80","gpio79";
- bias-disable; /* No PULL */
+ bias-pull-down; /* PULL DOWN */
drive-strength = <2>; /* 2 MA */
};
};
@@ -2723,7 +2723,7 @@
config {
pins = "gpio28";
- bias-disable; /* No PULL */
+ bias-pull-down; /* PULL DOWN */
drive-strength = <2>; /* 2 MA */
};
};
@@ -2780,7 +2780,7 @@
};
config {
pins = "gpio9","gpio8";
- bias-disable; /* No PULL */
+ bias-pull-down; /* PULL DOWN */
drive-strength = <2>; /* 2 MA */
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi
index 2ee9031..b11c912 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi
@@ -15,6 +15,8 @@
&soc {
sound-tavil {
+ qcom,model = "sdm845-tavil-qrd-snd-card";
+
qcom,wsa-max-devs = <1>;
qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts
index 5729d76..6cead9d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts
@@ -15,9 +15,11 @@
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "sdm845-sde-display.dtsi"
#include "sdm845-qrd.dtsi"
#include "sdm845-qrd-audio-overlay.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
index 228b924..6cb7815 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-qrd.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 9cf18b7..6bdc149 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -170,10 +170,6 @@
pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
- qcom,clk-rates = <400000 20000000 25000000
- 50000000 100000000 200000000>;
- qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
-
extcon = <&extcon_storage_cd>;
status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index bde64b9..6fb4f37 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -496,7 +496,7 @@
qcom,display-topology = <1 1 1>,
<2 2 1>, /* dsc merge */
<2 1 1>; /* 3d mux */
- qcom,default-topology-index = <0>;
+ qcom,default-topology-index = <1>;
};
&dsi_nt35597_truly_dsc_video {
@@ -506,7 +506,7 @@
qcom,display-topology = <1 1 1>,
<2 2 1>, /* dsc merge */
<2 1 1>; /* 3d mux */
- qcom,default-topology-index = <0>;
+ qcom,default-topology-index = <1>;
};
&dsi_sharp_4k_dsc_video {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 41c1876..17adbf4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -57,14 +57,18 @@
0x48000 0x49000 0x4a000>;
qcom,sde-mixer-size = <0x320>;
+ qcom,sde-dspp-top-off = <0x1300>;
+ qcom,sde-dspp-top-size = <0xc>;
+
qcom,sde-dspp-off = <0x55000 0x57000 0x59000 0x5b000>;
qcom,sde-dspp-size = <0x17e0>;
qcom,sde-wb-off = <0x66000>;
qcom,sde-wb-size = <0x2c8>;
-
qcom,sde-wb-xin-id = <6>;
qcom,sde-wb-id = <2>;
+ qcom,sde-wb-clk-ctrl = <0x3b8 24>;
+
qcom,sde-intf-off = <0x6b000 0x6b800
0x6c000 0x6c800>;
qcom,sde-intf-size = <0x280>;
@@ -188,8 +192,10 @@
};
qcom,sde-dspp-blocks {
+ qcom,sde-dspp-igc = <0x0 0x00030001>;
qcom,sde-dspp-vlut = <0xa00 0x00010008>;
qcom,sde-dspp-gamut = <0x1000 0x00040000>;
+ qcom,sde-dspp-pcc = <0x1700 0x00040000>;
qcom,sde-dspp-gc = <0x17c0 0x00010008>;
};
@@ -349,6 +355,7 @@
/* Offline rotator QoS setting */
qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
+ qcom,mdss-rot-vbif-memtype = <3 3>;
qcom,mdss-rot-cdp-setting = <1 1>;
qcom,mdss-rot-qos-lut = <0x0 0x0 0x0 0x0>;
qcom,mdss-rot-danger-lut = <0x0 0x0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index ac16d03..ec048ca 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -229,12 +229,12 @@
0x12a4 0x12 0x00 /* TXA_RCV_DETECT_LVL_2 */
0x128c 0x16 0x00 /* TXA_LANE_MODE_1 */
0x1248 0x09 0x00 /* TXA_RES_CODE_LANE_OFFSET_RX */
- 0x1244 0x0d 0x00 /* TXA_RES_CODE_LANE_OFFSET_TX */
+ 0x1244 0x06 0x00 /* TXA_RES_CODE_LANE_OFFSET_TX */
0x1660 0x10 0x00 /* TXB_HIGHZ_DRVR_EN */
0x16a4 0x12 0x00 /* TXB_RCV_DETECT_LVL_2 */
0x168c 0x16 0x00 /* TXB_LANE_MODE_1 */
0x1648 0x09 0x00 /* TXB_RES_CODE_LANE_OFFSET_RX */
- 0x1644 0x0d 0x00 /* TXB_RES_CODE_LANE_OFFSET_TX */
+ 0x1644 0x06 0x00 /* TXB_RES_CODE_LANE_OFFSET_TX */
0x1cc8 0x83 0x00 /* PCS_FLL_CNTRL2 */
0x1ccc 0x09 0x00 /* PCS_FLL_CNT_VAL_L */
0x1cd0 0xa2 0x00 /* PCS_FLL_CNT_VAL_H_TOL */
@@ -502,8 +502,8 @@
0x260 0x10 0x00 /* QSERDES_TX_HIGHZ_DRVR_EN */
0x2a4 0x12 0x00 /* QSERDES_TX_RCV_DETECT_LVL_2 */
0x28c 0xc6 0x00 /* QSERDES_TX_LANE_MODE_1 */
- 0x248 0x09 0x00 /* TX_RES_CODE_LANE_OFFSET_RX */
- 0x244 0x0d 0x00 /* TX_RES_CODE_LANE_OFFSET_TX */
+ 0x248 0x06 0x00 /* TX_RES_CODE_LANE_OFFSET_RX */
+ 0x244 0x06 0x00 /* TX_RES_CODE_LANE_OFFSET_TX */
0x8c8 0x83 0x00 /* USB3_UNI_PCS_FLL_CNTRL2 */
0x8cc 0x09 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_L */
0x8d0 0xa2 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_H_TOL */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-cdp-overlay.dts
new file mode 100644
index 0000000..3cd7678
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-cdp-overlay.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm845-cdp-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v2 CDP";
+ compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
+ qcom,msm-id = <321 0x20000>;
+ qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts
index 8ab0593..66ee4c7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845-v2.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-cdp.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-mtp-overlay.dts
new file mode 100644
index 0000000..e049357
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-mtp-overlay.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v2 MTP";
+ compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+ qcom,msm-id = <321 0x20000>;
+ qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts
index 57c3e71..cea38e6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845-v2.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-mtp.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qrd-overlay.dts
new file mode 100644
index 0000000..a5a32ab
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qrd-overlay.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-qrd.dtsi"
+#include "sdm845-qrd-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v2 QRD";
+ compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+ qcom,msm-id = <321 0x20000>;
+ qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts
index 8a9a544..9a87617 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845-v2.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-qrd.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dts b/arch/arm64/boot/dts/qcom/sdm845-v2.dts
new file mode 100644
index 0000000..d36d0fd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dts
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm845-v2.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v2 SoC";
+ compatible = "qcom,sdm845";
+ qcom,board-id = <0 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index 95ee14c..c20999b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -17,8 +17,12 @@
qcom,msm-id = <321 0x20000>;
};
-&spmi_debug_bus {
- status = "ok";
+&sdhc_2 {
+ qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+ 100000000 200000000 4294967295>;
+ qcom,clk-rates = <400000 20000000 25000000 50000000
+ 100000000 200000000>;
+ qcom,devfreq,freq-table = <50000000 200000000>;
};
&clock_gcc {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 8f9618d..202df95 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -645,7 +645,6 @@
#include "sdm845-sde-pll.dtsi"
#include "msm-rdbg.dtsi"
#include "sdm845-sde.dtsi"
-#include "sdm845-sde-display.dtsi"
#include "sdm845-qupv3.dtsi"
&soc {
@@ -741,6 +740,12 @@
reg-names = "pshold-base", "tcsr-boot-misc-detect";
};
+ aop-msg-client {
+ compatible = "qcom,debugfs-qmp-client";
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "aop";
+ };
+
spmi_bus: qcom,spmi@c440000 {
compatible = "qcom,spmi-pmic-arb";
reg = <0xc440000 0x1100>,
@@ -769,7 +774,6 @@
qcom,fuse-disable-bit = <12>;
#address-cells = <2>;
#size-cells = <0>;
- status = "disabled";
qcom,pm8998-debug@0 {
compatible = "qcom,spmi-pmic";
@@ -979,7 +983,7 @@
< 1209600 806400000 >,
< 1516800 883200000 >,
< 1593600 960000000 >,
- < 1708800 1094400000 >;
+ < 1708800 1305600000 >;
};
devfreq_l3lat_4: qcom,cpu4-l3lat-mon {
@@ -993,7 +997,7 @@
< 1190400 806400000 >,
< 1574400 883200000 >,
< 1804800 960000000 >,
- < 2092800 1094400000 >;
+ < 1958400 1305600000 >;
};
cpu_pmu: cpu-pmu {
@@ -1319,10 +1323,6 @@
< 2112000000 0x4054176e 0x00005858 0x3 24 >,
< 2208000000 0x40541873 0x00005c5c 0x3 25 >;
- qcom,l3-min-cpr-vc-bin0 = <7>;
- qcom,pwrcl-min-cpr-vc-bin0 = <6>;
- qcom,perfcl-min-cpr-vc-bin0 = <7>;
-
qcom,up-timer =
<1000 1000 1000>;
qcom,down-timer =
@@ -1676,11 +1676,18 @@
<81 512 1338562 4096000>,
<1 608 1338562 4096000>;
qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
- 100000000 200000000 4294967295>;
+ 100750000 200000000 4294967295>;
qcom,sdr104-wa;
- qcom,devfreq,freq-table = <50000000 200000000>;
+ qcom,restore-after-cx-collapse;
+
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000 201500000>;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50",
+ "SDR104";
+
+ qcom,devfreq,freq-table = <50000000 201500000>;
clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>,
<&clock_gcc GCC_SDCC2_APPS_CLK>;
clock-names = "iface_clk", "core_clk";
@@ -1969,63 +1976,75 @@
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1401 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb2 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1402 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb3 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1403 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb4 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1404 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb5 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1405 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb6 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1406 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb7 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1407 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb8 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1408 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb9 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
qcom,secure-context-bank;
iommus = <&apps_smmu 0x1409 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb10 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
qcom,secure-context-bank;
iommus = <&apps_smmu 0x140A 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb11 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&apps_smmu 0x1823 0x0>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb12 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&apps_smmu 0x1824 0x0>;
+ dma-coherent;
};
};
@@ -2239,7 +2258,6 @@
compatible = "qcom,sdm845-llcc";
#cache-cells = <1>;
max-slices = <32>;
- qcom,dump-size = <0x3c0000>;
};
qcom,llcc-erp {
@@ -2253,19 +2271,19 @@
};
LLCC_1: llcc_1_dcache {
- qcom,dump-size = <0xd8000>;
+ qcom,dump-size = <0x114100>;
};
LLCC_2: llcc_2_dcache {
- qcom,dump-size = <0xd8000>;
+ qcom,dump-size = <0x114100>;
};
LLCC_3: llcc_3_dcache {
- qcom,dump-size = <0xd8000>;
+ qcom,dump-size = <0x114100>;
};
LLCC_4: llcc_4_dcache {
- qcom,dump-size = <0xd8000>;
+ qcom,dump-size = <0x114100>;
};
};
@@ -2687,6 +2705,9 @@
<&clock_gcc GCC_CE1_AXI_CLK>;
qcom,ce-opp-freq = <171430000>;
qcom,request-bw-before-clk;
+ qcom,smmu-s1-bypass;
+ iommus = <&apps_smmu 0x702 0x1>,
+ <&apps_smmu 0x712 0x1>;
};
qcom_crypto: qcrypto@1de0000 {
@@ -2721,6 +2742,9 @@
qcom,use-sw-ahash-algo;
qcom,use-sw-aead-algo;
qcom,use-sw-hmac-algo;
+ qcom,smmu-s1-bypass;
+ iommus = <&apps_smmu 0x704 0x3>,
+ <&apps_smmu 0x714 0x3>;
};
qcom,msm_gsi {
@@ -2975,6 +2999,7 @@
vdd-3.3-ch0-supply = <&pm8998_l25>;
qcom,vdd-0.8-cx-mx-config = <800000 800000>;
qcom,vdd-3.3-ch0-config = <3104000 3312000>;
+ qcom,smmu-s1-bypass;
};
qmi-tmd-devices {
@@ -3600,6 +3625,9 @@
&tsif0_sync_active
&tsif1_signals_active
&tsif1_sync_active>; /* dual-tsif-mode2 */
+
+ qcom,smmu-s1-bypass;
+ iommus = <&apps_smmu 0x20 0x0f>;
};
};
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 14243fb..f5c62aa 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -86,7 +86,6 @@
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-CONFIG_CPU_FREQ_MSM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -513,6 +512,7 @@
CONFIG_QCOM_DCC_V2=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_QCOMCCI_HWMON=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index b60dd01..f1dcb9d 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -131,6 +131,7 @@
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -402,6 +403,7 @@
CONFIG_USB_PD_POLICY=y
CONFIG_QPNP_USB_PDPHY=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_MSM_SSPHY_QMP=y
@@ -512,6 +514,7 @@
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
@@ -520,12 +523,14 @@
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_CDSP_LOADER=y
CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_SMCINVOKE=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_MSM_QBT1000=y
CONFIG_APSS_CORE_EA=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_QCOMCCI_HWMON=y
@@ -576,6 +581,7 @@
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 37f7d32..3aefe13 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -136,6 +136,7 @@
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -406,6 +407,7 @@
CONFIG_USB_PD_POLICY=y
CONFIG_QPNP_USB_PDPHY=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_MSM_SSPHY_QMP=y
@@ -529,6 +531,7 @@
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
@@ -538,6 +541,7 @@
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_CDSP_LOADER=y
CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_SMCINVOKE=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_MSM_QBT1000=y
@@ -545,6 +549,7 @@
CONFIG_QCOM_DCC_V2=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_QCOMCCI_HWMON=y
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index bb24b4e..b325f74 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1189,20 +1189,24 @@
{
int ret, cpu;
- for_each_possible_cpu(cpu)
- per_cpu(is_hotplugging, cpu) = false;
+ /* set to true so armv8pmu_idle_update doesn't try to load
+ * hw_events before arm_pmu_device_probe has initialized it.
+ */
+ for_each_possible_cpu(cpu) {
+ per_cpu(is_hotplugging, cpu) = true;
+ }
- ret = perf_event_cpu_hp_init();
+ ret = arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
+ (acpi_disabled ? NULL : armv8_pmu_probe_table));
- if (ret)
- return ret;
+ if (!ret) {
+ for_each_possible_cpu(cpu)
+ per_cpu(is_hotplugging, cpu) = false;
- if (acpi_disabled)
- return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
- NULL);
+ ret = perf_event_cpu_hp_init();
+ }
- return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
- armv8_pmu_probe_table);
+ return ret;
}
static struct platform_driver armv8_pmu_driver = {
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 053baff..14fc4d2 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -63,6 +63,7 @@
#define TIF_RESTORE_SIGMASK 7
#define TIF_NOTIFY_RESUME 8
#define TIF_SECCOMP 9 /* secure computing */
+#define TIF_MM_RELEASED 10 /* task MM has been released */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 0b1ff4c..fffb279 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -7,6 +7,7 @@
bool pat_enabled(void);
void pat_disable(const char *reason);
extern void pat_init(void);
+extern void init_cache_modes(void);
extern int reserve_memtype(u64 start, u64 end,
enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9c337b0..feaab07 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1054,6 +1054,13 @@
max_possible_pfn = max_pfn;
/*
+ * This call is required when the CPU does not support PAT. If
+ * mtrr_bp_init() invoked it already via pat_init() the call has no
+ * effect.
+ */
+ init_cache_modes();
+
+ /*
* Define random base addresses for memory sections after max_pfn is
* defined and before each memory section base is used.
*/
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index d376e4b..04c067b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -84,7 +84,7 @@
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
- jz 17f
+ jz .L_copy_short_string
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
@@ -105,7 +105,8 @@
leaq 64(%rdi),%rdi
decl %ecx
jnz 1b
-17: movl %edx,%ecx
+.L_copy_short_string:
+ movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
jz 20f
@@ -221,6 +222,8 @@
*/
ENTRY(copy_user_enhanced_fast_string)
ASM_STAC
+ cmpl $64,%edx
	jb .L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
movl %edx,%ecx
1: rep
movsb
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 83e701f..89d7907 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -36,14 +36,14 @@
#undef pr_fmt
#define pr_fmt(fmt) "" fmt
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
void pat_disable(const char *reason)
{
- if (!__pat_enabled)
+ if (pat_disabled)
return;
if (boot_cpu_done) {
@@ -51,10 +51,8 @@
return;
}
- __pat_enabled = 0;
+ pat_disabled = true;
pr_info("x86/PAT: %s\n", reason);
-
- init_cache_modes();
}
static int __init nopat(char *str)
@@ -66,7 +64,7 @@
bool pat_enabled(void)
{
- return !!__pat_enabled;
+ return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);
@@ -204,6 +202,8 @@
update_cache_mode_entry(i, cache);
}
pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+ init_cm_done = true;
}
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -224,6 +224,7 @@
}
wrmsrl(MSR_IA32_CR_PAT, pat);
+ pat_initialized = true;
__init_cache_modes(pat);
}
@@ -241,10 +242,9 @@
wrmsrl(MSR_IA32_CR_PAT, pat);
}
-static void init_cache_modes(void)
+void init_cache_modes(void)
{
u64 pat = 0;
- static int init_cm_done;
if (init_cm_done)
return;
@@ -286,8 +286,6 @@
}
__init_cache_modes(pat);
-
- init_cm_done = 1;
}
/**
@@ -305,10 +303,8 @@
u64 pat;
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (!pat_enabled()) {
- init_cache_modes();
+ if (pat_disabled)
return;
- }
if ((c->x86_vendor == X86_VENDOR_INTEL) &&
(((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 0c2fae8..73eb7fd 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -992,11 +992,12 @@
die("Segment relocations found but --realmode not specified\n");
/* Order the relocations for more efficient processing */
- sort_relocs(&relocs16);
sort_relocs(&relocs32);
#if ELF_BITS == 64
sort_relocs(&relocs32neg);
sort_relocs(&relocs64);
+#else
+ sort_relocs(&relocs16);
#endif
/* Print the relocations */
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 123d211..8cac3d3 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1691,6 +1691,7 @@
return PTR_ERR(sk_tfm);
}
drbg->ctr_handle = sk_tfm;
+ init_completion(&drbg->ctr_completion);
req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
if (!req) {
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 8baab43..7830d30 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -496,7 +496,7 @@
goto done;
pos++;
- if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
+ if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size))
goto done;
pos += digest_info->size;
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 3b7e4b0..4b7c726 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,3 @@
ccflags-y += -I$(src) # needed for trace events
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 6485c77..b351c85 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -15,6 +15,40 @@
*
*/
+/*
+ * Locking overview
+ *
+ * There are 3 main spinlocks which must be acquired in the
+ * order shown:
+ *
+ * 1) proc->outer_lock : protects binder_ref
+ * binder_proc_lock() and binder_proc_unlock() are
+ * used to acq/rel.
+ * 2) node->lock : protects most fields of binder_node.
+ * binder_node_lock() and binder_node_unlock() are
+ * used to acq/rel
+ * 3) proc->inner_lock : protects the thread and node lists
+ * (proc->threads, proc->nodes) and all todo lists associated
+ * with the binder_proc (proc->todo, thread->todo,
+ * proc->delivered_death and node->async_todo), as well as
+ * thread->transaction_stack
+ * binder_inner_proc_lock() and binder_inner_proc_unlock()
+ * are used to acq/rel
+ *
+ * Any lock under procA must never be nested under any lock at the same
+ * level or below on procB.
+ *
+ * Functions that require a lock held on entry indicate which lock
+ * in the suffix of the function name:
+ *
+ * foo_olocked() : requires node->outer_lock
+ * foo_nlocked() : requires node->lock
+ * foo_ilocked() : requires proc->inner_lock
+ * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
+ * foo_nilocked(): requires node->lock and proc->inner_lock
+ * ...
+ */
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
@@ -24,7 +58,6 @@
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
@@ -34,30 +67,31 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
+#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif
#include <uapi/linux/android/binder.h>
+#include "binder_alloc.h"
#include "binder_trace.h"
-static DEFINE_MUTEX(binder_main_lock);
+static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_procs_lock);
+
static HLIST_HEAD(binder_dead_nodes);
+static DEFINE_SPINLOCK(binder_dead_nodes_lock);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static int binder_last_id;
+static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -103,17 +137,13 @@
BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
BINDER_DEBUG_FREE_BUFFER = 1U << 11,
BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
- BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
- BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
- BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
+ BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);
@@ -170,26 +200,27 @@
};
struct binder_stats {
- int br[_IOC_NR(BR_FAILED_REPLY) + 1];
- int bc[_IOC_NR(BC_REPLY_SG) + 1];
- int obj_created[BINDER_STAT_COUNT];
- int obj_deleted[BINDER_STAT_COUNT];
+ atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+ atomic_t obj_created[BINDER_STAT_COUNT];
+ atomic_t obj_deleted[BINDER_STAT_COUNT];
};
static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
- binder_stats.obj_deleted[type]++;
+ atomic_inc(&binder_stats.obj_deleted[type]);
}
static inline void binder_stats_created(enum binder_stat_types type)
{
- binder_stats.obj_created[type]++;
+ atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
int debug_id;
+ int debug_id_done;
int call_type;
int from_proc;
int from_thread;
@@ -199,11 +230,14 @@
int to_node;
int data_size;
int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
const char *context_name;
};
struct binder_transaction_log {
- int next;
- int full;
+ atomic_t cur;
+ bool full;
struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
@@ -213,19 +247,26 @@
struct binder_transaction_log *log)
{
struct binder_transaction_log_entry *e;
+ unsigned int cur = atomic_inc_return(&log->cur);
- e = &log->entry[log->next];
- memset(e, 0, sizeof(*e));
- log->next++;
- if (log->next == ARRAY_SIZE(log->entry)) {
- log->next = 0;
+ if (cur >= ARRAY_SIZE(log->entry))
log->full = 1;
- }
+ e = &log->entry[cur % ARRAY_SIZE(log->entry)];
+ WRITE_ONCE(e->debug_id_done, 0);
+ /*
+ * write-barrier to synchronize access to e->debug_id_done.
+ * We make sure the initialized 0 value is seen before
+ * memset() other fields are zeroed by memset.
+ */
+ smp_wmb();
+ memset(e, 0, sizeof(*e));
return e;
}
struct binder_context {
struct binder_node *binder_context_mgr_node;
+ struct mutex context_mgr_node_lock;
+
kuid_t binder_context_mgr_uid;
const char *name;
};
@@ -236,11 +277,20 @@
struct binder_context context;
};
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry: node enqueued on list
+ * @type: type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
struct binder_work {
struct list_head entry;
+
enum {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
@@ -248,8 +298,72 @@
} type;
};
+struct binder_error {
+ struct binder_work work;
+ uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id: unique ID for debugging
+ * (invariant after initialized)
+ * @lock: lock for node fields
+ * @work: worklist element for node work
+ * (protected by @proc->inner_lock)
+ * @rb_node: element for proc->nodes tree
+ * (protected by @proc->inner_lock)
+ * @dead_node: element for binder_dead_nodes list
+ * (protected by binder_dead_nodes_lock)
+ * @proc: binder_proc that owns this node
+ * (invariant after initialized)
+ * @refs: list of references on this node
+ * (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ * initiating a transaction
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_weak_refs: weak user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_strong_refs: strong user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @tmp_refs: temporary kernel refs
+ * (protected by @proc->inner_lock while @proc
+ * is valid, and by binder_dead_nodes_lock
+ * if @proc is NULL. During inc/dec and node release
+ * it is also protected by @lock to provide safety
+ * as the node dies and @proc becomes NULL)
+ * @ptr: userspace pointer for node
+ * (invariant, no lock needed)
+ * @cookie: userspace cookie for node
+ * (invariant, no lock needed)
+ * @has_strong_ref: userspace notified of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_strong_ref: userspace has acked notification of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_weak_ref: userspace notified of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_weak_ref: userspace has acked notification of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ * (protected by @lock)
+ * @accept_fds: file descriptor operations supported for node
+ * (invariant after initialized)
+ * @min_priority: minimum scheduling priority
+ * (invariant after initialized)
+ * @async_todo: list of async work items
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
struct binder_node {
int debug_id;
+ spinlock_t lock;
struct binder_work work;
union {
struct rb_node rb_node;
@@ -260,64 +374,153 @@
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
+ int tmp_refs;
binder_uintptr_t ptr;
binder_uintptr_t cookie;
- unsigned has_strong_ref:1;
- unsigned pending_strong_ref:1;
- unsigned has_weak_ref:1;
- unsigned pending_weak_ref:1;
- unsigned has_async_transaction:1;
- unsigned accept_fds:1;
- unsigned min_priority:8;
+ struct {
+ /*
+ * bitfield elements protected by
+ * proc inner_lock
+ */
+ u8 has_strong_ref:1;
+ u8 pending_strong_ref:1;
+ u8 has_weak_ref:1;
+ u8 pending_weak_ref:1;
+ };
+ struct {
+ /*
+ * invariant after initialization
+ */
+ u8 accept_fds:1;
+ u8 min_priority;
+ };
+ bool has_async_transaction;
struct list_head async_todo;
};
struct binder_ref_death {
+ /**
+ * @work: worklist element for death notifications
+ * (protected by inner_lock of the proc that
+ * this ref belongs to)
+ */
struct binder_work work;
binder_uintptr_t cookie;
};
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id: unique ID for the ref
+ * @desc: unique userspace handle for ref
+ * @strong: strong ref count (debugging only if not locked)
+ * @weak: weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+ int debug_id;
+ uint32_t desc;
+ int strong;
+ int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data: binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry: list entry for node->refs list in target node
+ * (protected by @node->lock)
+ * @proc: binder_proc containing ref
+ * @node: binder_node of target node. When cleaning up a
+ * ref for deletion in binder_cleanup_ref, a non-NULL
+ * @node indicates the node must be freed
+ * @death: pointer to death notification (ref_death) if requested
+ * (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
struct binder_ref {
/* Lookups needed: */
/* node + proc => ref (transaction) */
/* desc + proc => ref (transaction, inc/dec ref) */
/* node => refs + procs (proc exit) */
- int debug_id;
+ struct binder_ref_data data;
struct rb_node rb_node_desc;
struct rb_node rb_node_node;
struct hlist_node node_entry;
struct binder_proc *proc;
struct binder_node *node;
- uint32_t desc;
- int strong;
- int weak;
struct binder_ref_death *death;
};
-struct binder_buffer {
- struct list_head entry; /* free and allocated entries by address */
- struct rb_node rb_node; /* free entry by size or allocated entry */
- /* by address */
- unsigned free:1;
- unsigned allow_user_free:1;
- unsigned async_transaction:1;
- unsigned debug_id:29;
-
- struct binder_transaction *transaction;
-
- struct binder_node *target_node;
- size_t data_size;
- size_t offsets_size;
- size_t extra_buffers_size;
- uint8_t data[0];
-};
-
enum binder_deferred_state {
BINDER_DEFERRED_PUT_FILES = 0x01,
BINDER_DEFERRED_FLUSH = 0x02,
BINDER_DEFERRED_RELEASE = 0x04,
};
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node: element for binder_procs list
+ * @threads: rbtree of binder_threads in this proc
+ * (protected by @inner_lock)
+ * @nodes: rbtree of binder nodes associated with
+ * this proc ordered by node->ptr
+ * (protected by @inner_lock)
+ * @refs_by_desc: rbtree of refs ordered by ref->desc
+ * (protected by @outer_lock)
+ * @refs_by_node: rbtree of refs ordered by ref->node
+ * (protected by @outer_lock)
+ * @pid: PID of group_leader of process
+ * (invariant after initialized)
+ * @tsk: task_struct for group_leader of process
+ * (invariant after initialized)
+ * @files: files_struct for process
+ * (invariant after initialized)
+ * @deferred_work_node: element for binder_deferred_list
+ * (protected by binder_deferred_lock)
+ * @deferred_work: bitmap of deferred work to perform
+ * (protected by binder_deferred_lock)
+ * @is_dead: process is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @inner_lock)
+ * @todo: list of work for this process
+ * (protected by @inner_lock)
+ * @wait: wait queue head to wait for proc work
+ * (invariant after initialized)
+ * @stats: per-process binder statistics
+ * (atomics, no lock needed)
+ * @delivered_death: list of delivered death notification
+ * (protected by @inner_lock)
+ * @max_threads: cap on number of binder threads
+ * (protected by @inner_lock)
+ * @requested_threads: number of binder threads requested but not
+ * yet started. In current implementation, can
+ * only be 0 or 1.
+ * (protected by @inner_lock)
+ * @requested_threads_started: number binder threads started
+ * (protected by @inner_lock)
+ * @ready_threads: number of threads waiting for proc work
+ * (protected by @inner_lock)
+ * @tmp_ref: temporary reference to indicate proc is in use
+ * (protected by @inner_lock)
+ * @default_priority: default scheduler priority
+ * (invariant after initialized)
+ * @debugfs_entry: debugfs node
+ * @alloc: binder allocator bookkeeping
+ * @context: binder_context for this proc
+ * (invariant after initialized)
+ * @inner_lock: can nest under outer_lock and/or node lock
+ * @outer_lock: no nesting under inner or node lock
+ * Lock order: 1) outer, 2) node, 3) inner
+ *
+ * Bookkeeping structure for binder processes
+ */
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
@@ -325,23 +528,12 @@
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
int pid;
- struct vm_area_struct *vma;
- struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
- void *buffer;
- ptrdiff_t user_buffer_offset;
+ bool is_dead;
- struct list_head buffers;
- struct rb_root free_buffers;
- struct rb_root allocated_buffers;
- size_t free_async_space;
-
- struct page **pages;
- size_t buffer_size;
- uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
@@ -350,9 +542,13 @@
int requested_threads;
int requested_threads_started;
int ready_threads;
+ int tmp_ref;
long default_priority;
struct dentry *debugfs_entry;
+ struct binder_alloc alloc;
struct binder_context *context;
+ spinlock_t inner_lock;
+ spinlock_t outer_lock;
};
enum {
@@ -361,22 +557,54 @@
BINDER_LOOPER_STATE_EXITED = 0x04,
BINDER_LOOPER_STATE_INVALID = 0x08,
BINDER_LOOPER_STATE_WAITING = 0x10,
- BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc: binder process for this thread
+ * (invariant after initialization)
+ * @rb_node: element for proc->threads rbtree
+ * (protected by @proc->inner_lock)
+ * @pid: PID for this thread
+ * (invariant after initialization)
+ * @looper: bitmap of looping state
+ * (only accessed by this thread)
+ * @looper_need_return: looping thread needs to exit driver
+ * (no lock needed)
+ * @transaction_stack: stack of in-progress transactions for this thread
+ * (protected by @proc->inner_lock)
+ * @todo: list of work to do for this thread
+ * (protected by @proc->inner_lock)
+ * @return_error: transaction errors reported by this thread
+ * (only accessed by this thread)
+ * @reply_error: transaction errors reported by target thread
+ * (protected by @proc->inner_lock)
+ * @wait: wait queue for thread work
+ * @stats: per-thread statistics
+ * (atomics, no lock needed)
+ * @tmp_ref: temporary reference to indicate thread is in use
+ * (atomic since @proc->inner_lock cannot
+ * always be acquired)
+ * @is_dead: thread is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder threads.
+ */
struct binder_thread {
struct binder_proc *proc;
struct rb_node rb_node;
int pid;
- int looper;
+ int looper; /* only modified by this thread */
+ bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
- uint32_t return_error; /* Write failed, return error code in read buf */
- uint32_t return_error2; /* Write failed, return error code in read */
- /* buffer. Used when sending a reply to a dead process that */
- /* we are also waiting on */
+ struct binder_error return_error;
+ struct binder_error reply_error;
wait_queue_head_t wait;
struct binder_stats stats;
+ atomic_t tmp_ref;
+ bool is_dead;
};
struct binder_transaction {
@@ -396,17 +624,259 @@
long priority;
long saved_priority;
kuid_t sender_euid;
+ /**
+ * @lock: protects @from, @to_proc, and @to_thread
+ *
+ * @from, @to_proc, and @to_thread can be set to NULL
+ * during thread teardown
+ */
+ spinlock_t lock;
};
+/**
+ * binder_proc_lock() - Acquire outer lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->outer_lock. Used to protect binder_ref
+ * structures associated with the given proc.
+ */
+#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
+static void
+_binder_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->outer_lock);
+}
+
+/**
+ * binder_proc_unlock() - Release spinlock for given binder_proc
+ * @proc: struct binder_proc to release
+ *
+ * Release lock acquired via binder_proc_lock()
+ */
+#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+static void
+_binder_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->outer_lock);
+}
+
+/**
+ * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->inner_lock. Used to protect todo lists
+ */
+#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
+static void
+_binder_inner_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->inner_lock);
+}
+
+/**
+ * binder_inner_proc_unlock() - Release inner lock for given binder_proc
+ * @proc: struct binder_proc to release
+ *
+ * Release lock acquired via binder_inner_proc_lock()
+ */
+#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
+static void
+_binder_inner_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->inner_lock);
+}
+
+/**
+ * binder_node_lock() - Acquire spinlock for given binder_node
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. Used to protect binder_node fields
+ */
+#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
+static void
+_binder_node_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+}
+
+/**
+ * binder_node_unlock() - Release spinlock for given binder_node
+ * @node: struct binder_node to release
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
+static void
+_binder_node_unlock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&node->lock);
+}
+
+/**
+ * binder_node_inner_lock() - Acquire node and inner locks
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. If node->proc also acquires
+ * proc->inner_lock. Used to protect binder_node fields
+ */
+#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
+static void
+_binder_node_inner_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+}
+
+/**
+ * binder_node_inner_unlock() - Release node and inner locks
+ * @node: struct binder_node to release
+ *
+ * Release lock acquired via binder_node_inner_lock()
+ */
+#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
+static void
+_binder_node_inner_unlock(struct binder_node *node, int line)
+{
+ struct binder_proc *proc = node->proc;
+
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ spin_unlock(&node->lock);
+}
+
+static bool binder_worklist_empty_ilocked(struct list_head *list)
+{
+ return list_empty(list);
+}
+
+/**
+ * binder_worklist_empty() - Check if no items on the work list
+ * @proc: binder_proc associated with list
+ * @list: list to check
+ *
+ * Return: true if there are no items on list, else false
+ */
+static bool binder_worklist_empty(struct binder_proc *proc,
+ struct list_head *list)
+{
+ bool ret;
+
+ binder_inner_proc_lock(proc);
+ ret = binder_worklist_empty_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return ret;
+}
+
+static void
+binder_enqueue_work_ilocked(struct binder_work *work,
+ struct list_head *target_list)
+{
+ BUG_ON(target_list == NULL);
+ BUG_ON(work->entry.next && !list_empty(&work->entry));
+ list_add_tail(&work->entry, target_list);
+}
+
+/**
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ */
+static void
+binder_enqueue_work(struct binder_proc *proc,
+ struct binder_work *work,
+ struct list_head *target_list)
+{
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(work, target_list);
+ binder_inner_proc_unlock(proc);
+}
+
+static void
+binder_dequeue_work_ilocked(struct binder_work *work)
+{
+ list_del_init(&work->entry);
+}
+
+/**
+ * binder_dequeue_work() - Removes an item from the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to remove from list
+ *
+ * Removes the specified work item from whatever list it is on.
+ * Can safely be called if work is not on any list.
+ */
+static void
+binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
+{
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(work);
+ binder_inner_proc_unlock(proc);
+}
+
+static struct binder_work *binder_dequeue_work_head_ilocked(
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ w = list_first_entry_or_null(list, struct binder_work, entry);
+ if (w)
+ list_del_init(&w->entry);
+ return w;
+}
+
+/**
+ * binder_dequeue_work_head() - Dequeues the item at head of list
+ * @proc: binder_proc associated with list
+ * @list: list to dequeue head
+ *
+ * Removes the head of the list if there are items on the list
+ *
+ * Return: pointer dequeued binder_work, NULL if list was empty
+ */
+static struct binder_work *binder_dequeue_work_head(
+ struct binder_proc *proc,
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ w = binder_dequeue_work_head_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return w;
+}
+
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+static void binder_free_thread(struct binder_thread *thread);
+static void binder_free_proc(struct binder_proc *proc);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
struct files_struct *files = proc->files;
unsigned long rlim_cur;
unsigned long irqs;
- int ret;
if (files == NULL)
return -ESRCH;
@@ -417,11 +887,7 @@
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- preempt_enable_no_resched();
- ret = __alloc_fd(files, 0, rlim_cur, flags);
- preempt_disable();
-
- return ret;
+ return __alloc_fd(files, 0, rlim_cur, flags);
}
/*
@@ -430,11 +896,8 @@
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
- if (proc->files) {
- preempt_enable_no_resched();
+ if (proc->files)
__fd_install(proc->files, fd, file);
- preempt_disable();
- }
}
/*
@@ -458,74 +921,6 @@
return retval;
}
-static inline void binder_lock(const char *tag)
-{
- trace_binder_lock(tag);
- mutex_lock(&binder_main_lock);
- preempt_disable();
- trace_binder_locked(tag);
-}
-
-static inline void binder_unlock(const char *tag)
-{
- trace_binder_unlock(tag);
- mutex_unlock(&binder_main_lock);
- preempt_enable();
-}
-
-static inline void *kzalloc_preempt_disabled(size_t size)
-{
- void *ptr;
-
- ptr = kzalloc(size, GFP_NOWAIT);
- if (ptr)
- return ptr;
-
- preempt_enable_no_resched();
- ptr = kzalloc(size, GFP_KERNEL);
- preempt_disable();
-
- return ptr;
-}
-
-static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
-{
- long ret;
-
- preempt_enable_no_resched();
- ret = copy_to_user(to, from, n);
- preempt_disable();
- return ret;
-}
-
-static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
-{
- long ret;
-
- preempt_enable_no_resched();
- ret = copy_from_user(to, from, n);
- preempt_disable();
- return ret;
-}
-
-#define get_user_preempt_disabled(x, ptr) \
-({ \
- int __ret; \
- preempt_enable_no_resched(); \
- __ret = get_user(x, ptr); \
- preempt_disable(); \
- __ret; \
-})
-
-#define put_user_preempt_disabled(x, ptr) \
-({ \
- int __ret; \
- preempt_enable_no_resched(); \
- __ret = put_user(x, ptr); \
- preempt_disable(); \
- __ret; \
-})
-
static void binder_set_nice(long nice)
{
long min_nice;
@@ -544,439 +939,14 @@
binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
-static size_t binder_buffer_size(struct binder_proc *proc,
- struct binder_buffer *buffer)
-{
- if (list_is_last(&buffer->entry, &proc->buffers))
- return proc->buffer + proc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
-}
-
-static void binder_insert_free_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
-{
- struct rb_node **p = &proc->free_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
- size_t buffer_size;
- size_t new_buffer_size;
-
- BUG_ON(!new_buffer->free);
-
- new_buffer_size = binder_buffer_size(proc, new_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: add free buffer, size %zd, at %p\n",
- proc->pid, new_buffer_size, new_buffer);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (new_buffer_size < buffer_size)
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
-}
-
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
-{
- struct rb_node **p = &proc->allocated_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
-
- BUG_ON(new_buffer->free);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
-
- if (new_buffer < buffer)
- p = &parent->rb_left;
- else if (new_buffer > buffer)
- p = &parent->rb_right;
- else
- BUG();
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
-}
-
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- uintptr_t user_ptr)
-{
- struct rb_node *n = proc->allocated_buffers.rb_node;
- struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
-
- kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
-
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
-
- if (kern_ptr < buffer)
- n = n->rb_left;
- else if (kern_ptr > buffer)
- n = n->rb_right;
- else
- return buffer;
- }
- return NULL;
-}
-
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
-{
- void *page_addr;
- unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: %s pages %p-%p\n", proc->pid,
- allocate ? "allocate" : "free", start, end);
-
- if (end <= start)
- return 0;
-
- trace_binder_update_page_range(proc, allocate, start, end);
-
- if (vma)
- mm = NULL;
- else
- mm = get_task_mm(proc->tsk);
-
- preempt_enable_no_resched();
-
- if (mm) {
- down_write(&mm->mmap_sem);
- vma = proc->vma;
- if (vma && mm != proc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- proc->pid);
- vma = NULL;
- }
- }
-
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
- pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- proc->pid);
- goto err_no_vma;
- }
-
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
- int ret;
-
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
- pr_err("%d: binder_alloc_buf failed for page at %p\n",
- proc->pid, page_addr);
- goto err_alloc_page_failed;
- }
- ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
- flush_cache_vmap((unsigned long)page_addr,
- (unsigned long)page_addr + PAGE_SIZE);
- if (ret != 1) {
- pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
- proc->pid, page_addr);
- goto err_map_kernel_failed;
- }
- user_page_addr =
- (uintptr_t)page_addr + proc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
- if (ret) {
- pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
- proc->pid, user_page_addr);
- goto err_vm_insert_page_failed;
- }
- /* vm_insert_page does not seem to increment the refcount */
- }
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
-
- preempt_disable();
-
- return 0;
-
-free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- proc->user_buffer_offset, PAGE_SIZE, NULL);
-err_vm_insert_page_failed:
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
-err_alloc_page_failed:
- ;
- }
-err_no_vma:
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
-
- preempt_disable();
-
- return -ENOMEM;
-}
-
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
-{
- struct rb_node *n = proc->free_buffers.rb_node;
- struct binder_buffer *buffer;
- size_t buffer_size;
- struct rb_node *best_fit = NULL;
- void *has_page_addr;
- void *end_page_addr;
- size_t size, data_offsets_size;
-
- if (proc->vma == NULL) {
- pr_err("%d: binder_alloc_buf, no vma\n",
- proc->pid);
- return NULL;
- }
-
- data_offsets_size = ALIGN(data_size, sizeof(void *)) +
- ALIGN(offsets_size, sizeof(void *));
-
- if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
- binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
- proc->pid, data_size, offsets_size);
- return NULL;
- }
- size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
- if (size < data_offsets_size || size < extra_buffers_size) {
- binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
- proc->pid, extra_buffers_size);
- return NULL;
- }
- if (is_async &&
- proc->free_async_space < size + sizeof(struct binder_buffer)) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd failed, no async space left\n",
- proc->pid, size);
- return NULL;
- }
-
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (size < buffer_size) {
- best_fit = n;
- n = n->rb_left;
- } else if (size > buffer_size)
- n = n->rb_right;
- else {
- best_fit = n;
- break;
- }
- }
- if (best_fit == NULL) {
- pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
- proc->pid, size);
- return NULL;
- }
- if (n == NULL) {
- buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
- buffer_size = binder_buffer_size(proc, buffer);
- }
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
- proc->pid, size, buffer, buffer_size);
-
- has_page_addr =
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
- else
- buffer_size = size + sizeof(struct binder_buffer);
- }
- end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
- if (end_page_addr > has_page_addr)
- end_page_addr = has_page_addr;
- if (binder_update_page_range(proc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
- return NULL;
-
- rb_erase(best_fit, &proc->free_buffers);
- buffer->free = 0;
- binder_insert_allocated_buffer(proc, buffer);
- if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
- list_add(&new_buffer->entry, &buffer->entry);
- new_buffer->free = 1;
- binder_insert_free_buffer(proc, new_buffer);
- }
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got %p\n",
- proc->pid, size, buffer);
- buffer->data_size = data_size;
- buffer->offsets_size = offsets_size;
- buffer->extra_buffers_size = extra_buffers_size;
- buffer->async_transaction = is_async;
- if (is_async) {
- proc->free_async_space -= size + sizeof(struct binder_buffer);
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_alloc_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
- }
-
- return buffer;
-}
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
-}
-
-static void *buffer_end_page(struct binder_buffer *buffer)
-{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
-}
-
-static void binder_delete_free_buffer(struct binder_proc *proc,
- struct binder_buffer *buffer)
-{
- struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
- BUG_ON(proc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
- BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
-
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
- }
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
- proc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(proc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
- }
-}
-
-static void binder_free_buf(struct binder_proc *proc,
- struct binder_buffer *buffer)
-{
- size_t size, buffer_size;
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *)) +
- ALIGN(buffer->extra_buffers_size, sizeof(void *));
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_free_buf %p size %zd buffer_size %zd\n",
- proc->pid, buffer, size, buffer_size);
-
- BUG_ON(buffer->free);
- BUG_ON(size > buffer_size);
- BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < proc->buffer);
- BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
-
- if (buffer->async_transaction) {
- proc->free_async_space += size + sizeof(struct binder_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_free_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
- }
-
- binder_update_page_range(proc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
- rb_erase(&buffer->rb_node, &proc->allocated_buffers);
- buffer->free = 1;
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
-
- if (next->free) {
- rb_erase(&next->rb_node, &proc->free_buffers);
- binder_delete_free_buffer(proc, next);
- }
- }
- if (proc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
-
- if (prev->free) {
- binder_delete_free_buffer(proc, buffer);
- rb_erase(&prev->rb_node, &proc->free_buffers);
- buffer = prev;
- }
- }
- binder_insert_free_buffer(proc, buffer);
-}
-
-static struct binder_node *binder_get_node(struct binder_proc *proc,
- binder_uintptr_t ptr)
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
+
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
@@ -984,21 +954,45 @@
n = n->rb_left;
else if (ptr > node->ptr)
n = n->rb_right;
- else
+ else {
+ /*
+ * take an implicit weak reference
+ * to ensure node stays alive until
+ * call to binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
return node;
+ }
}
return NULL;
}
-static struct binder_node *binder_new_node(struct binder_proc *proc,
- binder_uintptr_t ptr,
- binder_uintptr_t cookie)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ binder_uintptr_t ptr)
+{
+ struct binder_node *node;
+
+ binder_inner_proc_lock(proc);
+ node = binder_get_node_ilocked(proc, ptr);
+ binder_inner_proc_unlock(proc);
+ return node;
+}
+
+static struct binder_node *binder_init_node_ilocked(
+ struct binder_proc *proc,
+ struct binder_node *new_node,
+ struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
+ binder_uintptr_t ptr = fp ? fp->binder : 0;
+ binder_uintptr_t cookie = fp ? fp->cookie : 0;
+ __u32 flags = fp ? fp->flags : 0;
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
while (*p) {
+
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
@@ -1006,33 +1000,74 @@
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
- else
- return NULL;
+ else {
+ /*
+ * A matching node is already in
+ * the rb tree. Abandon the init
+ * and return it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ return node;
+ }
}
-
- node = kzalloc_preempt_disabled(sizeof(*node));
- if (node == NULL)
- return NULL;
+ node = new_node;
binder_stats_created(BINDER_STAT_NODE);
+ node->tmp_refs++;
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
- node->debug_id = ++binder_last_id;
+ node->debug_id = atomic_inc_return(&binder_last_id);
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
+ node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
(u64)node->ptr, (u64)node->cookie);
+
return node;
}
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
- struct list_head *target_list)
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ struct flat_binder_object *fp)
{
+ struct binder_node *node;
+ struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+ if (!new_node)
+ return NULL;
+ binder_inner_proc_lock(proc);
+ node = binder_init_node_ilocked(proc, new_node, fp);
+ binder_inner_proc_unlock(proc);
+ if (node != new_node)
+ /*
+ * The node was already added by another thread
+ */
+ kfree(new_node);
+
+ return node;
+}
+
+static void binder_free_node(struct binder_node *node)
+{
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_nilocked(struct binder_node *node, int strong,
+ int internal,
+ struct list_head *target_list)
+{
+ struct binder_proc *proc = node->proc;
+
+ BUG_ON(!spin_is_locked(&node->lock));
+ if (proc)
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
if (strong) {
if (internal) {
if (target_list == NULL &&
@@ -1049,8 +1084,8 @@
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
- list_del_init(&node->work.entry);
- list_add_tail(&node->work.entry, target_list);
+ binder_dequeue_work_ilocked(&node->work);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
if (!internal)
@@ -1061,58 +1096,169 @@
node->debug_id);
return -EINVAL;
}
- list_add_tail(&node->work.entry, target_list);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
}
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
{
+ int ret;
+
+ binder_node_inner_lock(node);
+ ret = binder_inc_node_nilocked(node, strong, internal, target_list);
+ binder_node_inner_unlock(node);
+
+ return ret;
+}
+
+static bool binder_dec_node_nilocked(struct binder_node *node,
+ int strong, int internal)
+{
+ struct binder_proc *proc = node->proc;
+
+ BUG_ON(!spin_is_locked(&node->lock));
+ if (proc)
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
if (strong) {
if (internal)
node->internal_strong_refs--;
else
node->local_strong_refs--;
if (node->local_strong_refs || node->internal_strong_refs)
- return 0;
+ return false;
} else {
if (!internal)
node->local_weak_refs--;
- if (node->local_weak_refs || !hlist_empty(&node->refs))
- return 0;
+ if (node->local_weak_refs || node->tmp_refs ||
+ !hlist_empty(&node->refs))
+ return false;
}
- if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+ if (proc && (node->has_strong_ref || node->has_weak_ref)) {
if (list_empty(&node->work.entry)) {
- list_add_tail(&node->work.entry, &node->proc->todo);
+ binder_enqueue_work_ilocked(&node->work, &proc->todo);
wake_up_interruptible(&node->proc->wait);
}
} else {
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
- !node->local_weak_refs) {
- list_del_init(&node->work.entry);
- if (node->proc) {
- rb_erase(&node->rb_node, &node->proc->nodes);
+ !node->local_weak_refs && !node->tmp_refs) {
+ if (proc) {
+ binder_dequeue_work_ilocked(&node->work);
+ rb_erase(&node->rb_node, &proc->nodes);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"refless node %d deleted\n",
node->debug_id);
} else {
+ BUG_ON(!list_empty(&node->work.entry));
+ spin_lock(&binder_dead_nodes_lock);
+ /*
+ * tmp_refs could have changed so
+ * check it again
+ */
+ if (node->tmp_refs) {
+ spin_unlock(&binder_dead_nodes_lock);
+ return false;
+ }
hlist_del(&node->dead_node);
+ spin_unlock(&binder_dead_nodes_lock);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"dead node %d deleted\n",
node->debug_id);
}
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ return true;
}
}
-
- return 0;
+ return false;
}
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ bool free_node;
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- u32 desc, bool need_strong_ref)
+ binder_node_inner_lock(node);
+ free_node = binder_dec_node_nilocked(node, strong, internal);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
+
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+ /*
+ * No call to binder_inc_node() is needed since we
+ * don't need to inform userspace of any changes to
+ * tmp_refs
+ */
+ node->tmp_refs++;
+}
+
+/**
+ * binder_inc_node_tmpref() - take a temporary reference on node
+ * @node: node to reference
+ *
+ * Take reference on node to prevent the node from being freed
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (eg traversing the dead node list to
+ * print nodes)
+ */
+static void binder_inc_node_tmpref(struct binder_node *node)
+{
+ binder_node_lock(node);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ binder_inc_node_tmpref_ilocked(node);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ binder_node_unlock(node);
+}
+
+/**
+ * binder_dec_node_tmpref() - remove a temporary reference on node
+ * @node: node to reference
+ *
+ * Release temporary reference on node taken via binder_inc_node_tmpref()
+ */
+static void binder_dec_node_tmpref(struct binder_node *node)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ if (!node->proc)
+ spin_lock(&binder_dead_nodes_lock);
+ node->tmp_refs--;
+ BUG_ON(node->tmp_refs < 0);
+ if (!node->proc)
+ spin_unlock(&binder_dead_nodes_lock);
+ /*
+ * Call binder_dec_node() to check if all refcounts are 0
+ * and cleanup is needed. Calling with strong=0 and internal=1
+ * causes no actual reference to be released in binder_dec_node().
+ * If that changes, a change is needed here too.
+ */
+ free_node = binder_dec_node_nilocked(node, 0, 1);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
+
+static void binder_put_node(struct binder_node *node)
+{
+ binder_dec_node_tmpref(node);
+}
+
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1120,11 +1266,11 @@
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc) {
+ if (desc < ref->data.desc) {
n = n->rb_left;
- } else if (desc > ref->desc) {
+ } else if (desc > ref->data.desc) {
n = n->rb_right;
- } else if (need_strong_ref && !ref->strong) {
+ } else if (need_strong_ref && !ref->data.strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
@@ -1134,14 +1280,34 @@
return NULL;
}
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
- struct binder_node *node)
+/**
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
+ * @proc: binder_proc that owns the ref
+ * @node: binder_node of target
+ * @new_ref: newly allocated binder_ref to be initialized or %NULL
+ *
+ * Look up the ref for the given node and return it if it exists
+ *
+ * If it doesn't exist and the caller provides a newly allocated
+ * ref, initialize the fields of the newly allocated ref and insert
+ * into the given proc rb_trees and node refs list.
+ *
+ * Return: the ref for node. It is possible that another thread
+ * allocated/initialized the ref first in which case the
+ * returned ref would be different than the passed-in
+ * new_ref. new_ref must be kfree'd by the caller in
+ * this case.
+ */
+static struct binder_ref *binder_get_ref_for_node_olocked(
+ struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_ref *new_ref)
{
- struct rb_node *n;
+ struct binder_context *context = proc->context;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
- struct binder_ref *ref, *new_ref;
- struct binder_context *context = proc->context;
+ struct binder_ref *ref;
+ struct rb_node *n;
while (*p) {
parent = *p;
@@ -1154,22 +1320,22 @@
else
return ref;
}
- new_ref = kzalloc_preempt_disabled(sizeof(*ref));
- if (new_ref == NULL)
+ if (!new_ref)
return NULL;
+
binder_stats_created(BINDER_STAT_REF);
- new_ref->debug_id = ++binder_last_id;
+ new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
+ new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->desc > new_ref->desc)
+ if (ref->data.desc > new_ref->data.desc)
break;
- new_ref->desc = ref->desc + 1;
+ new_ref->data.desc = ref->data.desc + 1;
}
p = &proc->refs_by_desc.rb_node;
@@ -1177,121 +1343,423 @@
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
- if (new_ref->desc < ref->desc)
+ if (new_ref->data.desc < ref->data.desc)
p = &(*p)->rb_left;
- else if (new_ref->desc > ref->desc)
+ else if (new_ref->data.desc > ref->data.desc)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
- if (node) {
- hlist_add_head(&new_ref->node_entry, &node->refs);
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for node %d\n",
- proc->pid, new_ref->debug_id, new_ref->desc,
- node->debug_id);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for dead node\n",
- proc->pid, new_ref->debug_id, new_ref->desc);
- }
+ binder_node_lock(node);
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d new ref %d desc %d for node %d\n",
+ proc->pid, new_ref->data.debug_id, new_ref->data.desc,
+ node->debug_id);
+ binder_node_unlock(node);
return new_ref;
}
-static void binder_delete_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ bool delete_node = false;
+
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d delete ref %d desc %d for node %d\n",
- ref->proc->pid, ref->debug_id, ref->desc,
+ ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
- if (ref->strong)
- binder_dec_node(ref->node, 1, 1);
+
+ binder_node_inner_lock(ref->node);
+ if (ref->data.strong)
+ binder_dec_node_nilocked(ref->node, 1, 1);
+
hlist_del(&ref->node_entry);
- binder_dec_node(ref->node, 0, 1);
+ delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
+ binder_node_inner_unlock(ref->node);
+ /*
+ * Clear ref->node unless we want the caller to free the node
+ */
+ if (!delete_node) {
+ /*
+ * The caller uses ref->node to determine
+ * whether the node needs to be freed. Clear
+ * it since the node is still alive.
+ */
+ ref->node = NULL;
+ }
+
if (ref->death) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d delete ref %d desc %d has death notification\n",
- ref->proc->pid, ref->debug_id, ref->desc);
- list_del(&ref->death->work.entry);
- kfree(ref->death);
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc);
+ binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
- kfree(ref);
binder_stats_deleted(BINDER_STAT_REF);
}
-static int binder_inc_ref(struct binder_ref *ref, int strong,
- struct list_head *target_list)
+/**
+ * binder_inc_ref_olocked() - increment the ref for given handle
+ * @ref: ref to be incremented
+ * @strong: if true, strong increment, else weak
+ * @target_list: list to queue node work on
+ *
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
+ *
+ * Return: 0, if successful, else errno
+ */
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
{
int ret;
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
if (ret)
return ret;
}
- ref->strong++;
+ ref->data.strong++;
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
ret = binder_inc_node(ref->node, 0, 1, target_list);
if (ret)
return ret;
}
- ref->weak++;
+ ref->data.weak++;
}
return 0;
}
-
-static int binder_dec_ref(struct binder_ref *ref, int strong)
+/**
+ * binder_dec_ref() - dec the ref for given handle
+ * @ref: ref to be decremented
+ * @strong: if true, strong decrement, else weak
+ *
+ * Decrement the ref.
+ *
+ * Return: true if ref is cleaned up and ready to be freed
+ */
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
- ref->strong--;
- if (ref->strong == 0) {
- int ret;
-
- ret = binder_dec_node(ref->node, strong, 1);
- if (ret)
- return ret;
- }
+ ref->data.strong--;
+ if (ref->data.strong == 0)
+ binder_dec_node(ref->node, strong, 1);
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
- ref->weak--;
+ ref->data.weak--;
}
- if (ref->strong == 0 && ref->weak == 0)
- binder_delete_ref(ref);
- return 0;
+ if (ref->data.strong == 0 && ref->data.weak == 0) {
+ binder_cleanup_ref_olocked(ref);
+ return true;
+ }
+ return false;
}
-static void binder_pop_transaction(struct binder_thread *target_thread,
- struct binder_transaction *t)
+/**
+ * binder_get_node_from_ref() - get the node from the given proc/desc
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @need_strong_ref: if true, only return node if ref is strong
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, return the associated binder_node
+ *
+ * Return: a binder_node or NULL if not found or not strong when strong required
+ */
+static struct binder_node *binder_get_node_from_ref(
+ struct binder_proc *proc,
+ u32 desc, bool need_strong_ref,
+ struct binder_ref_data *rdata)
{
- if (target_thread) {
- BUG_ON(target_thread->transaction_stack != t);
- BUG_ON(target_thread->transaction_stack->from != target_thread);
- target_thread->transaction_stack =
- target_thread->transaction_stack->from_parent;
- t->from = NULL;
+ struct binder_node *node;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
+ if (!ref)
+ goto err_no_ref;
+ node = ref->node;
+ /*
+ * Take an implicit reference on the node to ensure
+ * it stays alive until the call to binder_put_node()
+ */
+ binder_inc_node_tmpref(node);
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ return node;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return NULL;
+}
+
+/**
+ * binder_free_ref() - free the binder_ref
+ * @ref: ref to free
+ *
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
+ */
+static void binder_free_ref(struct binder_ref *ref)
+{
+ if (ref->node)
+ binder_free_node(ref->node);
+ kfree(ref->death);
+ kfree(ref);
+}
+
+/**
+ * binder_update_ref_for_handle() - inc/dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @increment: true=inc reference, false=dec reference
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, increment or decrement the ref
+ * according to "increment" arg.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_update_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool increment, bool strong,
+ struct binder_ref_data *rdata)
+{
+ int ret = 0;
+ struct binder_ref *ref;
+ bool delete_ref = false;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, strong);
+ if (!ref) {
+ ret = -EINVAL;
+ goto err_no_ref;
}
- t->need_reply = 0;
+ if (increment)
+ ret = binder_inc_ref_olocked(ref, strong, NULL);
+ else
+ delete_ref = binder_dec_ref_olocked(ref, strong);
+
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ if (delete_ref)
+ binder_free_ref(ref);
+ return ret;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return ret;
+}
+
+/**
+ * binder_dec_ref_for_handle() - dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Just calls binder_update_ref_for_handle() to decrement the ref.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_dec_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool strong, struct binder_ref_data *rdata)
+{
+ return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
+}
+
+
+/**
+ * binder_inc_ref_for_node() - increment the ref for given proc/node
+ * @proc: proc containing the ref
+ * @node: target node
+ * @strong: true=strong reference, false=weak reference
+ * @target_list: worklist to use if node is incremented
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and node, increment the ref. Create the ref if it
+ * doesn't already exist
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node,
+ bool strong,
+ struct list_head *target_list,
+ struct binder_ref_data *rdata)
+{
+ struct binder_ref *ref;
+ struct binder_ref *new_ref = NULL;
+ int ret = 0;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, NULL);
+ if (!ref) {
+ binder_proc_unlock(proc);
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!new_ref)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
+ }
+ ret = binder_inc_ref_olocked(ref, strong, target_list);
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+ if (new_ref && ref != new_ref)
+ /*
+ * Another thread created the ref first so
+ * free the one we allocated
+ */
+ kfree(new_ref);
+ return ret;
+}
+
+static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ BUG_ON(!target_thread);
+ BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+}
+
+/**
+ * binder_thread_dec_tmpref() - decrement thread->tmp_ref
+ * @thread: thread to decrement
+ *
+ * A thread needs to be kept alive while being used to create or
+ * handle a transaction. binder_get_txn_from() is used to safely
+ * extract t->from from a binder_transaction and keep the thread
+ * indicated by t->from from being freed. When done with that
+ * binder_thread, this function is called to decrement the
+ * tmp_ref and free if appropriate (thread has been released
+ * and no transaction being processed by the driver)
+ */
+static void binder_thread_dec_tmpref(struct binder_thread *thread)
+{
+ /*
+ * atomic is used to protect the counter value while
+ * it cannot reach zero or thread->is_dead is false
+ */
+ binder_inner_proc_lock(thread->proc);
+ atomic_dec(&thread->tmp_ref);
+ if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+ binder_inner_proc_unlock(thread->proc);
+ binder_free_thread(thread);
+ return;
+ }
+ binder_inner_proc_unlock(thread->proc);
+}
+
+/**
+ * binder_proc_dec_tmpref() - decrement proc->tmp_ref
+ * @proc: proc to decrement
+ *
+ * A binder_proc needs to be kept alive while being used to create or
+ * handle a transaction. proc->tmp_ref is incremented when
+ * creating a new transaction or the binder_proc is currently in-use
+ * by threads that are being released. When done with the binder_proc,
+ * this function is called to decrement the counter and free the
+ * proc if appropriate (proc has been released, all threads have
+ * been released and not currenly in-use to process a transaction).
+ */
+static void binder_proc_dec_tmpref(struct binder_proc *proc)
+{
+ binder_inner_proc_lock(proc);
+ proc->tmp_ref--;
+ if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
+ !proc->tmp_ref) {
+ binder_inner_proc_unlock(proc);
+ binder_free_proc(proc);
+ return;
+ }
+ binder_inner_proc_unlock(proc);
+}
+
+/**
+ * binder_get_txn_from() - safely extract the "from" thread in transaction
+ * @t: binder transaction for t->from
+ *
+ * Atomically return the "from" thread and increment the tmp_ref
+ * count for the thread to ensure it stays alive until
+ * binder_thread_dec_tmpref() is called.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ spin_lock(&t->lock);
+ from = t->from;
+ if (from)
+ atomic_inc(&from->tmp_ref);
+ spin_unlock(&t->lock);
+ return from;
+}
+
+/**
+ * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
+ * @t: binder transaction for t->from
+ *
+ * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
+ * to guarantee that the thread cannot be released while operating on it.
+ * The caller must call binder_inner_proc_unlock() to release the inner lock
+ * as well as call binder_dec_thread_txn() to release the reference.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from_and_acq_inner(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ from = binder_get_txn_from(t);
+ if (!from)
+ return NULL;
+ binder_inner_proc_lock(from->proc);
+ if (t->from) {
+ BUG_ON(from != t->from);
+ return from;
+ }
+ binder_inner_proc_unlock(from->proc);
+ binder_thread_dec_tmpref(from);
+ return NULL;
+}
+
+static void binder_free_transaction(struct binder_transaction *t)
+{
if (t->buffer)
t->buffer->transaction = NULL;
kfree(t);
@@ -1306,30 +1774,28 @@
BUG_ON(t->flags & TF_ONE_WAY);
while (1) {
- target_thread = t->from;
+ target_thread = binder_get_txn_from_and_acq_inner(t);
if (target_thread) {
- if (target_thread->return_error != BR_OK &&
- target_thread->return_error2 == BR_OK) {
- target_thread->return_error2 =
- target_thread->return_error;
- target_thread->return_error = BR_OK;
- }
- if (target_thread->return_error == BR_OK) {
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "send failed reply for transaction %d to %d:%d\n",
- t->debug_id,
- target_thread->proc->pid,
- target_thread->pid);
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "send failed reply for transaction %d to %d:%d\n",
+ t->debug_id,
+ target_thread->proc->pid,
+ target_thread->pid);
- binder_pop_transaction(target_thread, t);
- target_thread->return_error = error_code;
+ binder_pop_transaction_ilocked(target_thread, t);
+ if (target_thread->reply_error.cmd == BR_OK) {
+ target_thread->reply_error.cmd = error_code;
+ binder_enqueue_work_ilocked(
+ &target_thread->reply_error.work,
+ &target_thread->todo);
wake_up_interruptible(&target_thread->wait);
} else {
- pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
- target_thread->proc->pid,
- target_thread->pid,
- target_thread->return_error);
+ WARN(1, "Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
+ binder_inner_proc_unlock(target_thread->proc);
+ binder_thread_dec_tmpref(target_thread);
+ binder_free_transaction(t);
return;
}
next = t->from_parent;
@@ -1338,7 +1804,7 @@
"send failed reply for transaction %d, target dead\n",
t->debug_id);
- binder_pop_transaction(target_thread, t);
+ binder_free_transaction(t);
if (next == NULL) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"reply failed, no target thread at root\n");
@@ -1547,24 +2013,26 @@
node->debug_id, (u64)node->ptr);
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
0);
+ binder_put_node(node);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
- struct binder_ref *ref;
+ struct binder_ref_data rdata;
+ int ret;
fp = to_flat_binder_object(hdr);
- ref = binder_get_ref(proc, fp->handle,
- hdr->type == BINDER_TYPE_HANDLE);
- if (ref == NULL) {
- pr_err("transaction release %d bad handle %d\n",
- debug_id, fp->handle);
+ ret = binder_dec_ref_for_handle(proc, fp->handle,
+ hdr->type == BINDER_TYPE_HANDLE, &rdata);
+
+ if (ret) {
+ pr_err("transaction release %d bad handle %d, ret = %d\n",
+ debug_id, fp->handle, ret);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
+ " ref %d desc %d\n",
+ rdata.debug_id, rdata.desc);
} break;
case BINDER_TYPE_FD: {
@@ -1603,7 +2071,8 @@
* back to kernel address space to access it
*/
parent_buffer = parent->buffer -
- proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &proc->alloc);
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1635,102 +2104,122 @@
struct binder_thread *thread)
{
struct binder_node *node;
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_ref_data rdata;
+ int ret = 0;
node = binder_get_node(proc, fp->binder);
if (!node) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
+ node = binder_new_node(proc, fp);
if (!node)
return -ENOMEM;
-
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid, (u64)fp->binder,
node->debug_id, (u64)fp->cookie,
(u64)node->cookie);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
- ref = binder_get_ref_for_node(target_proc, node);
- if (!ref)
- return -EINVAL;
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ &thread->todo, &rdata);
+ if (ret)
+ goto done;
if (fp->hdr.type == BINDER_TYPE_BINDER)
fp->hdr.type = BINDER_TYPE_HANDLE;
else
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
- fp->handle = ref->desc;
+ fp->handle = rdata.desc;
fp->cookie = 0;
- binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
- trace_binder_transaction_node_to_ref(t, node, ref);
+ trace_binder_transaction_node_to_ref(t, node, &rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx -> ref %d desc %d\n",
node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
-
- return 0;
+ rdata.debug_id, rdata.desc);
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_handle(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
{
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_node *node;
+ struct binder_ref_data src_rdata;
+ int ret = 0;
- ref = binder_get_ref(proc, fp->handle,
- fp->hdr.type == BINDER_TYPE_HANDLE);
- if (!ref) {
+ node = binder_get_node_from_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
+ if (!node) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid, thread->pid, fp->handle);
return -EINVAL;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
- if (ref->node->proc == target_proc) {
+ binder_node_lock(node);
+ if (node->proc == target_proc) {
if (fp->hdr.type == BINDER_TYPE_HANDLE)
fp->hdr.type = BINDER_TYPE_BINDER;
else
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
- 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
+ fp->binder = node->ptr;
+ fp->cookie = node->cookie;
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ binder_inc_node_nilocked(node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ trace_binder_transaction_ref_to_node(t, node, &src_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
+ src_rdata.debug_id, src_rdata.desc, node->debug_id,
+ (u64)node->ptr);
+ binder_node_unlock(node);
} else {
- struct binder_ref *new_ref;
+ int ret;
+ struct binder_ref_data dest_rdata;
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (!new_ref)
- return -EINVAL;
+ binder_node_unlock(node);
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL, &dest_rdata);
+ if (ret)
+ goto done;
fp->binder = 0;
- fp->handle = new_ref->desc;
+ fp->handle = dest_rdata.desc;
fp->cookie = 0;
- binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
- NULL);
- trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+ trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
+ &dest_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ src_rdata.debug_id, src_rdata.desc,
+ dest_rdata.debug_id, dest_rdata.desc,
+ node->debug_id);
}
- return 0;
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_fd(int fd,
@@ -1765,9 +2254,7 @@
ret = -EBADF;
goto err_fget;
}
- preempt_enable_no_resched();
ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
- preempt_disable();
if (ret < 0) {
ret = -EPERM;
goto err_security;
@@ -1823,7 +2310,8 @@
* Since the parent was already fixed up, convert it
* back to the kernel address space to access it
*/
- parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ parent_buffer = parent->buffer -
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1891,7 +2379,8 @@
return -EINVAL;
}
parent_buffer = (u8 *)(parent->buffer -
- target_proc->user_buffer_offset);
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
return 0;
@@ -1908,19 +2397,23 @@
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
- struct binder_proc *target_proc;
+ struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
- uint32_t return_error;
+ uint32_t return_error = 0;
+ uint32_t return_error_param = 0;
+ uint32_t return_error_line = 0;
struct binder_buffer_object *last_fixup_obj = NULL;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
+ int t_debug_id = atomic_inc_return(&binder_last_id);
e = binder_transaction_log_add(&binder_transaction_log);
+ e->debug_id = t_debug_id;
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
@@ -1930,29 +2423,40 @@
e->context_name = proc->context->name;
if (reply) {
+ binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
+ binder_inner_proc_unlock(proc);
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_empty_call_stack;
}
- binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
+ spin_lock(&in_reply_to->lock);
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
+ spin_unlock(&in_reply_to->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
- target_thread = in_reply_to->from;
+ binder_inner_proc_unlock(proc);
+ binder_set_nice(in_reply_to->saved_priority);
+ target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
@@ -1961,61 +2465,111 @@
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
+ binder_inner_proc_unlock(target_thread->proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_thread->proc);
} else {
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle, true);
- if (ref == NULL) {
+ /*
+ * There must already be a strong ref
+ * on this node. If so, do a strong
+ * increment on the node to ensure it
+ * stays alive until the transaction is
+ * done.
+ */
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, tr->target.handle,
+ true);
+ if (ref) {
+ binder_inc_node(ref->node, 1, 0, NULL);
+ target_node = ref->node;
+ }
+ binder_proc_unlock(proc);
+ if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
- target_node = ref->node;
} else {
+ mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
+ mutex_unlock(&context->context_mgr_node_lock);
+ return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
+ binder_inc_node(target_node, 1, 0, NULL);
+ mutex_unlock(&context->context_mgr_node_lock);
}
e->to_node = target_node->debug_id;
+ binder_node_lock(target_node);
target_proc = target_node->proc;
if (target_proc == NULL) {
+ binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
+ binder_inner_proc_lock(target_proc);
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_proc);
+ binder_node_unlock(target_node);
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPERM;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
+ binder_inner_proc_lock(proc);
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
+ spin_lock(&tmp->lock);
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
+ spin_unlock(&tmp->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_bad_call_stack;
}
while (tmp) {
- if (tmp->from && tmp->from->proc == target_proc)
- target_thread = tmp->from;
+ struct binder_thread *from;
+
+ spin_lock(&tmp->lock);
+ from = tmp->from;
+ if (from && from->proc == target_proc) {
+ atomic_inc(&from->tmp_ref);
+ target_thread = from;
+ spin_unlock(&tmp->lock);
+ break;
+ }
+ spin_unlock(&tmp->lock);
tmp = tmp->from_parent;
}
}
+ binder_inner_proc_unlock(proc);
}
if (target_thread) {
e->to_thread = target_thread->pid;
@@ -2028,22 +2582,26 @@
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
- t = kzalloc_preempt_disabled(sizeof(*t));
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
- tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
+ tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = ++binder_last_id;
- e->debug_id = t->debug_id;
+ t->debug_id = t_debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -2077,11 +2635,18 @@
trace_binder_transaction(reply, t, target_node);
- t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
- if (t->buffer == NULL) {
- return_error = BR_FAILED_REPLY;
+ if (IS_ERR(t->buffer)) {
+ /*
+ * -ESRCH indicates VMA cleared. The target is dying.
+ */
+ return_error_param = PTR_ERR(t->buffer);
+ return_error = return_error_param == -ESRCH ?
+ BR_DEAD_REPLY : BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
@@ -2089,31 +2654,34 @@
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
- if (target_node)
- binder_inc_node(target_node, 1, 0, NULL);
-
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
- if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
+ if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
+ if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
@@ -2121,6 +2689,8 @@
proc->pid, thread->pid,
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
off_end = (void *)off_start + tr->offsets_size;
@@ -2137,6 +2707,8 @@
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
@@ -2151,6 +2723,8 @@
ret = binder_translate_binder(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2162,6 +2736,8 @@
ret = binder_translate_handle(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2173,6 +2749,8 @@
if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = target_fd;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
@@ -2189,6 +2767,8 @@
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
if (!binder_validate_fixup(t->buffer, off_start,
@@ -2198,12 +2778,16 @@
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
ret = binder_translate_fd_array(fda, parent, t, thread,
in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = parent;
@@ -2219,20 +2803,24 @@
binder_user_error("%d:%d got transaction with too large buffer\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
- if (copy_from_user_preempt_disabled(
- sg_bufp,
- (const void __user *)(uintptr_t)
- bp->buffer, bp->length)) {
+ if (copy_from_user(sg_bufp,
+ (const void __user *)(uintptr_t)
+ bp->buffer, bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
+ return_error_param = -EFAULT;
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)sg_bufp +
- target_proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc);
sg_bufp += ALIGN(bp->length, sizeof(u64));
ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2241,6 +2829,8 @@
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = bp;
@@ -2250,34 +2840,89 @@
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_object_type;
}
}
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ binder_enqueue_work(proc, tcomplete, &thread->todo);
+ t->work.type = BINDER_WORK_TRANSACTION;
+
if (reply) {
+ binder_inner_proc_lock(target_proc);
+ if (target_thread->is_dead) {
+ binder_inner_proc_unlock(target_proc);
+ goto err_dead_proc_or_thread;
+ }
BUG_ON(t->buffer->async_transaction != 0);
- binder_pop_transaction(target_thread, in_reply_to);
+ binder_pop_transaction_ilocked(target_thread, in_reply_to);
+ binder_enqueue_work_ilocked(&t->work, target_list);
+ binder_inner_proc_unlock(target_proc);
+ binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
+ binder_inner_proc_lock(proc);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(proc);
+ binder_inner_proc_lock(target_proc);
+ if (target_proc->is_dead ||
+ (target_thread && target_thread->is_dead)) {
+ binder_inner_proc_unlock(target_proc);
+ binder_inner_proc_lock(proc);
+ binder_pop_transaction_ilocked(thread, t);
+ binder_inner_proc_unlock(proc);
+ goto err_dead_proc_or_thread;
+ }
+ binder_enqueue_work_ilocked(&t->work, target_list);
+ binder_inner_proc_unlock(target_proc);
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
+ binder_node_lock(target_node);
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
+ /*
+ * Test/set of has_async_transaction
+ * must be atomic with enqueue on
+ * async_todo
+ */
+ binder_inner_proc_lock(target_proc);
+ if (target_proc->is_dead ||
+ (target_thread && target_thread->is_dead)) {
+ binder_inner_proc_unlock(target_proc);
+ binder_node_unlock(target_node);
+ goto err_dead_proc_or_thread;
+ }
+ binder_enqueue_work_ilocked(&t->work, target_list);
+ binder_inner_proc_unlock(target_proc);
+ binder_node_unlock(target_node);
}
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait)
- wake_up_interruptible(target_wait);
+ if (target_wait) {
+ if (reply || !(tr->flags & TF_ONE_WAY))
+ wake_up_interruptible_sync(target_wait);
+ else
+ wake_up_interruptible(target_wait);
+ }
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ binder_proc_dec_tmpref(target_proc);
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
return;
+err_dead_proc_or_thread:
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
@@ -2285,8 +2930,9 @@
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ target_node = NULL;
t->buffer->transaction = NULL;
- binder_free_buf(target_proc, t->buffer);
+ binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -2299,24 +2945,49 @@
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ if (target_proc)
+ binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node(target_node, 1, 0);
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %lld-%lld\n",
- proc->pid, thread->pid, return_error,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
+ proc->pid, thread->pid, return_error, return_error_param,
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ return_error_line);
{
struct binder_transaction_log_entry *fe;
+ e->return_error = return_error;
+ e->return_error_param = return_error_param;
+ e->return_error_line = return_error_line;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
+ WRITE_ONCE(fe->debug_id_done, t_debug_id);
}
- BUG_ON(thread->return_error != BR_OK);
+ BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
- thread->return_error = BR_TRANSACTION_COMPLETE;
+ thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
binder_send_failed_reply(in_reply_to, return_error);
- } else
- thread->return_error = return_error;
+ } else {
+ thread->return_error.cmd = return_error;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ }
}
static int binder_thread_write(struct binder_proc *proc,
@@ -2330,15 +3001,17 @@
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
- while (ptr < end && thread->return_error == BR_OK) {
- if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
+ while (ptr < end && thread->return_error.cmd == BR_OK) {
+ int ret;
+
+ if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
- binder_stats.bc[_IOC_NR(cmd)]++;
- proc->stats.bc[_IOC_NR(cmd)]++;
- thread->stats.bc[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_INCREFS:
@@ -2346,53 +3019,61 @@
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
- struct binder_ref *ref;
const char *debug_string;
+ bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
+ bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
+ struct binder_ref_data rdata;
- if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
+ if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
+
ptr += sizeof(uint32_t);
- if (target == 0 && context->binder_context_mgr_node &&
- (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
- ref = binder_get_ref_for_node(proc,
- context->binder_context_mgr_node);
- if (ref->desc != target) {
- binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
- proc->pid, thread->pid,
- ref->desc);
- }
- } else
- ref = binder_get_ref(proc, target,
- cmd == BC_ACQUIRE ||
- cmd == BC_RELEASE);
- if (ref == NULL) {
- binder_user_error("%d:%d refcount change on invalid ref %d\n",
- proc->pid, thread->pid, target);
- break;
+ ret = -1;
+ if (increment && !target) {
+ struct binder_node *ctx_mgr_node;
+ mutex_lock(&context->context_mgr_node_lock);
+ ctx_mgr_node = context->binder_context_mgr_node;
+ if (ctx_mgr_node)
+ ret = binder_inc_ref_for_node(
+ proc, ctx_mgr_node,
+ strong, NULL, &rdata);
+ mutex_unlock(&context->context_mgr_node_lock);
+ }
+ if (ret)
+ ret = binder_update_ref_for_handle(
+ proc, target, increment, strong,
+ &rdata);
+ if (!ret && rdata.desc != target) {
+ binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
+ proc->pid, thread->pid,
+ target, rdata.desc);
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
- binder_inc_ref(ref, 0, NULL);
break;
case BC_ACQUIRE:
debug_string = "Acquire";
- binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
- binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
- binder_dec_ref(ref, 0);
+ break;
+ }
+ if (ret) {
+ binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
+ proc->pid, thread->pid, debug_string,
+ strong, target, ret);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
- proc->pid, thread->pid, debug_string, ref->debug_id,
- ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ "%d:%d %s ref %d desc %d s %d w %d\n",
+ proc->pid, thread->pid, debug_string,
+ rdata.debug_id, rdata.desc, rdata.strong,
+ rdata.weak);
break;
}
case BC_INCREFS_DONE:
@@ -2400,11 +3081,12 @@
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
+ bool free_node;
- if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
+ if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
@@ -2424,13 +3106,17 @@
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
+ binder_put_node(node);
break;
}
+ binder_node_inner_lock(node);
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_strong_ref = 0;
@@ -2439,16 +3125,23 @@
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_weak_ref = 0;
}
- binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ free_node = binder_dec_node_nilocked(node,
+ cmd == BC_ACQUIRE_DONE, 0);
+ WARN_ON(free_node);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s node %d ls %d lw %d\n",
+ "%d:%d %s node %d ls %d lw %d tr %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ node->debug_id, node->local_strong_refs,
+ node->local_weak_refs, node->tmp_refs);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
case BC_ATTEMPT_ACQUIRE:
@@ -2462,11 +3155,12 @@
binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
- if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
+ if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- buffer = binder_buffer_lookup(proc, data_ptr);
+ buffer = binder_alloc_prepare_to_free(&proc->alloc,
+ data_ptr);
if (buffer == NULL) {
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
proc->pid, thread->pid, (u64)data_ptr);
@@ -2488,15 +3182,25 @@
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
- BUG_ON(!buffer->target_node->has_async_transaction);
- if (list_empty(&buffer->target_node->async_todo))
- buffer->target_node->has_async_transaction = 0;
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w)
+ buf_node->has_async_transaction = 0;
else
- list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ binder_enqueue_work_ilocked(
+ w, &thread->todo);
+ binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
- binder_free_buf(proc, buffer);
+ binder_alloc_free_buf(&proc->alloc, buffer);
break;
}
@@ -2504,8 +3208,7 @@
case BC_REPLY_SG: {
struct binder_transaction_data_sg tr;
- if (copy_from_user_preempt_disabled(&tr, ptr,
- sizeof(tr)))
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr.transaction_data,
@@ -2516,7 +3219,7 @@
case BC_REPLY: {
struct binder_transaction_data tr;
- if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr,
@@ -2528,6 +3231,7 @@
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
+ binder_inner_proc_lock(proc);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
@@ -2541,6 +3245,7 @@
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
@@ -2565,15 +3270,37 @@
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
- struct binder_ref_death *death;
+ struct binder_ref_death *death = NULL;
- if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
+ if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target, false);
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ /*
+ * Allocate memory for death notification
+ * before taking lock
+ */
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ WARN_ON(thread->return_error.cmd !=
+ BR_OK);
+ thread->return_error.cmd = BR_ERROR;
+ binder_enqueue_work(
+ thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ binder_debug(
+ BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ }
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
@@ -2581,6 +3308,8 @@
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
@@ -2590,21 +3319,18 @@
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- (u64)cookie, ref->debug_id, ref->desc,
- ref->strong, ref->weak, ref->node->debug_id);
+ (u64)cookie, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak, ref->node->debug_id);
+ binder_node_lock(ref->node);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
- break;
- }
- death = kzalloc_preempt_disabled(sizeof(*death));
- if (death == NULL) {
- thread->return_error = BR_ERROR;
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
- proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
@@ -2613,17 +3339,28 @@
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&ref->death->work.entry, &thread->todo);
- } else {
- list_add_tail(&ref->death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work(
+ proc,
+ &ref->death->work,
+ &thread->todo);
+ else {
+ binder_enqueue_work(
+ proc,
+ &ref->death->work,
+ &proc->todo);
+ wake_up_interruptible(
+ &proc->wait);
}
}
} else {
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
death = ref->death;
@@ -2632,33 +3369,52 @@
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
ref->death = NULL;
+ binder_inner_proc_lock(proc);
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ wake_up_interruptible(
+ &proc->wait);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
+ binder_inner_proc_unlock(proc);
}
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
- if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
+
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(cookie);
- list_for_each_entry(w, &proc->delivered_death, entry) {
- struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_death,
+ entry) {
+ struct binder_ref_death *tmp_death =
+ container_of(w,
+ struct binder_ref_death,
+ work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
@@ -2672,21 +3428,26 @@
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
break;
}
-
- list_del_init(&death->work.entry);
+ binder_dequeue_work_ilocked(&death->work);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work, &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
wake_up_interruptible(&proc->wait);
}
}
- }
- break;
+ binder_inner_proc_unlock(proc);
+ } break;
default:
pr_err("%d:%d unknown command %d\n",
@@ -2703,23 +3464,54 @@
{
trace_binder_return(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
- binder_stats.br[_IOC_NR(cmd)]++;
- proc->stats.br[_IOC_NR(cmd)]++;
- thread->stats.br[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
}
}
static int binder_has_proc_work(struct binder_proc *proc,
struct binder_thread *thread)
{
- return !list_empty(&proc->todo) ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ return !binder_worklist_empty(proc, &proc->todo) ||
+ thread->looper_need_return;
}
static int binder_has_thread_work(struct binder_thread *thread)
{
- return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ return !binder_worklist_empty(thread->proc, &thread->todo) ||
+ thread->looper_need_return;
+}
+
+static int binder_put_node_cmd(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user **ptrp,
+ binder_uintptr_t node_ptr,
+ binder_uintptr_t node_cookie,
+ int node_debug_id,
+ uint32_t cmd, const char *cmd_name)
+{
+ void __user *ptr = *ptrp;
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name, node_debug_id,
+ (u64)node_ptr, (u64)node_cookie);
+
+ *ptrp = ptr;
+ return 0;
}
static int binder_thread_read(struct binder_proc *proc,
@@ -2735,43 +3527,24 @@
int wait_for_proc_work;
if (*consumed == 0) {
- if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
+ if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
retry:
+ binder_inner_proc_lock(proc);
wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo);
-
- if (thread->return_error != BR_OK && ptr < end) {
- if (thread->return_error2 != BR_OK) {
- if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error2);
- if (ptr == end)
- goto done;
- thread->return_error2 = BR_OK;
- }
- if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error);
- thread->return_error = BR_OK;
- goto done;
- }
-
-
- thread->looper |= BINDER_LOOPER_STATE_WAITING;
+ binder_worklist_empty_ilocked(&thread->todo);
if (wait_for_proc_work)
proc->ready_threads++;
+ binder_inner_proc_unlock(proc);
- binder_unlock(__func__);
+ thread->looper |= BINDER_LOOPER_STATE_WAITING;
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
- !list_empty(&thread->todo));
+ !binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
@@ -2794,10 +3567,10 @@
ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
- binder_lock(__func__);
-
+ binder_inner_proc_lock(proc);
if (wait_for_proc_work)
proc->ready_threads--;
+ binder_inner_proc_unlock(proc);
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
@@ -2806,33 +3579,54 @@
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
- struct binder_work *w;
+ struct binder_work *w = NULL;
+ struct list_head *list = NULL;
struct binder_transaction *t = NULL;
+ struct binder_thread *t_from;
- if (!list_empty(&thread->todo)) {
- w = list_first_entry(&thread->todo, struct binder_work,
- entry);
- } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
- w = list_first_entry(&proc->todo, struct binder_work,
- entry);
- } else {
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&thread->todo))
+ list = &thread->todo;
+ else if (!binder_worklist_empty_ilocked(&proc->todo) &&
+ wait_for_proc_work)
+ list = &proc->todo;
+ else {
+ binder_inner_proc_unlock(proc);
+
/* no data added */
- if (ptr - buffer == 4 &&
- !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+ if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
- if (end - ptr < sizeof(tr) + 4)
+ if (end - ptr < sizeof(tr) + 4) {
+ binder_inner_proc_unlock(proc);
break;
+ }
+ w = binder_dequeue_work_head_ilocked(list);
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
+ binder_inner_proc_unlock(proc);
t = container_of(w, struct binder_transaction, work);
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ WARN_ON(e->cmd == BR_OK);
+ binder_inner_proc_unlock(proc);
+ if (put_user(e->cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ cmd = e->cmd;
+ e->cmd = BR_OK;
+ ptr += sizeof(uint32_t);
+ binder_stat_br(proc, thread, cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE;
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
@@ -2840,112 +3634,134 @@
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"%d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
-
- list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
- uint32_t cmd = BR_NOOP;
- const char *cmd_name;
- int strong = node->internal_strong_refs || node->local_strong_refs;
- int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+ int strong, weak;
+ binder_uintptr_t node_ptr = node->ptr;
+ binder_uintptr_t node_cookie = node->cookie;
+ int node_debug_id = node->debug_id;
+ int has_weak_ref;
+ int has_strong_ref;
+ void __user *orig_ptr = ptr;
- if (weak && !node->has_weak_ref) {
- cmd = BR_INCREFS;
- cmd_name = "BR_INCREFS";
+ BUG_ON(proc != node->proc);
+ strong = node->internal_strong_refs ||
+ node->local_strong_refs;
+ weak = !hlist_empty(&node->refs) ||
+ node->local_weak_refs ||
+ node->tmp_refs || strong;
+ has_strong_ref = node->has_strong_ref;
+ has_weak_ref = node->has_weak_ref;
+
+ if (weak && !has_weak_ref) {
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
- } else if (strong && !node->has_strong_ref) {
- cmd = BR_ACQUIRE;
- cmd_name = "BR_ACQUIRE";
+ }
+ if (strong && !has_strong_ref) {
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
- } else if (!strong && node->has_strong_ref) {
- cmd = BR_RELEASE;
- cmd_name = "BR_RELEASE";
+ }
+ if (!strong && has_strong_ref)
node->has_strong_ref = 0;
- } else if (!weak && node->has_weak_ref) {
- cmd = BR_DECREFS;
- cmd_name = "BR_DECREFS";
+ if (!weak && has_weak_ref)
node->has_weak_ref = 0;
- }
- if (cmd != BR_NOOP) {
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *)
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *)
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx deleted\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ /*
+ * Acquire the node lock before freeing the
+ * node to serialize with other threads that
+ * may have been holding the node lock while
+ * decrementing this node (avoids race where
+ * this thread frees while the other thread
+ * is unlocking the node after the final
+ * decrement)
+ */
+ binder_node_unlock(node);
+ binder_free_node(node);
+ } else
+ binder_inner_proc_unlock(proc);
- binder_stat_br(proc, thread, cmd);
- binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%016llx c%016llx\n",
- proc->pid, thread->pid, cmd_name,
- node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
- } else {
- list_del_init(&w->entry);
- if (!weak && !strong) {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx deleted\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- rb_erase(&node->rb_node, &proc->nodes);
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx state unchanged\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- }
- }
+ if (weak && !has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_INCREFS, "BR_INCREFS");
+ if (!ret && strong && !has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_ACQUIRE, "BR_ACQUIRE");
+ if (!ret && !strong && has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_RELEASE, "BR_RELEASE");
+ if (!ret && !weak && has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_DECREFS, "BR_DECREFS");
+ if (orig_ptr == ptr)
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ if (ret)
+ return ret;
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
+ binder_uintptr_t cookie;
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- binder_stat_br(proc, thread, cmd);
+ cookie = death->cookie;
+
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- (u64)death->cookie);
-
+ (u64)cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
- list_del(&w->entry);
+ binder_inner_proc_unlock(proc);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
- } else
- list_move(&w->entry, &proc->delivered_death);
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->delivered_death);
+ binder_inner_proc_unlock(proc);
+ }
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie,
+ (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, cmd);
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
@@ -2977,8 +3793,9 @@
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
- if (t->from) {
- struct task_struct *sender = t->from->proc->tsk;
+ t_from = binder_get_txn_from(t);
+ if (t_from) {
+ struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
@@ -2988,18 +3805,24 @@
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)(
- (uintptr_t)t->buffer->data +
- proc->user_buffer_offset);
+ tr.data.ptr.buffer = (binder_uintptr_t)
+ ((uintptr_t)t->buffer->data +
+ binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr)) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(uint32_t);
- if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
+ if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(tr);
trace_binder_transaction_received(t);
@@ -3009,21 +3832,22 @@
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
- t->debug_id, t->from ? t->from->proc->pid : 0,
- t->from ? t->from->pid : 0, cmd,
+ t->debug_id, t_from ? t_from->proc->pid : 0,
+ t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
- list_del(&t->work.entry);
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(thread->proc);
} else {
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
break;
}
@@ -3031,29 +3855,35 @@
done:
*consumed = ptr - buffer;
+ binder_inner_proc_lock(proc);
if (proc->requested_threads + proc->ready_threads == 0 &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
proc->requested_threads++;
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
- if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *) buffer))
+ if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
- }
+ } else
+ binder_inner_proc_unlock(proc);
return 0;
}
-static void binder_release_work(struct list_head *list)
+static void binder_release_work(struct binder_proc *proc,
+ struct list_head *list)
{
struct binder_work *w;
- while (!list_empty(list)) {
- w = list_first_entry(list, struct binder_work, entry);
- list_del_init(&w->entry);
+ while (1) {
+ w = binder_dequeue_work_head(proc, list);
+ if (!w)
+ return;
+
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -3066,11 +3896,17 @@
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered TRANSACTION_ERROR: %u\n",
+ e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
@@ -3097,7 +3933,8 @@
}
-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+static struct binder_thread *binder_get_thread_ilocked(
+ struct binder_proc *proc, struct binder_thread *new_thread)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
@@ -3112,38 +3949,99 @@
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
- break;
+ return thread;
}
- if (*p == NULL) {
- thread = kzalloc_preempt_disabled(sizeof(*thread));
- if (thread == NULL)
+ if (!new_thread)
+ return NULL;
+ thread = new_thread;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ atomic_set(&thread->tmp_ref, 0);
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper_need_return = true;
+ thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->return_error.cmd = BR_OK;
+ thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->reply_error.cmd = BR_OK;
+
+ return thread;
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread;
+ struct binder_thread *new_thread;
+
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, NULL);
+ binder_inner_proc_unlock(proc);
+ if (!thread) {
+ new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (new_thread == NULL)
return NULL;
- binder_stats_created(BINDER_STAT_THREAD);
- thread->proc = proc;
- thread->pid = current->pid;
- init_waitqueue_head(&thread->wait);
- INIT_LIST_HEAD(&thread->todo);
- rb_link_node(&thread->rb_node, parent, p);
- rb_insert_color(&thread->rb_node, &proc->threads);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
- thread->return_error = BR_OK;
- thread->return_error2 = BR_OK;
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, new_thread);
+ binder_inner_proc_unlock(proc);
+ if (thread != new_thread)
+ kfree(new_thread);
}
return thread;
}
-static int binder_free_thread(struct binder_proc *proc,
- struct binder_thread *thread)
+static void binder_free_proc(struct binder_proc *proc)
+{
+ BUG_ON(!list_empty(&proc->todo));
+ BUG_ON(!list_empty(&proc->delivered_death));
+ binder_alloc_deferred_release(&proc->alloc);
+ put_task_struct(proc->tsk);
+ binder_stats_deleted(BINDER_STAT_PROC);
+ kfree(proc);
+}
+
+static void binder_free_thread(struct binder_thread *thread)
+{
+ BUG_ON(!list_empty(&thread->todo));
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_proc_dec_tmpref(thread->proc);
+ kfree(thread);
+}
+
+static int binder_thread_release(struct binder_proc *proc,
+ struct binder_thread *thread)
{
struct binder_transaction *t;
struct binder_transaction *send_reply = NULL;
int active_transactions = 0;
+ struct binder_transaction *last_t = NULL;
+ binder_inner_proc_lock(thread->proc);
+ /*
+ * take a ref on the proc so it survives
+ * after we remove this thread from proc->threads.
+ * The corresponding dec is when we actually
+ * free the thread in binder_free_thread()
+ */
+ proc->tmp_ref++;
+ /*
+ * take a ref on this thread to ensure it
+ * survives while we are releasing it
+ */
+ atomic_inc(&thread->tmp_ref);
rb_erase(&thread->rb_node, &proc->threads);
t = thread->transaction_stack;
- if (t && t->to_thread == thread)
- send_reply = t;
+ if (t) {
+ spin_lock(&t->lock);
+ if (t->to_thread == thread)
+ send_reply = t;
+ }
+ thread->is_dead = true;
+
while (t) {
+ last_t = t;
active_transactions++;
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"release %d:%d transaction %d %s, still active\n",
@@ -3164,12 +4062,16 @@
t = t->from_parent;
} else
BUG();
+ spin_unlock(&last_t->lock);
+ if (t)
+ spin_lock(&t->lock);
}
+ binder_inner_proc_unlock(thread->proc);
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
- binder_release_work(&thread->todo);
- kfree(thread);
- binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_release_work(proc, &thread->todo);
+ binder_thread_dec_tmpref(thread);
return active_transactions;
}
@@ -3180,14 +4082,12 @@
struct binder_thread *thread = NULL;
int wait_for_proc_work;
- binder_lock(__func__);
-
thread = binder_get_thread(proc);
+ binder_inner_proc_lock(thread->proc);
wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo) && thread->return_error == BR_OK;
-
- binder_unlock(__func__);
+ binder_worklist_empty_ilocked(&thread->todo);
+ binder_inner_proc_unlock(thread->proc);
if (wait_for_proc_work) {
if (binder_has_proc_work(proc, thread))
@@ -3219,7 +4119,7 @@
ret = -EINVAL;
goto out;
}
- if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -3237,7 +4137,7 @@
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
- if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -3248,10 +4148,10 @@
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
- if (!list_empty(&proc->todo))
+ if (!binder_worklist_empty(proc, &proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
- if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -3261,7 +4161,7 @@
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
- if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -3274,9 +4174,10 @@
int ret = 0;
struct binder_proc *proc = filp->private_data;
struct binder_context *context = proc->context;
-
+ struct binder_node *new_node;
kuid_t curr_euid = current_euid();
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
@@ -3297,16 +4198,21 @@
} else {
context->binder_context_mgr_uid = curr_euid;
}
- context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (!context->binder_context_mgr_node) {
+ new_node = binder_new_node(proc, NULL);
+ if (!new_node) {
ret = -ENOMEM;
goto out;
}
- context->binder_context_mgr_node->local_weak_refs++;
- context->binder_context_mgr_node->local_strong_refs++;
- context->binder_context_mgr_node->has_strong_ref = 1;
- context->binder_context_mgr_node->has_weak_ref = 1;
+ binder_node_lock(new_node);
+ new_node->local_weak_refs++;
+ new_node->local_strong_refs++;
+ new_node->has_strong_ref = 1;
+ new_node->has_weak_ref = 1;
+ context->binder_context_mgr_node = new_node;
+ binder_node_unlock(new_node);
+ binder_put_node(new_node);
out:
+ mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
@@ -3321,17 +4227,12 @@
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
- if (unlikely(current->mm != proc->vma_vm_mm)) {
- pr_err("current mm mismatch proc mm\n");
- return -EINVAL;
- }
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
- binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
@@ -3344,12 +4245,19 @@
if (ret)
goto err;
break;
- case BINDER_SET_MAX_THREADS:
- if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ case BINDER_SET_MAX_THREADS: {
+ int max_threads;
+
+ if (copy_from_user(&max_threads, ubuf,
+ sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
+ binder_inner_proc_lock(proc);
+ proc->max_threads = max_threads;
+ binder_inner_proc_unlock(proc);
break;
+ }
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
@@ -3358,7 +4266,7 @@
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
- binder_free_thread(proc, thread);
+ binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
@@ -3368,8 +4276,9 @@
ret = -EINVAL;
goto err;
}
- if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
- ret = -EINVAL;
+ if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
+ &ver->protocol_version)) {
+ ret = -EINVAL;
goto err;
}
break;
@@ -3381,8 +4290,7 @@
ret = 0;
err:
if (thread)
- thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
- binder_unlock(__func__);
+ thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3411,8 +4319,7 @@
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
- proc->vma = NULL;
- proc->vma_vm_mm = NULL;
+ binder_alloc_vma_close(&proc->alloc);
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
@@ -3430,11 +4337,8 @@
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
-
- struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
- struct binder_buffer *buffer;
if (proc->tsk != current->group_leader)
return -EINVAL;
@@ -3443,8 +4347,8 @@
vma->vm_end = vma->vm_start + SZ_4M;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
- proc->pid, vma->vm_start, vma->vm_end,
+ "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ __func__, proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
@@ -3454,77 +4358,15 @@
goto err_bad_arg;
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
- mutex_lock(&binder_mmap_lock);
- if (proc->buffer) {
- ret = -EBUSY;
- failure_string = "already mapped";
- goto err_already_mapped;
- }
-
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
- if (area == NULL) {
- ret = -ENOMEM;
- failure_string = "get_vm_area";
- goto err_get_vm_area_failed;
- }
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
- mutex_unlock(&binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
- if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
- vma->vm_start += PAGE_SIZE;
- }
- }
-#endif
- proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
- if (proc->pages == NULL) {
- ret = -ENOMEM;
- failure_string = "alloc page array";
- goto err_alloc_pages_failed;
- }
- proc->buffer_size = vma->vm_end - vma->vm_start;
-
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- /* binder_update_page_range assumes preemption is disabled */
- preempt_disable();
- ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
- preempt_enable_no_resched();
- if (ret) {
- ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
- }
- buffer = proc->buffer;
- INIT_LIST_HEAD(&proc->buffers);
- list_add(&buffer->entry, &proc->buffers);
- buffer->free = 1;
- binder_insert_free_buffer(proc, buffer);
- proc->free_async_space = proc->buffer_size / 2;
- barrier();
+ ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+ if (ret)
+ return ret;
proc->files = get_files_struct(current);
- proc->vma = vma;
- proc->vma_vm_mm = vma->vm_mm;
-
- /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
- proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
-err_alloc_small_buf_failed:
- kfree(proc->pages);
- proc->pages = NULL;
-err_alloc_pages_failed:
- mutex_lock(&binder_mmap_lock);
- vfree(proc->buffer);
- proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
- mutex_unlock(&binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -3542,25 +4384,26 @@
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+ spin_lock_init(&proc->inner_lock);
+ spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
- proc->vma_vm_mm = current->group_leader->mm;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
miscdev);
proc->context = &binder_dev->context;
-
- binder_lock(__func__);
+ binder_alloc_init(&proc->alloc);
binder_stats_created(BINDER_STAT_PROC);
- hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
filp->private_data = proc;
- binder_unlock(__func__);
+ mutex_lock(&binder_procs_lock);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ mutex_unlock(&binder_procs_lock);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
@@ -3596,15 +4439,17 @@
struct rb_node *n;
int wake_count = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->looper_need_return = true;
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
wake_up_interruptible(&thread->wait);
wake_count++;
}
}
+ binder_inner_proc_unlock(proc);
wake_up_interruptible_all(&proc->wait);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
@@ -3626,13 +4471,21 @@
{
struct binder_ref *ref;
int death = 0;
+ struct binder_proc *proc = node->proc;
- list_del_init(&node->work.entry);
- binder_release_work(&node->async_todo);
+ binder_release_work(proc, &node->async_todo);
- if (hlist_empty(&node->refs)) {
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ binder_node_lock(node);
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(&node->work);
+ /*
+ * The caller must have taken a temporary ref on the node,
+ */
+ BUG_ON(!node->tmp_refs);
+ if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ binder_free_node(node);
return refs;
}
@@ -3640,45 +4493,58 @@
node->proc = NULL;
node->local_strong_refs = 0;
node->local_weak_refs = 0;
+ binder_inner_proc_unlock(proc);
+
+ spin_lock(&binder_dead_nodes_lock);
hlist_add_head(&node->dead_node, &binder_dead_nodes);
+ spin_unlock(&binder_dead_nodes_lock);
hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++;
-
- if (!ref->death)
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * death notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->death) {
+ binder_inner_proc_unlock(ref->proc);
continue;
+ }
death++;
- if (list_empty(&ref->death->work.entry)) {
- ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- list_add_tail(&ref->death->work.entry,
- &ref->proc->todo);
- wake_up_interruptible(&ref->proc->wait);
- } else
- BUG();
+ BUG_ON(!list_empty(&ref->death->work.entry));
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ binder_enqueue_work_ilocked(&ref->death->work,
+ &ref->proc->todo);
+ wake_up_interruptible(&ref->proc->wait);
+ binder_inner_proc_unlock(ref->proc);
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
+ binder_node_unlock(node);
+ binder_put_node(node);
return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
- struct binder_transaction *t;
struct binder_context *context = proc->context;
struct rb_node *n;
- int threads, nodes, incoming_refs, outgoing_refs, buffers,
- active_transactions, page_count;
+ int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->vma);
BUG_ON(proc->files);
+ mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
+ mutex_unlock(&binder_procs_lock);
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node &&
context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -3686,15 +4552,25 @@
__func__, proc->pid);
context->binder_context_mgr_node = NULL;
}
+ mutex_unlock(&context->context_mgr_node_lock);
+ binder_inner_proc_lock(proc);
+ /*
+ * Make sure proc stays alive after we
+ * remove all the threads
+ */
+ proc->tmp_ref++;
+ proc->is_dead = true;
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
struct binder_thread *thread;
thread = rb_entry(n, struct binder_thread, rb_node);
+ binder_inner_proc_unlock(proc);
threads++;
- active_transactions += binder_free_thread(proc, thread);
+ active_transactions += binder_thread_release(proc, thread);
+ binder_inner_proc_lock(proc);
}
nodes = 0;
@@ -3704,73 +4580,42 @@
node = rb_entry(n, struct binder_node, rb_node);
nodes++;
+ /*
+ * take a temporary ref on the node before
+ * calling binder_node_release() which will either
+ * kfree() the node or call binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
incoming_refs = binder_node_release(node, incoming_refs);
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);
outgoing_refs = 0;
+ binder_proc_lock(proc);
while ((n = rb_first(&proc->refs_by_desc))) {
struct binder_ref *ref;
ref = rb_entry(n, struct binder_ref, rb_node_desc);
outgoing_refs++;
- binder_delete_ref(ref);
+ binder_cleanup_ref_olocked(ref);
+ binder_proc_unlock(proc);
+ binder_free_ref(ref);
+ binder_proc_lock(proc);
}
+ binder_proc_unlock(proc);
- binder_release_work(&proc->todo);
- binder_release_work(&proc->delivered_death);
-
- buffers = 0;
- while ((n = rb_first(&proc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
- buffer = rb_entry(n, struct binder_buffer, rb_node);
-
- t = buffer->transaction;
- if (t) {
- t->buffer = NULL;
- buffer->transaction = NULL;
- pr_err("release proc %d, transaction %d, not freed\n",
- proc->pid, t->debug_id);
- /*BUG();*/
- }
-
- binder_free_buf(proc, buffer);
- buffers++;
- }
-
- binder_stats_deleted(BINDER_STAT_PROC);
-
- page_count = 0;
- if (proc->pages) {
- int i;
-
- for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
-
- if (!proc->pages[i])
- continue;
-
- page_addr = proc->buffer + i * PAGE_SIZE;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %p not freed\n",
- __func__, proc->pid, i, page_addr);
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(proc->pages[i]);
- page_count++;
- }
- kfree(proc->pages);
- vfree(proc->buffer);
- }
-
- put_task_struct(proc->tsk);
+ binder_release_work(proc, &proc->todo);
+ binder_release_work(proc, &proc->delivered_death);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+ "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
__func__, proc->pid, threads, nodes, incoming_refs,
- outgoing_refs, active_transactions, buffers, page_count);
+ outgoing_refs, active_transactions);
- kfree(proc);
+ binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
@@ -3781,12 +4626,7 @@
int defer;
do {
- trace_binder_lock(__func__);
- mutex_lock(&binder_main_lock);
- trace_binder_locked(__func__);
-
mutex_lock(&binder_deferred_lock);
- preempt_disable();
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
struct binder_proc, deferred_work_node);
@@ -3812,9 +4652,6 @@
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
- trace_binder_unlock(__func__);
- mutex_unlock(&binder_main_lock);
- preempt_enable_no_resched();
if (files)
put_files_struct(files);
} while (proc);
@@ -3834,41 +4671,52 @@
mutex_unlock(&binder_deferred_lock);
}
-static void print_binder_transaction(struct seq_file *m, const char *prefix,
- struct binder_transaction *t)
+static void print_binder_transaction_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ struct binder_transaction *t)
{
+ struct binder_proc *to_proc;
+ struct binder_buffer *buffer = t->buffer;
+
+ WARN_ON(!spin_is_locked(&proc->inner_lock));
+ spin_lock(&t->lock);
+ to_proc = t->to_proc;
seq_printf(m,
"%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
- t->to_proc ? t->to_proc->pid : 0,
+ to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
t->code, t->flags, t->priority, t->need_reply);
- if (t->buffer == NULL) {
+ spin_unlock(&t->lock);
+
+ if (proc != to_proc) {
+ /*
+ * Can only safely deref buffer if we are holding the
+ * correct proc inner lock for this node
+ */
+ seq_puts(m, "\n");
+ return;
+ }
+
+ if (buffer == NULL) {
seq_puts(m, " buffer free\n");
return;
}
- if (t->buffer->target_node)
- seq_printf(m, " node %d",
- t->buffer->target_node->debug_id);
+ if (buffer->target_node)
+ seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
- t->buffer->data_size, t->buffer->offsets_size,
- t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
- struct binder_buffer *buffer)
-{
- seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
- buffer->transaction ? "active" : "delivered");
+ buffer->data);
}
-static void print_binder_work(struct seq_file *m, const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+static void print_binder_work_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -3876,8 +4724,16 @@
switch (w->type) {
case BINDER_WORK_TRANSACTION:
t = container_of(w, struct binder_transaction, work);
- print_binder_transaction(m, transaction_prefix, t);
+ print_binder_transaction_ilocked(
+ m, proc, transaction_prefix, t);
break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ seq_printf(m, "%stransaction error: %u\n",
+ prefix, e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE:
seq_printf(m, "%stransaction complete\n", prefix);
break;
@@ -3902,70 +4758,89 @@
}
}
-static void print_binder_thread(struct seq_file *m,
- struct binder_thread *thread,
- int print_always)
+static void print_binder_thread_ilocked(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
{
struct binder_transaction *t;
struct binder_work *w;
size_t start_pos = m->count;
size_t header_pos;
- seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
+ seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
+ thread->pid, thread->looper,
+ thread->looper_need_return,
+ atomic_read(&thread->tmp_ref));
header_pos = m->count;
t = thread->transaction_stack;
while (t) {
if (t->from == thread) {
- print_binder_transaction(m,
- " outgoing transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " outgoing transaction", t);
t = t->from_parent;
} else if (t->to_thread == thread) {
- print_binder_transaction(m,
+ print_binder_transaction_ilocked(m, thread->proc,
" incoming transaction", t);
t = t->to_parent;
} else {
- print_binder_transaction(m, " bad transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " bad transaction", t);
t = NULL;
}
}
list_for_each_entry(w, &thread->todo, entry) {
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, thread->proc, " ",
+ " pending transaction", w);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
-static void print_binder_node(struct seq_file *m, struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+ struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
int count;
+ WARN_ON(!spin_is_locked(&node->lock));
+ if (node->proc)
+ WARN_ON(!spin_is_locked(&node->proc->inner_lock));
+
count = 0;
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
- node->internal_strong_refs, count);
+ node->internal_strong_refs, count, node->tmp_refs);
if (count) {
seq_puts(m, " proc");
hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
- list_for_each_entry(w, &node->async_todo, entry)
- print_binder_work(m, " ",
- " pending async transaction", w);
+ if (node->proc) {
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work_ilocked(m, node->proc, " ",
+ " pending async transaction", w);
+ }
}
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+ struct binder_ref *ref)
{
- seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
- ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
- ref->node->debug_id, ref->strong, ref->weak, ref->death);
+ WARN_ON(!spin_is_locked(&ref->proc->outer_lock));
+ binder_node_lock(ref->node);
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
+ ref->data.debug_id, ref->data.desc,
+ ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->data.strong,
+ ref->data.weak, ref->death);
+ binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
@@ -3975,36 +4850,60 @@
struct rb_node *n;
size_t start_pos = m->count;
size_t header_pos;
+ struct binder_node *last_node = NULL;
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
- print_binder_thread(m, rb_entry(n, struct binder_thread,
+ print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
+
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
- if (print_all || node->has_async_transaction)
- print_binder_node(m, node);
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the tree
+ * while we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /* Need to drop inner lock to take node lock */
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_inner_unlock(node);
+ last_node = node;
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+
if (print_all) {
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc);
n != NULL;
n = rb_next(n))
- print_binder_ref(m, rb_entry(n, struct binder_ref,
- rb_node_desc));
+ print_binder_ref_olocked(m, rb_entry(n,
+ struct binder_ref,
+ rb_node_desc));
+ binder_proc_unlock(proc);
}
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- print_binder_buffer(m, " buffer",
- rb_entry(n, struct binder_buffer, rb_node));
+ binder_alloc_print_allocated(m, &proc->alloc);
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, proc, " ",
+ " pending transaction", w);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
}
@@ -4070,17 +4969,21 @@
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
ARRAY_SIZE(binder_command_strings));
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
- if (stats->bc[i])
+ int temp = atomic_read(&stats->bc[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_command_strings[i], stats->bc[i]);
+ binder_command_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
ARRAY_SIZE(binder_return_strings));
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
- if (stats->br[i])
+ int temp = atomic_read(&stats->br[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_return_strings[i], stats->br[i]);
+ binder_return_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
@@ -4088,11 +4991,15 @@
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(stats->obj_deleted));
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
- if (stats->obj_created[i] || stats->obj_deleted[i])
- seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ int created = atomic_read(&stats->obj_created[i]);
+ int deleted = atomic_read(&stats->obj_deleted[i]);
+
+ if (created || deleted)
+ seq_printf(m, "%s%s: active %d total %d\n",
+ prefix,
binder_objstat_strings[i],
- stats->obj_created[i] - stats->obj_deleted[i],
- stats->obj_created[i]);
+ created - deleted,
+ created);
}
}
@@ -4102,10 +5009,13 @@
struct binder_work *w;
struct rb_node *n;
int count, strong, weak;
+ size_t free_async_space =
+ binder_alloc_get_free_async_space(&proc->alloc);
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
count = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
seq_printf(m, " threads: %d\n", count);
@@ -4113,38 +5023,37 @@
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
- proc->ready_threads, proc->free_async_space);
+ proc->ready_threads,
+ free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
+ binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
count = 0;
strong = 0;
weak = 0;
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
- strong += ref->strong;
- weak += ref->weak;
+ strong += ref->data.strong;
+ weak += ref->data.weak;
}
+ binder_proc_unlock(proc);
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
- count = 0;
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- count++;
+ count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
count = 0;
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
- switch (w->type) {
- case BINDER_WORK_TRANSACTION:
+ if (w->type == BINDER_WORK_TRANSACTION)
count++;
- break;
- default:
- break;
- }
}
+ binder_inner_proc_unlock(proc);
seq_printf(m, " pending transactions: %d\n", count);
print_binder_stats(m, " ", &proc->stats);
@@ -4155,57 +5064,67 @@
{
struct binder_proc *proc;
struct binder_node *node;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ struct binder_node *last_node = NULL;
seq_puts(m, "binder state:\n");
+ spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
- print_binder_node(m, node);
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the list
+ * while we print it.
+ */
+ node->tmp_refs++;
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_unlock(node);
+ last_node = node;
+ spin_lock(&binder_dead_nodes_lock);
+ }
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder stats:\n");
print_binder_stats(m, "", &binder_stats);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
@@ -4213,44 +5132,63 @@
{
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- int do_lock = !binder_debug_no_lock;
- if (do_lock)
- binder_lock(__func__);
-
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, itr, 1);
}
}
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
+ int debug_id = READ_ONCE(e->debug_id_done);
+ /*
+ * read barrier to guarantee debug_id_done read before
+ * we print the log values
+ */
+ smp_rmb();
seq_printf(m,
- "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
e->from_thread, e->to_proc, e->to_thread, e->context_name,
- e->to_node, e->target_handle, e->data_size, e->offsets_size);
+ e->to_node, e->target_handle, e->data_size, e->offsets_size,
+ e->return_error, e->return_error_param,
+ e->return_error_line);
+ /*
+ * read-barrier to guarantee read of debug_id_done after
+ * done printing the fields of the entry
+ */
+ smp_rmb();
+ seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
+ "\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
+ unsigned int log_cur = atomic_read(&log->cur);
+ unsigned int count;
+ unsigned int cur;
int i;
- if (log->full) {
- for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
+ count = log_cur + 1;
+ cur = count < ARRAY_SIZE(log->entry) && !log->full ?
+ 0 : count % ARRAY_SIZE(log->entry);
+ if (count > ARRAY_SIZE(log->entry) || log->full)
+ count = ARRAY_SIZE(log->entry);
+ for (i = 0; i < count; i++) {
+ unsigned int index = cur++ % ARRAY_SIZE(log->entry);
+
+ print_binder_transaction_log_entry(m, &log->entry[index]);
}
- for (i = 0; i < log->next; i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
return 0;
}
@@ -4285,6 +5223,7 @@
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
+ mutex_init(&binder_device->context.context_mgr_node_lock);
ret = misc_register(&binder_device->miscdev);
if (ret < 0) {
@@ -4304,6 +5243,9 @@
struct binder_device *device;
struct hlist_node *tmp;
+ atomic_set(&binder_transaction_log.cur, ~0U);
+ atomic_set(&binder_transaction_log_failed.cur, ~0U);
+
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
new file mode 100644
index 0000000..b90222a
--- /dev/null
+++ b/drivers/android/binder_alloc.c
@@ -0,0 +1,802 @@
+/* binder_alloc.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "binder_alloc.h"
+#include "binder_trace.h"
+
+static DEFINE_MUTEX(binder_alloc_mmap_lock);
+
+enum {
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
+};
+static uint32_t binder_alloc_debug_mask;
+
+module_param_named(debug_mask, binder_alloc_debug_mask,
+ uint, 0644);
+
+#define binder_alloc_debug(mask, x...) \
+ do { \
+ if (binder_alloc_debug_mask & mask) \
+ pr_info(x); \
+ } while (0)
+
+static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &alloc->buffers))
+ return alloc->buffer +
+ alloc->buffer_size - (void *)buffer->data;
+ return (size_t)list_entry(buffer->entry.next,
+ struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: add free buffer, size %zd, at %pK\n",
+ alloc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer_locked(
+ struct binder_alloc *alloc, struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer < buffer)
+ p = &parent->rb_left;
+ else if (new_buffer > buffer)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_alloc_prepare_to_free_locked(
+ struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct rb_node *n = alloc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ struct binder_buffer *kern_ptr;
+
+ kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
+ - offsetof(struct binder_buffer, data));
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer)
+ n = n->rb_left;
+ else if (kern_ptr > buffer)
+ n = n->rb_right;
+ else {
+ /*
+ * Guard against user threads attempting to
+ * free the buffer twice
+ */
+ if (buffer->free_in_progress) {
+ pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid, (u64)user_ptr);
+ return NULL;
+ }
+ buffer->free_in_progress = 1;
+ return buffer;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
+ * @alloc: binder_alloc for this proc
+ * @user_ptr: User pointer to buffer data
+ *
+ * Validate userspace pointer to buffer data and return buffer corresponding to
+ * that user pointer. Search the rb tree for buffer that matches user data
+ * pointer.
+ *
+ * Return: Pointer to buffer or NULL
+ */
+struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
+
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct page **page;
+ struct mm_struct *mm;
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: %s pages %pK-%pK\n", alloc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ trace_binder_update_page_range(alloc, allocate, start, end);
+
+ if (vma)
+ mm = NULL;
+ else
+ mm = get_task_mm(alloc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = alloc->vma;
+ if (vma && mm != alloc->vma_vm_mm) {
+ pr_err("%d: vma mm and task mm mismatch\n",
+ alloc->pid);
+ vma = NULL;
+ }
+ }
+
+ if (allocate == 0)
+ goto free_range;
+
+ if (vma == NULL) {
+ pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+
+ BUG_ON(*page);
+ *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (*page == NULL) {
+ pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+ alloc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ ret = map_kernel_range_noflush((unsigned long)page_addr,
+ PAGE_SIZE, PAGE_KERNEL, page);
+ flush_cache_vmap((unsigned long)page_addr,
+ (unsigned long)page_addr + PAGE_SIZE);
+ if (ret != 1) {
+ pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+ alloc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + alloc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0]);
+ if (ret) {
+ pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+ alloc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (vma)
+ zap_page_range(vma, (uintptr_t)page_addr +
+ alloc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(*page);
+ *page = NULL;
+err_alloc_page_failed:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
+}
+
+struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size, data_offsets_size;
+ int ret;
+
+ if (alloc->vma == NULL) {
+ pr_err("%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
+
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid size %zd-%zd\n",
+ alloc->pid, data_size, offsets_size);
+ return ERR_PTR(-EINVAL);
+ }
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid extra_buffers_size %zd\n",
+ alloc->pid, extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+ if (is_async &&
+ alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ alloc->pid, size);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ size_t allocated_buffers = 0;
+ size_t largest_alloc_size = 0;
+ size_t total_alloc_size = 0;
+ size_t free_buffers = 0;
+ size_t largest_free_size = 0;
+ size_t total_free_size = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ allocated_buffers++;
+ total_alloc_size += buffer_size;
+ if (buffer_size > largest_alloc_size)
+ largest_alloc_size = buffer_size;
+ }
+ for (n = rb_first(&alloc->free_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ free_buffers++;
+ total_free_size += buffer_size;
+ if (buffer_size > largest_free_size)
+ largest_free_size = buffer_size;
+ }
+ pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers, largest_alloc_size,
+ total_free_size, free_buffers, largest_free_size);
+ return ERR_PTR(-ENOSPC);
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ }
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+ alloc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ if (n == NULL) {
+ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+ buffer_size = size; /* no room for other buffers */
+ else
+ buffer_size = size + sizeof(struct binder_buffer);
+ }
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ ret = binder_update_page_range(alloc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer = (void *)buffer->data + size;
+
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(alloc, new_buffer);
+ }
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got %pK\n",
+ alloc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ buffer->extra_buffers_size = extra_buffers_size;
+ if (is_async) {
+ alloc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+ return buffer;
+}
+
+/**
+ * binder_alloc_new_buf() - Allocate a new binder buffer
+ * @alloc: binder_alloc for this proc
+ * @data_size: size of user data buffer
+ * @offsets_size: user specified buffer offset
+ * @extra_buffers_size: size of extra space for meta-data (eg, security context)
+ * @is_async: buffer for async transaction
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+ * is the sum of the three given sizes (each rounded up to
+ * pointer-sized boundary)
+ *
+ * Return: The allocated buffer on success; an ERR_PTR-encoded errno on failure
+ */
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+ extra_buffers_size, is_async);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ int free_page_end = 1;
+ int free_page_start = 1;
+
+ BUG_ON(alloc->buffers.next == &buffer->entry);
+ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ BUG_ON(!prev->free);
+ if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+ free_page_start = 0;
+ if (buffer_end_page(prev) == buffer_end_page(buffer))
+ free_page_end = 0;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer, prev);
+ }
+
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (buffer_start_page(next) == buffer_end_page(buffer)) {
+ free_page_end = 0;
+ if (buffer_start_page(next) ==
+ buffer_start_page(buffer))
+ free_page_start = 0;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer, prev);
+ }
+ }
+ list_del(&buffer->entry);
+ if (free_page_start || free_page_end) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
+ alloc->pid, buffer, free_page_start ? "" : " end",
+ free_page_end ? "" : " start", prev, next);
+ binder_update_page_range(alloc, 0, free_page_start ?
+ buffer_start_page(buffer) : buffer_end_page(buffer),
+ (free_page_end ? buffer_end_page(buffer) :
+ buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ }
+}
+
+static void binder_free_buf_locked(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+ alloc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON((void *)buffer < alloc->buffer);
+ BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+ alloc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+
+ rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ struct binder_buffer *next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+
+ if (next->free) {
+ rb_erase(&next->rb_node, &alloc->free_buffers);
+ binder_delete_free_buffer(alloc, next);
+ }
+ }
+ if (alloc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = list_entry(buffer->entry.prev,
+ struct binder_buffer, entry);
+
+ if (prev->free) {
+ binder_delete_free_buffer(alloc, buffer);
+ rb_erase(&prev->rb_node, &alloc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(alloc, buffer);
+}
+
+/**
+ * binder_alloc_free_buf() - free a binder buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: kernel pointer to buffer
+ *
+ * Free the buffer allocated via binder_alloc_new_buffer()
+ */
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ mutex_lock(&alloc->mutex);
+ binder_free_buf_locked(alloc, buffer);
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_mmap_handler() - map virtual address space for proc
+ * @alloc: alloc structure for this proc
+ * @vma: vma passed to mmap()
+ *
+ * Called by binder_mmap() to initialize the space specified in
+ * vma for allocating binder buffers
+ *
+ * Return:
+ * 0 = success
+ * -EBUSY = address space already mapped
+ * -ENOMEM = failed to map memory to given address space
+ */
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ mutex_lock(&binder_alloc_mmap_lock);
+ if (alloc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ alloc->buffer = area->addr;
+ alloc->user_buffer_offset =
+ vma->vm_start - (uintptr_t)alloc->buffer;
+ mutex_unlock(&binder_alloc_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR(
+ (vma->vm_start ^ (uint32_t)alloc->buffer))) {
+ pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+ __func__, alloc->pid, vma->vm_start,
+ vma->vm_end, alloc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
+ ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+ GFP_KERNEL);
+ if (alloc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+ if (binder_update_page_range(alloc, 1, alloc->buffer,
+ alloc->buffer + PAGE_SIZE, vma)) {
+ ret = -ENOMEM;
+ failure_string = "alloc small buf";
+ goto err_alloc_small_buf_failed;
+ }
+ buffer = alloc->buffer;
+ INIT_LIST_HEAD(&alloc->buffers);
+ list_add(&buffer->entry, &alloc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(alloc, buffer);
+ alloc->free_async_space = alloc->buffer_size / 2;
+ barrier();
+ alloc->vma = vma;
+ alloc->vma_vm_mm = vma->vm_mm;
+
+ return 0;
+
+err_alloc_small_buf_failed:
+ kfree(alloc->pages);
+ alloc->pages = NULL;
+err_alloc_pages_failed:
+ mutex_lock(&binder_alloc_mmap_lock);
+ vfree(alloc->buffer);
+ alloc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+ mutex_unlock(&binder_alloc_mmap_lock);
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
+
+
+void binder_alloc_deferred_release(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int buffers, page_count;
+
+ BUG_ON(alloc->vma);
+
+ buffers = 0;
+ mutex_lock(&alloc->mutex);
+ while ((n = rb_first(&alloc->allocated_buffers))) {
+ struct binder_buffer *buffer;
+
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+ /* Transaction should already have been freed */
+ BUG_ON(buffer->transaction);
+
+ binder_free_buf_locked(alloc, buffer);
+ buffers++;
+ }
+
+ page_count = 0;
+ if (alloc->pages) {
+ int i;
+
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ void *page_addr;
+
+ if (!alloc->pages[i])
+ continue;
+
+ page_addr = alloc->buffer + i * PAGE_SIZE;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%s: %d: page %d at %pK not freed\n",
+ __func__, alloc->pid, i, page_addr);
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+ __free_page(alloc->pages[i]);
+ page_count++;
+ }
+ kfree(alloc->pages);
+ vfree(alloc->buffer);
+ }
+ mutex_unlock(&alloc->mutex);
+
+ binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "%s: %d buffers %d, pages %d\n",
+ __func__, alloc->pid, buffers, page_count);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->extra_buffers_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+/**
+ * binder_alloc_print_allocated() - print buffer info
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ *
+ * Prints information about every buffer associated with
+ * the binder_alloc state to the given seq_file
+ */
+void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_get_allocated_count() - return count of buffers
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: count of allocated buffers
+ */
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int count = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ mutex_unlock(&alloc->mutex);
+ return count;
+}
+
+
+/**
+ * binder_alloc_vma_close() - invalidate address space
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_vma_close() when releasing address space.
+ * Clears alloc->vma to prevent new incoming transactions from
+ * allocating more buffers.
+ */
+void binder_alloc_vma_close(struct binder_alloc *alloc)
+{
+ WRITE_ONCE(alloc->vma, NULL);
+ WRITE_ONCE(alloc->vma_vm_mm, NULL);
+}
+
+/**
+ * binder_alloc_init() - called by binder_open() for per-proc initialization
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_open() to initialize binder_alloc fields for
+ * new binder proc
+ */
+void binder_alloc_init(struct binder_alloc *alloc)
+{
+ alloc->tsk = current->group_leader;
+ alloc->pid = current->group_leader->pid;
+ mutex_init(&alloc->mutex);
+}
+
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
new file mode 100644
index 0000000..088e4ff
--- /dev/null
+++ b/drivers/android/binder_alloc.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry: entry alloc->buffers
+ * @rb_node: node for allocated_buffers/free_buffers rb trees
+ * @free: true if buffer is free
+ * @allow_user_free: describe the second member of struct blah,
+ * @async_transaction: describe the second member of struct blah,
+ * @debug_id: describe the second member of struct blah,
+ * @transaction: describe the second member of struct blah,
+ * @target_node: describe the second member of struct blah,
+ * @data_size: describe the second member of struct blah,
+ * @offsets_size: describe the second member of struct blah,
+ * @extra_buffers_size: describe the second member of struct blah,
+ * @data:i describe the second member of struct blah,
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned free_in_progress:1;
+ unsigned debug_id:28;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ size_t extra_buffers_size;
+ uint8_t data[0];
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @vma: vm_area_struct passed to mmap_handler
+ * (invarient after mmap)
+ * @tsk: tid for task that called init for this proc
+ * (invariant after init)
+ * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap)
+ * @buffer: base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers: list of all buffers for this proc
+ * @free_buffers: rb tree of buffers available for allocation
+ * sorted by size
+ * @allocated_buffers: rb tree of allocated buffers sorted by address
+ * @free_async_space: VA space available for async buffers. This is
+ * initialized at mmap time to 1/2 the full VA space
+ * @pages: array of physical page addresses for each
+ * page of mmap'd space
+ * @buffer_size: size of address space specified via mmap
+ * @pid: pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+ struct mutex mutex;
+ struct task_struct *tsk;
+ struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+ struct page **pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ int pid;
+};
+
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc);
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the bytes remaining in the address-space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+ size_t free_async_space;
+
+ mutex_lock(&alloc->mutex);
+ free_async_space = alloc->free_async_space;
+ mutex_unlock(&alloc->mutex);
+ return free_async_space;
+}
+
+/**
+ * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the offset between kernel and user-space addresses to use for
+ * virtual address conversion
+ */
+static inline ptrdiff_t
+binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
+{
+ /*
+ * user_buffer_offset is constant if vma is set and
+ * undefined if vma is not set. It is possible to
+ * get here with !alloc->vma if the target process
+ * is dying while a transaction is being initiated.
+ * Returning the old value is ok in this case and
+ * the transaction will fail.
+ */
+ return alloc->user_buffer_offset;
+}
+
+#endif /* _LINUX_BINDER_ALLOC_H */
+
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3d..7967db1 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -23,7 +23,8 @@
struct binder_buffer;
struct binder_node;
struct binder_proc;
-struct binder_ref;
+struct binder_alloc;
+struct binder_ref_data;
struct binder_thread;
struct binder_transaction;
@@ -146,8 +147,8 @@
TRACE_EVENT(binder_transaction_node_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
- struct binder_ref *ref),
- TP_ARGS(t, node, ref),
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -160,8 +161,8 @@
__entry->debug_id = t->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
),
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
@@ -170,8 +171,9 @@
);
TRACE_EVENT(binder_transaction_ref_to_node,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
- TP_ARGS(t, ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -182,10 +184,10 @@
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
- __entry->node_debug_id = ref->node->debug_id;
- __entry->node_ptr = ref->node->ptr;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
+ __entry->node_debug_id = node->debug_id;
+ __entry->node_ptr = node->ptr;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
@@ -194,9 +196,10 @@
);
TRACE_EVENT(binder_transaction_ref_to_ref,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
- struct binder_ref *dest_ref),
- TP_ARGS(t, src_ref, dest_ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *src_ref,
+ struct binder_ref_data *dest_ref),
+ TP_ARGS(t, node, src_ref, dest_ref),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -208,7 +211,7 @@
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->node_debug_id = src_ref->node->debug_id;
+ __entry->node_debug_id = node->debug_id;
__entry->src_ref_debug_id = src_ref->debug_id;
__entry->src_ref_desc = src_ref->desc;
__entry->dest_ref_debug_id = dest_ref->debug_id;
@@ -268,9 +271,9 @@
TP_ARGS(buffer));
TRACE_EVENT(binder_update_page_range,
- TP_PROTO(struct binder_proc *proc, bool allocate,
+ TP_PROTO(struct binder_alloc *alloc, bool allocate,
void *start, void *end),
- TP_ARGS(proc, allocate, start, end),
+ TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
__field(bool, allocate)
@@ -278,9 +281,9 @@
__field(size_t, size)
),
TP_fast_assign(
- __entry->proc = proc->pid;
+ __entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - proc->buffer;
+ __entry->offset = start - alloc->buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5ba619a..35ab4d5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2106,7 +2106,11 @@
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
- if (dev->bus && dev->bus->shutdown) {
+ if (dev->class && dev->class->shutdown) {
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
+ dev->class->shutdown(dev);
+ } else if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 624f069..55687b8 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -208,145 +208,10 @@
#endif
-#ifdef CONFIG_SCHED_HMP
-
-static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t rc;
- int cpuid = cpu->dev.id;
- unsigned int pwr_cost;
-
- pwr_cost = sched_get_static_cpu_pwr_cost(cpuid);
-
- rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
-
- return rc;
-}
-
-static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int err;
- int cpuid = cpu->dev.id;
- unsigned int pwr_cost;
-
- err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
- if (err)
- return err;
-
- err = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost);
-
- if (err >= 0)
- err = count;
-
- return err;
-}
-
-static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t rc;
- int cpuid = cpu->dev.id;
- unsigned int pwr_cost;
-
- pwr_cost = sched_get_static_cluster_pwr_cost(cpuid);
-
- rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
-
- return rc;
-}
-
-static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int err;
- int cpuid = cpu->dev.id;
- unsigned int pwr_cost;
-
- err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
- if (err)
- return err;
-
- err = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost);
-
- if (err >= 0)
- err = count;
-
- return err;
-}
-
-static ssize_t show_sched_cluser_wake_idle(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t rc;
- int cpuid = cpu->dev.id;
- unsigned int wake_up_idle;
-
- wake_up_idle = sched_get_cluster_wake_idle(cpuid);
-
- rc = scnprintf(buf, PAGE_SIZE-2, "%d\n", wake_up_idle);
-
- return rc;
-}
-
-static ssize_t __ref store_sched_cluster_wake_idle(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int err;
- int cpuid = cpu->dev.id;
- unsigned int wake_up_idle;
-
- err = kstrtouint(strstrip((char *)buf), 0, &wake_up_idle);
- if (err)
- return err;
-
- err = sched_set_cluster_wake_idle(cpuid, wake_up_idle);
-
- if (err >= 0)
- err = count;
-
- return err;
-}
-
-static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644,
- show_sched_static_cpu_pwr_cost,
- store_sched_static_cpu_pwr_cost);
-static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644,
- show_sched_static_cluster_pwr_cost,
- store_sched_static_cluster_pwr_cost);
-static DEVICE_ATTR(sched_cluster_wake_up_idle, 0644,
- show_sched_cluser_wake_idle,
- store_sched_cluster_wake_idle);
-
-static struct attribute *hmp_sched_cpu_attrs[] = {
- &dev_attr_sched_static_cpu_pwr_cost.attr,
- &dev_attr_sched_static_cluster_pwr_cost.attr,
- &dev_attr_sched_cluster_wake_up_idle.attr,
- NULL
-};
-
-static struct attribute_group sched_hmp_cpu_attr_group = {
- .attrs = hmp_sched_cpu_attrs,
-};
-
-#endif /* CONFIG_SCHED_HMP */
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
-#ifdef CONFIG_SCHED_HMP
- &sched_hmp_cpu_attr_group,
-#endif
#ifdef CONFIG_HOTPLUG_CPU
&cpu_isolated_attr_group,
#endif
@@ -357,9 +222,6 @@
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
-#ifdef CONFIG_SCHED_HMP
- &sched_hmp_cpu_attr_group,
-#endif
#ifdef CONFIG_HOTPLUG_CPU
&cpu_isolated_attr_group,
#endif
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c4af003..5eba478 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -856,7 +856,7 @@
const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
- char *driver_override, *old = pdev->driver_override, *cp;
+ char *driver_override, *old, *cp;
if (count > PATH_MAX)
return -EINVAL;
@@ -869,12 +869,15 @@
if (cp)
*cp = '\0';
+ device_lock(dev);
+ old = pdev->driver_override;
if (strlen(driver_override)) {
pdev->driver_override = driver_override;
} else {
kfree(driver_override);
pdev->driver_override = NULL;
}
+ device_unlock(dev);
kfree(old);
@@ -885,8 +888,12 @@
struct device_attribute *attr, char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
+ ssize_t len;
- return sprintf(buf, "%s\n", pdev->driver_override);
+ device_lock(dev);
+ len = sprintf(buf, "%s\n", pdev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index c9914d65..3868665 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1271,6 +1271,7 @@
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
+ __set_bit(QUEUE_FLAG_FAST, &zram->disk->queue->queue_flags);
/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
set_capacity(zram->disk, 0);
/* zram devices sort of resembles non-rotational disks */
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index bfc3648..f927756 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -708,6 +708,7 @@
} else {
BT_PWR_ERR("BT chip state is already :%d no change d\n"
, pwr_state);
+ ret = 0;
}
break;
default:
diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h
index 161be78..ed3a743 100644
--- a/drivers/bluetooth/btfm_slim.h
+++ b/drivers/bluetooth/btfm_slim.h
@@ -13,7 +13,7 @@
#define BTFM_SLIM_H
#include <linux/slimbus/slimbus.h>
-#define BTFMSLIM_DBG(fmt, arg...) pr_debug(fmt "\n", ## arg)
+#define BTFMSLIM_DBG(fmt, arg...) pr_debug("%s: " fmt "\n", __func__, ## arg)
#define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
#define BTFMSLIM_ERR(fmt, arg...) pr_err("%s: " fmt "\n", __func__, ## arg)
@@ -68,6 +68,7 @@
uint32_t num_rx_port;
uint32_t num_tx_port;
+ uint32_t sample_rate;
struct btfmslim_ch *rx_chs;
struct btfmslim_ch *tx_chs;
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index 73a789c..791ea29 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -54,7 +54,7 @@
int ret;
struct btfmslim *btfmslim = dai->dev->platform_data;
- BTFMSLIM_DBG("substream = %s stream = %d dai name = %s",
+ BTFMSLIM_DBG("substream = %s stream = %d dai->name = %s",
substream->name, substream->stream, dai->name);
ret = btfm_slim_hw_init(btfmslim);
return ret;
@@ -63,10 +63,52 @@
static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
+ int i;
struct btfmslim *btfmslim = dai->dev->platform_data;
+ struct btfmslim_ch *ch;
+ uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("substream = %s stream = %d dai name = %s",
- substream->name, substream->stream, dai->name);
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
+ dai->id, dai->rate);
+
+ switch (dai->id) {
+ case BTFM_FM_SLIM_TX:
+ grp = true; nchan = 2;
+ ch = btfmslim->tx_chs;
+ rxport = 0;
+ break;
+ case BTFM_BT_SCO_SLIM_TX:
+ ch = btfmslim->tx_chs;
+ rxport = 0;
+ break;
+ case BTFM_BT_SCO_A2DP_SLIM_RX:
+ case BTFM_BT_SPLIT_A2DP_SLIM_RX:
+ ch = btfmslim->rx_chs;
+ rxport = 1;
+ break;
+ case BTFM_SLIM_NUM_CODEC_DAIS:
+ default:
+ BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
+ return;
+ }
+
+ if (dai->id == BTFM_FM_SLIM_TX)
+ goto out;
+
+ /* Search for dai->id matched port handler */
+ for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) &&
+ (ch->id != BTFM_SLIM_NUM_CODEC_DAIS) &&
+ (ch->id != dai->id); ch++, i++)
+ ;
+
+ if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
+ (ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
+ BTFMSLIM_ERR("ch is invalid!!");
+ return;
+ }
+
+ btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+out:
btfm_slim_hw_deinit(btfmslim);
}
@@ -74,14 +116,14 @@
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- BTFMSLIM_DBG("dai name = %s DAI-ID %x rate %d num_ch %d",
+ BTFMSLIM_DBG("dai->name = %s DAI-ID %x rate %d num_ch %d",
dai->name, dai->id, params_rate(params),
params_channels(params));
return 0;
}
-int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
+static int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
int i, ret = -EINVAL;
@@ -89,9 +131,12 @@
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
+ /* save sample rate */
+ btfmslim->sample_rate = dai->rate;
+
switch (dai->id) {
case BTFM_FM_SLIM_TX:
grp = true; nchan = 2;
@@ -129,15 +174,15 @@
return ret;
}
-int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
+static int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- int i, ret = -EINVAL;
+ int ret = -EINVAL, i;
struct btfmslim *btfmslim = dai->dev->platform_data;
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
switch (dai->id) {
@@ -158,7 +203,12 @@
case BTFM_SLIM_NUM_CODEC_DAIS:
default:
BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
- return ret;
+ goto out;
+ }
+
+ if (dai->id != BTFM_FM_SLIM_TX) {
+ ret = 0;
+ goto out;
}
/* Search for dai->id matched port handler */
@@ -170,9 +220,12 @@
if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
(ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
BTFMSLIM_ERR("ch is invalid!!");
- return ret;
+ goto out;
}
- ret = btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+
+ btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+
+out:
return ret;
}
@@ -282,6 +335,9 @@
*tx_num = 0;
*rx_num = num;
break;
+ default:
+ BTFMSLIM_ERR("Unsupported DAI %d", dai->id);
+ return -EINVAL;
}
do {
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index 72e28da..77e2973 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -39,6 +39,7 @@
{
int ret = 0;
uint8_t reg_val;
+ uint16_t reg;
BTFMSLIM_DBG("");
@@ -46,20 +47,20 @@
return -EINVAL;
/* Get SB_SLAVE_HW_REV_MSB value*/
- ret = btfm_slim_read(btfmslim, CHRK_SB_SLAVE_HW_REV_MSB, 1,
- ®_val, IFD);
+ reg = CHRK_SB_SLAVE_HW_REV_MSB;
+ ret = btfm_slim_read(btfmslim, reg, 1, ®_val, IFD);
if (ret) {
- BTFMSLIM_ERR("failed to read (%d)", ret);
+ BTFMSLIM_ERR("failed to read (%d) reg 0x%x", ret, reg);
goto error;
}
BTFMSLIM_DBG("Major Rev: 0x%x, Minor Rev: 0x%x",
(reg_val & 0xF0) >> 4, (reg_val & 0x0F));
/* Get SB_SLAVE_HW_REV_LSB value*/
- ret = btfm_slim_read(btfmslim, CHRK_SB_SLAVE_HW_REV_LSB, 1,
- ®_val, IFD);
+ reg = CHRK_SB_SLAVE_HW_REV_LSB;
+ ret = btfm_slim_read(btfmslim, reg, 1, ®_val, IFD);
if (ret) {
- BTFMSLIM_ERR("failed to read (%d)", ret);
+ BTFMSLIM_ERR("failed to read (%d) reg 0x%x", ret, reg);
goto error;
}
BTFMSLIM_DBG("Step Rev: 0x%x", reg_val);
@@ -68,62 +69,87 @@
return ret;
}
+static inline int is_fm_port(uint8_t port_num)
+{
+ if (port_num == CHRK_SB_PGD_PORT_TX1_FM ||
+ port_num == CHRK_SB_PGD_PORT_TX2_FM)
+ return 1;
+ else
+ return 0;
+}
int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
uint8_t rxport, uint8_t enable)
{
int ret = 0;
uint8_t reg_val = 0;
+ uint8_t port_bit = 0;
uint16_t reg;
BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
if (rxport) {
- /* Port enable */
- reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
- } else { /* txport */
- /* Multiple Channel Setting - only FM Tx will be multiple
- * channel
- */
- if (enable && (port_num == CHRK_SB_PGD_PORT_TX1_FM ||
- port_num == CHRK_SB_PGD_PORT_TX2_FM)) {
-
- reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) |
- (0x1 << CHRK_SB_PGD_PORT_TX2_FM);
- reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
+ if (enable && btfmslim->sample_rate == 48000) {
+ /* For A2DP Rx */
+ reg_val = 0x1;
+ port_bit = port_num - 0x10;
+ reg = CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_0(port_bit);
+ BTFMSLIM_DBG("writing reg_val (%d) to reg(%x) for A2DP",
+ reg_val, reg);
ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD);
if (ret) {
- BTFMSLIM_ERR("failed to write (%d)", ret);
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x",
+ ret, reg);
goto error;
}
}
+ /* Port enable */
+ reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
+ goto enable_disable_rxport;
+ }
+ if (!enable)
+ goto enable_disable_txport;
- /* Enable Tx port hw auto recovery for underrun or
- * overrun error
- */
- reg_val = (enable) ? (CHRK_ENABLE_OVERRUN_AUTO_RECOVERY |
- CHRK_ENABLE_UNDERRUN_AUTO_RECOVERY) : 0x0;
-
- ret = btfm_slim_write(btfmslim,
- CHRK_SB_PGD_PORT_TX_OR_UR_CFGN(port_num), 1,
- ®_val, IFD);
+ /* txport */
+ /* Multiple Channel Setting */
+ if (is_fm_port(port_num)) {
+ reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) |
+ (0x1 << CHRK_SB_PGD_PORT_TX2_FM);
+ reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
+ ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD);
if (ret) {
- BTFMSLIM_ERR("failed to write (%d)", ret);
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
goto error;
}
-
- /* Port enable */
- reg = CHRK_SB_PGD_PORT_TX_CFGN(port_num);
}
- if (enable)
- /* Set water mark to 1 and enable the port */
- reg_val = CHRK_SB_PGD_PORT_ENABLE | CHRK_SB_PGD_PORT_WM_LB;
- else
+ /* Enable Tx port hw auto recovery for underrun or overrun error */
+ reg_val = (CHRK_ENABLE_OVERRUN_AUTO_RECOVERY |
+ CHRK_ENABLE_UNDERRUN_AUTO_RECOVERY);
+ reg = CHRK_SB_PGD_PORT_TX_OR_UR_CFGN(port_num);
+ ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD);
+ if (ret) {
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
+ goto error;
+ }
+
+enable_disable_txport:
+ /* Port enable */
+ reg = CHRK_SB_PGD_PORT_TX_CFGN(port_num);
+
+enable_disable_rxport:
+ if (enable) {
+ if (is_fm_port(port_num))
+ reg_val = CHRK_SB_PGD_PORT_ENABLE |
+ CHRK_SB_PGD_PORT_WM_L3;
+ else
+ reg_val = CHRK_SB_PGD_PORT_ENABLE |
+ CHRK_SB_PGD_PORT_WM_LB;
+ } else
reg_val = CHRK_SB_PGD_PORT_DISABLE;
ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD);
if (ret)
- BTFMSLIM_ERR("failed to write (%d)", ret);
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
error:
return ret;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 13fac71..7bc263c 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -211,6 +211,7 @@
struct device *dev;
struct fastrpc_session_ctx session[NUM_SESSIONS];
struct completion work;
+ struct completion workport;
struct notifier_block nb;
struct kref kref;
int sesscount;
@@ -1378,6 +1379,7 @@
me->channel = &gcinfo[0];
for (i = 0; i < NUM_CHANNELS; i++) {
init_completion(&me->channel[i].work);
+ init_completion(&me->channel[i].workport);
me->channel[i].sesscount = 0;
}
}
@@ -1828,7 +1830,7 @@
switch (event) {
case GLINK_CONNECTED:
link->port_state = FASTRPC_LINK_CONNECTED;
- complete(&me->channel[cid].work);
+ complete(&me->channel[cid].workport);
break;
case GLINK_LOCAL_DISCONNECTED:
link->port_state = FASTRPC_LINK_DISCONNECTED;
@@ -1978,8 +1980,7 @@
return;
link = &gfa.channel[cid].link;
- if (link->port_state == FASTRPC_LINK_CONNECTED ||
- link->port_state == FASTRPC_LINK_CONNECTING) {
+ if (link->port_state == FASTRPC_LINK_CONNECTED) {
link->port_state = FASTRPC_LINK_DISCONNECTING;
glink_close(chan);
}
@@ -2161,7 +2162,8 @@
if (err)
goto bail;
- VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
+ VERIFY(err,
+ wait_for_completion_timeout(&me->channel[cid].workport,
RPC_TIMEOUT));
if (err) {
me->channel[cid].chan = 0;
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 8f0597f..e2d39e7 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2208,11 +2208,28 @@
{
struct list_head *start, *temp;
struct diag_dci_client_tbl *entry = NULL;
+ struct pid *pid_struct = NULL;
+ struct task_struct *task_s = NULL;
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl, track);
- if (entry->client->tgid == tgid)
- return entry;
+ pid_struct = find_get_pid(entry->tgid);
+ if (!pid_struct) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "diag: valid pid doesn't exist for pid = %d\n",
+ entry->tgid);
+ continue;
+ }
+ task_s = get_pid_task(pid_struct, PIDTYPE_PID);
+ if (!task_s) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "diag: valid task doesn't exist for pid = %d\n",
+ entry->tgid);
+ continue;
+ }
+ if (task_s == entry->client)
+ if (entry->client->tgid == tgid)
+ return entry;
}
return NULL;
}
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index b5a594a..8aefb5a1 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -113,10 +113,12 @@
else
mask_info = &log_mask;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
return;
mask = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!mask->ptr)
+ return;
buf = mask_info->update_buf;
switch (mask_info->status) {
@@ -225,7 +227,7 @@
else
mask_info = &event_mask;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
return;
buf = mask_info->update_buf;
@@ -289,6 +291,7 @@
struct diag_mask_info *mask_info = NULL;
struct diag_msg_mask_t *mask = NULL;
struct diag_ctrl_msg_mask header;
+ uint8_t msg_mask_tbl_count_local;
if (peripheral >= NUM_PERIPHERALS)
return;
@@ -306,11 +309,17 @@
else
mask_info = &msg_mask;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
return;
-
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ mutex_unlock(&driver->msg_mask_lock);
+ return;
+ }
buf = mask_info->update_buf;
+ msg_mask_tbl_count_local = driver->msg_mask_tbl_count;
+ mutex_unlock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
switch (mask_info->status) {
case DIAG_CTRL_MASK_ALL_DISABLED:
@@ -327,9 +336,11 @@
goto err;
}
- for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
- if (((first < mask->ssid_first) ||
- (last > mask->ssid_last_tools)) && first != ALL_SSID) {
+ for (i = 0; i < msg_mask_tbl_count_local; i++, mask++) {
+ mutex_lock(&driver->msg_mask_lock);
+ if (((mask->ssid_first > first) ||
+ (mask->ssid_last_tools < last)) && first != ALL_SSID) {
+ mutex_unlock(&driver->msg_mask_lock);
continue;
}
@@ -370,12 +381,13 @@
if (mask_size > 0)
memcpy(buf + header_len, mask->ptr, mask_size);
mutex_unlock(&mask->lock);
+ mutex_unlock(&driver->msg_mask_lock);
err = diagfwd_write(peripheral, TYPE_CNTL, buf,
header_len + mask_size);
if (err && err != -ENODEV)
- pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d\n",
- peripheral);
+ pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d, error = %d\n",
+ peripheral, err);
if (first != ALL_SSID)
break;
@@ -495,7 +507,7 @@
if (!diag_apps_responds())
return 0;
-
+ mutex_lock(&driver->msg_mask_lock);
rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE;
rsp.status = MSG_STATUS_SUCCESS;
@@ -503,7 +515,6 @@
rsp.count = driver->msg_mask_tbl_count;
memcpy(dest_buf, &rsp, sizeof(rsp));
write_len += sizeof(rsp);
-
mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask_ptr++) {
if (write_len + sizeof(ssid_range) > dest_len) {
@@ -516,7 +527,7 @@
memcpy(dest_buf + write_len, &ssid_range, sizeof(ssid_range));
write_len += sizeof(ssid_range);
}
-
+ mutex_unlock(&driver->msg_mask_lock);
return write_len;
}
@@ -540,7 +551,7 @@
if (!diag_apps_responds())
return 0;
-
+ mutex_lock(&driver->msg_mask_lock);
req = (struct diag_build_mask_req_t *)src_buf;
rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
rsp.sub_cmd = DIAG_CMD_OP_GET_BUILD_MASK;
@@ -548,9 +559,8 @@
rsp.ssid_last = req->ssid_last;
rsp.status = MSG_STATUS_FAIL;
rsp.padding = 0;
-
build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
- for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
if (build_mask->ssid_first != req->ssid_first)
continue;
num_entries = req->ssid_last - req->ssid_first + 1;
@@ -571,7 +581,7 @@
}
memcpy(dest_buf, &rsp, sizeof(rsp));
write_len += sizeof(rsp);
-
+ mutex_unlock(&driver->msg_mask_lock);
return write_len;
}
@@ -599,6 +609,7 @@
if (!diag_apps_responds())
return 0;
+ mutex_lock(&driver->msg_mask_lock);
req = (struct diag_build_mask_req_t *)src_buf;
rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK;
@@ -606,7 +617,6 @@
rsp.ssid_last = req->ssid_last;
rsp.status = MSG_STATUS_FAIL;
rsp.padding = 0;
-
mask = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if ((req->ssid_first < mask->ssid_first) ||
@@ -624,7 +634,7 @@
}
memcpy(dest_buf, &rsp, sizeof(rsp));
write_len += sizeof(rsp);
-
+ mutex_unlock(&driver->msg_mask_lock);
return write_len;
}
@@ -655,8 +665,8 @@
}
req = (struct diag_msg_build_mask_t *)src_buf;
-
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if (i < (driver->msg_mask_tbl_count - 1)) {
@@ -696,6 +706,8 @@
pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
__func__, mask_size);
mutex_unlock(&mask->lock);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
return -ENOMEM;
}
mask->ptr = temp;
@@ -714,8 +726,8 @@
mask_info->status = DIAG_CTRL_MASK_VALID;
break;
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
-
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(MSG_MASKS_TYPE);
@@ -740,7 +752,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+ mutex_unlock(&driver->md_session_lock);
}
end:
return write_len;
@@ -769,8 +783,10 @@
req = (struct diag_msg_config_rsp_t *)src_buf;
- mask = (struct diag_msg_mask_t *)mask_info->ptr;
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
DIAG_CTRL_MASK_ALL_DISABLED;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
@@ -779,6 +795,7 @@
mask->range * sizeof(uint32_t));
mutex_unlock(&mask->lock);
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
if (diag_check_update(APPS_DATA))
@@ -799,7 +816,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -893,7 +912,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -940,7 +961,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
}
memcpy(dest_buf, &header, sizeof(header));
write_len += sizeof(header);
@@ -1194,7 +1217,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_log_mask_update(i, req->equip_id);
+ mutex_unlock(&driver->md_session_lock);
}
end:
return write_len;
@@ -1245,7 +1270,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_log_mask_update(i, ALL_EQUIP_ID);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -1284,6 +1311,7 @@
struct diag_ssid_range_t range;
mutex_lock(&msg_mask.lock);
+ mutex_lock(&driver->msg_mask_lock);
driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
range.ssid_first = msg_mask_tbl[i].ssid_first;
@@ -1292,6 +1320,7 @@
if (err)
break;
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&msg_mask.lock);
return err;
}
@@ -1306,8 +1335,10 @@
struct diag_ssid_range_t range;
mutex_lock(&msg_bt_mask.lock);
+ mutex_lock(&driver->msg_mask_lock);
+ driver->bt_msg_mask_tbl_count = MSG_MASK_TBL_CNT;
build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
- for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
range.ssid_first = msg_mask_tbl[i].ssid_first;
range.ssid_last = msg_mask_tbl[i].ssid_last;
err = diag_create_msg_mask_table_entry(build_mask, &range);
@@ -1417,8 +1448,8 @@
}
memcpy(build_mask->ptr, tbl, tbl_size);
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&msg_bt_mask.lock);
-
return err;
}
@@ -1565,10 +1596,11 @@
pr_err("diag: Unable to create msg masks, err: %d\n", err);
return err;
}
+ mutex_lock(&driver->msg_mask_lock);
driver->msg_mask = &msg_mask;
-
for (i = 0; i < NUM_PERIPHERALS; i++)
driver->max_ssid_count[i] = 0;
+ mutex_unlock(&driver->msg_mask_lock);
return 0;
}
@@ -1587,8 +1619,8 @@
err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE);
if (err)
return err;
-
mutex_lock(&dest->lock);
+ mutex_lock(&driver->msg_mask_lock);
src_mask = (struct diag_msg_mask_t *)src->ptr;
dest_mask = (struct diag_msg_mask_t *)dest->ptr;
@@ -1605,8 +1637,8 @@
src_mask++;
dest_mask++;
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&dest->lock);
-
return err;
}
@@ -1617,15 +1649,15 @@
if (!mask_info)
return;
-
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
-
__diag_mask_exit(mask_info);
}
@@ -1633,15 +1665,17 @@
{
int i;
struct diag_msg_mask_t *mask = NULL;
-
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)(msg_mask.ptr);
if (mask) {
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
kfree(mask->ptr);
kfree(msg_mask.ptr);
+ msg_mask.ptr = NULL;
}
-
kfree(msg_mask.update_buf);
+ msg_mask.update_buf = NULL;
+ mutex_unlock(&driver->msg_mask_lock);
}
static int diag_build_time_mask_init(void)
@@ -1666,13 +1700,15 @@
{
int i;
struct diag_msg_mask_t *mask = NULL;
-
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)(msg_bt_mask.ptr);
if (mask) {
- for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, mask++)
kfree(mask->ptr);
- kfree(msg_mask.ptr);
+ kfree(msg_bt_mask.ptr);
+ msg_bt_mask.ptr = NULL;
}
+ mutex_unlock(&driver->msg_mask_lock);
}
static int diag_log_mask_init(void)
@@ -1790,8 +1826,9 @@
return -EIO;
}
mutex_unlock(&driver->diag_maskclear_mutex);
-
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+
mask = (struct diag_msg_mask_t *)(mask_info->ptr);
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
ptr = mask_info->update_buf;
@@ -1828,8 +1865,8 @@
}
total_len += len;
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
-
return err ? err : total_len;
}
@@ -1898,9 +1935,11 @@
diag_send_feature_mask_update(peripheral);
if (driver->time_sync_enabled)
diag_send_time_sync_update(peripheral);
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
diag_send_event_mask_update(peripheral);
+ mutex_unlock(&driver->md_session_lock);
diag_send_real_time_update(peripheral,
driver->real_time_mode[DIAG_LOCAL_PROC]);
diag_send_peripheral_buffering_mode(
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index d3dde50..8051d5d 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -580,6 +580,7 @@
unsigned char *buf_feature_mask_update;
uint8_t hdlc_disabled;
struct mutex hdlc_disable_mutex;
+ struct mutex hdlc_recovery_mutex;
struct timer_list hdlc_reset_timer;
struct mutex diag_hdlc_mutex;
unsigned char *hdlc_buf;
@@ -625,8 +626,10 @@
struct diag_mask_info *event_mask;
struct diag_mask_info *build_time_mask;
uint8_t msg_mask_tbl_count;
+ uint8_t bt_msg_mask_tbl_count;
uint16_t event_mask_size;
uint16_t last_event_id;
+ struct mutex msg_mask_lock;
/* Variables for Mask Centralization */
uint16_t num_event_id[NUM_PERIPHERALS];
uint32_t num_equip_id[NUM_PERIPHERALS];
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 128d6ce..e4397c5 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -975,14 +975,34 @@
else
hdlc_disabled = driver->hdlc_disabled;
if (hdlc_disabled) {
+ if (len < 4) {
+ pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+ __func__, len);
+ return -EBADMSG;
+ }
payload = *(uint16_t *)(buf + 2);
+ if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
+ pr_err("diag: Dropping packet, payload size is %d\n",
+ payload);
+ return -EBADMSG;
+ }
driver->hdlc_encode_buf_len = payload;
/*
- * Adding 4 bytes for start (1 byte), version (1 byte) and
- * payload (2 bytes)
+ * Adding 5 bytes for start (1 byte), version (1 byte),
+ * payload (2 bytes) and end (1 byte)
*/
- memcpy(driver->hdlc_encode_buf, buf + 4, payload);
- goto send_data;
+ if (len == (payload + 5)) {
+ /*
+ * Adding 4 bytes for start (1 byte), version (1 byte)
+ * and payload (2 bytes)
+ */
+ memcpy(driver->hdlc_encode_buf, buf + 4, payload);
+ goto send_data;
+ } else {
+ pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+ __func__, len);
+ return -EBADMSG;
+ }
}
if (hdlc_flag) {
@@ -1713,14 +1733,18 @@
{
int i;
+ mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == current->tgid)
break;
- if (i == driver->num_clients)
+ if (i == driver->num_clients) {
+ mutex_unlock(&driver->diagchar_mutex);
return -EINVAL;
+ }
driver->data_ready[i] |= DEINIT_TYPE;
+ mutex_unlock(&driver->diagchar_mutex);
wake_up_interruptible(&driver->wait_q);
return 1;
@@ -3463,6 +3487,8 @@
mutex_init(&driver->diag_file_mutex);
mutex_init(&driver->delayed_rsp_mutex);
mutex_init(&apps_data_mutex);
+ mutex_init(&driver->msg_mask_lock);
+ mutex_init(&driver->hdlc_recovery_mutex);
for (i = 0; i < NUM_PERIPHERALS; i++)
mutex_init(&driver->diagfwd_channel_mutex[i]);
init_waitqueue_head(&driver->wait_q);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index cd49f00..3f00a7e 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1443,7 +1443,9 @@
if (start_ptr) {
/* Discard any partial packet reads */
+ mutex_lock(&driver->hdlc_recovery_mutex);
driver->incoming_pkt.processing = 0;
+ mutex_unlock(&driver->hdlc_recovery_mutex);
diag_process_non_hdlc_pkt(start_ptr, len - i, info);
}
}
@@ -1457,18 +1459,24 @@
const uint32_t header_len = sizeof(struct diag_pkt_frame_t);
struct diag_pkt_frame_t *actual_pkt = NULL;
unsigned char *data_ptr = NULL;
- struct diag_partial_pkt_t *partial_pkt = &driver->incoming_pkt;
+ struct diag_partial_pkt_t *partial_pkt = NULL;
- if (!buf || len <= 0)
+ mutex_lock(&driver->hdlc_recovery_mutex);
+ if (!buf || len <= 0) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
return;
-
- if (!partial_pkt->processing)
+ }
+ partial_pkt = &driver->incoming_pkt;
+ if (!partial_pkt->processing) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto start;
+ }
if (partial_pkt->remaining > len) {
if ((partial_pkt->read_len + len) > partial_pkt->capacity) {
pr_err("diag: Invalid length %d, %d received in %s\n",
partial_pkt->read_len, len, __func__);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto end;
}
memcpy(partial_pkt->data + partial_pkt->read_len, buf, len);
@@ -1482,6 +1490,7 @@
pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
partial_pkt->read_len,
partial_pkt->remaining, __func__);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto end;
}
memcpy(partial_pkt->data + partial_pkt->read_len, buf,
@@ -1495,20 +1504,27 @@
if (partial_pkt->remaining == 0) {
actual_pkt = (struct diag_pkt_frame_t *)(partial_pkt->data);
data_ptr = partial_pkt->data + header_len;
- if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR)
+ if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+ CONTROL_CHAR) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
diag_hdlc_start_recovery(buf, len, info);
+ mutex_lock(&driver->hdlc_recovery_mutex);
+ }
err = diag_process_apps_pkt(data_ptr,
actual_pkt->length, info);
if (err) {
pr_err("diag: In %s, unable to process incoming data packet, err: %d\n",
__func__, err);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto end;
}
partial_pkt->read_len = 0;
partial_pkt->total_len = 0;
partial_pkt->processing = 0;
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto start;
}
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto end;
start:
@@ -1521,14 +1537,14 @@
diag_send_error_rsp(buf, len);
goto end;
}
-
+ mutex_lock(&driver->hdlc_recovery_mutex);
if (pkt_len + header_len > partial_pkt->capacity) {
pr_err("diag: In %s, incoming data is too large for the request buffer %d\n",
__func__, pkt_len);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
diag_hdlc_start_recovery(buf, len, info);
break;
}
-
if ((pkt_len + header_len) > (len - read_bytes)) {
partial_pkt->read_len = len - read_bytes;
partial_pkt->total_len = pkt_len + header_len;
@@ -1536,19 +1552,27 @@
partial_pkt->read_len;
partial_pkt->processing = 1;
memcpy(partial_pkt->data, buf, partial_pkt->read_len);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
break;
}
data_ptr = buf + header_len;
- if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR)
+ if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+ CONTROL_CHAR) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
diag_hdlc_start_recovery(buf, len, info);
+ mutex_lock(&driver->hdlc_recovery_mutex);
+ }
else
hdlc_reset = 0;
err = diag_process_apps_pkt(data_ptr,
actual_pkt->length, info);
- if (err)
+ if (err) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
break;
+ }
read_bytes += header_len + pkt_len + 1;
buf += header_len + pkt_len + 1; /* advance to next pkt */
+ mutex_unlock(&driver->hdlc_recovery_mutex);
}
end:
return;
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index e13871e..5282e02 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -68,7 +68,6 @@
driver->feature[peripheral].sent_feature_mask = 0;
driver->feature[peripheral].rcvd_feature_mask = 0;
- flush_workqueue(driver->cntl_wq);
reg_dirty |= PERIPHERAL_MASK(peripheral);
diag_cmd_remove_reg_by_proc(peripheral);
driver->feature[peripheral].stm_support = DISABLE_STM;
@@ -511,6 +510,7 @@
/* Don't account for pkt_id and length */
read_len += header_len - (2 * sizeof(uint32_t));
+ mutex_lock(&driver->msg_mask_lock);
driver->max_ssid_count[peripheral] = header->count;
for (i = 0; i < header->count && read_len < len; i++) {
ssid_range = (struct diag_ssid_range_t *)ptr;
@@ -554,6 +554,7 @@
}
driver->msg_mask_tbl_count += 1;
}
+ mutex_unlock(&driver->msg_mask_lock);
}
static void diag_build_time_mask_update(uint8_t *buf,
@@ -578,11 +579,11 @@
__func__, range->ssid_first, range->ssid_last);
return;
}
-
+ mutex_lock(&driver->msg_mask_lock);
build_mask = (struct diag_msg_mask_t *)(driver->build_time_mask->ptr);
num_items = range->ssid_last - range->ssid_first + 1;
- for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
if (build_mask->ssid_first != range->ssid_first)
continue;
found = 1;
@@ -601,7 +602,7 @@
if (found)
goto end;
- new_size = (driver->msg_mask_tbl_count + 1) *
+ new_size = (driver->bt_msg_mask_tbl_count + 1) *
sizeof(struct diag_msg_mask_t);
temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
if (!temp) {
@@ -616,8 +617,9 @@
__func__, err);
goto end;
}
- driver->msg_mask_tbl_count += 1;
+ driver->bt_msg_mask_tbl_count += 1;
end:
+ mutex_unlock(&driver->msg_mask_lock);
return;
}
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index ebc37f6..e9683e0 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -361,13 +361,44 @@
diagfwd_channel_read(glink_info->fwd_ctxt);
}
+struct diag_glink_read_work {
+ struct diag_glink_info *glink_info;
+ const void *ptr_read_done;
+ const void *ptr_rx_done;
+ size_t ptr_read_size;
+ struct work_struct work;
+};
+
+static void diag_glink_notify_rx_work_fn(struct work_struct *work)
+{
+ struct diag_glink_read_work *read_work = container_of(work,
+ struct diag_glink_read_work, work);
+ struct diag_glink_info *glink_info = read_work->glink_info;
+
+ if (!glink_info || !glink_info->hdl) {
+ kfree(read_work);
+ return;
+ }
+
+ diagfwd_channel_read_done(glink_info->fwd_ctxt,
+ (unsigned char *)(read_work->ptr_read_done),
+ read_work->ptr_read_size);
+
+ glink_rx_done(glink_info->hdl, read_work->ptr_rx_done, false);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Rx done for packet %pK of len: %d periph: %d ch: %d\n",
+ read_work->ptr_rx_done, (int)read_work->ptr_read_size,
+ glink_info->peripheral, glink_info->type);
+ kfree(read_work);
+}
static void diag_glink_notify_rx(void *hdl, const void *priv,
const void *pkt_priv, const void *ptr,
size_t size)
{
struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
- int err = 0;
+ struct diag_glink_read_work *read_work;
if (!glink_info || !glink_info->hdl || !ptr || !pkt_priv || !hdl)
return;
@@ -379,12 +410,25 @@
"diag: received a packet %pK of len:%d from periph:%d ch:%d\n",
ptr, (int)size, glink_info->peripheral, glink_info->type);
+ read_work = kmalloc(sizeof(*read_work), GFP_ATOMIC);
+ if (!read_work) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Could not allocate read_work\n");
+ glink_rx_done(glink_info->hdl, ptr, true);
+ return;
+ }
+
memcpy((void *)pkt_priv, ptr, size);
- err = diagfwd_channel_read_done(glink_info->fwd_ctxt,
- (unsigned char *)pkt_priv, size);
- glink_rx_done(glink_info->hdl, ptr, false);
+
+ read_work->glink_info = glink_info;
+ read_work->ptr_read_done = pkt_priv;
+ read_work->ptr_rx_done = ptr;
+ read_work->ptr_read_size = size;
+ INIT_WORK(&read_work->work, diag_glink_notify_rx_work_fn);
+ queue_work(glink_info->wq, &read_work->work);
+
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
- "diag: Rx done for packet %pK of len:%d periph:%d ch:%d\n",
+ "diag: Rx queued for packet %pK of len: %d periph: %d ch: %d\n",
ptr, (int)size, glink_info->peripheral, glink_info->type);
}
@@ -462,6 +506,45 @@
return err;
}
+
+static void diag_glink_connect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ connect_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 1);
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ diagfwd_late_open(glink_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink channel open: p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ remote_disconnect_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 0);
+ diagfwd_channel_close(glink_info->fwd_ctxt);
+ atomic_set(&glink_info->tx_intent_ready, 0);
+}
+
+static void diag_glink_late_init_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ late_init_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink late init p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
+}
+
static void diag_glink_transport_notify_state(void *handle, const void *priv,
unsigned int event)
{
@@ -475,9 +558,7 @@
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s received channel connect for periph:%d\n",
glink_info->name, glink_info->peripheral);
- atomic_set(&glink_info->opened, 1);
- diagfwd_channel_open(glink_info->fwd_ctxt);
- diagfwd_late_open(glink_info->fwd_ctxt);
+ queue_work(glink_info->wq, &glink_info->connect_work);
break;
case GLINK_LOCAL_DISCONNECTED:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -489,9 +570,7 @@
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s received channel remote disconnect for periph:%d\n",
glink_info->name, glink_info->peripheral);
- atomic_set(&glink_info->opened, 0);
- diagfwd_channel_close(glink_info->fwd_ctxt);
- atomic_set(&glink_info->tx_intent_ready, 0);
+ queue_work(glink_info->wq, &glink_info->remote_disconnect_work);
break;
default:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -596,7 +675,7 @@
glink_info->inited = 1;
if (atomic_read(&glink_info->opened))
- diagfwd_channel_open(glink_info->fwd_ctxt);
+ queue_work(glink_info->wq, &(glink_info->late_init_work));
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
glink_info->name);
@@ -641,6 +720,10 @@
INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+ INIT_WORK(&(glink_info->connect_work), diag_glink_connect_work_fn);
+ INIT_WORK(&(glink_info->remote_disconnect_work),
+ diag_glink_remote_disconnect_work_fn);
+ INIT_WORK(&(glink_info->late_init_work), diag_glink_late_init_work_fn);
link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
link_info.transport = NULL;
link_info.edge = glink_info->edge;
@@ -681,6 +764,8 @@
struct diag_glink_info *glink_info = NULL;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
glink_info = &glink_cntl[peripheral];
__diag_glink_init(glink_info);
diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
@@ -719,6 +804,8 @@
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
__diag_glink_exit(&glink_cntl[peripheral]);
glink_unregister_link_state_cb(&glink_cntl[peripheral].hdl);
}
@@ -729,6 +816,8 @@
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
__diag_glink_exit(&glink_data[peripheral]);
__diag_glink_exit(&glink_cmd[peripheral]);
__diag_glink_exit(&glink_dci[peripheral]);
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
index 73f2fe8..6cad445 100644
--- a/drivers/char/diag/diagfwd_glink.h
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -35,6 +35,9 @@
struct work_struct open_work;
struct work_struct close_work;
struct work_struct read_work;
+ struct work_struct connect_work;
+ struct work_struct remote_disconnect_work;
+ struct work_struct late_init_work;
struct diagfwd_info *fwd_ctxt;
};
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 119f5ac..dd5a552 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -668,7 +668,16 @@
dest_info->buf_ptr[i] = fwd_info->buf_ptr[i];
if (!check_channel_state(dest_info->ctxt))
diagfwd_late_open(dest_info);
- diagfwd_cntl_open(dest_info);
+
+ /*
+ * Open control channel to update masks after buffers are
+ * initialized for peripherals that have transport other than
+ * GLINK. GLINK supported peripheral mask update will
+ * happen after glink buffers are initialized.
+ */
+
+ if (dest_info->transport != TRANSPORT_GLINK)
+ diagfwd_cntl_open(dest_info);
init_fn(peripheral);
mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]);
diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
@@ -851,7 +860,18 @@
mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
fwd_info->ch_open = 1;
diagfwd_buffers_init(fwd_info);
- diagfwd_write_buffers_init(fwd_info);
+
+ /*
+ * Initialize buffers for glink supported
+ * peripherals only. Open control channel to update
+ * masks after buffers are initialized.
+ */
+ if (fwd_info->transport == TRANSPORT_GLINK) {
+ diagfwd_write_buffers_init(fwd_info);
+ if (fwd_info->type == TYPE_CNTL)
+ diagfwd_cntl_open(fwd_info);
+ }
+
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open)
fwd_info->c_ops->open(fwd_info);
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
@@ -877,6 +897,9 @@
if (!fwd_info)
return -EIO;
+ if (fwd_info->type == TYPE_CNTL)
+ flush_workqueue(driver->cntl_wq);
+
mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
fwd_info->ch_open = 0;
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index a7511a1..8069b36 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -59,7 +59,7 @@
/*
* Assigned numbers, used for dynamic minors
*/
-#define DYNAMIC_MINORS 75 /* like dynamic majors */
+#define DYNAMIC_MINORS 64 /* like dynamic majors */
static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
#ifdef CONFIG_PROC_FS
diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c
index 92d9399..8612112 100644
--- a/drivers/char/rdbg.c
+++ b/drivers/char/rdbg.c
@@ -22,7 +22,7 @@
#include <linux/uaccess.h>
#include <linux/interrupt.h>
-#define SMP2P_NUM_PROCS 8
+#define SMP2P_NUM_PROCS 16
#define MAX_RETRIES 20
#define SM_VERSION 1
@@ -146,9 +146,17 @@
{"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/
{0}, /*SMP2P_RESERVED_PROC_1*/
{"rdbg_wcnss", 0, 0}, /*WCNSS*/
- {0}, /*SMP2P_RESERVED_PROC_2*/
- {0}, /*SMP2P_POWER_PROC*/
- {0} /*SMP2P_REMOTE_MOCK_PROC*/
+ {"rdbg_cdsp", SMEM_LC_DEBUGGER, 16*1024}, /*CDSP*/
+ {NULL}, /*SMP2P_POWER_PROC*/
+ {NULL}, /*SMP2P_TZ_PROC*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL} /*SMP2P_REMOTE_MOCK_PROC*/
};
static int smq_blockmap_get(struct smq_block_map *block_map,
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index a017ccd..9ff8532 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -130,6 +130,41 @@
kfree(chip);
}
+
+/**
+ * tpm_class_shutdown() - prepare the TPM device for loss of power.
+ * @dev: device to which the chip is associated.
+ *
+ * Issues a TPM2_Shutdown command prior to loss of power, as required by the
+ * TPM 2.0 spec.
+ * Then, calls bus- and device- specific shutdown code.
+ *
+ * XXX: This codepath relies on the fact that sysfs is not enabled for
+ * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
+ * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ */
+static int tpm_class_shutdown(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ down_write(&chip->ops_sem);
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
+ }
+ /* Allow bus- and device-specific code to run. Note: since chip->ops
+ * is NULL, more-specific shutdown code will not be able to issue TPM
+ * commands.
+ */
+ if (dev->bus && dev->bus->shutdown)
+ dev->bus->shutdown(dev);
+ else if (dev->driver && dev->driver->shutdown)
+ dev->driver->shutdown(dev);
+ return 0;
+}
+
+
/**
* tpm_chip_alloc() - allocate a new struct tpm_chip instance
* @pdev: device to which the chip is associated
@@ -168,6 +203,7 @@
device_initialize(&chip->dev);
chip->dev.class = tpm_class;
+ chip->dev.class->shutdown = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index a76ab4a..edf8c59 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -35,9 +35,10 @@
ssize_t err;
int i, rc;
char *str = buf;
-
struct tpm_chip *chip = to_tpm_chip(dev);
+ memset(&tpm_cmd, 0, sizeof(tpm_cmd));
+
tpm_cmd.header.in = tpm_readpubek_header;
err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, 0,
"attempting to read the PUBEK");
@@ -284,6 +285,11 @@
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
+ /* XXX: If you wish to remove this restriction, you must first update
+ * tpm_sysfs to explicitly lock chip->ops.
+ */
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return;
/* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
* is called before ops is null'd and the sysfs core synchronizes this
* removal so that no callbacks are running or can run again
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index bb7c862..3e13186 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -283,10 +283,14 @@
const struct clk_div_table *table, u8 width,
unsigned long flags)
{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
int i, bestdiv = 0;
unsigned long parent_rate, best = 0, now, maxdiv;
unsigned long parent_rate_saved = *best_parent_rate;
+ if (!parent)
+ return -EINVAL;
+
if (!rate)
rate = 1;
@@ -317,8 +321,7 @@
*best_parent_rate = parent_rate_saved;
return i;
}
- parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
- rate * i);
+ parent_rate = clk_hw_round_rate(parent, rate * i);
now = DIV_ROUND_UP_ULL((u64)parent_rate, i);
if (_is_best_div(rate, now, best, flags)) {
bestdiv = i;
@@ -329,7 +332,7 @@
if (!bestdiv) {
bestdiv = _get_maxdiv(table, width, flags);
- *best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
+ *best_parent_rate = clk_hw_round_rate(parent, 1);
}
return bestdiv;
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 3ca8e1c..fa0ca36 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -92,7 +92,7 @@
} else if (br->halt_check == BRANCH_HALT_ENABLE ||
br->halt_check == BRANCH_HALT ||
(enabling && voted)) {
- int count = 200;
+ int count = 500;
while (count-- > 0) {
if (check_halt(br, enabling))
@@ -287,20 +287,30 @@
static int clk_branch2_prepare(struct clk_hw *hw)
{
- struct clk_branch *branch = to_clk_branch(hw);
- struct clk_hw *parent = clk_hw_get_parent(hw);
- unsigned long curr_rate, branch_rate = branch->rate;
+ struct clk_branch *branch;
+ struct clk_hw *parent;
+ unsigned long curr_rate;
int ret = 0;
+ if (!hw)
+ return -EINVAL;
+
+ branch = to_clk_branch(hw);
+ parent = clk_hw_get_parent(hw);
+ if (!branch)
+ return -EINVAL;
+
/*
* Do the rate aggregation and scaling of the RCG in the prepare/
* unprepare functions to avoid potential RPM(/h) communication due to
* votes on the voltage rails.
*/
if (branch->aggr_sibling_rates) {
+ if (!parent)
+ return -EINVAL;
curr_rate = clk_aggregate_rate(hw, parent->core);
- if (branch_rate > curr_rate) {
- ret = clk_set_rate(parent->clk, branch_rate);
+ if (branch->rate > curr_rate) {
+ ret = clk_set_rate(parent->clk, branch->rate);
if (ret)
goto exit;
}
@@ -316,13 +326,23 @@
static void clk_branch2_unprepare(struct clk_hw *hw)
{
- struct clk_branch *branch = to_clk_branch(hw);
- struct clk_hw *parent = clk_hw_get_parent(hw);
- unsigned long curr_rate, new_rate, branch_rate = branch->rate;
+ struct clk_branch *branch;
+ struct clk_hw *parent;
+ unsigned long curr_rate, new_rate;
+
+ if (!hw)
+ return;
+
+ branch = to_clk_branch(hw);
+ parent = clk_hw_get_parent(hw);
+ if (!branch)
+ return;
if (branch->aggr_sibling_rates) {
+ if (!parent)
+ return;
new_rate = clk_aggregate_rate(hw, parent->core);
- curr_rate = max(new_rate, branch_rate);
+ curr_rate = max(new_rate, branch->rate);
if (new_rate < curr_rate)
if (clk_set_rate(parent->clk, new_rate))
pr_err("Failed to scale %s to %lu\n",
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index f12f03d..47e9fab 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -150,12 +150,17 @@
#define DATA_MEM(n) (0x400 + (n) * 4)
-#define DCVS_PERF_STATE_DESIRED_REG_0 0x780
-#define DCVS_PERF_STATE_DESIRED_REG(n) (DCVS_PERF_STATE_DESIRED_REG_0 + \
- (4 * n))
-#define OSM_CYCLE_COUNTER_STATUS_REG_0 0x7d0
-#define OSM_CYCLE_COUNTER_STATUS_REG(n) (OSM_CYCLE_COUNTER_STATUS_REG_0 + \
- (4 * n))
+#define DCVS_PERF_STATE_DESIRED_REG_0_V1 0x780
+#define DCVS_PERF_STATE_DESIRED_REG_0_V2 0x920
+#define DCVS_PERF_STATE_DESIRED_REG(n, v2) \
+ (((v2) ? DCVS_PERF_STATE_DESIRED_REG_0_V2 \
+ : DCVS_PERF_STATE_DESIRED_REG_0_V1) + 4 * (n))
+
+#define OSM_CYCLE_COUNTER_STATUS_REG_0_V1 0x7d0
+#define OSM_CYCLE_COUNTER_STATUS_REG_0_V2 0x9c0
+#define OSM_CYCLE_COUNTER_STATUS_REG(n, v2) \
+ (((v2) ? OSM_CYCLE_COUNTER_STATUS_REG_0_V2 \
+ : OSM_CYCLE_COUNTER_STATUS_REG_0_V1) + 4 * (n))
/* ACD registers */
#define ACD_HW_VERSION 0x0
@@ -444,6 +449,8 @@
return 0;
}
+static bool is_v2;
+
static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
{
return container_of(_hw, struct clk_osm, hw);
@@ -511,6 +518,9 @@
int i;
unsigned long rrate = 0;
+ if (!hw)
+ return -EINVAL;
+
/*
* If the rate passed in is 0, return the first frequency in the
* FMAX table.
@@ -604,8 +614,8 @@
}
pr_debug("rate: %lu --> index %d\n", rate, index);
- clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG_0,
- OSM_BASE);
+ clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG(0, is_v2),
+ OSM_BASE);
/* Make sure the write goes through before proceeding */
clk_osm_mb(cpuclk, OSM_BASE);
@@ -622,7 +632,7 @@
if (!cpuclk)
return -EINVAL;
- index = clk_osm_read_reg(cpuclk, DCVS_PERF_STATE_DESIRED_REG_0);
+ index = clk_osm_read_reg(cpuclk, DCVS_PERF_STATE_DESIRED_REG(0, is_v2));
pr_debug("%s: Index %d, freq %ld\n", __func__, index,
cpuclk->osm_table[index].frequency);
@@ -890,7 +900,8 @@
static void
osm_set_index(struct clk_osm *c, unsigned int index, unsigned int num)
{
- clk_osm_write_reg(c, index, DCVS_PERF_STATE_DESIRED_REG(num), OSM_BASE);
+ clk_osm_write_reg(c, index, DCVS_PERF_STATE_DESIRED_REG(num, is_v2),
+ OSM_BASE);
/* Make sure the write goes through before proceeding */
clk_osm_mb(c, OSM_BASE);
@@ -915,8 +926,8 @@
return 0;
c = policy->driver_data;
- index = clk_osm_read_reg(c, DCVS_PERF_STATE_DESIRED_REG(c->core_num));
-
+ index = clk_osm_read_reg(c,
+ DCVS_PERF_STATE_DESIRED_REG(c->core_num, is_v2));
return policy->freq_table[index].frequency;
}
@@ -1872,6 +1883,7 @@
static u64 clk_osm_get_cpu_cycle_counter(int cpu)
{
u32 val;
+ int core_num;
unsigned long flags;
struct clk_osm *parent, *c = logical_cpu_to_clk(cpu);
@@ -1887,12 +1899,9 @@
* Use core 0's copy as proxy for the whole cluster when per
* core DCVS is disabled.
*/
- if (parent->per_core_dcvs)
- val = clk_osm_read_reg_no_log(parent,
- OSM_CYCLE_COUNTER_STATUS_REG(c->core_num));
- else
- val = clk_osm_read_reg_no_log(parent,
- OSM_CYCLE_COUNTER_STATUS_REG(0));
+ core_num = parent->per_core_dcvs ? c->core_num : 0;
+ val = clk_osm_read_reg_no_log(parent,
+ OSM_CYCLE_COUNTER_STATUS_REG(core_num, is_v2));
if (val < c->prev_cycle_counter) {
/* Handle counter overflow */
@@ -2061,6 +2070,10 @@
c->osm_table[j].override_data,
c->osm_table[j].mem_acc_level);
+ data = (array[i + FREQ_DATA] & GENMASK(29, 28)) >> 28;
+ if (j && !c->min_cpr_vc && !data)
+ c->min_cpr_vc = c->osm_table[j].virtual_corner;
+
data = (array[i + FREQ_DATA] & GENMASK(18, 16)) >> 16;
if (!last_entry && data == MAX_CORE_COUNT) {
fmax_temp[k] = array[i];
@@ -2243,9 +2256,6 @@
u32 *array;
int rc = 0;
struct resource *res;
- char l3_min_cpr_vc_str[] = "qcom,l3-min-cpr-vc-bin0";
- char pwrcl_min_cpr_vc_str[] = "qcom,pwrcl-min-cpr-vc-bin0";
- char perfcl_min_cpr_vc_str[] = "qcom,perfcl-min-cpr-vc-bin0";
array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
GFP_KERNEL);
@@ -2463,35 +2473,6 @@
return -ENOMEM;
}
- snprintf(l3_min_cpr_vc_str, ARRAY_SIZE(l3_min_cpr_vc_str),
- "qcom,l3-min-cpr-vc-bin%d", l3_clk.speedbin);
- rc = of_property_read_u32(of, l3_min_cpr_vc_str, &l3_clk.min_cpr_vc);
- if (rc) {
- dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
- l3_min_cpr_vc_str, rc);
- return -EINVAL;
- }
-
- snprintf(pwrcl_min_cpr_vc_str, ARRAY_SIZE(pwrcl_min_cpr_vc_str),
- "qcom,pwrcl-min-cpr-vc-bin%d", pwrcl_clk.speedbin);
- rc = of_property_read_u32(of, pwrcl_min_cpr_vc_str,
- &pwrcl_clk.min_cpr_vc);
- if (rc) {
- dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
- pwrcl_min_cpr_vc_str, rc);
- return -EINVAL;
- }
-
- snprintf(perfcl_min_cpr_vc_str, ARRAY_SIZE(perfcl_min_cpr_vc_str),
- "qcom,perfcl-min-cpr-vc-bin%d", perfcl_clk.speedbin);
- rc = of_property_read_u32(of, perfcl_min_cpr_vc_str,
- &perfcl_clk.min_cpr_vc);
- if (rc) {
- dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
- perfcl_min_cpr_vc_str, rc);
- return -EINVAL;
- }
-
l3_clk.secure_init = perfcl_clk.secure_init = pwrcl_clk.secure_init =
of_property_read_bool(pdev->dev.of_node, "qcom,osm-no-tz");
@@ -3025,6 +3006,9 @@
return PTR_ERR(ext_xo_clk);
}
+ is_v2 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,clk-cpu-osm-v2");
+
clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
GFP_KERNEL);
if (!clk_data)
@@ -3037,18 +3021,6 @@
clk_data->clk_num = num_clks;
- rc = clk_osm_parse_dt_configs(pdev);
- if (rc) {
- dev_err(&pdev->dev, "Unable to parse OSM device tree configurations\n");
- return rc;
- }
-
- rc = clk_osm_parse_acd_dt_configs(pdev);
- if (rc) {
- dev_err(&pdev->dev, "Unable to parse ACD device tree configurations\n");
- return rc;
- }
-
rc = clk_osm_resources_init(pdev);
if (rc) {
if (rc != -EPROBE_DEFER)
@@ -3057,13 +3029,6 @@
return rc;
}
- rc = clk_osm_acd_resources_init(pdev);
- if (rc) {
- dev_err(&pdev->dev, "ACD resources init failed, rc=%d\n",
- rc);
- return rc;
- }
-
if (l3_clk.vbases[EFUSE_BASE]) {
/* Multiple speed-bins are supported */
pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
@@ -3123,6 +3088,25 @@
return rc;
}
+ rc = clk_osm_parse_dt_configs(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to parse OSM device tree configurations\n");
+ return rc;
+ }
+
+ rc = clk_osm_parse_acd_dt_configs(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to parse ACD device tree configurations\n");
+ return rc;
+ }
+
+ rc = clk_osm_acd_resources_init(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "ACD resources init failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
if (rc) {
if (rc == -EPROBE_DEFER)
@@ -3313,8 +3297,10 @@
rc);
goto provider_err;
}
- WARN(clk_prepare_enable(l3_clk.hw.clk),
- "clk: Failed to enable clock for L3\n");
+ WARN(clk_prepare_enable(l3_cluster0_vote_clk.hw.clk),
+ "clk: Failed to enable cluster0 clock for L3\n");
+ WARN(clk_prepare_enable(l3_cluster1_vote_clk.hw.clk),
+ "clk: Failed to enable cluster1 clock for L3\n");
udelay(300);
/* Configure default rate to lowest frequency */
@@ -3352,6 +3338,7 @@
static const struct of_device_id match_table[] = {
{ .compatible = "qcom,clk-cpu-osm" },
+ { .compatible = "qcom,clk-cpu-osm-v2" },
{}
};
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b63c3c3..dd69b31 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -934,10 +934,11 @@
EXPORT_SYMBOL_GPL(clk_byte2_ops);
static const struct frac_entry frac_table_pixel[] = {
+ { 1, 1 },
+ { 2, 3 },
+ { 4, 9 },
{ 3, 8 },
{ 2, 9 },
- { 4, 9 },
- { 1, 1 },
{ }
};
@@ -1028,6 +1029,7 @@
unsigned long parent_rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct clk_hw *parent = clk_hw_get_parent(hw);
struct freq_tbl f = { 0 };
unsigned long src_rate;
unsigned long num, den;
@@ -1035,7 +1037,12 @@
u32 hid_div, cfg;
int i, num_parents = clk_hw_get_num_parents(hw);
- src_rate = clk_get_rate(clk_hw_get_parent(hw)->clk);
+ if (!parent) {
+ pr_err("RCG parent isn't initialized\n");
+ return -EINVAL;
+ }
+
+ src_rate = clk_get_rate(parent->clk);
if (src_rate <= 0) {
pr_err("Invalid RCG parent rate\n");
return -EINVAL;
@@ -1196,13 +1203,15 @@
u32 *mode, u32 *pre_div)
{
struct clk_rcg2 *rcg;
- int num_parents = clk_hw_get_num_parents(hw);
+ int num_parents;
u32 cfg, mask;
int i, ret;
if (!hw)
return -EINVAL;
+ num_parents = clk_hw_get_num_parents(hw);
+
rcg = to_clk_rcg2(hw);
ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + offset, &cfg);
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
index 5348491..c314d2c 100644
--- a/drivers/clk/qcom/clk-regmap-divider.c
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -28,8 +28,10 @@
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
- return divider_round_rate(hw, rate, prate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST);
+ return divider_round_rate(hw, rate, prate, divider->table,
+ divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST |
+ divider->flags);
}
static int div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -39,8 +41,9 @@
struct clk_regmap *clkr = ÷r->clkr;
u32 div;
- div = divider_get_val(rate, parent_rate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST);
+ div = divider_get_val(rate, parent_rate, divider->table,
+ divider->width, CLK_DIVIDER_ROUND_CLOSEST |
+ divider->flags);
return regmap_update_bits(clkr->regmap, divider->reg,
(BIT(divider->width) - 1) << divider->shift,
@@ -58,8 +61,8 @@
div >>= divider->shift;
div &= BIT(divider->width) - 1;
- return divider_recalc_rate(hw, parent_rate, div, NULL,
- CLK_DIVIDER_ROUND_CLOSEST);
+ return divider_recalc_rate(hw, parent_rate, div, divider->table,
+ CLK_DIVIDER_ROUND_CLOSEST | divider->flags);
}
const struct clk_ops clk_regmap_div_ops = {
diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h
index fc4492e..1c5e087 100644
--- a/drivers/clk/qcom/clk-regmap-divider.h
+++ b/drivers/clk/qcom/clk-regmap-divider.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -18,10 +18,12 @@
#include "clk-regmap.h"
struct clk_regmap_div {
- u32 reg;
- u32 shift;
- u32 width;
- struct clk_regmap clkr;
+ u32 reg;
+ u32 shift;
+ u32 width;
+ u32 flags;
+ const struct clk_div_table *table;
+ struct clk_regmap clkr;
};
extern const struct clk_ops clk_regmap_div_ops;
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
index 1c856d3..aa024c2d 100644
--- a/drivers/clk/qcom/clk-regmap.c
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -103,9 +103,12 @@
*/
int devm_clk_register_regmap(struct device *dev, struct clk_regmap *rclk)
{
- if (dev && dev_get_regmap(dev, NULL))
+ if (!dev || !rclk)
+ return -EINVAL;
+
+ if (dev_get_regmap(dev, NULL))
rclk->regmap = dev_get_regmap(dev, NULL);
- else if (dev && dev->parent)
+ else if (dev->parent)
rclk->regmap = dev_get_regmap(dev->parent, NULL);
return devm_clk_hw_register(dev, &rclk->hw);
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 25f9d62..4e16155 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -53,7 +53,9 @@
P_CORE_BI_PLL_TEST_SE,
P_GPLL0_OUT_EVEN,
P_GPLL0_OUT_MAIN,
+ P_GPLL1_OUT_MAIN,
P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
P_SLEEP_CLK,
};
@@ -166,6 +168,36 @@
"core_bi_pll_test_se",
};
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL1_OUT_MAIN, 4 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_9[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll1",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
static struct clk_dummy measure_only_snoc_clk = {
.rrate = 1000,
.hw.init = &(struct clk_init_data){
@@ -225,6 +257,28 @@
},
};
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .type = FABIA_PLL,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_fabia_fixed_pll_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
+ },
+ },
+};
+
static const struct clk_div_table post_div_table_fabia_even[] = {
{ 0x0, 1 },
{ 0x1, 2 },
@@ -460,6 +514,7 @@
F(19200000, P_BI_TCXO, 1, 0, 0),
F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(38400000, P_GPLL0_OUT_EVEN, 1, 16, 125),
F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
@@ -814,6 +869,17 @@
F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(201500000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src_sdm845_v2[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
{ }
};
@@ -822,12 +888,12 @@
.cmd_rcgr = 0x1400c,
.mnd_width = 8,
.hid_width = 5,
- .parent_map = gcc_parent_map_5,
+ .parent_map = gcc_parent_map_10,
.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk_src",
- .parent_names = gcc_parent_names_5,
+ .parent_names = gcc_parent_names_10,
.num_parents = 5,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
@@ -835,7 +901,7 @@
MIN, 9600000,
LOWER, 19200000,
LOW, 100000000,
- LOW_L1, 200000000),
+ LOW_L1, 201500000),
},
};
@@ -1224,6 +1290,49 @@
},
};
+static struct clk_rcg2 gcc_vs_ctrl_clk_src = {
+ .cmd_rcgr = 0x7a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP1(
+ MIN, 19200000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vsensor_clk_src",
+ .parent_names = gcc_parent_names_9,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 19200000,
+ LOW, 300000000,
+ LOW_L1, 600000000),
+ },
+};
+
static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
.halt_reg = 0x90014,
.halt_check = BRANCH_HALT,
@@ -1347,6 +1456,24 @@
},
};
+static struct clk_branch gcc_apc_vs_clk = {
+ .halt_reg = 0x7a050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apc_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_boot_rom_ahb_clk = {
.halt_reg = 0x38004,
.halt_check = BRANCH_HALT_VOTED,
@@ -1774,6 +1901,24 @@
},
};
+static struct clk_branch gcc_gpu_vs_clk = {
+ .halt_reg = 0x7a04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_mss_axis2_clk = {
.halt_reg = 0x8a008,
.halt_check = BRANCH_HALT,
@@ -1855,6 +2000,24 @@
},
};
+static struct clk_branch gcc_mss_vs_clk = {
+ .halt_reg = 0x7a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pcie_0_aux_clk = {
.halt_reg = 0x6b01c,
.halt_check = BRANCH_HALT_VOTED,
@@ -1914,14 +2077,15 @@
},
};
-static struct clk_gate2 gcc_pcie_0_pipe_clk = {
- .udelay = 500,
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x5200c,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
- .ops = &clk_gate2_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2013,14 +2177,15 @@
},
};
-static struct clk_gate2 gcc_pcie_1_pipe_clk = {
- .udelay = 500,
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d020,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(30),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk",
- .ops = &clk_gate2_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -3322,6 +3487,60 @@
},
};
+static struct clk_branch gcc_vdda_vs_clk = {
+ .halt_reg = 0x7a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vdda_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddcx_vs_clk = {
+ .halt_reg = 0x7a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddcx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddmx_vs_clk = {
+ .halt_reg = 0x7a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddmx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_video_ahb_clk = {
.halt_reg = 0xb004,
.halt_check = BRANCH_HALT,
@@ -3363,6 +3582,39 @@
},
};
+static struct clk_branch gcc_vs_ctrl_ahb_clk = {
+ .halt_reg = 0x7a014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7a014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_clk = {
+ .halt_reg = 0x7a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk",
+ .parent_names = (const char *[]){
+ "gcc_vs_ctrl_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
struct clk_hw *gcc_sdm845_hws[] = {
[MEASURE_ONLY_SNOC_CLK] = &measure_only_snoc_clk.hw,
[MEASURE_ONLY_CNOC_CLK] = &measure_only_cnoc_clk.hw,
@@ -3380,6 +3632,7 @@
&gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
[GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
[GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
[GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
@@ -3413,12 +3666,14 @@
[GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr,
[GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
[GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
[GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
[GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
[GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr,
[GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
[GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
[GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
@@ -3558,11 +3813,19 @@
[GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
[GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
[GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr,
+ [GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr,
+ [GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr,
[GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
[GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
[GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr,
+ [GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr,
+ [GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
[GPLL0] = &gpll0.clkr,
[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
};
static const struct qcom_reset_map gcc_sdm845_resets[] = {
@@ -3740,6 +4003,9 @@
50000000;
gcc_qupv3_wrap1_s7_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
128000000;
+ gcc_sdcc2_apps_clk_src.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src_sdm845_v2;
+ gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+ 200000000;
gcc_ufs_card_axi_clk_src.freq_tbl =
ftbl_gcc_ufs_card_axi_clk_src_sdm845_v2;
gcc_ufs_card_axi_clk_src.clkr.hw.init->rate_max[VDD_CX_HIGH] =
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index b2f6a3c..5f1b1ef 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -36,6 +36,11 @@
#include "clk-alpha-pll.h"
#include "vdd-level-sdm845.h"
+#define CX_GMU_CBCR_SLEEP_MASK 0xF
+#define CX_GMU_CBCR_SLEEP_SHIFT 4
+#define CX_GMU_CBCR_WAKE_MASK 0xF
+#define CX_GMU_CBCR_WAKE_SHIFT 8
+
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
static int vdd_gx_corner[] = {
@@ -648,6 +653,7 @@
{
struct regmap *regmap;
int ret = 0;
+ unsigned int value, mask;
regmap = qcom_cc_map(pdev, &gpu_cc_sdm845_desc);
if (IS_ERR(regmap))
@@ -668,6 +674,12 @@
return ret;
}
+ mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+ mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+ value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
+ regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg,
+ mask, value);
+
dev_info(&pdev->dev, "Registered GPU CC clocks\n");
return ret;
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 93ad1b0..eb6c658 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -94,7 +94,6 @@
struct dsi_pll_regs {
u32 pll_prop_gain_rate;
- u32 pll_outdiv_rate;
u32 pll_lockdet_rate;
u32 decimal_div_start;
u32 frac_div_start_low;
@@ -134,6 +133,165 @@
struct dsi_pll_regs reg_setup;
};
+static inline int pll_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = MDSS_PLL_REG_R(rsc->pll_base, reg);
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+static inline int pll_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ MDSS_PLL_REG_W(rsc->pll_base, reg, val);
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+static inline int phy_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+static inline int phy_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ MDSS_PLL_REG_W(rsc->phy_base, reg, val);
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+static inline int phy_reg_update_bits_sub(struct mdss_pll_resources *rsc,
+ unsigned int reg, unsigned int mask, unsigned int val)
+{
+ u32 reg_val;
+ int rc = 0;
+
+ reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+ reg_val &= ~mask;
+ reg_val |= (val & mask);
+ MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
+
+ return rc;
+}
+
+static inline int phy_reg_update_bits(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = phy_reg_update_bits_sub(rsc, reg, mask, val);
+ if (!rc && rsc->slave)
+ rc = phy_reg_update_bits_sub(rsc->slave, reg, mask, val);
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+static inline int pclk_mux_read_sel(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc)
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ else
+ *val = (MDSS_PLL_REG_R(rsc->pll_base, reg) & 0x3);
+
+ (void)mdss_pll_resource_enable(rsc, false);
+ return rc;
+}
+
+
+static inline int pclk_mux_write_sel_sub(struct mdss_pll_resources *rsc,
+ unsigned int reg, unsigned int val)
+{
+ u32 reg_val;
+ int rc = 0;
+
+ reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+ reg_val &= ~0x03;
+ reg_val |= val;
+
+ MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
+
+ return rc;
+}
+
+static inline int pclk_mux_write_sel(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = pclk_mux_write_sel_sub(rsc, reg, val);
+ if (!rc && rsc->slave)
+ rc = pclk_mux_write_sel_sub(rsc->slave, reg, val);
+
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
static struct dsi_pll_10nm plls[DSI_PLL_MAX];
@@ -203,54 +361,14 @@
{
struct dsi_pll_config *config = &pll->pll_configuration;
struct dsi_pll_regs *regs = &pll->reg_setup;
- u64 target_freq;
u64 fref = rsc->vco_ref_clk_rate;
- u32 computed_output_div, div_log = 0;
u64 pll_freq;
u64 divider;
u64 dec, dec_multiple;
u32 frac;
u64 multiplier;
- u32 i;
- target_freq = rsc->vco_current_rate;
- pr_debug("target_freq = %llu\n", target_freq);
-
- if (config->div_override) {
- computed_output_div = config->output_div;
-
- /*
- * Computed_output_div = 2 ^ div_log
- * To get div_log from output div just get the index of the
- * 1 bit in the value.
- * div_log ranges from 0-3. so check the 4 lsbs
- */
-
- for (i = 0; i < 4; i++) {
- if (computed_output_div & (1 << i)) {
- div_log = i;
- break;
- }
- }
-
- } else {
- if (target_freq < MHZ_250) {
- computed_output_div = 8;
- div_log = 3;
- } else if (target_freq < MHZ_500) {
- computed_output_div = 4;
- div_log = 2;
- } else if (target_freq < MHZ_1000) {
- computed_output_div = 2;
- div_log = 1;
- } else {
- computed_output_div = 1;
- div_log = 0;
- }
- }
- pr_debug("computed_output_div = %d\n", computed_output_div);
-
- pll_freq = target_freq * computed_output_div;
+ pll_freq = rsc->vco_current_rate;
if (config->disable_prescaler)
divider = fref;
@@ -274,7 +392,6 @@
else
regs->pll_clock_inverters = 0;
- regs->pll_outdiv_rate = div_log;
regs->pll_lockdet_rate = config->lock_timer;
regs->decimal_div_start = dec;
regs->frac_div_start_low = (frac & 0xff);
@@ -394,7 +511,6 @@
MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
reg->frac_div_start_high);
MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate);
MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
@@ -605,7 +721,9 @@
}
pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- pr_debug("cfg0=%d,cfg1=%d\n", pll->cached_cfg0, pll->cached_cfg1);
+ pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
+ pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
+ pll->cached_cfg1, pll->cached_outdiv);
pll->vco_cached_rate = clk_hw_get_rate(hw);
dsi_pll_disable(vco);
@@ -646,6 +764,8 @@
pll->cached_cfg0);
MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1,
pll->cached_cfg1);
+ MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
+ pll->cached_outdiv);
}
rc = dsi_pll_enable(vco);
@@ -855,176 +975,6 @@
return rc;
}
-static int post_vco_clk_get_div(void *context, unsigned int reg,
- unsigned int *div)
-{
- int rc;
- struct mdss_pll_resources *pll = context;
- u32 reg_val;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= 0x3;
-
- if (reg_val == 2)
- *div = 1;
- else if (reg_val == 3)
- *div = 4;
- else
- *div = 1;
-
- /**
- *Common clock framework the divider value is interpreted as one less
- * hence we return one less for all dividers except when zero
- */
- if (*div != 0)
- *div -= 1;
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return rc;
-}
-
-static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
- u32 reg_val;
- int rc = 0;
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= ~0x03;
- if (div == 1) {
- reg_val |= 0x2;
- } else if (div == 4) {
- reg_val |= 0x3;
- } else {
- rc = -EINVAL;
- pr_err("unsupported divider %d\n", div);
- goto error;
- }
-
- MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
-
-error:
- return rc;
-}
-
-static int post_vco_clk_set_div(void *context, unsigned int reg,
- unsigned int div)
-{
- int rc = 0;
- struct mdss_pll_resources *pll = context;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- /**
- * In common clock framework the divider value provided is one less and
- * and hence adjusting the divider value by one prior to writing it to
- * hardware
- */
- div++;
- rc = post_vco_clk_set_div_sub(pll, div);
- if (!rc && pll->slave)
- rc = post_vco_clk_set_div_sub(pll->slave, div);
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return rc;
-}
-
-static int post_bit_clk_get_div(void *context, unsigned int reg,
- unsigned int *div)
-{
- int rc;
- struct mdss_pll_resources *pll = context;
- u32 reg_val;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= 0x3;
-
- if (reg_val == 0)
- *div = 1;
- else if (reg_val == 1)
- *div = 2;
- else
- *div = 1;
-
- /**
- *Common clock framework the divider value is interpreted as one less
- * hence we return one less for all dividers except when zero
- */
- if (*div != 0)
- *div -= 1;
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return rc;
-}
-
-static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
- int rc = 0;
- u32 reg_val;
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= ~0x03;
- if (div == 1) {
- reg_val |= 0x0;
- } else if (div == 2) {
- reg_val |= 0x1;
- } else {
- rc = -EINVAL;
- pr_err("unsupported divider %d\n", div);
- goto error;
- }
-
- MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
-
-error:
- return rc;
-}
-
-static int post_bit_clk_set_div(void *context, unsigned int reg,
- unsigned int div)
-{
- int rc = 0;
- struct mdss_pll_resources *pll = context;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- /**
- * In common clock framework the divider value provided is one less and
- * and hence adjusting the divider value by one prior to writing it to
- * hardware
- */
- div++;
- rc = post_bit_clk_set_div_sub(pll, div);
- if (!rc && pll->slave)
- rc = post_bit_clk_set_div_sub(pll->slave, div);
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return rc;
-}
-
static struct regmap_config dsi_pll_10nm_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -1032,14 +982,14 @@
.max_register = 0x7c0,
};
-static struct regmap_bus post_vco_regmap_bus = {
- .reg_write = post_vco_clk_set_div,
- .reg_read = post_vco_clk_get_div,
+static struct regmap_bus pll_regmap_bus = {
+ .reg_write = pll_reg_write,
+ .reg_read = pll_reg_read,
};
-static struct regmap_bus post_bit_regmap_bus = {
- .reg_write = post_bit_clk_set_div,
- .reg_read = post_bit_clk_get_div,
+static struct regmap_bus pclk_mux_regmap_bus = {
+ .reg_read = phy_reg_read,
+ .reg_write = pclk_mux_write_sel,
};
static struct regmap_bus pclk_src_regmap_bus = {
@@ -1073,23 +1023,30 @@
* | vco_clk |
* +-------+-------+
* |
- * +--------------------------------------+
- * | |
- * +-------v-------+ |
- * | bitclk_src | |
- * | DIV(1..15) | |
- * +-------+-------+ |
- * | |
- * +--------------------+ |
- * Shadow Path | | |
- * + +-------v-------+ +------v------+ +------v-------+
- * | | byteclk_src | |post_bit_div | |post_vco_div |
- * | | DIV(8) | |DIV(1,2) | |DIV(1,4) |
- * | +-------+-------+ +------+------+ +------+-------+
- * | | | |
- * | | +------+ +----+
- * | +--------+ | |
- * | | +----v-----v------+
+ * |
+ * +---------------+
+ * | pll_out_div |
+ * | DIV(1,2,4,8) |
+ * +-------+-------+
+ * |
+ * +-----------------------------+--------+
+ * | | |
+ * +-------v-------+ | |
+ * | bitclk_src | | |
+ * | DIV(1..15) | | |
+ * +-------+-------+ | |
+ * | | |
+ * +----------+---------+ | |
+ * Shadow Path | | | | |
+ * + +-------v-------+ | +------v------+ | +------v-------+
+ * | | byteclk_src | | |post_bit_div | | |post_vco_div |
+ * | | DIV(8) | | |DIV (2) | | |DIV(4) |
+ * | +-------+-------+ | +------+------+ | +------+-------+
+ * | | | | | | |
+ * | | | +------+ | |
+ * | | +-------------+ | | +----+
+ * | +--------+ | | | |
+ * | | +-v--v-v---v------+
* +-v---------v----+ \ pclk_src_mux /
* \ byteclk_mux / \ /
* \ / +-----+-----+
@@ -1140,13 +1097,45 @@
},
};
+static struct clk_regmap_div dsi0pll_pll_out_div = {
+ .reg = PLL_PLL_OUTDIV_RATE,
+ .shift = 0,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_pll_out_div",
+ .parent_names = (const char *[]){"dsi0pll_vco_clk"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi1pll_pll_out_div = {
+ .reg = PLL_PLL_OUTDIV_RATE,
+ .shift = 0,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_pll_out_div",
+ .parent_names = (const char *[]){"dsi1pll_vco_clk"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
static struct clk_regmap_div dsi0pll_bitclk_src = {
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_bitclk_src",
- .parent_names = (const char *[]){"dsi0pll_vco_clk"},
+ .parent_names = (const char *[]){"dsi0pll_pll_out_div"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
@@ -1160,7 +1149,7 @@
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_bitclk_src",
- .parent_names = (const char *[]){"dsi1pll_vco_clk"},
+ .parent_names = (const char *[]){"dsi1pll_pll_out_div"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
@@ -1168,31 +1157,27 @@
},
};
-static struct clk_regmap_div dsi0pll_post_vco_div = {
- .shift = 0,
- .width = 2,
- .clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "dsi0pll_post_vco_div",
- .parent_names = (const char *[]){"dsi0pll_vco_clk"},
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_regmap_div_ops,
- },
+static struct clk_fixed_factor dsi0pll_post_vco_div = {
+ .div = 4,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_post_vco_div",
+ .parent_names = (const char *[]){"dsi0pll_pll_out_div"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
},
};
-static struct clk_regmap_div dsi1pll_post_vco_div = {
- .shift = 0,
- .width = 2,
- .clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "dsi1pll_post_vco_div",
- .parent_names = (const char *[]){"dsi1pll_vco_clk"},
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_regmap_div_ops,
- },
+static struct clk_fixed_factor dsi1pll_post_vco_div = {
+ .div = 4,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_post_vco_div",
+ .parent_names = (const char *[]){"dsi1pll_pll_out_div"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
},
};
@@ -1220,31 +1205,27 @@
},
};
-static struct clk_regmap_div dsi0pll_post_bit_div = {
- .shift = 0,
- .width = 1,
- .clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "dsi0pll_post_bit_div",
- .parent_names = (const char *[]){"dsi0pll_bitclk_src"},
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_regmap_div_ops,
- },
+static struct clk_fixed_factor dsi0pll_post_bit_div = {
+ .div = 2,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_post_bit_div",
+ .parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_fixed_factor_ops,
},
};
-static struct clk_regmap_div dsi1pll_post_bit_div = {
- .shift = 0,
- .width = 1,
- .clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "dsi1pll_post_bit_div",
- .parent_names = (const char *[]){"dsi1pll_bitclk_src"},
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_regmap_div_ops,
- },
+static struct clk_fixed_factor dsi1pll_post_bit_div = {
+ .div = 2,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_post_bit_div",
+ .parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_fixed_factor_ops,
},
};
@@ -1277,30 +1258,36 @@
};
static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
+ .reg = PHY_CMN_CLK_CFG1,
.shift = 0,
- .width = 1,
+ .width = 2,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_pclk_src_mux",
- .parent_names = (const char *[]){"dsi0pll_post_bit_div",
- "dsi0pll_post_vco_div"},
- .num_parents = 2,
- .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .parent_names = (const char *[]){"dsi0pll_bitclk_src",
+ "dsi0pll_post_bit_div",
+ "dsi0pll_pll_out_div",
+ "dsi0pll_post_vco_div"},
+ .num_parents = 4,
+ .flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_mux_closest_ops,
},
},
};
static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
+ .reg = PHY_CMN_CLK_CFG1,
.shift = 0,
- .width = 1,
+ .width = 2,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_pclk_src_mux",
- .parent_names = (const char *[]){"dsi1pll_post_bit_div",
- "dsi1pll_post_vco_div"},
- .num_parents = 2,
- .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .parent_names = (const char *[]){"dsi1pll_bitclk_src",
+ "dsi1pll_post_bit_div",
+ "dsi1pll_pll_out_div",
+ "dsi1pll_post_vco_div"},
+ .num_parents = 4,
+ .flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_mux_closest_ops,
},
},
@@ -1366,24 +1353,25 @@
static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
+ [PLL_OUT_DIV_0_CLK] = &dsi0pll_pll_out_div.clkr.hw,
[BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
- [POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.clkr.hw,
- [POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.clkr.hw,
+ [POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.hw,
+ [POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.hw,
[BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
+ [PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
- [POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.clkr.hw,
- [POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.clkr.hw,
+ [POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.hw,
+ [POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.hw,
[BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
-
};
int dsi_pll_clock_register_10nm(struct platform_device *pdev,
@@ -1428,13 +1416,10 @@
/* Establish client data */
if (ndx == 0) {
- rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
- pll_res, &dsi_pll_10nm_config);
- dsi0pll_post_vco_div.clkr.regmap = rmap;
- rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+ rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
pll_res, &dsi_pll_10nm_config);
- dsi0pll_post_bit_div.clkr.regmap = rmap;
+ dsi0pll_pll_out_div.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
pll_res, &dsi_pll_10nm_config);
@@ -1448,10 +1433,9 @@
pll_res, &dsi_pll_10nm_config);
dsi0pll_pclk_mux.clkr.regmap = rmap;
- rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+ rmap = devm_regmap_init(&pdev->dev, &pclk_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi0pll_pclk_src_mux.clkr.regmap = rmap;
-
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi0pll_byteclk_mux.clkr.regmap = rmap;
@@ -1475,13 +1459,9 @@
} else {
- rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+ rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
pll_res, &dsi_pll_10nm_config);
- dsi1pll_post_vco_div.clkr.regmap = rmap;
-
- rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
- pll_res, &dsi_pll_10nm_config);
- dsi1pll_post_bit_div.clkr.regmap = rmap;
+ dsi1pll_pll_out_div.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
pll_res, &dsi_pll_10nm_config);
@@ -1491,14 +1471,13 @@
pll_res, &dsi_pll_10nm_config);
dsi1pll_pclk_src.clkr.regmap = rmap;
- rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+ rmap = devm_regmap_init(&pdev->dev, &pclk_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_pclk_mux.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_pclk_src_mux.clkr.regmap = rmap;
-
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_byteclk_mux.clkr.regmap = rmap;
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 033462d..2f92270 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -99,6 +99,7 @@
unsigned long vco_cached_rate;
u32 cached_cfg0;
u32 cached_cfg1;
+ u32 cached_outdiv;
/* dsi/edp/hmdi pll interface type */
u32 pll_interface_type;
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 12eb6d8..a6edf2f 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -699,7 +699,8 @@
spin_lock_irqsave(&speedchange_cpumask_lock, flags);
cpumask_set_cpu(max_cpu, &speedchange_cpumask);
spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
- wake_up_process_no_notif(speedchange_task);
+
+ wake_up_process(speedchange_task);
rearm:
cpufreq_interactive_timer_resched(data, false);
@@ -814,7 +815,7 @@
spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
if (anyboost)
- wake_up_process_no_notif(speedchange_task);
+ wake_up_process(speedchange_task);
}
static int load_change_callback(struct notifier_block *nb, unsigned long val,
@@ -1926,7 +1927,7 @@
get_task_struct(speedchange_task);
/* NB: wake up so the thread does not look hung to the freezer */
- wake_up_process_no_notif(speedchange_task);
+ wake_up_process(speedchange_task);
return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
}
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 827ee8f..e11ea50 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -66,7 +66,8 @@
CPU_EXIT,
CLUSTER_ENTER,
CLUSTER_EXIT,
- PRE_PC_CB,
+ CPU_HP_STARTING,
+ CPU_HP_DYING,
};
struct lpm_debug {
@@ -324,6 +325,9 @@
{
struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+ update_debug_pc_event(CPU_HP_DYING, cpu,
+ cluster->num_children_in_sync.bits[0],
+ cluster->child_cpus.bits[0], false);
cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
return 0;
}
@@ -332,6 +336,9 @@
{
struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+ update_debug_pc_event(CPU_HP_STARTING, cpu,
+ cluster->num_children_in_sync.bits[0],
+ cluster->child_cpus.bits[0], false);
cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
return 0;
}
@@ -568,7 +575,7 @@
static int cpu_power_select(struct cpuidle_device *dev,
struct lpm_cpu *cpu)
{
- int best_level = -1;
+ int best_level = 0;
uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
dev->cpu);
s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
@@ -582,10 +589,7 @@
uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);
- if (!cpu)
- return -EINVAL;
-
- if (sleep_disabled || sleep_us < 0)
+ if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
return 0;
idx_restrict = cpu->nlevels + 1;
@@ -626,8 +630,8 @@
if (next_wakeup_us > max_residency[i]) {
predicted = lpm_cpuidle_predict(dev, cpu,
&idx_restrict, &idx_restrict_time);
- if (predicted < min_residency[i])
- predicted = 0;
+ if (predicted && (predicted < min_residency[i]))
+ predicted = min_residency[i];
} else
invalidate_predict_history(dev);
}
@@ -957,8 +961,9 @@
best_level = i;
- if (predicted ? (pred_us <= pwr_params->max_residency)
- : (sleep_us <= pwr_params->max_residency))
+ if (from_idle &&
+ (predicted ? (pred_us <= pwr_params->max_residency)
+ : (sleep_us <= pwr_params->max_residency)))
break;
}
@@ -1292,17 +1297,11 @@
struct cpuidle_device *dev)
{
struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
- int idx;
if (!cpu)
return 0;
- idx = cpu_power_select(dev, cpu);
-
- if (idx < 0)
- return 0;
-
- return idx;
+ return cpu_power_select(dev, cpu);
}
static void update_history(struct cpuidle_device *dev, int idx)
@@ -1357,7 +1356,7 @@
trace_cpu_idle_enter(idx);
lpm_stats_cpu_enter(idx, start_time);
- if (need_resched() || (idx < 0))
+ if (need_resched())
goto exit;
success = psci_enter_sleep(cpu, idx, true);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c310318..3bda6e5 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2601,8 +2601,7 @@
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, dst_nents = 0, sec4_sg_bytes;
struct ablkcipher_edesc *edesc;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index d9ebe113..82a316b 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -33,12 +33,17 @@
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <soc/qcom/socinfo.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
#include "qce.h"
#include "qce50.h"
#include "qcryptohw_50.h"
#include "qce_ota.h"
+#define CRYPTO_SMMU_IOVA_START 0x10000000
+#define CRYPTO_SMMU_IOVA_SIZE 0x40000000
+
#define CRYPTO_CONFIG_RESET 0xE01EF
#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
#define QCE_MAX_NUM_DSCR 0x200
@@ -156,6 +161,8 @@
atomic_t last_intr_seq;
bool cadence_flag;
uint8_t *dummyreq_in_buf;
+ struct dma_iommu_mapping *smmu_mapping;
+ bool bypass_s1_smmu;
};
static void print_notify_debug(struct sps_event_notify *notify);
@@ -5703,6 +5710,10 @@
pr_info("CE operating frequency is not defined, setting to default 100MHZ\n");
pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
}
+
+ if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-bypass"))
+ pce_dev->bypass_s1_smmu = true;
+
pce_dev->ce_bam_info.dest_pipe_index =
2 * pce_dev->ce_bam_info.pipe_pair_index;
pce_dev->ce_bam_info.src_pipe_index =
@@ -5936,6 +5947,48 @@
return 0;
}
+static void qce_iommu_release_iomapping(struct qce_device *pce_dev)
+{
+ if (pce_dev->smmu_mapping)
+ arm_iommu_release_mapping(pce_dev->smmu_mapping);
+
+ pce_dev->smmu_mapping = NULL;
+}
+
+static int qce_smmu_init(struct qce_device *pce_dev)
+{
+ struct dma_iommu_mapping *mapping;
+ int s1_bypass = 1;
+ int ret = 0;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ CRYPTO_SMMU_IOVA_START, CRYPTO_SMMU_IOVA_SIZE);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ pr_err("Create mapping failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+ if (ret < 0) {
+ pr_err("Set s1_bypass attribute failed, err = %d\n", ret);
+ goto ext_fail_set_attr;
+ }
+
+ ret = arm_iommu_attach_device(pce_dev->pdev, mapping);
+ if (ret < 0) {
+ pr_err("Attach device failed, err = %d\n", ret);
+ goto ext_fail_set_attr;
+ }
+ pce_dev->smmu_mapping = mapping;
+ return ret;
+
+ext_fail_set_attr:
+ qce_iommu_release_iomapping(pce_dev);
+ return ret;
+}
+
/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
@@ -5993,6 +6046,13 @@
if (*rc)
goto err_enable_clk;
+ if (pce_dev->bypass_s1_smmu) {
+ if (qce_smmu_init(pce_dev)) {
+ *rc = -EIO;
+ goto err_smmu;
+ }
+ }
+
if (_probe_ce_engine(pce_dev)) {
*rc = -ENXIO;
goto err;
@@ -6019,6 +6079,9 @@
mutex_unlock(&qce_iomap_mutex);
return pce_dev;
err:
+ if (pce_dev->bypass_s1_smmu)
+ qce_iommu_release_iomapping(pce_dev);
+err_smmu:
qce_disable_clk(pce_dev);
err_enable_clk:
@@ -6060,6 +6123,9 @@
kfree(pce_dev->dummyreq_in_buf);
kfree(pce_dev->iovec_vmem);
+ if (pce_dev->bypass_s1_smmu)
+ qce_iommu_release_iomapping(pce_dev);
+
qce_disable_clk(pce_dev);
__qce_deinit_clk(pce_dev);
mutex_unlock(&qce_iomap_mutex);
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 6e6f28f..3cc035c 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2564,7 +2564,7 @@
static int gpi_smmu_init(struct gpi_dev *gpi_dev)
{
- u64 size = U64_MAX;
+ u64 size = PAGE_SIZE;
dma_addr_t base = 0x0;
struct dma_iommu_mapping *map;
int attr, ret;
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 7c1e3a7..0e1d428 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -482,6 +482,21 @@
}
EXPORT_SYMBOL_GPL(extcon_sync);
+int extcon_blocking_sync(struct extcon_dev *edev, unsigned int id, bool val)
+{
+ int index;
+
+ if (!edev)
+ return -EINVAL;
+
+ index = find_cable_index_by_id(edev, id);
+ if (index < 0)
+ return index;
+
+ return blocking_notifier_call_chain(&edev->bnh[index], val, edev);
+}
+EXPORT_SYMBOL(extcon_blocking_sync);
+
/**
* extcon_get_state() - Get the state of a external connector.
* @edev: the extcon device that has the cable.
@@ -940,6 +955,38 @@
}
EXPORT_SYMBOL_GPL(extcon_register_notifier);
+int extcon_register_blocking_notifier(struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb)
+{
+ int idx = -EINVAL;
+
+ if (!edev || !nb)
+ return -EINVAL;
+
+ idx = find_cable_index_by_id(edev, id);
+ if (idx < 0)
+ return idx;
+
+ return blocking_notifier_chain_register(&edev->bnh[idx], nb);
+}
+EXPORT_SYMBOL(extcon_register_blocking_notifier);
+
+int extcon_unregister_blocking_notifier(struct extcon_dev *edev,
+ unsigned int id, struct notifier_block *nb)
+{
+ int idx;
+
+ if (!edev || !nb)
+ return -EINVAL;
+
+ idx = find_cable_index_by_id(edev, id);
+ if (idx < 0)
+ return idx;
+
+ return blocking_notifier_chain_unregister(&edev->bnh[idx], nb);
+}
+EXPORT_SYMBOL(extcon_unregister_blocking_notifier);
+
/**
* extcon_unregister_notifier() - Unregister a notifiee from the extcon device.
* @edev: the extcon device that has the external connecotr.
@@ -1222,6 +1269,13 @@
goto err_dev;
}
+ edev->bnh = devm_kzalloc(&edev->dev,
+ sizeof(*edev->bnh) * edev->max_supported, GFP_KERNEL);
+ if (!edev->bnh) {
+ ret = -ENOMEM;
+ goto err_dev;
+ }
+
for (index = 0; index < edev->max_supported; index++)
RAW_INIT_NOTIFIER_HEAD(&edev->nh[index]);
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
index 471476c..9c1c81b 100644
--- a/drivers/firmware/qcom/tz_log.c
+++ b/drivers/firmware/qcom/tz_log.c
@@ -960,7 +960,7 @@
for (i = 0; i < TZDBG_STATS_MAX; i++) {
tzdbg.debug_tz[i] = i;
- dent = debugfs_create_file(tzdbg.stat[i].name,
+ dent = debugfs_create_file_unsafe(tzdbg.stat[i].name,
0444, dent_dir,
&tzdbg.debug_tz[i], &tzdbg_fops);
if (dent == NULL) {
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 70b47ca..eeb7c49 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -28,16 +28,28 @@
config DRM_MSM_HDMI_HDCP
bool "Enable HDMI HDCP support in MSM DRM driver"
depends on DRM_MSM && QCOM_SCM
- default y
+ default n
help
- Choose this option to enable HDCP state machine
+ Compile in support for logging register reads/writes in a format
+ that can be parsed by envytools demsm tool. If enabled, register
+ logging can be switched on via msm.reglog=y module param.
+
+config DRM_MSM_HDMI
+ bool "Enable HDMI support in MSM DRM driver"
+ depends on DRM_MSM
+ default n
+ help
+ Compile in support for HDMI driver in msm drm
+ driver. HDMI external display support is enabled
+ through this config option. It can be primary or
+ secondary display on device.
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
select DRM_PANEL
select DRM_MIPI_DSI
- default y
+ default n
help
Choose this option if you have a need for MIPI DSI connector
support.
@@ -83,6 +95,17 @@
help
Choose this option if the 28nm DSI PHY 8960 variant is used on the
platform.
+
+config DRM_MSM_MDP5
+ tristate "MSM MDP5 DRM driver"
+ depends on DRM_MSM
+ default n
+ help
+ Choose this option if MSM MDP5 revision support is
+ needed in DRM/KMS. This is not required if sde/mdp4
+ only target enabled. MDP5 supports DSI and HDMI
+ displays.
+
config DRM_MSM_MDP4
tristate "MSM MDP4 DRM driver"
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 1ac5c6c..b698b65 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -15,32 +15,6 @@
dp/dp_ctrl.o \
dp/dp_display.o \
dp/dp_drm.o \
- hdmi/hdmi.o \
- hdmi/hdmi_audio.o \
- hdmi/hdmi_bridge.o \
- hdmi/hdmi_connector.o \
- hdmi/hdmi_i2c.o \
- hdmi/hdmi_phy.o \
- hdmi/hdmi_phy_8960.o \
- hdmi/hdmi_phy_8x60.o \
- hdmi/hdmi_phy_8x74.o \
- edp/edp.o \
- edp/edp_aux.o \
- edp/edp_bridge.o \
- edp/edp_connector.o \
- edp/edp_ctrl.o \
- edp/edp_phy.o \
- mdp/mdp_format.o \
- mdp/mdp_kms.o \
- mdp/mdp5/mdp5_cfg.o \
- mdp/mdp5/mdp5_ctl.o \
- mdp/mdp5/mdp5_crtc.o \
- mdp/mdp5/mdp5_encoder.o \
- mdp/mdp5/mdp5_irq.o \
- mdp/mdp5/mdp5_mdss.o \
- mdp/mdp5/mdp5_kms.o \
- mdp/mdp5/mdp5_plane.o \
- mdp/mdp5/mdp5_smp.o \
sde/sde_crtc.o \
sde/sde_encoder.o \
sde/sde_encoder_phys_vid.o \
@@ -61,7 +35,36 @@
sde/sde_hw_reg_dma_v1_color_proc.o \
sde/sde_hw_color_proc_v4.o \
sde/sde_hw_ad4.o \
- sde_edid_parser.o
+ sde_edid_parser.o \
+
+msm_drm-$(CONFIG_DRM_MSM_HDMI) += hdmi/hdmi.o \
+ hdmi/hdmi_audio.o \
+ hdmi/hdmi_bridge.o \
+ hdmi/hdmi_connector.o \
+ hdmi/hdmi_i2c.o \
+ hdmi/hdmi_phy.o \
+ hdmi/hdmi_phy_8960.o \
+ hdmi/hdmi_phy_8x60.o \
+ hdmi/hdmi_phy_8x74.o \
+
+msm_drm-$(CONFIG_DRM_MSM_EDP) += edp/edp.o \
+ edp/edp_aux.o \
+ edp/edp_bridge.o \
+ edp/edp_connector.o \
+ edp/edp_ctrl.o \
+ edp/edp_phy.o \
+
+msm_drm-$(CONFIG_DRM_MSM_MDP5) += mdp/mdp_format.o \
+ mdp/mdp_kms.o \
+ mdp/mdp5/mdp5_cfg.o \
+ mdp/mdp5/mdp5_ctl.o \
+ mdp/mdp5/mdp5_crtc.o \
+ mdp/mdp5/mdp5_encoder.o \
+ mdp/mdp5/mdp5_irq.o \
+ mdp/mdp5/mdp5_mdss.o \
+ mdp/mdp5/mdp5_kms.o \
+ mdp/mdp5/mdp5_plane.o \
+ mdp/mdp5/mdp5_smp.o \
msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \
sde_rsc_hw.o \
@@ -85,9 +88,9 @@
msm_drm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm_drm-$(CONFIG_SYNC_FILE) += sde/sde_fence.o
-msm_drm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
-msm_drm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm_drm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
+msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_lvds_pll.o
+msm_drm-$(CONFIG_DRM_MSM_HDMI) += hdmi/hdmi_pll_8960.o
+msm_drm-$(CONFIG_DRM_MSM_HDMI) += hdmi/hdmi_phy_8996.o
msm_drm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 2beac0d..7e3d81f 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -211,7 +211,7 @@
static int dp_link_get_period(struct dp_link_private *link, int const addr)
{
int ret = 0;
- u8 *bp;
+ u8 bp;
u8 data;
u32 const param_len = 0x1;
u32 const max_audio_period = 0xA;
@@ -224,7 +224,7 @@
goto exit;
}
- data = *bp;
+ data = bp;
/* Period - Bits 3:0 */
data = data & 0xF;
@@ -322,7 +322,7 @@
static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
{
int ret = 0;
- u8 *bp;
+ u8 bp;
u8 data;
int rlen;
int const param_len = 0x1;
@@ -337,7 +337,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
/* Audio Pattern Type - Bits 7:0 */
if ((int)data > max_audio_pattern_type) {
@@ -356,7 +356,7 @@
static int dp_link_parse_audio_mode(struct dp_link_private *link)
{
int ret = 0;
- u8 *bp;
+ u8 bp;
u8 data;
int rlen;
int const param_len = 0x1;
@@ -374,7 +374,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
/* Sampling Rate - Bits 3:0 */
sampling_rate = data & 0xF;
@@ -528,14 +528,14 @@
static int dp_link_parse_timing_params1(struct dp_link_private *link,
int const addr, int const len, u32 *val)
{
- u8 *bp;
+ u8 bp[2];
int rlen;
if (len < 2)
return -EINVAL;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len);
if (rlen < len) {
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
@@ -549,14 +549,14 @@
static int dp_link_parse_timing_params2(struct dp_link_private *link,
int const addr, int const len, u32 *val1, u32 *val2)
{
- u8 *bp;
+ u8 bp[2];
int rlen;
if (len < 2)
return -EINVAL;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len);
if (rlen < len) {
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
@@ -571,7 +571,7 @@
static int dp_link_parse_timing_params3(struct dp_link_private *link,
int const addr, u32 *val)
{
- u8 *bp;
+ u8 bp;
u32 len = 1;
int rlen;
@@ -581,7 +581,7 @@
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
}
- *val = bp[0];
+ *val = bp;
return 0;
}
@@ -597,7 +597,7 @@
{
int ret = 0;
int rlen;
- u8 *bp;
+ u8 bp;
u8 data;
u32 dyn_range;
int const param_len = 0x1;
@@ -612,7 +612,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
if (!dp_link_is_video_pattern_valid(data)) {
pr_err("invalid link video pattern = 0x%x\n", data);
@@ -634,7 +634,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
/* Dynamic Range */
dyn_range = (data & BIT(3)) >> 3;
@@ -789,7 +789,7 @@
*/
static int dp_link_parse_link_training_params(struct dp_link_private *link)
{
- u8 *bp;
+ u8 bp;
u8 data;
int ret = 0;
int rlen;
@@ -803,7 +803,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
if (!dp_link_is_link_rate_valid(data)) {
pr_err("invalid link rate = 0x%x\n", data);
@@ -822,7 +822,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
data &= 0x1F;
if (!dp_link_is_lane_count_valid(data)) {
@@ -861,7 +861,7 @@
*/
static int dp_link_parse_phy_test_params(struct dp_link_private *link)
{
- u8 *bp;
+ u8 bp;
u8 data;
int rlen;
int const param_len = 0x1;
@@ -876,7 +876,7 @@
goto end;
}
- data = *bp;
+ data = bp;
link->request.phy_test_pattern_sel = data;
@@ -939,7 +939,7 @@
static int dp_link_parse_request(struct dp_link_private *link)
{
int ret = 0;
- u8 *bp;
+ u8 bp;
u8 data;
int rlen;
u32 const param_len = 0x1;
@@ -957,12 +957,12 @@
goto end;
}
- data = *bp;
+ data = bp;
pr_debug("device service irq vector = 0x%x\n", data);
if (!(data & BIT(1))) {
- pr_debug("no link requested\n");
+ pr_debug("no test requested\n");
goto end;
}
@@ -978,7 +978,7 @@
goto end;
}
- data = *bp;
+ data = bp;
if (!dp_link_is_test_supported(data)) {
pr_debug("link 0x%x not supported\n", data);
@@ -1032,7 +1032,7 @@
*/
static void dp_link_parse_sink_count(struct dp_link_private *link)
{
- u8 *bp;
+ u8 bp;
u8 data;
int rlen;
int const param_len = 0x1;
@@ -1044,7 +1044,7 @@
return;
}
- data = *bp;
+ data = bp;
/* BIT 7, BIT 5:0 */
link->sink_count.count = (data & BIT(7)) << 6 | (data & 0x63);
@@ -1109,7 +1109,7 @@
static int dp_link_parse_vx_px(struct dp_link_private *link)
{
- u8 *bp;
+ u8 bp;
u8 data;
int const param_len = 0x1;
int const addr1 = 0x206;
@@ -1127,7 +1127,7 @@
goto end;
}
- data = *bp;
+ data = bp;
pr_debug("lanes 0/1 (Byte 0x206): 0x%x\n", data);
@@ -1148,7 +1148,7 @@
goto end;
}
- data = *bp;
+ data = bp;
pr_debug("lanes 2/3 (Byte 0x207): 0x%x\n", data);
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
index c6ed918..7bc1433 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -345,6 +345,9 @@
pd->vdo = *vdos;
dp_usbpd_get_status(pd);
+ if (pd->dp_cb && pd->dp_cb->attention)
+ pd->dp_cb->attention(pd->dev);
+
if (!pd->dp_usbpd.alt_mode_cfg_done)
dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
break;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index 3625ed0..5e76ce7 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -56,6 +56,8 @@
dsi_ctrl_hw_cmn_trigger_cmd_test_pattern;
ctrl->ops.clear_phy0_ln_err = dsi_ctrl_hw_dln0_phy_err;
ctrl->ops.phy_reset_config = dsi_ctrl_hw_cmn_phy_reset_config;
+ ctrl->ops.setup_misr = dsi_ctrl_hw_cmn_setup_misr;
+ ctrl->ops.collect_misr = dsi_ctrl_hw_cmn_collect_misr;
switch (version) {
case DSI_CTRL_VERSION_1_4:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 2d7b174..e8a6ab4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -147,6 +147,12 @@
void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode,
+ bool enable, u32 frame_count);
+u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode);
+
void dsi_ctrl_hw_cmn_kickoff_command(struct dsi_ctrl_hw *ctrl,
struct dsi_ctrl_cmd_dma_info *cmd,
u32 flags);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index b2aef9c..21ef811 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -2258,6 +2258,28 @@
}
/**
+ * _dsi_ctrl_cache_misr - Cache frame MISR value
+ * @dsi_ctrl: Pointer to associated dsi_ctrl structure
+ */
+static void _dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl)
+{
+ u32 misr;
+
+ if (!dsi_ctrl || !dsi_ctrl->hw.ops.collect_misr)
+ return;
+
+ misr = dsi_ctrl->hw.ops.collect_misr(&dsi_ctrl->hw,
+ dsi_ctrl->host_config.panel_mode);
+
+ if (misr)
+ dsi_ctrl->misr_cache = misr;
+
+ pr_debug("DSI_%d misr_cache = %x\n", dsi_ctrl->cell_index,
+ dsi_ctrl->misr_cache);
+
+}
+
+/**
* dsi_ctrl_set_power_state() - set power state for dsi controller
* @dsi_ctrl: DSI controller handle.
* @state: Power state.
@@ -2295,6 +2317,9 @@
goto error;
}
} else if (state == DSI_CTRL_POWER_VREG_OFF) {
+ if (dsi_ctrl->misr_enable)
+ _dsi_ctrl_cache_misr(dsi_ctrl);
+
rc = dsi_ctrl_enable_supplies(dsi_ctrl, false);
if (rc) {
pr_err("[%d]failed to disable vreg supplies, rc=%d\n",
@@ -2609,6 +2634,59 @@
}
/**
+ * dsi_ctrl_setup_misr() - Setup frame MISR
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable MISR.
+ * @frame_count: Number of frames to accumulate MISR.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_setup_misr(struct dsi_ctrl *dsi_ctrl,
+ bool enable,
+ u32 frame_count)
+{
+ if (!dsi_ctrl) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!dsi_ctrl->hw.ops.setup_misr)
+ return 0;
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+ dsi_ctrl->misr_enable = enable;
+ dsi_ctrl->hw.ops.setup_misr(&dsi_ctrl->hw,
+ dsi_ctrl->host_config.panel_mode,
+ enable, frame_count);
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return 0;
+}
+
+/**
+ * dsi_ctrl_collect_misr() - Read frame MISR
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Return: MISR value.
+ */
+u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl)
+{
+ u32 misr;
+
+ if (!dsi_ctrl || !dsi_ctrl->hw.ops.collect_misr)
+ return 0;
+
+ misr = dsi_ctrl->hw.ops.collect_misr(&dsi_ctrl->hw,
+ dsi_ctrl->host_config.panel_mode);
+ if (!misr)
+ misr = dsi_ctrl->misr_cache;
+
+ pr_debug("DSI_%d cached misr = %x, final = %x\n",
+ dsi_ctrl->cell_index, dsi_ctrl->misr_cache, misr);
+
+ return misr;
+}
+
+/**
* dsi_ctrl_drv_register() - register platform driver for dsi controller
*/
void dsi_ctrl_drv_register(void)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index ec535ce11..95dac1c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -188,6 +188,8 @@
* @vaddr: CPU virtual address of cmd buffer.
* @cmd_buffer_size: Size of command buffer.
* @debugfs_root: Root for debugfs entries.
+ * @misr_enable: Frame MISR enable/disable
+ * @misr_cache: Cached Frame MISR value
*/
struct dsi_ctrl {
struct platform_device *pdev;
@@ -226,6 +228,10 @@
/* Debug Information */
struct dentry *debugfs_root;
+ /* MISR */
+ bool misr_enable;
+ u32 misr_cache;
+
};
/**
@@ -571,6 +577,26 @@
struct dsi_ctrl *dsi_ctrl, uint32_t intr_idx);
/**
+ * dsi_ctrl_setup_misr() - Setup frame MISR
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable MISR.
+ * @frame_count: Number of frames to accumulate MISR.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_setup_misr(struct dsi_ctrl *dsi_ctrl,
+ bool enable,
+ u32 frame_count);
+
+/**
+ * dsi_ctrl_collect_misr() - Read frame MISR
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Return: MISR value.
+ */
+u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl);
+
+/**
* dsi_ctrl_drv_register() - register platform driver for dsi controller
*/
void dsi_ctrl_drv_register(void);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 74be279..2130144 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -675,6 +675,26 @@
ssize_t (*reg_dump_to_buffer)(struct dsi_ctrl_hw *ctrl,
char *buf,
u32 size);
+
+ /**
+ * setup_misr() - Setup frame MISR
+ * @ctrl: Pointer to the controller host hardware.
+ * @panel_mode: CMD or VIDEO mode indicator
+ * @enable: Enable/disable MISR.
+ * @frame_count: Number of frames to accumulate MISR.
+ */
+ void (*setup_misr)(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode,
+ bool enable, u32 frame_count);
+
+ /**
+ * collect_misr() - Read frame MISR
+ * @ctrl: Pointer to the controller host hardware.
+ * @panel_mode: CMD or VIDEO mode indicator
+ */
+ u32 (*collect_misr)(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode);
+
};
/*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
index c22849a..6421dc2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
@@ -157,6 +157,10 @@
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_MISR_CMD_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_MISR_VIDEO_CTRL));
+ len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_CTRL));
@@ -193,6 +197,12 @@
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_MISR_CMD_MDP0_32BIT));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_MISR_CMD_MDP1_32BIT));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_MISR_VIDEO_32BIT));
+ len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 0af6f25..8e8e353 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -20,9 +20,12 @@
#include "dsi_ctrl_reg.h"
#include "dsi_hw.h"
#include "dsi_panel.h"
+#include "dsi_catalog.h"
#define MMSS_MISC_CLAMP_REG_OFF 0x0014
#define DSI_CTRL_DYNAMIC_FORCE_ON (0x23F|BIT(8)|BIT(9)|BIT(11)|BIT(21))
+#define DSI_CTRL_CMD_MISR_ENABLE BIT(28)
+#define DSI_CTRL_VIDEO_MISR_ENABLE BIT(16)
/* Unsupported formats default to RGB888 */
static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
@@ -149,6 +152,70 @@
}
/**
+ * setup_misr() - Setup frame MISR
+ * @ctrl: Pointer to the controller host hardware.
+ * @panel_mode: CMD or VIDEO mode indicator
+ * @enable: Enable/disable MISR.
+ * @frame_count: Number of frames to accumulate MISR.
+ */
+void dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode,
+ bool enable,
+ u32 frame_count)
+{
+ u32 addr;
+ u32 config = 0;
+
+ if (panel_mode == DSI_OP_CMD_MODE) {
+ addr = DSI_MISR_CMD_CTRL;
+ if (enable)
+ config = DSI_CTRL_CMD_MISR_ENABLE;
+ } else {
+ addr = DSI_MISR_VIDEO_CTRL;
+ if (enable)
+ config = DSI_CTRL_VIDEO_MISR_ENABLE;
+ if (frame_count > 255)
+ frame_count = 255;
+ config |= frame_count << 8;
+ }
+
+ pr_debug("[DSI_%d] MISR ctrl: 0x%x\n", ctrl->index,
+ config);
+ DSI_W32(ctrl, addr, config);
+ wmb(); /* make sure MISR is configured */
+}
+
+/**
+ * collect_misr() - Read frame MISR
+ * @ctrl: Pointer to the controller host hardware.
+ * @panel_mode: CMD or VIDEO mode indicator
+ */
+u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode)
+{
+ u32 addr;
+ u32 enabled;
+ u32 misr = 0;
+
+ if (panel_mode == DSI_OP_CMD_MODE) {
+ addr = DSI_MISR_CMD_MDP0_32BIT;
+ enabled = DSI_R32(ctrl, DSI_MISR_CMD_CTRL) &
+ DSI_CTRL_CMD_MISR_ENABLE;
+ } else {
+ addr = DSI_MISR_VIDEO_32BIT;
+ enabled = DSI_R32(ctrl, DSI_MISR_VIDEO_CTRL) &
+ DSI_CTRL_VIDEO_MISR_ENABLE;
+ }
+
+ if (enabled)
+ misr = DSI_R32(ctrl, addr);
+
+ pr_debug("[DSI_%d] MISR enabled %x value: 0x%x\n", ctrl->index,
+ enabled, misr);
+ return misr;
+}
+
+/**
* set_video_timing() - set up the timing for video frame
* @ctrl: Pointer to controller host hardware.
* @mode: Video mode information.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 6f6c559..c0c6698 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -32,6 +32,8 @@
#define to_dsi_display(x) container_of(x, struct dsi_display, host)
#define INT_BASE_10 10
+#define MISR_BUFF_SIZE 256
+
static DEFINE_MUTEX(dsi_display_list_lock);
static LIST_HEAD(dsi_display_list);
static char dsi_display_primary[MAX_CMDLINE_PARAM_LEN];
@@ -128,9 +130,20 @@
return format;
}
+static void _dsi_display_setup_misr(struct dsi_display *display)
+{
+ int i;
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ dsi_ctrl_setup_misr(display->ctrl[i].ctrl,
+ display->misr_enable,
+ display->misr_frame_count);
+ }
+}
+
static ssize_t debugfs_dump_info_read(struct file *file,
- char __user *buff,
- size_t count,
+ char __user *user_buf,
+ size_t user_len,
loff_t *ppos)
{
struct dsi_display *display = file->private_data;
@@ -168,7 +181,7 @@
"\tClock master = %s\n",
display->ctrl[display->clk_master_idx].ctrl->name);
- if (copy_to_user(buff, buf, len)) {
+ if (copy_to_user(user_buf, buf, len)) {
kfree(buf);
return -EFAULT;
}
@@ -179,16 +192,151 @@
return len;
}
+static ssize_t debugfs_misr_setup(struct file *file,
+ const char __user *user_buf,
+ size_t user_len,
+ loff_t *ppos)
+{
+ struct dsi_display *display = file->private_data;
+ char *buf;
+ int rc = 0;
+ size_t len;
+ u32 enable, frame_count;
+
+ if (!display)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(MISR_BUFF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* leave room for termination char */
+ len = min_t(size_t, user_len, MISR_BUFF_SIZE - 1);
+ if (copy_from_user(buf, user_buf, len)) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ buf[len] = '\0'; /* terminate the string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ display->misr_enable = enable;
+ display->misr_frame_count = frame_count;
+
+ mutex_lock(&display->display_lock);
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_CORE_CLK, DSI_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
+ display->name, rc);
+ goto unlock;
+ }
+
+ _dsi_display_setup_misr(display);
+
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_CORE_CLK, DSI_CLK_OFF);
+ if (rc) {
+ pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
+ display->name, rc);
+ goto unlock;
+ }
+
+ rc = user_len;
+unlock:
+ mutex_unlock(&display->display_lock);
+error:
+ kfree(buf);
+ return rc;
+}
+
+static ssize_t debugfs_misr_read(struct file *file,
+		char __user *user_buf,
+		size_t user_len,
+		loff_t *ppos)
+{
+	struct dsi_display *display = file->private_data;
+	char *buf;
+	u32 len = 0;
+	int rc = 0;
+	struct dsi_ctrl *dsi_ctrl;
+	int i;
+	u32 misr;
+	size_t max_len = min_t(size_t, user_len, MISR_BUFF_SIZE);
+
+	if (!display)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(max_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&display->display_lock);
+	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_CORE_CLK, DSI_CLK_ON);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		dsi_ctrl = display->ctrl[i].ctrl;
+		misr = dsi_ctrl_collect_misr(display->ctrl[i].ctrl);
+
+		len += snprintf((buf + len), max_len - len,
+			"DSI_%d MISR: 0x%x\n", dsi_ctrl->cell_index, misr);
+
+		if (len >= max_len)
+			break;
+	}
+
+	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_CORE_CLK, DSI_CLK_OFF);
+	if (rc) {
+		pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	if (copy_to_user(user_buf, buf, len)) {
+		rc = -EFAULT;
+		goto error;
+	}
+
+	*ppos += len;
+	rc = len;
+
+error:
+	mutex_unlock(&display->display_lock);
+	kfree(buf);
+	return rc;
+}
static const struct file_operations dump_info_fops = {
.open = simple_open,
.read = debugfs_dump_info_read,
};
+static const struct file_operations misr_data_fops = {
+ .open = simple_open,
+ .read = debugfs_misr_read,
+ .write = debugfs_misr_setup,
+};
+
static int dsi_display_debugfs_init(struct dsi_display *display)
{
int rc = 0;
- struct dentry *dir, *dump_file;
+ struct dentry *dir, *dump_file, *misr_data;
dir = debugfs_create_dir(display->name, NULL);
if (IS_ERR_OR_NULL(dir)) {
@@ -199,13 +347,25 @@
}
dump_file = debugfs_create_file("dump_info",
- 0444,
+ 0400,
dir,
display,
&dump_info_fops);
if (IS_ERR_OR_NULL(dump_file)) {
rc = PTR_ERR(dump_file);
- pr_err("[%s] debugfs create file failed, rc=%d\n",
+ pr_err("[%s] debugfs create dump info file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ misr_data = debugfs_create_file("misr_data",
+ 0600,
+ dir,
+ display,
+ &misr_data_fops);
+ if (IS_ERR_OR_NULL(misr_data)) {
+ rc = PTR_ERR(misr_data);
+ pr_err("[%s] debugfs create misr datafile failed, rc=%d\n",
display->name, rc);
goto error_remove_dir;
}
@@ -3114,7 +3274,8 @@
info->frame_rate = timing->refresh_rate;
info->vtotal = DSI_V_TOTAL(timing);
info->prefill_lines = display->panel->panel_prefill_lines;
- info->jitter = display->panel->panel_jitter;
+ info->jitter_numer = display->panel->panel_jitter_numer;
+ info->jitter_denom = display->panel->panel_jitter_denom;
info->width_mm = phy_props.panel_width_mm;
info->height_mm = phy_props.panel_height_mm;
info->max_width = 1920;
@@ -3532,6 +3693,10 @@
{
int rc = 0;
+ /* check and setup MISR */
+ if (display->misr_enable)
+ _dsi_display_setup_misr(display);
+
rc = dsi_display_set_roi(display, params->rois);
return rc;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index b382e4a..359e04f3 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -154,6 +154,8 @@
* @dsi_clk_handle: DSI clock handle.
* @mdp_clk_handle: MDP clock handle.
* @root: Debugfs root directory
+ * @misr_enable:      Frame MISR enable/disable
+ * @misr_frame_count: Number of frames to accumulate the MISR value
*/
struct dsi_display {
struct platform_device *pdev;
@@ -201,6 +203,9 @@
/* DEBUG FS */
struct dentry *root;
+
+ bool misr_enable;
+ u32 misr_frame_count;
};
int dsi_display_dev_probe(struct platform_device *pdev);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index b8bf7a8..8bc82f5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -35,20 +35,66 @@
#define DEFAULT_MDP_TRANSFER_TIME 14000
-#define DEFAULT_PANEL_JITTER 5
-#define MAX_PANEL_JITTER 25
-#define DEFAULT_PANEL_PREFILL_LINES 16
+#define DEFAULT_PANEL_JITTER_NUMERATOR 2
+#define DEFAULT_PANEL_JITTER_DENOMINATOR 1
+#define DEFAULT_PANEL_JITTER_ARRAY_SIZE 2
+#define MAX_PANEL_JITTER 10
+#define DEFAULT_PANEL_PREFILL_LINES 25
+
+enum dsi_dsc_ratio_type {
+ DSC_8BPC_8BPP,
+ DSC_10BPC_8BPP,
+ DSC_12BPC_8BPP,
+ DSC_RATIO_TYPE_MAX
+};
static u32 dsi_dsc_rc_buf_thresh[] = {0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54,
0x62, 0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e};
-static char dsi_dsc_rc_range_min_qp_1_1[] = {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5,
- 5, 5, 7, 13};
-static char dsi_dsc_rc_range_min_qp_1_1_scr1[] = {0, 0, 1, 1, 3, 3, 3, 3, 3, 3,
- 5, 5, 5, 9, 12};
-static char dsi_dsc_rc_range_max_qp_1_1[] = {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11,
- 12, 13, 13, 15};
-static char dsi_dsc_rc_range_max_qp_1_1_scr1[] = {4, 4, 5, 6, 7, 7, 7, 8, 9, 10,
- 11, 11, 12, 13};
+
+/*
+ * DSC 1.1
+ * Rate control - Min QP values for each ratio type in dsi_dsc_ratio_type
+ */
+static char dsi_dsc_rc_range_min_qp_1_1[][15] = {
+ {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 13},
+ {0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 17},
+ {0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 21},
+ };
+
+/*
+ * DSC 1.1 SCR
+ * Rate control - Min QP values for each ratio type in dsi_dsc_ratio_type
+ */
+static char dsi_dsc_rc_range_min_qp_1_1_scr1[][15] = {
+ {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 9, 12},
+ {0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 13, 16},
+ {0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 17, 20},
+ };
+
+/*
+ * DSC 1.1
+ * Rate control - Max QP values for each ratio type in dsi_dsc_ratio_type
+ */
+static char dsi_dsc_rc_range_max_qp_1_1[][15] = {
+ {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11, 12, 13, 13, 15},
+ {8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 15, 16, 17, 17, 19},
+ {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 19, 20, 21, 21, 23},
+ };
+
+/*
+ * DSC 1.1 SCR
+ * Rate control - Max QP values for each ratio type in dsi_dsc_ratio_type
+ */
+static char dsi_dsc_rc_range_max_qp_1_1_scr1[][15] = {
+ {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13},
+ {8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17},
+ {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21},
+ };
+
+/*
+ * DSC 1.1 and DSC 1.1 SCR
+ * Rate control - bpg offset values
+ */
static char dsi_dsc_rc_range_bpg_offset[] = {2, 0, 0, -2, -4, -6, -8, -8,
-8, -10, -10, -12, -12, -12, -12};
@@ -1579,16 +1625,24 @@
struct device_node *of_node)
{
int rc;
+ u32 jitter[DEFAULT_PANEL_JITTER_ARRAY_SIZE] = {0, 0};
+ u64 jitter_val = 0;
- rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-jitter",
- &panel->panel_jitter);
+ rc = of_property_read_u32_array(of_node, "qcom,mdss-dsi-panel-jitter",
+ jitter, DEFAULT_PANEL_JITTER_ARRAY_SIZE);
if (rc) {
- pr_debug("panel jitter is not defined rc=%d\n", rc);
- panel->panel_jitter = DEFAULT_PANEL_JITTER;
- } else if (panel->panel_jitter > MAX_PANEL_JITTER) {
- pr_debug("invalid jitter config=%d setting to:%d\n",
- panel->panel_jitter, DEFAULT_PANEL_JITTER);
- panel->panel_jitter = DEFAULT_PANEL_JITTER;
+ pr_debug("panel jitter not defined rc=%d\n", rc);
+ } else {
+ jitter_val = jitter[0];
+ jitter_val = div_u64(jitter_val, jitter[1]);
+ }
+
+ if (rc || !jitter_val || (jitter_val > MAX_PANEL_JITTER)) {
+ panel->panel_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
+ panel->panel_jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR;
+ } else {
+ panel->panel_jitter_numer = jitter[0];
+ panel->panel_jitter_denom = jitter[1];
}
rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-prefill-lines",
@@ -1848,6 +1902,7 @@
int target_bpp_x16;
int data;
int final_value, final_scale;
+ int ratio_index;
dsc->version = 0x11;
dsc->scr_rev = 0;
@@ -1857,12 +1912,7 @@
else
dsc->first_line_bpg_offset = 12;
- dsc->min_qp_flatness = 3;
- dsc->max_qp_flatness = 12;
- dsc->line_buf_depth = 9;
dsc->edge_factor = 6;
- dsc->quant_incr_limit0 = 11;
- dsc->quant_incr_limit1 = 11;
dsc->tgt_offset_hi = 3;
dsc->tgt_offset_lo = 3;
dsc->enable_422 = 0;
@@ -1870,27 +1920,60 @@
dsc->vbr_enable = 0;
dsc->buf_thresh = dsi_dsc_rc_buf_thresh;
- if (dsc->version == 0x11 && dsc->scr_rev == 0x1) {
- dsc->range_min_qp = dsi_dsc_rc_range_min_qp_1_1_scr1;
- dsc->range_max_qp = dsi_dsc_rc_range_max_qp_1_1_scr1;
- } else {
- dsc->range_min_qp = dsi_dsc_rc_range_min_qp_1_1;
- dsc->range_max_qp = dsi_dsc_rc_range_max_qp_1_1;
- }
- dsc->range_bpg_offset = dsi_dsc_rc_range_bpg_offset;
bpp = dsc->bpp;
bpc = dsc->bpc;
+ if (bpc == 12)
+ ratio_index = DSC_12BPC_8BPP;
+ else if (bpc == 10)
+ ratio_index = DSC_10BPC_8BPP;
+ else
+ ratio_index = DSC_8BPC_8BPP;
+
+ if (dsc->version == 0x11 && dsc->scr_rev == 0x1) {
+ dsc->range_min_qp =
+ dsi_dsc_rc_range_min_qp_1_1_scr1[ratio_index];
+ dsc->range_max_qp =
+ dsi_dsc_rc_range_max_qp_1_1_scr1[ratio_index];
+ } else {
+ dsc->range_min_qp = dsi_dsc_rc_range_min_qp_1_1[ratio_index];
+ dsc->range_max_qp = dsi_dsc_rc_range_max_qp_1_1[ratio_index];
+ }
+ dsc->range_bpg_offset = dsi_dsc_rc_range_bpg_offset;
+
if (bpp == 8)
dsc->initial_offset = 6144;
else
dsc->initial_offset = 2048; /* bpp = 12 */
- if (bpc <= 8)
- mux_words_size = 48;
+ if (bpc == 12)
+ mux_words_size = 64;
else
- mux_words_size = 64; /* bpc == 12 */
+ mux_words_size = 48; /* bpc == 8/10 */
+
+ if (bpc == 8) {
+ dsc->line_buf_depth = 9;
+ dsc->input_10_bits = 0;
+ dsc->min_qp_flatness = 3;
+ dsc->max_qp_flatness = 12;
+ dsc->quant_incr_limit0 = 11;
+ dsc->quant_incr_limit1 = 11;
+ } else if (bpc == 10) { /* 10bpc */
+ dsc->line_buf_depth = 11;
+ dsc->input_10_bits = 1;
+ dsc->min_qp_flatness = 7;
+ dsc->max_qp_flatness = 16;
+ dsc->quant_incr_limit0 = 15;
+ dsc->quant_incr_limit1 = 15;
+ } else { /* 12 bpc */
+ dsc->line_buf_depth = 9;
+ dsc->input_10_bits = 0;
+ dsc->min_qp_flatness = 11;
+ dsc->max_qp_flatness = 20;
+ dsc->quant_incr_limit0 = 19;
+ dsc->quant_incr_limit1 = 19;
+ }
dsc->slice_last_group_size = 3 - (dsc->slice_width % 3);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 3569b5b..5380049 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -184,7 +184,8 @@
bool ulps_enabled;
bool allow_phy_power_off;
- u32 panel_jitter;
+ u32 panel_jitter_numer;
+ u32 panel_jitter_denom;
u32 panel_prefill_lines;
bool panel_initialized;
bool te_using_watchdog_timer;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 11f1c4f..d2ac684 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -284,7 +284,7 @@
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
- kthread_queue_work(&priv->event_thread[crtc_id].worker,
+ kthread_queue_work(&priv->disp_thread[crtc_id].worker,
&vbl_ctrl->work);
return 0;
@@ -521,6 +521,7 @@
struct msm_kms *kms;
struct sde_dbg_power_ctrl dbg_power_ctrl = { 0 };
int ret, i;
+ struct sched_param param;
ddev = drm_dev_alloc(drv, dev);
if (!ddev) {
@@ -624,6 +625,12 @@
}
ddev->mode_config.funcs = &mode_config_funcs;
+	/*
+	 * This priority was found during empirical testing to provide
+	 * appropriate realtime scheduling for display updates while
+	 * interacting with other realtime and normal priority tasks.
+	 */
+ param.sched_priority = 16;
for (i = 0; i < priv->num_crtcs; i++) {
/* initialize display thread */
@@ -634,6 +641,11 @@
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, ¶m);
+ if (ret)
+ pr_warn("display thread priority update failed: %d\n",
+ ret);
if (IS_ERR(priv->disp_thread[i].thread)) {
dev_err(dev, "failed to create crtc_commit kthread\n");
@@ -648,6 +660,18 @@
kthread_run(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
+		/*
+		 * The event thread should run at the same priority as
+		 * disp_thread because it handles frame_done events. A lower
+		 * priority event thread combined with a higher priority
+		 * disp_thread can push the frame_pending counter beyond 2,
+		 * which can lead to commit failures at the crtc commit level.
+		 */
+ ret = sched_setscheduler(priv->event_thread[i].thread,
+ SCHED_FIFO, ¶m);
+ if (ret)
+ pr_warn("display event thread priority update failed: %d\n",
+ ret);
if (IS_ERR(priv->event_thread[i].thread)) {
dev_err(dev, "failed to create crtc_event kthread\n");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 7edd534..96ab883 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -413,7 +413,8 @@
* @frame_rate: Display frame rate
* @prefill_lines: prefill lines based on porches.
* @vtotal: display vertical total
- * @jitter: display jitter configuration
+ * @jitter_numer: display panel jitter numerator configuration
+ * @jitter_denom: display panel jitter denominator configuration
* @comp_info: Compression supported by the display
* @roi_caps: Region of interest capability info
*/
@@ -437,7 +438,8 @@
uint32_t frame_rate;
uint32_t prefill_lines;
uint32_t vtotal;
- uint32_t jitter;
+ uint32_t jitter_numer;
+ uint32_t jitter_denom;
struct msm_compression_info comp_info;
struct msm_roi_caps roi_caps;
@@ -741,16 +743,34 @@
void msm_fbdev_free(struct drm_device *dev);
struct hdmi;
+#ifdef CONFIG_DRM_MSM_HDMI
int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
struct drm_encoder *encoder);
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);
+#else
+static inline void __init msm_hdmi_register(void)
+{
+}
+static inline void __exit msm_hdmi_unregister(void)
+{
+}
+#endif
struct msm_edp;
+#ifdef CONFIG_DRM_MSM_EDP
void __init msm_edp_register(void);
void __exit msm_edp_unregister(void);
int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
struct drm_encoder *encoder);
+#else
+static inline void __init msm_edp_register(void)
+{
+}
+static inline void __exit msm_edp_unregister(void)
+{
+}
+#endif
struct msm_dsi;
enum msm_dsi_encoder_id {
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 35e6b71..7692bef 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -115,9 +115,24 @@
static inline
struct msm_kms *mdp4_kms_init(struct drm_device *dev) { return NULL; };
#endif
-struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+
+#ifdef CONFIG_DRM_MSM_MDP5
int msm_mdss_init(struct drm_device *dev);
void msm_mdss_destroy(struct drm_device *dev);
+struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+#else
+static inline int msm_mdss_init(struct drm_device *dev)
+{
+ return 0;
+}
+static inline void msm_mdss_destroy(struct drm_device *dev)
+{
+}
+static inline struct msm_kms *mdp5_kms_init(struct drm_device *dev)
+{
+ return NULL;
+}
+#endif
struct msm_kms *sde_kms_init(struct drm_device *dev);
/**
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index f2996dd..d1991a4 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -21,8 +21,6 @@
uint32_t blob_count,
uint32_t state_size)
{
- int i;
-
/* prevent access if any of these are NULL */
if (!base || !dev || !property_array || !property_data) {
property_count = 0;
@@ -60,10 +58,6 @@
0,
sizeof(struct msm_property_data) *
property_count);
- INIT_LIST_HEAD(&info->dirty_list);
-
- for (i = 0; i < property_count; ++i)
- INIT_LIST_HEAD(&property_data[i].dirty_node);
}
}
@@ -72,9 +66,6 @@
if (!info)
return;
- /* reset dirty list */
- INIT_LIST_HEAD(&info->dirty_list);
-
/* free state cache */
while (info->state_cache_size > 0)
kfree(info->state_cache[--(info->state_cache_size)]);
@@ -82,24 +73,25 @@
mutex_destroy(&info->property_lock);
}
-int msm_property_pop_dirty(struct msm_property_info *info)
+int msm_property_pop_dirty(struct msm_property_info *info,
+ struct msm_property_state *property_state)
{
struct list_head *item;
int rc = 0;
- if (!info) {
- DRM_ERROR("invalid info\n");
+ if (!info || !property_state || !property_state->values) {
+ DRM_ERROR("invalid argument(s)\n");
return -EINVAL;
}
mutex_lock(&info->property_lock);
- if (list_empty(&info->dirty_list)) {
+ if (list_empty(&property_state->dirty_list)) {
rc = -EAGAIN;
} else {
- item = info->dirty_list.next;
+ item = property_state->dirty_list.next;
list_del_init(item);
- rc = container_of(item, struct msm_property_data, dirty_node)
- - info->property_data;
+ rc = container_of(item, struct msm_property_value, dirty_node)
+ - property_state->values;
DRM_DEBUG_KMS("property %d dirty\n", rc);
}
mutex_unlock(&info->property_lock);
@@ -112,26 +104,28 @@
* This function doesn't mutex protect the
* dirty linked list.
* @info: Pointer to property info container struct
+ * @property_state: Pointer to property state container struct
* @property_idx: Property index
*/
static void _msm_property_set_dirty_no_lock(
struct msm_property_info *info,
+ struct msm_property_state *property_state,
uint32_t property_idx)
{
- if (!info || property_idx >= info->property_count) {
- DRM_ERROR("invalid argument(s), info %pK, idx %u\n",
- info, property_idx);
+ if (!info || !property_state || !property_state->values ||
+ property_idx >= info->property_count) {
+ DRM_ERROR("invalid argument(s), idx %u\n", property_idx);
return;
}
/* avoid re-inserting if already dirty */
- if (!list_empty(&info->property_data[property_idx].dirty_node)) {
+ if (!list_empty(&property_state->values[property_idx].dirty_node)) {
DRM_DEBUG_KMS("property %u already dirty\n", property_idx);
return;
}
- list_add_tail(&info->property_data[property_idx].dirty_node,
- &info->dirty_list);
+ list_add_tail(&property_state->values[property_idx].dirty_node,
+ &property_state->dirty_list);
}
/**
@@ -371,35 +365,36 @@
return rc;
}
-int msm_property_set_dirty(struct msm_property_info *info, int property_idx)
+int msm_property_set_dirty(struct msm_property_info *info,
+ struct msm_property_state *property_state,
+ int property_idx)
{
- if (!info) {
- DRM_ERROR("invalid property info\n");
+ if (!info || !property_state || !property_state->values) {
+ DRM_ERROR("invalid argument(s)\n");
return -EINVAL;
}
mutex_lock(&info->property_lock);
- _msm_property_set_dirty_no_lock(info, property_idx);
+ _msm_property_set_dirty_no_lock(info, property_state, property_idx);
mutex_unlock(&info->property_lock);
return 0;
}
int msm_property_atomic_set(struct msm_property_info *info,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
struct drm_property *property, uint64_t val)
{
struct drm_property_blob *blob;
int property_idx, rc = -EINVAL;
property_idx = msm_property_index(info, property);
- if (!info || (property_idx == -EINVAL) || !property_values) {
- DRM_DEBUG("Invalid argument(s)\n");
+ if (!info || !property_state ||
+ (property_idx == -EINVAL) || !property_state->values) {
+ DRM_DEBUG("invalid argument(s)\n");
} else {
/* extra handling for incoming properties */
mutex_lock(&info->property_lock);
if ((property->flags & DRM_MODE_PROP_BLOB) &&
- (property_idx < info->blob_count) &&
- property_blobs) {
+ (property_idx < info->blob_count)) {
/* DRM lookup also takes a reference */
blob = drm_property_lookup_blob(info->dev,
(uint32_t)val);
@@ -411,18 +406,21 @@
val = blob->base.id;
/* save blob - need to clear previous ref */
- if (property_blobs[property_idx])
+ if (property_state->values[property_idx].blob)
drm_property_unreference_blob(
- property_blobs[property_idx]);
- property_blobs[property_idx] = blob;
+ property_state->values[
+ property_idx].blob);
+ property_state->values[property_idx].blob =
+ blob;
}
}
/* update value and flag as dirty */
- if (property_values[property_idx] != val ||
+ if (property_state->values[property_idx].value != val ||
info->property_data[property_idx].force_dirty) {
- property_values[property_idx] = val;
- _msm_property_set_dirty_no_lock(info, property_idx);
+ property_state->values[property_idx].value = val;
+ _msm_property_set_dirty_no_lock(info, property_state,
+ property_idx);
DBG("%s - %lld", property->name, val);
}
@@ -434,18 +432,18 @@
}
int msm_property_atomic_get(struct msm_property_info *info,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
struct drm_property *property, uint64_t *val)
{
int property_idx, rc = -EINVAL;
property_idx = msm_property_index(info, property);
- if (!info || (property_idx == -EINVAL) || !property_values || !val) {
+ if (!info || (property_idx == -EINVAL) ||
+ !property_state->values || !val) {
DRM_DEBUG("Invalid argument(s)\n");
} else {
mutex_lock(&info->property_lock);
- *val = property_values[property_idx];
+ *val = property_state->values[property_idx].value;
mutex_unlock(&info->property_lock);
rc = 0;
}
@@ -495,8 +493,8 @@
}
void msm_property_reset_state(struct msm_property_info *info, void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs)
+ struct msm_property_state *property_state,
+ struct msm_property_value *property_values)
{
uint32_t i;
@@ -508,24 +506,29 @@
if (state)
memset(state, 0, info->state_size);
+ if (property_state) {
+ property_state->property_count = info->property_count;
+ property_state->values = property_values;
+ INIT_LIST_HEAD(&property_state->dirty_list);
+ }
+
/*
* Assign default property values. This helper is mostly used
* to initialize newly created state objects.
*/
if (property_values)
- for (i = 0; i < info->property_count; ++i)
- property_values[i] =
+ for (i = 0; i < info->property_count; ++i) {
+ property_values[i].value =
info->property_data[i].default_value;
-
- if (property_blobs)
- for (i = 0; i < info->blob_count; ++i)
- property_blobs[i] = 0;
+ property_values[i].blob = NULL;
+ INIT_LIST_HEAD(&property_values[i].dirty_node);
+ }
}
void msm_property_duplicate_state(struct msm_property_info *info,
void *old_state, void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs)
+ struct msm_property_state *property_state,
+ struct msm_property_value *property_values)
{
uint32_t i;
@@ -536,17 +539,24 @@
memcpy(state, old_state, info->state_size);
- if (property_blobs) {
- /* add ref count for blobs */
- for (i = 0; i < info->blob_count; ++i)
- if (property_blobs[i])
- drm_property_reference_blob(property_blobs[i]);
- }
+ if (!property_state)
+ return;
+
+ INIT_LIST_HEAD(&property_state->dirty_list);
+ property_state->values = property_values;
+
+ if (property_state->values)
+ /* add ref count for blobs and initialize dirty nodes */
+ for (i = 0; i < info->property_count; ++i) {
+ if (property_state->values[i].blob)
+ drm_property_reference_blob(
+ property_state->values[i].blob);
+ INIT_LIST_HEAD(&property_state->values[i].dirty_node);
+ }
}
void msm_property_destroy_state(struct msm_property_info *info, void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs)
+ struct msm_property_state *property_state)
{
uint32_t i;
@@ -554,19 +564,21 @@
DRM_ERROR("invalid argument(s)\n");
return;
}
- if (property_blobs) {
+ if (property_state && property_state->values) {
/* remove ref count for blobs */
- for (i = 0; i < info->blob_count; ++i)
- if (property_blobs[i])
+ for (i = 0; i < info->property_count; ++i)
+ if (property_state->values[i].blob) {
drm_property_unreference_blob(
- property_blobs[i]);
+ property_state->values[i].blob);
+ property_state->values[i].blob = NULL;
+ }
}
_msm_property_free_state(info, state);
}
void *msm_property_get_blob(struct msm_property_info *info,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
size_t *byte_len,
uint32_t property_idx)
{
@@ -574,10 +586,11 @@
size_t len = 0;
void *rc = 0;
- if (!info || !property_blobs || (property_idx >= info->blob_count)) {
+ if (!info || !property_state || !property_state->values ||
+ (property_idx >= info->blob_count)) {
DRM_ERROR("invalid argument(s)\n");
} else {
- blob = property_blobs[property_idx];
+ blob = property_state->values[property_idx].blob;
if (blob) {
len = blob->length;
rc = &blob->data;
@@ -636,14 +649,15 @@
}
int msm_property_set_property(struct msm_property_info *info,
- uint64_t *property_values,
+ struct msm_property_state *property_state,
uint32_t property_idx,
uint64_t val)
{
int rc = -EINVAL;
if (!info || (property_idx >= info->property_count) ||
- property_idx < info->blob_count || !property_values) {
+ property_idx < info->blob_count ||
+ !property_state || !property_state->values) {
DRM_ERROR("invalid argument(s)\n");
} else {
struct drm_property *drm_prop;
@@ -651,8 +665,7 @@
mutex_lock(&info->property_lock);
/* update cached value */
- if (property_values)
- property_values[property_idx] = val;
+ property_state->values[property_idx].value = val;
/* update the new default value for immutables */
drm_prop = info->property_array[property_idx];
diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h
index e54c796..9a53e56 100644
--- a/drivers/gpu/drm/msm/msm_prop.h
+++ b/drivers/gpu/drm/msm/msm_prop.h
@@ -22,17 +22,28 @@
* struct msm_property_data - opaque structure for tracking per
* drm-object per property stuff
* @default_value: Default property value for this drm object
- * @dirty_node: Linked list node to track if property is dirty or not
* @force_dirty: Always dirty property on incoming sets, rather than checking
* for modified values
*/
struct msm_property_data {
uint64_t default_value;
- struct list_head dirty_node;
bool force_dirty;
};
/**
+ * struct msm_property_value - opaque structure for tracking the current
+ * value, blob reference, and dirty status of a single drm-object property
+ * @value: Current property value for this drm object
+ * @blob: Pointer to associated blob data, if available
+ * @dirty_node: Linked list node to track if property is dirty or not
+ */
+struct msm_property_value {
+ uint64_t value;
+ struct drm_property_blob *blob;
+ struct list_head dirty_node;
+};
+
+/**
* struct msm_property_info: Structure for property/state helper functions
* @base: Pointer to base drm object (plane/crtc/etc.)
* @dev: Pointer to drm device object
@@ -43,8 +54,6 @@
* @install_request: Total number of property 'install' requests
* @install_count: Total number of successful 'install' requests
* @recent_idx: Index of property most recently accessed by set/get
- * @dirty_list: List of all properties that have been 'atomic_set' but not
- * yet cleared with 'msm_property_pop_dirty'
* @is_active: Whether or not drm component properties are 'active'
* @state_cache: Cache of local states, to prevent alloc/free thrashing
* @state_size: Size of local state structures
@@ -64,7 +73,6 @@
int32_t recent_idx;
- struct list_head dirty_list;
bool is_active;
void *state_cache[MSM_PROP_STATE_CACHE_SIZE];
@@ -74,6 +82,19 @@
};
/**
+ * struct msm_property_state - Structure for local property state information
+ * @property_count: Total number of properties
+ * @values: Pointer to array of msm_property_value objects
+ * @dirty_list: List of all properties that have been 'atomic_set' but not
+ * yet cleared with 'msm_property_pop_dirty'
+ */
+struct msm_property_state {
+ uint32_t property_count;
+ struct msm_property_value *values;
+ struct list_head dirty_list;
+};
+
+/**
* msm_property_get_default - query default value of a property
* @info: Pointer to property info container struct
* @property_idx: Property index
@@ -134,12 +155,14 @@
* msm_property_pop_dirty - determine next dirty property and clear
* its dirty flag
* @info: Pointer to property info container struct
+ * @property_state: Pointer to property state container struct
* Returns: Valid msm property index on success,
* -EAGAIN if no dirty properties are available
* Property indicies returned from this function are similar
* to those returned by the msm_property_index function.
*/
-int msm_property_pop_dirty(struct msm_property_info *info);
+int msm_property_pop_dirty(struct msm_property_info *info,
+ struct msm_property_state *property_state);
/**
* msm_property_init - initialize property info structure
@@ -268,38 +291,37 @@
/**
* msm_property_set_dirty - forcibly flag a property as dirty
* @info: Pointer to property info container struct
+ * @property_state: Pointer to property state container struct
* @property_idx: Property index
* Returns: Zero on success
*/
-int msm_property_set_dirty(struct msm_property_info *info, int property_idx);
+int msm_property_set_dirty(struct msm_property_info *info,
+ struct msm_property_state *property_state,
+ int property_idx);
/**
* msm_property_atomic_set - helper function for atomic property set callback
* @info: Pointer to property info container struct
- * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to local state structure
* @property: Incoming property pointer
* @val: Incoming property value
* Returns: Zero on success
*/
int msm_property_atomic_set(struct msm_property_info *info,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
struct drm_property *property,
uint64_t val);
/**
* msm_property_atomic_get - helper function for atomic property get callback
* @info: Pointer to property info container struct
- * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to local state structure
* @property: Incoming property pointer
* @val: Pointer to variable for receiving property value
* Returns: Zero on success
*/
int msm_property_atomic_get(struct msm_property_info *info,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
struct drm_property *property,
uint64_t *val);
@@ -313,50 +335,47 @@
* msm_property_reset_state - helper function for state reset callback
* @info: Pointer to property info container struct
* @state: Pointer to local state structure
+ * @property_state: Pointer to property state container struct
* @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
*/
-void msm_property_reset_state(struct msm_property_info *info,
- void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs);
+void msm_property_reset_state(struct msm_property_info *info, void *state,
+ struct msm_property_state *property_state,
+ struct msm_property_value *property_values);
/**
* msm_property_duplicate_state - helper function for duplicate state cb
* @info: Pointer to property info container struct
* @old_state: Pointer to original state structure
* @state: Pointer to newly created state structure
+ * @property_state: Pointer to destination property state container struct
* @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
*/
void msm_property_duplicate_state(struct msm_property_info *info,
void *old_state,
void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs);
+ struct msm_property_state *property_state,
+ struct msm_property_value *property_values);
/**
* msm_property_destroy_state - helper function for destroy state cb
* @info: Pointer to property info container struct
* @state: Pointer to local state structure
- * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to property state container struct
*/
void msm_property_destroy_state(struct msm_property_info *info,
void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs);
+ struct msm_property_state *property_state);
/**
* msm_property_get_blob - obtain cached data pointer for drm blob property
* @info: Pointer to property info container struct
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to property state container struct
* @byte_len: Optional pointer to variable for accepting blob size
* @property_idx: Property index
* Returns: Pointer to blob data
*/
void *msm_property_get_blob(struct msm_property_info *info,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
size_t *byte_len,
uint32_t property_idx);
@@ -385,13 +404,13 @@
* DRM_MODE_PROP_IMMUTABLE flag set.
* Note: This function cannot be called on a blob.
* @info: Pointer to property info container struct
- * @property_values: Pointer to property values cache array
+ * @property_state: Pointer to property state container struct
* @property_idx: Property index
* @val: value of the property to set
* Returns: Zero on success
*/
int msm_property_set_property(struct msm_property_info *info,
- uint64_t *property_values,
+ struct msm_property_state *property_state,
uint32_t property_idx,
uint64_t val);
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
index 4a664a8..5a646e9 100644
--- a/drivers/gpu/drm/msm/sde/sde_ad4.h
+++ b/drivers/gpu/drm/msm/sde/sde_ad4.h
@@ -48,6 +48,9 @@
AD_SUSPEND,
AD_ASSERTIVE,
AD_BACKLIGHT,
+ AD_IPC_SUSPEND,
+ AD_IPC_RESUME,
+ AD_IPC_RESET,
AD_PROPMAX,
};
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index e999a6a..9409066 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -60,6 +60,8 @@
static void dspp_gc_install_property(struct drm_crtc *crtc);
+static void dspp_igc_install_property(struct drm_crtc *crtc);
+
typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc);
static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
@@ -72,6 +74,9 @@
static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg);
+static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
+ enum ad_property ad_prop);
+
#define setup_dspp_prop_install_funcs(func) \
do { \
func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
@@ -80,6 +85,7 @@
func[SDE_DSPP_VLUT] = dspp_vlut_install_property; \
func[SDE_DSPP_GAMUT] = dspp_gamut_install_property; \
func[SDE_DSPP_GC] = dspp_gc_install_property; \
+ func[SDE_DSPP_IGC] = dspp_igc_install_property; \
} while (0)
typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc);
@@ -750,6 +756,7 @@
DRM_DEBUG_DRIVER("Dirty list is empty\n");
return;
}
+ sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESET);
set_dspp_flush = true;
}
@@ -1061,6 +1068,7 @@
"SDE_DSPP_PCC_V", version);
switch (version) {
case 1:
+ case 4:
sde_cp_crtc_install_blob_property(crtc, feature_name,
SDE_CP_CRTC_DSPP_PCC, sizeof(struct drm_msm_pcc));
break;
@@ -1241,6 +1249,30 @@
}
}
+static void dspp_igc_install_property(struct drm_crtc *crtc)
+{
+ char feature_name[256];
+ struct sde_kms *kms = NULL;
+ struct sde_mdss_cfg *catalog = NULL;
+ u32 version;
+
+ kms = get_kms(crtc);
+ catalog = kms->catalog;
+
+ version = catalog->dspp[0].sblk->igc.version >> 16;
+ snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+ "SDE_DSPP_IGC_V", version);
+ switch (version) {
+ case 3:
+ sde_cp_crtc_install_blob_property(crtc, feature_name,
+ SDE_CP_CRTC_DSPP_IGC, sizeof(struct drm_msm_igc_lut));
+ break;
+ default:
+ DRM_ERROR("version %d not supported\n", version);
+ break;
+ }
+}
+
static void sde_cp_update_list(struct sde_cp_node *prop_node,
struct sde_crtc *crtc, bool dirty_list)
{
@@ -1429,3 +1461,61 @@
exit:
return ret;
}
+
+static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
+ enum ad_property ad_prop)
+{
+ struct sde_ad_hw_cfg ad_cfg;
+ struct sde_hw_cp_cfg hw_cfg;
+ struct sde_hw_dspp *hw_dspp = NULL;
+ struct sde_hw_mixer *hw_lm = NULL;
+ u32 num_mixers = sde_crtc->num_mixers;
+ int i = 0, ret = 0;
+
+ hw_cfg.num_of_mixers = sde_crtc->num_mixers;
+ hw_cfg.displayh = sde_crtc->base.mode.hdisplay;
+ hw_cfg.displayv = sde_crtc->base.mode.vdisplay;
+
+ for (i = 0; i < num_mixers && !ret; i++) {
+ hw_lm = sde_crtc->mixers[i].hw_lm;
+ hw_dspp = sde_crtc->mixers[i].hw_dspp;
+ if (!hw_lm || !hw_dspp || !hw_dspp->ops.validate_ad ||
+ !hw_dspp->ops.setup_ad) {
+ ret = -EINVAL;
+ continue;
+ }
+
+ hw_cfg.mixer_info = hw_lm;
+ ad_cfg.prop = ad_prop;
+ ad_cfg.hw_cfg = &hw_cfg;
+ ret = hw_dspp->ops.validate_ad(hw_dspp, (u32 *)&ad_prop);
+ if (!ret)
+ hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+ }
+}
+
+void sde_cp_crtc_pre_ipc(struct drm_crtc *drm_crtc)
+{
+ struct sde_crtc *sde_crtc;
+
+ sde_crtc = to_sde_crtc(drm_crtc);
+ if (!sde_crtc) {
+ DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+ return;
+ }
+
+ sde_cp_ad_set_prop(sde_crtc, AD_IPC_SUSPEND);
+}
+
+void sde_cp_crtc_post_ipc(struct drm_crtc *drm_crtc)
+{
+ struct sde_crtc *sde_crtc;
+
+ sde_crtc = to_sde_crtc(drm_crtc);
+ if (!sde_crtc) {
+ DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+ return;
+ }
+
+ sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESUME);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
index e78f690..08e345d 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.h
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -103,4 +103,18 @@
*/
int sde_cp_ad_interrupt(struct drm_crtc *crtc, bool en,
struct sde_irq_callback *irq);
+
+/**
+ * sde_cp_crtc_pre_ipc: Handle color processing features
+ * before entering IPC
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_pre_ipc(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_post_ipc: Handle color processing features
+ * after exiting IPC
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_post_ipc(struct drm_crtc *crtc);
#endif /*_SDE_COLOR_PROCESSING_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 6c9d496..f5e2ada 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -233,8 +233,8 @@
}
if (!c_conn->ops.get_dst_format) {
- SDE_ERROR("get_dst_format is invalid\n");
- return -EINVAL;
+ SDE_DEBUG("get_dst_format is unavailable\n");
+ return 0;
}
dst_format = c_conn->ops.get_dst_format(c_conn->display);
@@ -331,7 +331,7 @@
/* try to get user config data first */
*cfg = msm_property_get_blob(&c_conn->property_info,
- c_state->property_blobs,
+ &c_state->property_state,
&dither_sz,
CONNECTOR_PROP_PP_DITHER);
/* if user config data doesn't exist, use default dither blob */
@@ -459,13 +459,12 @@
drm_framebuffer_unreference(c_state->out_fb);
c_state->out_fb = NULL;
- if (c_conn) {
- c_state->property_values[CONNECTOR_PROP_OUT_FB] =
+ if (c_conn)
+ c_state->property_values[CONNECTOR_PROP_OUT_FB].value =
msm_property_get_default(&c_conn->property_info,
CONNECTOR_PROP_OUT_FB);
- } else {
- c_state->property_values[CONNECTOR_PROP_OUT_FB] = ~0;
- }
+ else
+ c_state->property_values[CONNECTOR_PROP_OUT_FB].value = ~0;
}
static void sde_connector_atomic_destroy_state(struct drm_connector *connector,
@@ -496,8 +495,7 @@
} else {
/* destroy value helper */
msm_property_destroy_state(&c_conn->property_info, c_state,
- c_state->property_values,
- c_state->property_blobs);
+ &c_state->property_state);
}
}
@@ -526,7 +524,8 @@
/* reset value helper, zero out state structure and reset properties */
msm_property_reset_state(&c_conn->property_info, c_state,
- c_state->property_values, c_state->property_blobs);
+ &c_state->property_state,
+ c_state->property_values);
c_state->base.connector = connector;
connector->state = &c_state->base;
@@ -554,8 +553,8 @@
/* duplicate value helper */
msm_property_duplicate_state(&c_conn->property_info,
- c_oldstate, c_state, c_state->property_values,
- c_state->property_blobs);
+ c_oldstate, c_state,
+ &c_state->property_state, c_state->property_values);
/* additional handling for drm framebuffer objects */
if (c_state->out_fb) {
@@ -755,8 +754,7 @@
/* generic property handling */
rc = msm_property_atomic_set(&c_conn->property_info,
- c_state->property_values, c_state->property_blobs,
- property, val);
+ &c_state->property_state, property, val);
if (rc)
goto end;
@@ -863,8 +861,7 @@
else
/* get cached property value */
rc = msm_property_atomic_get(&c_conn->property_info,
- c_state->property_values,
- c_state->property_blobs, property, val);
+ &c_state->property_state, property, val);
/* allow for custom override */
if (c_conn->ops.get_property)
@@ -886,7 +883,8 @@
sde_fence_prepare(&to_sde_connector(connector)->retire_fence);
}
-void sde_connector_complete_commit(struct drm_connector *connector)
+void sde_connector_complete_commit(struct drm_connector *connector,
+ ktime_t ts)
{
if (!connector) {
SDE_ERROR("invalid connector\n");
@@ -894,7 +892,7 @@
}
/* signal connector's retire fence */
- sde_fence_signal(&to_sde_connector(connector)->retire_fence, 0);
+ sde_fence_signal(&to_sde_connector(connector)->retire_fence, ts, 0);
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 8e46a11..1598968 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -315,6 +315,7 @@
* @base: Base drm connector structure
* @out_fb: Pointer to output frame buffer, if applicable
* @aspace: Address space for accessing frame buffer objects, if applicable
+ * @property_state: Local storage for msm_prop properties
* @property_values: Local cache of current connector property values
* @rois: Regions of interest structure for mapping CRTC to Connector output
* @property_blobs: blob properties
@@ -323,7 +324,8 @@
struct drm_connector_state base;
struct drm_framebuffer *out_fb;
struct msm_gem_address_space *aspace;
- uint64_t property_values[CONNECTOR_PROP_COUNT];
+ struct msm_property_state property_state;
+ struct msm_property_value property_values[CONNECTOR_PROP_COUNT];
struct msm_roi_list rois;
struct drm_property_blob *property_blobs[CONNECTOR_PROP_BLOBCOUNT];
@@ -346,15 +348,15 @@
*/
#define sde_connector_get_property(S, X) \
((S) && ((X) < CONNECTOR_PROP_COUNT) ? \
- (to_sde_connector_state((S))->property_values[(X)]) : 0)
+ (to_sde_connector_state((S))->property_values[(X)].value) : 0)
/**
- * sde_connector_get_property_values - retrieve property values cache
+ * sde_connector_get_property_state - retrieve property state cache
* @S: Pointer to drm connector state
- * Returns: Integer value of requested property
+ * Returns: Pointer to local property state structure
*/
-#define sde_connector_get_property_values(S) \
- ((S) ? (to_sde_connector_state((S))->property_values) : 0)
+#define sde_connector_get_property_state(S) \
+ ((S) ? (&to_sde_connector_state((S))->property_state) : NULL)
/**
* sde_connector_get_out_fb - query out_fb value from sde connector state
@@ -406,8 +408,9 @@
/**
* sde_connector_complete_commit - signal completion of current commit
* @connector: Pointer to drm connector object
+ * @ts: timestamp to be updated in the fence signalling
*/
-void sde_connector_complete_commit(struct drm_connector *connector);
+void sde_connector_complete_commit(struct drm_connector *connector, ktime_t ts);
/**
* sde_connector_get_info - query display specific information
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index cec2b5f..dfdfc1a 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -34,8 +34,12 @@
pr_debug("irq_idx=%d\n", irq_idx);
- if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
+ if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
SDE_ERROR("irq_idx=%d has no registered callback\n", irq_idx);
+ SDE_EVT32_IRQ(irq_idx, atomic_read(
+ &sde_kms->irq_obj.enable_counts[irq_idx]),
+ SDE_EVTLOG_ERROR);
+ }
atomic_inc(&irq_obj->irq_counts[irq_idx]);
@@ -53,7 +57,7 @@
* NOTE: sde_core_irq_callback_handler is protected by top-level
* spinlock, so it is safe to clear any interrupt status here.
*/
- sde_kms->hw_intr->ops.clear_interrupt_status(
+ sde_kms->hw_intr->ops.clear_intr_status_nolock(
sde_kms->hw_intr,
irq_idx);
}
@@ -94,7 +98,6 @@
SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
- spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
SDE_EVT32(irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
if (atomic_inc_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1) {
@@ -107,26 +110,33 @@
SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
/* empty callback list but interrupt is enabled */
if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]))
SDE_ERROR("irq_idx=%d enabled with no callback\n",
irq_idx);
+ spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
}
- spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
return ret;
}
int sde_core_irq_enable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
{
- int i;
- int ret = 0;
+ int i, ret = 0, counts;
if (!sde_kms || !irq_idxs || !irq_count) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ counts = atomic_read(&sde_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts) {
+ SDE_ERROR("%pS: irq_idx=%d enable_count=%d\n",
+ __builtin_return_address(0), irq_idxs[0], counts);
+ SDE_EVT32(irq_idxs[0], counts, SDE_EVTLOG_ERROR);
+ }
+
for (i = 0; (i < irq_count) && !ret; i++)
ret = _sde_core_irq_enable(sde_kms, irq_idxs[i]);
@@ -140,7 +150,6 @@
*/
static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
{
- unsigned long irq_flags;
int ret = 0;
if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
@@ -156,7 +165,6 @@
SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
- spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
SDE_EVT32(irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
if (atomic_dec_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0) {
@@ -168,27 +176,48 @@
irq_idx);
SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
}
- spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
return ret;
}
int sde_core_irq_disable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
{
- int i;
- int ret = 0;
+ int i, ret = 0, counts;
if (!sde_kms || !irq_idxs || !irq_count) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ counts = atomic_read(&sde_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts == 2) {
+ SDE_ERROR("%pS: irq_idx=%d enable_count=%d\n",
+ __builtin_return_address(0), irq_idxs[0], counts);
+ SDE_EVT32(irq_idxs[0], counts, SDE_EVTLOG_ERROR);
+ }
+
for (i = 0; (i < irq_count) && !ret; i++)
ret = _sde_core_irq_disable(sde_kms, irq_idxs[i]);
return ret;
}
+u32 sde_core_irq_read_nolock(struct sde_kms *sde_kms, int irq_idx, bool clear)
+{
+	/* guard the op actually invoked below: get_intr_status_nolock */
+	if (!sde_kms || !sde_kms->hw_intr ||
+			!sde_kms->hw_intr->ops.get_intr_status_nolock)
+		return 0;
+
+	if (irq_idx < 0) {
+		SDE_ERROR("[%pS] invalid irq_idx=%d\n",
+			__builtin_return_address(0), irq_idx);
+		return 0;
+	}
+
+	return sde_kms->hw_intr->ops.get_intr_status_nolock(sde_kms->hw_intr,
+		irq_idx, clear);
+}
+
u32 sde_core_irq_read(struct sde_kms *sde_kms, int irq_idx, bool clear)
{
if (!sde_kms || !sde_kms->hw_intr ||
@@ -210,12 +239,19 @@
{
unsigned long irq_flags;
- if (!sde_kms || !register_irq_cb || !register_irq_cb->func ||
- !sde_kms->irq_obj.irq_cb_tbl) {
+ if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ if (!register_irq_cb || !register_irq_cb->func) {
+ SDE_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
@@ -238,12 +274,19 @@
{
unsigned long irq_flags;
- if (!sde_kms || !register_irq_cb || !register_irq_cb->func ||
- !sde_kms->irq_obj.irq_cb_tbl) {
+ if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ if (!register_irq_cb || !register_irq_cb->func) {
+ SDE_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
index c775f8c..c32c19c 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -114,6 +114,18 @@
bool clear);
/**
+ * sde_core_irq_read_nolock - no lock version of sde_core_irq_read
+ * @sde_kms: SDE handle
+ * @irq_idx: irq index
+ * @clear: True to clear the irq after read
+ * @return: non-zero if irq detected; otherwise no irq detected
+ */
+u32 sde_core_irq_read_nolock(
+ struct sde_kms *sde_kms,
+ int irq_idx,
+ bool clear);
+
+/**
* sde_core_irq_register_callback - For registering callback function on IRQ
* interrupt
* @sde_kms: SDE handle
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 71dfc12..7243fe2 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -225,8 +225,17 @@
struct sde_crtc_state *tmp_cstate =
to_sde_crtc_state(tmp_crtc->state);
- bw_sum_of_intfs +=
- tmp_cstate->new_perf.bw_ctl[i];
+ SDE_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+ tmp_crtc->base.id,
+ tmp_cstate->new_perf.bw_ctl[i],
+ tmp_cstate->bw_control);
+ /*
+ * For bw check only use the bw if the
+ * atomic property has been already set
+ */
+ if (tmp_cstate->bw_control)
+ bw_sum_of_intfs +=
+ tmp_cstate->new_perf.bw_ctl[i];
}
}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 63979dd..01e4f93 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -55,9 +55,13 @@
static int sde_crtc_power_interrupt_handler(struct drm_crtc *crtc_drm,
bool en, struct sde_irq_callback *ad_irq);
+static int sde_crtc_pm_event_handler(struct drm_crtc *crtc_drm,
+ bool en, struct sde_irq_callback *noirq);
+
static struct sde_crtc_custom_events custom_events[] = {
{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt},
- {DRM_EVENT_CRTC_POWER, sde_crtc_power_interrupt_handler}
+ {DRM_EVENT_CRTC_POWER, sde_crtc_power_interrupt_handler},
+ {DRM_EVENT_SDE_POWER, sde_crtc_pm_event_handler},
};
/* default input fence timeout, in ms */
@@ -1516,6 +1520,8 @@
struct sde_crtc_state *cstate;
struct sde_kms *sde_kms;
unsigned long flags;
+ bool frame_done = false;
+ int i;
if (!work) {
SDE_ERROR("invalid work handle\n");
@@ -1538,13 +1544,16 @@
return;
}
priv = sde_kms->dev->dev_private;
+ SDE_ATRACE_BEGIN("crtc_frame_event");
SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
ktime_to_ns(fevent->ts));
- if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
- (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR) ||
- (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+ SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_ENTRY);
+
+ if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE
+ | SDE_ENCODER_FRAME_EVENT_ERROR
+ | SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
if (atomic_read(&sde_crtc->frame_pending) < 1) {
/* this should not happen */
@@ -1567,26 +1576,39 @@
SDE_EVTLOG_FUNC_CASE3);
}
- if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
- (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR))
- complete_all(&sde_crtc->frame_done_comp);
-
- if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+ if (fevent->event & SDE_ENCODER_FRAME_EVENT_DONE)
sde_core_perf_crtc_update(crtc, 0, false);
- } else {
- SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
- ktime_to_ns(fevent->ts),
- fevent->event);
- SDE_EVT32(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_CASE4);
+
+ if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE
+ | SDE_ENCODER_FRAME_EVENT_ERROR))
+ frame_done = true;
+ }
+
+ if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE) {
+ SDE_ATRACE_BEGIN("signal_release_fence");
+ sde_fence_signal(&sde_crtc->output_fence, fevent->ts, 0);
+ SDE_ATRACE_END("signal_release_fence");
+ }
+
+ if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE) {
+ SDE_ATRACE_BEGIN("signal_retire_fence");
+ for (i = 0; i < cstate->num_connectors; ++i)
+ sde_connector_complete_commit(cstate->connectors[i],
+ fevent->ts);
+ SDE_ATRACE_END("signal_retire_fence");
}
if (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)
SDE_ERROR("crtc%d ts:%lld received panel dead event\n",
crtc->base.id, ktime_to_ns(fevent->ts));
+ if (frame_done)
+ complete_all(&sde_crtc->frame_done_comp);
+
spin_lock_irqsave(&sde_crtc->spin_lock, flags);
list_add_tail(&fevent->list, &sde_crtc->frame_event_list);
spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+ SDE_ATRACE_END("crtc_frame_event");
}
static void sde_crtc_frame_event_cb(void *data, u32 event)
@@ -1629,30 +1651,6 @@
kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}
-void sde_crtc_complete_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
-{
- struct sde_crtc *sde_crtc;
- struct sde_crtc_state *cstate;
- int i;
-
- if (!crtc || !crtc->state) {
- SDE_ERROR("invalid crtc\n");
- return;
- }
-
- sde_crtc = to_sde_crtc(crtc);
- cstate = to_sde_crtc_state(crtc->state);
- SDE_EVT32_VERBOSE(DRMID(crtc));
-
- /* signal release fence */
- sde_fence_signal(&sde_crtc->output_fence, 0);
-
- /* signal retire fence */
- for (i = 0; i < cstate->num_connectors; ++i)
- sde_connector_complete_commit(cstate->connectors[i]);
-}
-
/**
* _sde_crtc_set_input_fence_timeout - update ns version of in fence timeout
* @cstate: Pointer to sde crtc state
@@ -2086,7 +2084,7 @@
/* destroy value helper */
msm_property_destroy_state(&sde_crtc->property_info, cstate,
- cstate->property_values, cstate->property_blobs);
+ &cstate->property_state);
}
static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc)
@@ -2127,6 +2125,7 @@
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
struct sde_crtc_state *cstate;
+ int ret;
if (!crtc) {
SDE_ERROR("invalid argument\n");
@@ -2170,7 +2169,10 @@
}
/* wait for frame_event_done completion */
- if (_sde_crtc_wait_for_frame_done(crtc)) {
+ SDE_ATRACE_BEGIN("wait_for_frame_done_event");
+ ret = _sde_crtc_wait_for_frame_done(crtc);
+ SDE_ATRACE_END("wait_for_frame_done_event");
+ if (ret) {
SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
crtc->base.id,
atomic_read(&sde_crtc->frame_pending));
@@ -2301,13 +2303,13 @@
}
/*
- * If the vblank refcount != 0, release a power reference on suspend
- * and take it back during resume (if it is still != 0).
+ * If the vblank is enabled, release a power reference on suspend
+ * and take it back during resume (if it is still enabled).
*/
if (sde_crtc->suspend == enable)
SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
crtc->base.id, enable);
- else if (atomic_read(&sde_crtc->vblank_refcount) != 0)
+ else if (sde_crtc->vblank_enable)
_sde_crtc_vblank_enable_nolock(sde_crtc, !enable);
sde_crtc->suspend = enable;
@@ -2342,7 +2344,7 @@
/* duplicate value helper */
msm_property_duplicate_state(&sde_crtc->property_info,
old_cstate, cstate,
- cstate->property_values, cstate->property_blobs);
+ &cstate->property_state, cstate->property_values);
/* duplicate base helper */
__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
@@ -2387,7 +2389,8 @@
/* reset value helper */
msm_property_reset_state(&sde_crtc->property_info, cstate,
- cstate->property_values, cstate->property_blobs);
+ &cstate->property_state,
+ cstate->property_values);
_sde_crtc_set_input_fence_timeout(cstate);
@@ -2402,25 +2405,15 @@
if (!sde_crtc) {
SDE_ERROR("invalid crtc\n");
return -EINVAL;
- } else if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
- SDE_DEBUG("crtc%d vblank enable\n", sde_crtc->base.base.id);
- if (!sde_crtc->suspend)
- _sde_crtc_vblank_enable_nolock(sde_crtc, true);
- } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
- SDE_ERROR("crtc%d invalid vblank disable\n",
- sde_crtc->base.base.id);
- return -EINVAL;
- } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
- SDE_DEBUG("crtc%d vblank disable\n", sde_crtc->base.base.id);
- if (!sde_crtc->suspend)
- _sde_crtc_vblank_enable_nolock(sde_crtc, false);
- } else {
- SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
- sde_crtc->base.base.id,
- en ? "enable" : "disable",
- atomic_read(&sde_crtc->vblank_refcount));
}
+ if (!sde_crtc->base.enabled || sde_crtc->suspend)
+ SDE_EVT32(DRMID(&sde_crtc->base), sde_crtc->base.enabled, en,
+ sde_crtc->vblank_enable, sde_crtc->suspend);
+ else if (sde_crtc->vblank_enable != en)
+ _sde_crtc_vblank_enable_nolock(sde_crtc, en);
+ sde_crtc->vblank_enable = en;
+
return 0;
}
@@ -2429,6 +2422,8 @@
struct drm_crtc *crtc = arg;
struct sde_crtc *sde_crtc;
struct drm_encoder *encoder;
+ struct drm_event event;
+ u32 power_on = 0;
if (!crtc) {
SDE_ERROR("invalid crtc\n");
@@ -2448,7 +2443,13 @@
sde_encoder_virt_restore(encoder);
}
+ sde_cp_crtc_post_ipc(crtc);
+ event.type = DRM_EVENT_SDE_POWER;
+ event.length = sizeof(power_on);
+ power_on = 1;
+ msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
+ (u8 *)&power_on);
} else if (event_type == SDE_POWER_EVENT_POST_DISABLE) {
struct drm_plane *plane;
@@ -2460,6 +2461,14 @@
sde_plane_set_revalidate(plane, true);
sde_cp_crtc_suspend(crtc);
+
+ event.type = DRM_EVENT_SDE_POWER;
+ event.length = sizeof(power_on);
+ power_on = 0;
+ msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
+ (u8 *)&power_on);
+ } else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
+ sde_cp_crtc_pre_ipc(crtc);
}
mutex_unlock(&sde_crtc->crtc_lock);
@@ -2497,14 +2506,12 @@
crtc->base.id,
atomic_read(&sde_crtc->frame_pending));
- if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
- SDE_ERROR("crtc%d invalid vblank refcount\n",
+ if (sde_crtc->vblank_enable && !sde_crtc->suspend) {
+ SDE_DEBUG("crtc%d vblank left enabled at disable time\n",
crtc->base.id);
- SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount),
- SDE_EVTLOG_FUNC_CASE1);
- while (atomic_read(&sde_crtc->vblank_refcount))
- if (_sde_crtc_vblank_no_lock(sde_crtc, false))
- break;
+ SDE_EVT32(DRMID(crtc), sde_crtc->vblank_enable,
+ SDE_EVTLOG_FUNC_CASE1);
+ _sde_crtc_vblank_enable_nolock(sde_crtc, false);
}
if (atomic_read(&sde_crtc->frame_pending)) {
@@ -2575,6 +2582,16 @@
sde_crtc_frame_event_cb, (void *)crtc);
}
+ mutex_lock(&sde_crtc->crtc_lock);
+ if (sde_crtc->vblank_enable) {
+ /* honor user vblank request on crtc while it was disabled */
+ SDE_DEBUG("%s vblank found enabled at crtc enable time\n",
+ sde_crtc->name);
+ SDE_EVT32(DRMID(crtc), sde_crtc->vblank_enable);
+ _sde_crtc_vblank_enable_nolock(sde_crtc, true);
+ }
+ mutex_unlock(&sde_crtc->crtc_lock);
+
spin_lock_irqsave(&sde_crtc->spin_lock, flags);
list_for_each_entry(node, &sde_crtc->user_event_list, list) {
ret = 0;
@@ -2588,7 +2605,8 @@
sde_crtc->power_event = sde_power_handle_register_event(
&priv->phandle,
- SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE,
+ SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE |
+ SDE_POWER_EVENT_PRE_DISABLE,
sde_crtc_handle_power_event, crtc, sde_crtc->name);
}
@@ -2758,7 +2776,8 @@
/* check dim layer stage with every plane */
for (i = 0; i < cstate->num_dim_layers; i++) {
- if (pstates[cnt].stage == cstate->dim_layer[i].stage) {
+ if (cstate->dim_layer[i].stage
+ == (pstates[cnt].stage + SDE_STAGE_0)) {
SDE_ERROR(
"plane:%d/dim_layer:%i-same stage:%d\n",
plane->base.id, i,
@@ -3178,8 +3197,7 @@
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(state);
ret = msm_property_atomic_set(&sde_crtc->property_info,
- cstate->property_values, cstate->property_blobs,
- property, val);
+ &cstate->property_state, property, val);
if (!ret) {
idx = msm_property_index(&sde_crtc->property_info,
property);
@@ -3253,8 +3271,10 @@
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
+ struct drm_encoder *encoder;
int i, ret = -EINVAL;
bool conn_offset = 0;
+ bool is_cmd = true;
if (!crtc || !state) {
SDE_ERROR("invalid argument(s)\n");
@@ -3269,19 +3289,36 @@
break;
}
+ /**
+ * set the cmd flag only when all the encoders attached
+ * to the crtc are in cmd mode. Consider all other cases
+ * as video mode.
+ */
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc == crtc)
+ is_cmd &= sde_encoder_is_cmd_mode(encoder);
+ }
+
i = msm_property_index(&sde_crtc->property_info, property);
if (i == CRTC_PROP_OUTPUT_FENCE) {
uint32_t offset = sde_crtc_get_property(cstate,
CRTC_PROP_OUTPUT_FENCE_OFFSET);
+ /**
+ * set the offset to 0 only for cmd mode panels, so
+ * the release fence for the current frame can be
+ * triggered right after PP_DONE interrupt.
+ */
+ offset = is_cmd ? 0 : (offset + conn_offset);
+
ret = sde_fence_create(&sde_crtc->output_fence, val,
- offset + conn_offset);
+ offset);
if (ret)
SDE_ERROR("fence create failed\n");
} else {
ret = msm_property_atomic_get(&sde_crtc->property_info,
- cstate->property_values,
- cstate->property_blobs, property, val);
+ &cstate->property_state,
+ property, val);
if (ret)
ret = sde_cp_crtc_get_property(crtc,
property, val);
@@ -3423,8 +3460,7 @@
sde_crtc->vblank_cb_time = ktime_set(0, 0);
}
- seq_printf(s, "vblank_refcount:%d\n",
- atomic_read(&sde_crtc->vblank_refcount));
+ seq_printf(s, "vblank_enable:%d\n", sde_crtc->vblank_enable);
mutex_unlock(&sde_crtc->crtc_lock);
@@ -3790,7 +3826,6 @@
crtc = &sde_crtc->base;
crtc->dev = dev;
- atomic_set(&sde_crtc->vblank_refcount, 0);
mutex_init(&sde_crtc->crtc_lock);
spin_lock_init(&sde_crtc->spin_lock);
@@ -3975,3 +4010,13 @@
{
return 0;
}
+
+static int sde_crtc_pm_event_handler(struct drm_crtc *crtc, bool en,
+ struct sde_irq_callback *noirq)
+{
+ /*
+ * IRQ object noirq is not being used here since there is
+ * no crtc irq from pm event.
+ */
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 0d72ff1..84f9ce1 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -30,7 +30,7 @@
#define SDE_CRTC_NAME_SIZE 12
/* define the maximum number of in-flight frame events */
-#define SDE_CRTC_FRAME_EVENT_SIZE 2
+#define SDE_CRTC_FRAME_EVENT_SIZE 4
/**
* enum sde_crtc_client_type: crtc client type
@@ -123,7 +123,7 @@
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
- * @vblank_refcount : reference count for vblank enable request
+ * @vblank_enable : whether the user has requested vblank events
* @suspend : whether or not a suspend operation is in progress
* @feature_list : list of color processing features supported on a crtc
* @active_list : list of color processing features are active
@@ -171,7 +171,7 @@
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
- atomic_t vblank_refcount;
+ bool vblank_enable;
bool suspend;
struct list_head feature_list;
@@ -269,9 +269,9 @@
* @lm_roi : Current LM ROI, possibly sub-rectangle of mode.
* Origin top left of CRTC.
* @user_roi_list : List of user's requested ROIs as from set property
+ * @property_state: Local storage for msm_prop properties
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
- * @property_blobs: Reference pointers for blob properties
* @num_dim_layers: Number of dim layers
* @dim_layer: Dim layer configs
* @new_perf: new performance state being requested
@@ -296,9 +296,9 @@
struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
struct msm_roi_list user_roi_list;
- uint64_t property_values[CRTC_PROP_COUNT];
+ struct msm_property_state property_state;
+ struct msm_property_value property_values[CRTC_PROP_COUNT];
uint64_t input_fence_timeout_ns;
- struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
uint32_t num_dim_layers;
struct sde_hw_dim_layer dim_layer[SDE_MAX_DIM_LAYERS];
@@ -320,7 +320,7 @@
* Returns: Integer value of requested property
*/
#define sde_crtc_get_property(S, X) \
- ((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+ ((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)].value) : 0)
static inline int sde_crtc_mixer_width(struct sde_crtc *sde_crtc,
struct drm_display_mode *mode)
@@ -369,14 +369,6 @@
struct drm_crtc_state *old_state);
/**
- * sde_crtc_complete_commit - callback signalling completion of current commit
- * @crtc: Pointer to drm crtc object
- * @old_state: Pointer to drm crtc old state object
- */
-void sde_crtc_complete_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state);
-
-/**
* sde_crtc_init - create a new crtc object
* @dev: sde device
* @plane: base plane
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 56e1151..0e94085 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -17,6 +17,7 @@
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sde_rsc.h>
@@ -206,7 +207,7 @@
bool idle_pc_supported;
struct mutex rc_lock;
enum sde_enc_rc_states rc_state;
- struct delayed_work delayed_off_work;
+ struct kthread_delayed_work delayed_off_work;
struct msm_display_topology topology;
bool mode_set_complete;
@@ -417,6 +418,9 @@
sde_core_irq_unregister_callback(phys_enc->sde_kms,
irq->irq_idx, &irq->cb);
+
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, SDE_EVTLOG_ERROR);
irq->irq_idx = -EINVAL;
return ret;
}
@@ -432,6 +436,7 @@
enum sde_intr_idx intr_idx)
{
struct sde_encoder_irq *irq;
+ int ret;
if (!phys_enc) {
SDE_ERROR("invalid encoder\n");
@@ -440,17 +445,32 @@
irq = &phys_enc->irq[intr_idx];
/* silently skip irqs that weren't registered */
- if (irq->irq_idx < 0)
+ if (irq->irq_idx < 0) {
+ SDE_ERROR(
+ "extra unregister irq, enc%d intr_idx:0x%x hw_idx:0x%x irq_idx:0x%x\n",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, SDE_EVTLOG_ERROR);
return 0;
+ }
- sde_core_irq_disable(phys_enc->sde_kms, &irq->irq_idx, 1);
- sde_core_irq_unregister_callback(phys_enc->sde_kms, irq->irq_idx,
+ ret = sde_core_irq_disable(phys_enc->sde_kms, &irq->irq_idx, 1);
+ if (ret)
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret, SDE_EVTLOG_ERROR);
+
+ ret = sde_core_irq_unregister_callback(phys_enc->sde_kms, irq->irq_idx,
&irq->cb);
- irq->irq_idx = -EINVAL;
+ if (ret)
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret, SDE_EVTLOG_ERROR);
SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx, irq->irq_idx);
SDE_DEBUG_PHYS(phys_enc, "unregistered %d\n", irq->irq_idx);
+ irq->irq_idx = -EINVAL;
+
return 0;
}
@@ -1101,6 +1121,62 @@
return ret;
}
+static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
+ struct msm_display_info *disp_info, bool is_dummy)
+{
+ struct sde_vsync_source_cfg vsync_cfg = { 0 };
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct sde_hw_mdp *hw_mdptop;
+ struct drm_encoder *drm_enc;
+ int i;
+
+ if (!sde_enc || !disp_info) {
+ SDE_ERROR("invalid param sde_enc:%d or disp_info:%d\n",
+ sde_enc != NULL, disp_info != NULL);
+ return;
+ } else if (sde_enc->num_phys_encs > ARRAY_SIZE(sde_enc->hw_pp)) {
+ SDE_ERROR("invalid num phys enc %d/%d\n",
+ sde_enc->num_phys_encs,
+ (int) ARRAY_SIZE(sde_enc->hw_pp));
+ return;
+ }
+
+ drm_enc = &sde_enc->base;
+ /* this pointers are checked in virt_enable_helper */
+ priv = drm_enc->dev->dev_private;
+
+ sde_kms = to_sde_kms(priv->kms);
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ }
+
+ hw_mdptop = sde_kms->hw_mdp;
+ if (!hw_mdptop) {
+ SDE_ERROR("invalid mdptop\n");
+ return;
+ }
+
+ if (hw_mdptop->ops.setup_vsync_source &&
+ disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+ for (i = 0; i < sde_enc->num_phys_encs; i++)
+ vsync_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
+
+ vsync_cfg.pp_count = sde_enc->num_phys_encs;
+ vsync_cfg.frame_rate = sde_enc->disp_info.frame_rate;
+ if (is_dummy)
+ vsync_cfg.vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_1;
+ else if (disp_info->is_te_using_watchdog_timer)
+ vsync_cfg.vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_0;
+ else
+ vsync_cfg.vsync_source = SDE_VSYNC0_SOURCE_GPIO;
+ vsync_cfg.is_dummy = is_dummy;
+
+ hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+ }
+}
+
static int sde_encoder_update_rsc_client(
struct drm_encoder *drm_enc,
struct sde_encoder_rsc_config *config, bool enable)
@@ -1143,7 +1219,8 @@
rsc_config.fps = disp_info->frame_rate;
rsc_config.vtotal = disp_info->vtotal;
rsc_config.prefill_lines = disp_info->prefill_lines;
- rsc_config.jitter = disp_info->jitter;
+ rsc_config.jitter_numer = disp_info->jitter_numer;
+ rsc_config.jitter_denom = disp_info->jitter_denom;
rsc_config.prefill_lines += config ?
config->inline_rotate_prefill : 0;
/* update it only once */
@@ -1216,6 +1293,9 @@
rsc_cfg.inline_rotate_prefill =
sde_crtc_get_inline_prefill(drm_enc->crtc);
+ _sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info,
+ false);
+
/* enable RSC */
sde_encoder_update_rsc_client(drm_enc, &rsc_cfg, true);
@@ -1224,6 +1304,14 @@
/* disable RSC */
sde_encoder_update_rsc_client(drm_enc, NULL, false);
+ /**
+ * this call is for hardware workaround on sdm845 and should
+ * not be removed without considering the design changes for
+ * sde rsc + command mode concurrency. It may lead to pp
+ * timeout due to vsync from panel for command mode panel.
+ */
+ _sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info,
+ true);
/* disable all the irq */
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys =
@@ -1248,12 +1336,22 @@
{
bool schedule_off = false;
struct sde_encoder_virt *sde_enc;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *disp_thread;
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
SDE_ERROR("invalid parameters\n");
return -EINVAL;
}
sde_enc = to_sde_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+ SDE_ERROR("invalid crtc index\n");
+ return -EINVAL;
+ }
+ disp_thread = &priv->disp_thread[drm_enc->crtc->index];
/*
* when idle_pc is not supported, process only KICKOFF and STOP
@@ -1272,7 +1370,8 @@
switch (sw_event) {
case SDE_ENC_RC_EVENT_KICKOFF:
/* cancel delayed off work, if any */
- if (cancel_delayed_work_sync(&sde_enc->delayed_off_work))
+ if (kthread_cancel_delayed_work_sync(
+ &sde_enc->delayed_off_work))
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
sw_event);
@@ -1319,8 +1418,10 @@
}
/* schedule delayed off work */
- schedule_delayed_work(&sde_enc->delayed_off_work,
- msecs_to_jiffies(IDLE_TIMEOUT));
+ kthread_queue_delayed_work(
+ &disp_thread->worker,
+ &sde_enc->delayed_off_work,
+ msecs_to_jiffies(IDLE_TIMEOUT));
SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
SDE_EVTLOG_FUNC_CASE2);
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
@@ -1329,7 +1430,8 @@
case SDE_ENC_RC_EVENT_STOP:
/* cancel delayed off work, if any */
- if (cancel_delayed_work_sync(&sde_enc->delayed_off_work))
+ if (kthread_cancel_delayed_work_sync(
+ &sde_enc->delayed_off_work))
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
sw_event);
@@ -1353,6 +1455,7 @@
SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
SDE_ENC_RC_STATE_OFF, SDE_EVTLOG_FUNC_CASE3);
+
sde_enc->rc_state = SDE_ENC_RC_STATE_OFF;
mutex_unlock(&sde_enc->rc_lock);
@@ -1360,7 +1463,8 @@
case SDE_ENC_RC_EVENT_EARLY_WAKE_UP:
/* cancel delayed off work, if any */
- if (cancel_delayed_work_sync(&sde_enc->delayed_off_work)) {
+ if (kthread_cancel_delayed_work_sync(
+ &sde_enc->delayed_off_work)) {
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
sw_event);
schedule_off = true;
@@ -1398,7 +1502,9 @@
*/
if (schedule_off && !sde_crtc_frame_pending(drm_enc->crtc)) {
/* schedule delayed off work */
- schedule_delayed_work(&sde_enc->delayed_off_work,
+ kthread_queue_delayed_work(
+ &disp_thread->worker,
+ &sde_enc->delayed_off_work,
msecs_to_jiffies(IDLE_TIMEOUT));
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
sw_event);
@@ -1413,6 +1519,22 @@
if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc:%d !ON state\n",
sw_event, sde_enc->rc_state);
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event,
+ sde_enc->rc_state);
+ mutex_unlock(&sde_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * if we are in ON but a frame was just kicked off,
+ * ignore the IDLE event, it's probably a stale timer event
+ */
+ if (sde_enc->frame_busy_mask[0]) {
+ SDE_DEBUG_ENC(sde_enc,
+ "sw_event:%d, rc:%d frame pending\n",
+ sw_event, sde_enc->rc_state);
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event,
+ sde_enc->rc_state);
mutex_unlock(&sde_enc->rc_lock);
return 0;
}
@@ -1436,11 +1558,10 @@
return 0;
}
-static void sde_encoder_off_work(struct work_struct *work)
+static void sde_encoder_off_work(struct kthread_work *work)
{
- struct delayed_work *dw = to_delayed_work(work);
- struct sde_encoder_virt *sde_enc = container_of(dw,
- struct sde_encoder_virt, delayed_off_work);
+ struct sde_encoder_virt *sde_enc = container_of(work,
+ struct sde_encoder_virt, delayed_off_work.work);
if (!sde_enc) {
SDE_ERROR("invalid sde encoder\n");
@@ -1550,15 +1671,18 @@
struct sde_encoder_virt *sde_enc = NULL;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
- struct sde_hw_mdp *hw_mdptop;
- int i = 0;
- struct sde_watchdog_te_status te_cfg = { 0 };
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
SDE_ERROR("invalid parameters\n");
return;
}
+
priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ }
sde_enc = to_sde_encoder_virt(drm_enc);
if (!sde_enc || !sde_enc->cur_master) {
@@ -1566,35 +1690,13 @@
return;
}
- sde_kms = to_sde_kms(priv->kms);
- hw_mdptop = sde_kms->hw_mdp;
-
- if (!hw_mdptop) {
- SDE_ERROR("invalid mdptop\n");
- return;
- }
-
- sde_kms = to_sde_kms(priv->kms);
- if (!sde_kms) {
- SDE_ERROR("invalid sde_kms\n");
- return;
- }
-
if (sde_enc->cur_master->hw_mdptop &&
sde_enc->cur_master->hw_mdptop->ops.reset_ubwc)
sde_enc->cur_master->hw_mdptop->ops.reset_ubwc(
sde_enc->cur_master->hw_mdptop,
sde_kms->catalog);
- if (hw_mdptop->ops.setup_vsync_sel) {
- for (i = 0; i < sde_enc->num_phys_encs; i++)
- te_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
-
- te_cfg.pp_count = sde_enc->num_phys_encs;
- te_cfg.frame_rate = sde_enc->disp_info.frame_rate;
- hw_mdptop->ops.setup_vsync_sel(hw_mdptop, &te_cfg,
- sde_enc->disp_info.is_te_using_watchdog_timer);
- }
+ _sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info, false);
memset(&sde_enc->prv_conn_roi, 0, sizeof(sde_enc->prv_conn_roi));
memset(&sde_enc->cur_conn_roi, 0, sizeof(sde_enc->cur_conn_roi));
@@ -1709,15 +1811,15 @@
}
}
+ if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
+ sde_enc->cur_master->ops.disable(sde_enc->cur_master);
+
/* after phys waits for frame-done, should be no more frames pending */
if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id);
del_timer_sync(&sde_enc->frame_done_timer);
}
- if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
- sde_enc->cur_master->ops.disable(sde_enc->cur_master);
-
sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_STOP);
if (sde_enc->cur_master) {
@@ -1847,27 +1949,41 @@
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
unsigned int i;
- if (!sde_enc->frame_busy_mask[0]) {
- /* suppress frame_done without waiter, likely autorefresh */
- SDE_EVT32(DRMID(drm_enc), event, ready_phys->intf_idx);
- return;
- }
+ if (event & (SDE_ENCODER_FRAME_EVENT_DONE
+ | SDE_ENCODER_FRAME_EVENT_ERROR
+ | SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
- /* One of the physical encoders has become idle */
- for (i = 0; i < sde_enc->num_phys_encs; i++)
- if (sde_enc->phys_encs[i] == ready_phys) {
- clear_bit(i, sde_enc->frame_busy_mask);
- SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
- sde_enc->frame_busy_mask[0]);
+ if (!sde_enc->frame_busy_mask[0]) {
+ /**
+ * suppress frame_done without waiter,
+ * likely autorefresh
+ */
+ SDE_EVT32(DRMID(drm_enc), event, ready_phys->intf_idx);
+ return;
}
- if (!sde_enc->frame_busy_mask[0]) {
- atomic_set(&sde_enc->frame_done_timeout, 0);
- del_timer(&sde_enc->frame_done_timer);
+ /* One of the physical encoders has become idle */
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ if (sde_enc->phys_encs[i] == ready_phys) {
+ clear_bit(i, sde_enc->frame_busy_mask);
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
+ sde_enc->frame_busy_mask[0]);
+ }
+ }
- sde_encoder_resource_control(drm_enc,
- SDE_ENC_RC_EVENT_FRAME_DONE);
+ if (!sde_enc->frame_busy_mask[0]) {
+ atomic_set(&sde_enc->frame_done_timeout, 0);
+ del_timer(&sde_enc->frame_done_timer);
+ sde_encoder_resource_control(drm_enc,
+ SDE_ENC_RC_EVENT_FRAME_DONE);
+
+ if (sde_enc->crtc_frame_event_cb)
+ sde_enc->crtc_frame_event_cb(
+ sde_enc->crtc_frame_event_cb_data,
+ event);
+ }
+ } else {
if (sde_enc->crtc_frame_event_cb)
sde_enc->crtc_frame_event_cb(
sde_enc->crtc_frame_event_cb_data, event);
@@ -1908,6 +2024,9 @@
pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
+ if (phys->ops.is_master && phys->ops.is_master(phys))
+ atomic_inc(&phys->pending_retire_fence_cnt);
+
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -2017,7 +2136,8 @@
if (rc) {
SDE_ERROR_ENC(sde_enc,
"connector soft reset failure\n");
- SDE_DBG_DUMP("panic");
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus",
+ "panic");
}
}
}
@@ -2025,7 +2145,7 @@
rc = ctl->ops.reset(ctl);
if (rc) {
SDE_ERROR_ENC(sde_enc, "ctl %d reset failure\n", ctl->idx);
- SDE_DBG_DUMP("panic");
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
}
phys_enc->enable_state = SDE_ENC_ENABLED;
@@ -2309,6 +2429,7 @@
SDE_EVT32(DRMID(drm_enc));
/* prepare for next kickoff, may include waiting on previous kickoff */
+ SDE_ATRACE_BEGIN("enc_prepare_for_kickoff");
for (i = 0; i < sde_enc->num_phys_encs; i++) {
phys = sde_enc->phys_encs[i];
if (phys) {
@@ -2319,6 +2440,7 @@
_sde_encoder_setup_dither(phys);
}
}
+ SDE_ATRACE_END("enc_prepare_for_kickoff");
sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
@@ -3009,7 +3131,8 @@
}
mutex_init(&sde_enc->rc_lock);
- INIT_DELAYED_WORK(&sde_enc->delayed_off_work, sde_encoder_off_work);
+ kthread_init_delayed_work(&sde_enc->delayed_off_work,
+ sde_encoder_off_work);
memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
@@ -3052,7 +3175,9 @@
};
if (phys && fn_wait) {
+ SDE_ATRACE_BEGIN("wait_for_completion_event");
ret = fn_wait(phys);
+ SDE_ATRACE_END("wait_for_completion_event");
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 9c2d3e9..3dae994 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -24,9 +24,11 @@
#include "msm_prop.h"
#include "sde_hw_mdss.h"
-#define SDE_ENCODER_FRAME_EVENT_DONE BIT(0)
-#define SDE_ENCODER_FRAME_EVENT_ERROR BIT(1)
-#define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define SDE_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define SDE_ENCODER_FRAME_EVENT_ERROR BIT(1)
+#define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE BIT(3)
+#define SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE BIT(4)
/**
* Encoder functions and data types
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 4b12651..c1a40f5 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -235,6 +235,8 @@
* scheduled. Decremented in irq handler
* @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
* pending.
+ * @pending_retire_fence_cnt: Atomic counter tracking the pending retire
+ * fences that have to be signalled.
* @pending_kickoff_wq: Wait queue for blocking until kickoff completes
* @irq: IRQ tracking structures
*/
@@ -261,6 +263,7 @@
atomic_t underrun_cnt;
atomic_t pending_ctlstart_cnt;
atomic_t pending_kickoff_cnt;
+ atomic_t pending_retire_fence_cnt;
wait_queue_head_t pending_kickoff_wq;
struct sde_encoder_irq irq[INTR_IDX_MAX];
};
@@ -307,6 +310,8 @@
* @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt
* after ctl_start instead of before next frame kickoff
* @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_rd_ptr_cnt: atomic counter to indicate if retire fence can be
+ * signaled at the next rd_ptr_irq
* @autorefresh: autorefresh feature state
*/
struct sde_encoder_phys_cmd {
@@ -315,6 +320,7 @@
bool serialize_wait4pp;
int pp_timeout_report_cnt;
struct sde_encoder_phys_cmd_autorefresh autorefresh;
+ atomic_t pending_rd_ptr_cnt;
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 6ee1aae..2a46636 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -17,6 +17,7 @@
#include "sde_hw_interrupts.h"
#include "sde_core_irq.h"
#include "sde_formats.h"
+#include "sde_trace.h"
#define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
@@ -160,24 +161,28 @@
struct sde_encoder_phys *phys_enc = arg;
unsigned long lock_flags;
int new_cnt;
+ u32 event = SDE_ENCODER_FRAME_EVENT_DONE |
+ SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
- if (!phys_enc)
+ if (!phys_enc || !phys_enc->hw_pp)
return;
+ SDE_ATRACE_BEGIN("pp_done_irq");
/* notify all synchronous clients first, then asynchronous clients */
if (phys_enc->parent_ops.handle_frame_done)
phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
- phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+ phys_enc, event);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
SDE_EVT32_IRQ(DRMID(phys_enc->parent),
- phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
+ phys_enc->hw_pp->idx - PINGPONG_0, new_cnt, event);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
+ SDE_ATRACE_END("pp_done_irq");
}
static void sde_encoder_phys_cmd_autorefresh_done_irq(void *arg, int irq_idx)
@@ -206,35 +211,88 @@
static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
{
struct sde_encoder_phys *phys_enc = arg;
+ struct sde_encoder_phys_cmd *cmd_enc;
+ bool signal_fence = false;
- if (!phys_enc)
+ if (!phys_enc || !phys_enc->hw_pp)
return;
+ SDE_ATRACE_BEGIN("rd_ptr_irq");
+ cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+ /**
+ * signal only for master,
+ * - when the ctl_start irq is done and incremented
+ * the pending_rd_ptr_cnt.
+ * - when ctl_start irq status bit is set. This handles the case
+ * where ctl_start status bit is set in hardware, but the interrupt
+ * is delayed due to some reason.
+ */
+ if (sde_encoder_phys_cmd_is_master(phys_enc) &&
+ atomic_read(&phys_enc->pending_retire_fence_cnt)) {
+
+ if (atomic_add_unless(
+ &cmd_enc->pending_rd_ptr_cnt, -1, 0)) {
+ signal_fence = true;
+ } else {
+ signal_fence =
+ sde_core_irq_read_nolock(phys_enc->sde_kms,
+ phys_enc->irq[INTR_IDX_CTL_START].irq_idx,
+ false);
+ if (signal_fence)
+ SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(
+ &phys_enc->pending_retire_fence_cnt),
+ SDE_EVTLOG_FUNC_CASE1);
+ }
+
+ if (signal_fence && phys_enc->parent_ops.handle_frame_done) {
+ atomic_add_unless(
+ &phys_enc->pending_retire_fence_cnt, -1, 0);
+ phys_enc->parent_ops.handle_frame_done(
+ phys_enc->parent, phys_enc,
+ SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+ }
+ }
+
SDE_EVT32_IRQ(DRMID(phys_enc->parent),
- phys_enc->hw_pp->idx - PINGPONG_0, 0xfff);
+ phys_enc->hw_pp->idx - PINGPONG_0, signal_fence, 0xfff);
if (phys_enc->parent_ops.handle_vblank_virt)
phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
phys_enc);
+
+ SDE_ATRACE_END("rd_ptr_irq");
}
static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
struct sde_encoder_phys *phys_enc = arg;
+ struct sde_encoder_phys_cmd *cmd_enc;
struct sde_hw_ctl *ctl;
- if (!phys_enc)
+ if (!phys_enc || !phys_enc->hw_ctl)
return;
- if (!phys_enc->hw_ctl)
- return;
+ SDE_ATRACE_BEGIN("ctl_start_irq");
+ cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
ctl = phys_enc->hw_ctl;
SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0, 0xfff);
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+ /*
+ * this is required for the fence signalling to be done in rd_ptr_irq
+ * after ctrl_start_irq
+ */
+ if (sde_encoder_phys_cmd_is_master(phys_enc)
+ && atomic_read(&phys_enc->pending_retire_fence_cnt))
+ atomic_inc(&cmd_enc->pending_rd_ptr_cnt);
+
/* Signal any waiting ctl start interrupt */
wake_up_all(&phys_enc->pending_kickoff_wq);
+ SDE_ATRACE_END("ctl_start_irq");
}
static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
@@ -332,7 +390,8 @@
{
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
- u32 frame_event = SDE_ENCODER_FRAME_EVENT_ERROR;
+ u32 frame_event = SDE_ENCODER_FRAME_EVENT_ERROR
+ | SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
bool do_log = false;
cmd_enc->pp_timeout_report_cnt++;
@@ -345,7 +404,8 @@
SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
cmd_enc->pp_timeout_report_cnt,
- atomic_read(&phys_enc->pending_kickoff_cnt));
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ frame_event);
/* to avoid flooding, only log first time, and "dead" time */
if (do_log) {
@@ -359,9 +419,7 @@
SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
- SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
- "dsi1_phy", "vbif", "dbg_bus",
- "vbif_dbg_bus", "panic");
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -418,9 +476,7 @@
phys_enc->hw_pp->idx - PINGPONG_0,
timeout_us,
ret);
- SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
- "dsi1_phy", "vbif_rt", "dbg_bus",
- "vbif_dbg_bus", "panic");
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
}
return ret;
@@ -527,7 +583,6 @@
{
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
- unsigned long lock_flags;
int ret = 0;
if (!phys_enc) {
@@ -543,8 +598,6 @@
__builtin_return_address(0),
enable, atomic_read(&phys_enc->vblank_refcount));
- spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-
SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
enable, atomic_read(&phys_enc->vblank_refcount));
@@ -554,8 +607,6 @@
ret = sde_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_RDPTR);
- spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
end:
if (ret)
SDE_ERROR_CMDENC(cmd_enc,
@@ -575,6 +626,9 @@
cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+ SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
if (enable) {
sde_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
@@ -607,7 +661,7 @@
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
struct sde_hw_tear_check tc_cfg = { 0 };
- struct drm_display_mode *mode = &phys_enc->cached_mode;
+ struct drm_display_mode *mode;
bool tc_enable = true;
u32 vsync_hz;
struct msm_drm_private *priv;
@@ -617,6 +671,7 @@
SDE_ERROR("invalid encoder\n");
return;
}
+ mode = &phys_enc->cached_mode;
SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
@@ -627,7 +682,12 @@
}
sde_kms = phys_enc->sde_kms;
+ if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
+ SDE_ERROR("invalid device\n");
+ return;
+ }
priv = sde_kms->dev->dev_private;
+
/*
* TE default: dsi byte clock calculated base on 70 fps;
* around 14 ms to complete a kickoff cycle if te disabled;
@@ -638,8 +698,10 @@
* frequency divided by the no. of rows (lines) in the LCDpanel.
*/
vsync_hz = sde_power_clk_get_rate(&priv->phandle, "vsync_clk");
- if (!vsync_hz) {
- SDE_DEBUG_CMDENC(cmd_enc, "invalid vsync clock rate\n");
+ if (!vsync_hz || !mode->vtotal || !mode->vrefresh) {
+ SDE_DEBUG_CMDENC(cmd_enc,
+ "invalid params - vsync_hz %u vtot %u vrefresh %u\n",
+ vsync_hz, mode->vtotal, mode->vrefresh);
return;
}
@@ -687,8 +749,8 @@
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_ctl ||
- !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+ || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
SDE_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
return;
}
@@ -717,7 +779,7 @@
struct sde_hw_ctl *ctl;
u32 flush_mask = 0;
- if (!phys_enc || !phys_enc->hw_ctl) {
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
return;
}
@@ -747,10 +809,11 @@
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
- if (!phys_enc) {
+ if (!phys_enc || !phys_enc->hw_pp) {
SDE_ERROR("invalid phys encoder\n");
return;
}
+
SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
if (phys_enc->enable_state == SDE_ENC_ENABLED) {
@@ -803,7 +866,7 @@
to_sde_encoder_phys_cmd(phys_enc);
int ret;
- if (!phys_enc) {
+ if (!phys_enc || !phys_enc->hw_pp) {
SDE_ERROR("invalid encoder\n");
return;
}
@@ -857,6 +920,12 @@
SDE_ERROR("invalid encoder\n");
return;
}
+
+ if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+ SDE_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+ return;
+ }
+
SDE_DEBUG_CMDENC(cmd_enc, "\n");
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
}
@@ -869,7 +938,7 @@
to_sde_encoder_phys_cmd(phys_enc);
int ret;
- if (!phys_enc) {
+ if (!phys_enc || !phys_enc->hw_pp) {
SDE_ERROR("invalid encoder\n");
return;
}
@@ -1219,6 +1288,8 @@
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+ atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
+ atomic_set(&cmd_enc->pending_rd_ptr_cnt, 0);
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0);
init_waitqueue_head(&cmd_enc->autorefresh.kickoff_wq);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index afd61ae..933e4812 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -16,6 +16,7 @@
#include "sde_core_irq.h"
#include "sde_formats.h"
#include "dsi_display.h"
+#include "sde_trace.h"
#define SDE_DEBUG_VIDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
@@ -49,6 +50,23 @@
struct intf_timing_params *timing)
{
memset(timing, 0, sizeof(*timing));
+
+ if ((mode->htotal < mode->hsync_end)
+ || (mode->hsync_start < mode->hdisplay)
+ || (mode->vtotal < mode->vsync_end)
+ || (mode->vsync_start < mode->vdisplay)
+ || (mode->hsync_end < mode->hsync_start)
+ || (mode->vsync_end < mode->vsync_start)) {
+ SDE_ERROR(
+ "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal, mode->hdisplay);
+ SDE_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->vdisplay);
+ return;
+ }
+
/*
* https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
* Active Region Front Porch Sync Back Porch
@@ -139,6 +157,15 @@
u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
u32 actual_vfp_lines = 0;
+ if (worst_case_needed_lines < start_of_frame_lines) {
+ needed_vfp_lines = 0;
+ SDE_ERROR("invalid params - needed_lines:%d, frame_lines:%d\n",
+ worst_case_needed_lines, start_of_frame_lines);
+ } else {
+ needed_vfp_lines = worst_case_needed_lines
+ - start_of_frame_lines;
+ }
+
/* Fetch must be outside active lines, otherwise undefined. */
if (start_of_frame_lines >= worst_case_needed_lines) {
SDE_DEBUG_VIDENC(vid_enc,
@@ -352,11 +379,25 @@
unsigned long lock_flags;
u32 flush_register = 0;
int new_cnt = -1, old_cnt = -1;
+ u32 event = 0;
if (!phys_enc)
return;
hw_ctl = phys_enc->hw_ctl;
+ SDE_ATRACE_BEGIN("vblank_irq");
+
+ /* signal only for master, where there is a pending kickoff */
+ if (sde_encoder_phys_vid_is_master(phys_enc)
+ && atomic_add_unless(
+ &phys_enc->pending_retire_fence_cnt, -1, 0)) {
+ event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+
+ if (phys_enc->parent_ops.handle_frame_done)
+ phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+ }
if (phys_enc->parent_ops.handle_vblank_virt)
phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
@@ -379,10 +420,11 @@
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
- old_cnt, new_cnt, flush_register);
+ old_cnt, new_cnt, flush_register, event);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
+ SDE_ATRACE_END("vblank_irq");
}
static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
@@ -442,15 +484,18 @@
struct sde_encoder_phys_vid *vid_enc;
if (!phys_enc || !phys_enc->sde_kms) {
- SDE_ERROR("invalid encoder\n");
+ SDE_ERROR("invalid encoder/kms\n");
return;
}
rm = &phys_enc->sde_kms->rm;
vid_enc = to_sde_encoder_phys_vid(phys_enc);
- phys_enc->cached_mode = *adj_mode;
- SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
- drm_mode_debug_printmodeline(adj_mode);
+
+ if (adj_mode) {
+ phys_enc->cached_mode = *adj_mode;
+ drm_mode_debug_printmodeline(adj_mode);
+ SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ }
instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
@@ -476,7 +521,6 @@
{
int ret = 0;
struct sde_encoder_phys_vid *vid_enc;
- unsigned long lock_flags;
if (!phys_enc) {
SDE_ERROR("invalid encoder\n");
@@ -493,8 +537,6 @@
__builtin_return_address(0),
enable, atomic_read(&phys_enc->vblank_refcount));
- spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-
SDE_EVT32(DRMID(phys_enc->parent), enable,
atomic_read(&phys_enc->vblank_refcount));
@@ -504,8 +546,6 @@
ret = sde_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_VSYNC);
- spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
if (ret)
SDE_ERROR_VIDENC(vid_enc,
"control vblank irq error %d, enable %d\n",
@@ -620,13 +660,18 @@
static int sde_encoder_phys_vid_wait_for_vblank(
struct sde_encoder_phys *phys_enc, bool notify)
{
- struct sde_encoder_wait_info wait_info = {
- .wq = &phys_enc->pending_kickoff_wq,
- .atomic_cnt = &phys_enc->pending_kickoff_cnt,
- .timeout_ms = KICKOFF_TIMEOUT_MS,
- };
+ struct sde_encoder_wait_info wait_info;
int ret;
+ if (!phys_enc) {
+ pr_err("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
if (!sde_encoder_phys_vid_is_master(phys_enc)) {
/* signal done for slave video encoder, unless it is pp-split */
if (!_sde_encoder_phys_is_ppsplit(phys_enc) &&
@@ -654,11 +699,7 @@
static int sde_encoder_phys_vid_wait_for_commit_done(
struct sde_encoder_phys *phys_enc)
{
- int ret;
-
- ret = sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
-
- return ret;
+ return sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
}
static void sde_encoder_phys_vid_prepare_for_kickoff(
@@ -688,7 +729,7 @@
SDE_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
- SDE_DBG_DUMP("panic");
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
}
programmable_rot_fetch_config(phys_enc, params->inline_rotate_prefill);
@@ -918,6 +959,7 @@
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
phys_enc->enable_state = SDE_ENC_DISABLED;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 875d99d..c95fb47 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -644,6 +644,7 @@
struct sde_encoder_phys_wb *wb_enc = arg;
struct sde_encoder_phys *phys_enc = &wb_enc->base;
struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+ u32 event = 0;
SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0,
wb_enc->frame_count);
@@ -652,12 +653,20 @@
if (phys_enc->enable_state == SDE_ENC_DISABLING)
goto complete;
+ event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_DONE;
+
+ atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0);
if (phys_enc->parent_ops.handle_frame_done)
phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
- phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+ phys_enc, event);
- phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
- phys_enc);
+ if (phys_enc->parent_ops.handle_vblank_virt)
+ phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ SDE_EVT32_IRQ(DRMID(phys_enc->parent), hw_wb->idx - WB_0, event);
complete:
complete_all(&wb_enc->wbdone_complete);
@@ -783,7 +792,7 @@
{
unsigned long ret;
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
- u32 irq_status;
+ u32 irq_status, event = 0;
u64 wb_time = 0;
int rc = 0;
u32 timeout = max_t(u32, wb_enc->wbdone_timeout, KICKOFF_TIMEOUT_MS);
@@ -802,7 +811,6 @@
if (!ret) {
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
wb_enc->frame_count);
-
irq_status = sde_core_irq_read(phys_enc->sde_kms,
wb_enc->irq_idx, true);
if (irq_status) {
@@ -812,10 +820,15 @@
} else {
SDE_ERROR("wb:%d kickoff timed out\n",
wb_enc->wb_dev->wb_idx - WB_0);
+ atomic_add_unless(
+ &phys_enc->pending_retire_fence_cnt, -1, 0);
+
+ event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_ERROR;
if (phys_enc->parent_ops.handle_frame_done)
phys_enc->parent_ops.handle_frame_done(
- phys_enc->parent, phys_enc,
- SDE_ENCODER_FRAME_EVENT_ERROR);
+ phys_enc->parent, phys_enc, event);
rc = -ETIMEDOUT;
}
}
@@ -844,7 +857,7 @@
}
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count,
- wb_time);
+ wb_time, event, rc);
return rc;
}
@@ -1296,6 +1309,7 @@
phys_enc->intf_mode = INTF_MODE_WB_LINE;
phys_enc->intf_idx = p->intf_idx;
phys_enc->enc_spinlock = p->enc_spinlock;
+ atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
INIT_LIST_HEAD(&wb_enc->irq_cb.list);
/* create internal buffer for disable logic */
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index bd9fdac..b654e5a 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -338,7 +338,7 @@
return rc;
}
-void sde_fence_signal(struct sde_fence_context *ctx, bool is_error)
+void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts, bool is_error)
{
unsigned long flags;
struct sde_fence *fc, *next;
@@ -358,16 +358,19 @@
if ((int)(ctx->done_count - ctx->commit_count) < 0) {
++ctx->done_count;
SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
- ctx->commit_count, ctx->done_count);
+ ctx->done_count, ctx->commit_count);
} else {
SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
ctx->done_count, ctx->commit_count);
+ SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
+ ktime_to_us(ts), SDE_EVTLOG_FATAL);
spin_unlock_irqrestore(&ctx->lock, flags);
return;
}
spin_unlock_irqrestore(&ctx->lock, flags);
- SDE_EVT32(ctx->drm_id, ctx->done_count);
+ SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
+ ktime_to_us(ts));
spin_lock(&ctx->list_lock);
if (list_empty(&ctx->fence_list_head)) {
@@ -382,6 +385,7 @@
list_for_each_entry_safe(fc, next, &local_list_head, fence_list) {
spin_lock_irqsave(&ctx->lock, flags);
+ fc->base.timestamp = ts;
is_signaled = fence_is_signaled_locked(&fc->base);
spin_unlock_irqrestore(&ctx->lock, flags);
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index 207f29c..51afdae 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -127,9 +127,11 @@
/**
* sde_fence_signal - advance fence timeline to signal outstanding fences
* @fence: Pointer fence container
+ * @ts: fence timestamp
* @is_error: Set to non-zero if the commit didn't complete successfully
*/
-void sde_fence_signal(struct sde_fence_context *fence, bool is_error);
+void sde_fence_signal(struct sde_fence_context *fence, ktime_t ts,
+ bool is_error);
#else
static inline void *sde_sync_get(uint64_t fd)
{
@@ -168,7 +170,7 @@
}
static inline void sde_fence_signal(struct sde_fence_context *fence,
- bool is_error)
+ ktime_t ts, bool is_error)
{
/* do nothing */
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index 35fc2b5..5307464 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -19,6 +19,7 @@
#define IDLE_2_RUN(x) ((x) == (ad4_init | ad4_cfg | ad4_mode | ad4_input))
#define MERGE_WIDTH_RIGHT 6
#define MERGE_WIDTH_LEFT 5
+#define AD_IPC_FRAME_COUNT 2
enum ad4_ops_bitmask {
ad4_init = BIT(AD_INIT),
@@ -31,34 +32,66 @@
enum ad4_state {
ad4_state_idle,
ad4_state_run,
+ /* idle power collapse resume state */
+ ad4_state_ipcr,
ad4_state_max,
};
typedef int (*ad4_prop_setup)(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *ad);
+static int ad4_params_check(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+
+static int ad4_no_op_setup(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode);
static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
+static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
static int ad4_init_setup_idle(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
+static int ad4_init_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_init_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
static int ad4_cfg_setup_idle(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_input_setup(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
static int ad4_input_setup_idle(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
-static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode);
-static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
-static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
-static int ad4_input_setup(struct sde_hw_dspp *dspp,
+static int ad4_input_setup_ipcr(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
static int ad4_suspend_setup(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
-static int ad4_params_check(struct sde_hw_dspp *dspp,
- struct sde_ad_hw_cfg *cfg);
static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
+static int ad4_ipc_suspend_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_ipc_resume_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_ipc_resume_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_mem_init_enable(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_mem_init_disable(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_ipc_resume(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+
static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
[ad4_state_idle][AD_MODE] = ad4_mode_setup_common,
[ad4_state_idle][AD_INIT] = ad4_init_setup_idle,
@@ -67,13 +100,29 @@
[ad4_state_idle][AD_SUSPEND] = ad4_suspend_setup,
[ad4_state_idle][AD_ASSERTIVE] = ad4_assertive_setup,
[ad4_state_idle][AD_BACKLIGHT] = ad4_backlight_setup,
+ [ad4_state_idle][AD_IPC_SUSPEND] = ad4_no_op_setup,
+ [ad4_state_idle][AD_IPC_RESUME] = ad4_no_op_setup,
+ [ad4_state_idle][AD_IPC_RESET] = ad4_no_op_setup,
[ad4_state_run][AD_MODE] = ad4_mode_setup_common,
- [ad4_state_run][AD_INIT] = ad4_init_setup,
- [ad4_state_run][AD_CFG] = ad4_cfg_setup,
+ [ad4_state_run][AD_INIT] = ad4_init_setup_run,
+ [ad4_state_run][AD_CFG] = ad4_cfg_setup_run,
[ad4_state_run][AD_INPUT] = ad4_input_setup,
[ad4_state_run][AD_SUSPEND] = ad4_suspend_setup,
[ad4_state_run][AD_ASSERTIVE] = ad4_assertive_setup,
[ad4_state_run][AD_BACKLIGHT] = ad4_backlight_setup,
+ [ad4_state_run][AD_IPC_SUSPEND] = ad4_ipc_suspend_setup_run,
+ [ad4_state_run][AD_IPC_RESUME] = ad4_ipc_resume_setup_run,
+ [ad4_state_run][AD_IPC_RESET] = ad4_no_op_setup,
+ [ad4_state_ipcr][AD_MODE] = ad4_mode_setup_common,
+ [ad4_state_ipcr][AD_INIT] = ad4_init_setup_ipcr,
+ [ad4_state_ipcr][AD_CFG] = ad4_cfg_setup_ipcr,
+ [ad4_state_ipcr][AD_INPUT] = ad4_input_setup_ipcr,
+ [ad4_state_ipcr][AD_SUSPEND] = ad4_suspend_setup,
+ [ad4_state_ipcr][AD_ASSERTIVE] = ad4_assertive_setup,
+ [ad4_state_ipcr][AD_BACKLIGHT] = ad4_backlight_setup,
+ [ad4_state_ipcr][AD_IPC_SUSPEND] = ad4_no_op_setup,
+ [ad4_state_ipcr][AD_IPC_RESUME] = ad4_ipc_resume_setup_ipcr,
+ [ad4_state_ipcr][AD_IPC_RESET] = ad4_ipc_reset_setup_ipcr,
};
struct ad4_info {
@@ -81,14 +130,19 @@
u32 completed_ops_mask;
bool ad4_support;
enum ad4_modes cached_mode;
+ bool is_master;
+ u32 frame_count;
+ u32 tf_ctrl;
+ u32 vc_control_0;
+ u32 last_str;
u32 cached_als;
};
static struct ad4_info info[DSPP_MAX] = {
- [DSPP_0] = {ad4_state_idle, 0, true, AD4_OFF},
- [DSPP_1] = {ad4_state_idle, 0, true, AD4_OFF},
- [DSPP_2] = {ad4_state_max, 0, false, AD4_OFF},
- [DSPP_3] = {ad4_state_max, 0, false, AD4_OFF},
+ [DSPP_0] = {ad4_state_idle, 0, true, AD4_OFF, false},
+ [DSPP_1] = {ad4_state_idle, 0, true, AD4_OFF, false},
+ [DSPP_2] = {ad4_state_max, 0, false, AD4_OFF, false},
+ [DSPP_3] = {ad4_state_max, 0, false, AD4_OFF, false},
};
void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *ad_cfg)
@@ -118,7 +172,7 @@
return -EINVAL;
}
- if (dspp->idx > DSPP_MAX || !info[dspp->idx].ad4_support) {
+ if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
return -EINVAL;
}
@@ -142,7 +196,7 @@
return -EINVAL;
}
- if (dspp->idx > DSPP_MAX || !info[dspp->idx].ad4_support) {
+ if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
return -EINVAL;
}
@@ -170,6 +224,10 @@
return -EINVAL;
}
hw_lm = cfg->hw_cfg->mixer_info;
+ if (!hw_lm) {
+ DRM_ERROR("invalid mixer info\n");
+ return -EINVAL;
+ }
if (cfg->hw_cfg->num_of_mixers == 1 &&
hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
@@ -179,7 +237,7 @@
cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
return -EINVAL;
} else if (hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
- hw_lm->cfg.out_width != (cfg->hw_cfg->displayh >> 1)) {
+ hw_lm->cfg.out_width != (cfg->hw_cfg->displayh >> 1)) {
DRM_ERROR("dual_lm lmh %d lmw %d displayh %d displayw %d\n",
hw_lm->cfg.out_height, hw_lm->cfg.out_width,
cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
@@ -189,6 +247,11 @@
return 0;
}
+static int ad4_no_op_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
+{
+ return 0;
+}
+
static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode)
{
u32 blk_offset;
@@ -200,7 +263,8 @@
info[dspp->idx].state = ad4_state_idle;
info[dspp->idx].completed_ops_mask = 0;
} else {
- info[dspp->idx].state = ad4_state_run;
+ if (info[dspp->idx].state == ad4_state_idle)
+ info[dspp->idx].state = ad4_state_run;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
0x100);
}
@@ -235,6 +299,7 @@
proc_start = 0;
proc_end = 0xffff;
tile_ctl = 0;
+ info[dspp->idx].is_master = true;
} else {
tile_ctl = 0x5;
if (hw_lm->cfg.right_mixer) {
@@ -244,6 +309,7 @@
proc_start = (cfg->hw_cfg->displayh >> 1);
proc_end = frame_end;
tile_ctl |= 0x10;
+ info[dspp->idx].is_master = false;
} else {
frame_start = 0;
frame_end = (cfg->hw_cfg->displayh >> 1) +
@@ -251,23 +317,21 @@
proc_start = 0;
proc_end = (cfg->hw_cfg->displayh >> 1) - 1;
tile_ctl |= 0x10;
+ info[dspp->idx].is_master = true;
}
}
init = cfg->hw_cfg->payload;
- blk_offset = 8;
- SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
- init->init_param_009);
blk_offset = 0xc;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
- init->init_param_010);
+ init->init_param_010);
init->init_param_012 = cfg->hw_cfg->displayv & (BIT(17) - 1);
init->init_param_011 = cfg->hw_cfg->displayh & (BIT(17) - 1);
blk_offset = 0x10;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
- ((init->init_param_011 << 16) | init->init_param_012));
+ ((init->init_param_011 << 16) | init->init_param_012));
blk_offset = 0x14;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
@@ -275,8 +339,8 @@
blk_offset = 0x44;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
- ((((init->init_param_013) & (BIT(17) - 1)) << 16) |
- (init->init_param_014 & (BIT(17) - 1))));
+ ((((init->init_param_013) & (BIT(17) - 1)) << 16) |
+ (init->init_param_014 & (BIT(17) - 1))));
blk_offset = 0x5c;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
@@ -583,23 +647,25 @@
val = (ad_cfg->cfg_param_004 & (BIT(16) - 1));
val |= ((ad_cfg->cfg_param_003 & (BIT(16) - 1)) << 16);
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
+
+ blk_offset = 0x20;
val = (ad_cfg->cfg_param_005 & (BIT(8) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
+ blk_offset = 0x24;
val = (ad_cfg->cfg_param_006 & (BIT(7) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
blk_offset = 0x30;
val = (ad_cfg->cfg_param_007 & (BIT(8) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
- val = (ad_cfg->cfg_param_008 & (BIT(8) - 1));
- SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
+
+ info[dspp->idx].tf_ctrl = (ad_cfg->cfg_param_008 & (BIT(8) - 1));
+
+ blk_offset = 0x38;
val = (ad_cfg->cfg_param_009 & (BIT(10) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
+
+ blk_offset = 0x3c;
val = (ad_cfg->cfg_param_010 & (BIT(12) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
blk_offset += 4;
@@ -607,7 +673,6 @@
val |= (ad_cfg->cfg_param_012 & (BIT(16) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
blk_offset = 0x88;
val = (ad_cfg->cfg_param_013 & (BIT(8) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
@@ -697,14 +762,10 @@
blk_offset = 0x134;
val = (ad_cfg->cfg_param_040 & (BIT(12) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
- val = (ad_cfg->cfg_param_041 & (BIT(7) - 1));
- SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset = 0x15c;
- val = (ad_cfg->cfg_param_042 & (BIT(10) - 1));
- SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
+ info[dspp->idx].vc_control_0 = (ad_cfg->cfg_param_041 & (BIT(7) - 1));
+
+ blk_offset += 160;
val = (ad_cfg->cfg_param_043 & (BIT(10) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
@@ -791,6 +852,52 @@
if (ret)
return ret;
+ ret = ad4_mem_init_enable(dspp, cfg);
+ if (ret)
+ return ret;
+
+ info[dspp->idx].completed_ops_mask |= ad4_init;
+
+ if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+ ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+ return 0;
+}
+
+static int ad4_init_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_init;
+ return 0;
+ }
+
+ ret = ad4_init_setup(dspp, cfg);
+ if (ret)
+ return ret;
+ ret = ad4_mem_init_disable(dspp, cfg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ad4_init_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_init;
+ return 0;
+ }
+
+ ret = ad4_init_setup(dspp, cfg);
+ if (ret)
+ return ret;
+
info[dspp->idx].completed_ops_mask |= ad4_init;
if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
@@ -812,6 +919,52 @@
ret = ad4_cfg_setup(dspp, cfg);
if (ret)
return ret;
+ ret = ad4_cfg_ipc_reset(dspp, cfg);
+ if (ret)
+ return ret;
+
+ info[dspp->idx].completed_ops_mask |= ad4_cfg;
+ if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+ ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+ return 0;
+}
+
+static int ad4_cfg_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
+ return 0;
+ }
+
+ ret = ad4_cfg_setup(dspp, cfg);
+ if (ret)
+ return ret;
+ ret = ad4_cfg_ipc_reset(dspp, cfg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ad4_cfg_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
+ return 0;
+ }
+
+ ret = ad4_cfg_setup(dspp, cfg);
+ if (ret)
+ return ret;
+ ret = ad4_cfg_ipc_resume(dspp, cfg);
+ if (ret)
+ return ret;
info[dspp->idx].completed_ops_mask |= ad4_cfg;
if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
@@ -835,6 +988,22 @@
return 0;
}
+static int ad4_input_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ ret = ad4_input_setup(dspp, cfg);
+ if (ret)
+ return ret;
+
+ info[dspp->idx].completed_ops_mask |= ad4_input;
+ if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+ ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+ return 0;
+}
+
static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg)
{
@@ -900,3 +1069,182 @@
break;
}
}
+
+static int ad4_ipc_suspend_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 strength = 0, i = 0;
+ struct sde_hw_mixer *hw_lm;
+
+ hw_lm = cfg->hw_cfg->mixer_info;
+ if ((cfg->hw_cfg->num_of_mixers == 2) && hw_lm->cfg.right_mixer) {
+ /* this AD core is the slave core */
+ for (i = DSPP_0; i < DSPP_MAX; i++) {
+ if (info[i].is_master) {
+ strength = info[i].last_str;
+ break;
+ }
+ }
+ } else {
+ strength = SDE_REG_READ(&dspp->hw,
+ dspp->cap->sblk->ad.base + 0x4c);
+ }
+ info[dspp->idx].last_str = strength;
+
+ return 0;
+}
+
+static int ad4_ipc_resume_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ info[dspp->idx].state = ad4_state_ipcr;
+
+ info[dspp->idx].frame_count = 0;
+ ret = ad4_cfg_ipc_resume(dspp, cfg);
+
+ return ret;
+}
+
+static int ad4_ipc_resume_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ info[dspp->idx].frame_count = 0;
+ return 0;
+}
+
+static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+ u32 strength = 0, i = 0;
+ struct sde_hw_mixer *hw_lm;
+
+ /* Read AD calculator strength output during the 2 frames of manual
+ * strength mode, and assign the strength output to last_str
+ * when frame count reaches AD_IPC_FRAME_COUNT to avoid flickers
+ * caused by the strength not having converged before entering IPC mode
+ */
+ hw_lm = cfg->hw_cfg->mixer_info;
+ if ((cfg->hw_cfg->num_of_mixers == 2) && hw_lm->cfg.right_mixer) {
+ /* this AD core is the slave core */
+ for (i = DSPP_0; i < DSPP_MAX; i++) {
+ if (info[i].is_master) {
+ strength = info[i].last_str;
+ break;
+ }
+ }
+ } else {
+ strength = SDE_REG_READ(&dspp->hw,
+ dspp->cap->sblk->ad.base + 0x4c);
+ }
+
+ if (info[dspp->idx].frame_count == AD_IPC_FRAME_COUNT) {
+ info[dspp->idx].state = ad4_state_run;
+ info[dspp->idx].last_str = strength;
+ ret = ad4_cfg_ipc_reset(dspp, cfg);
+ if (ret)
+ return ret;
+ } else {
+ info[dspp->idx].frame_count++;
+ }
+
+ return 0;
+}
+
+static int ad4_mem_init_enable(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 blk_offset;
+ struct drm_msm_ad4_init *init;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_init;
+ return 0;
+ }
+
+ if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_init)) {
+ DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+ sizeof(struct drm_msm_ad4_init), cfg->hw_cfg->len,
+ cfg->hw_cfg->payload);
+ return -EINVAL;
+ }
+
+ init = cfg->hw_cfg->payload;
+ blk_offset = 0x8;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+ (init->init_param_009 & 0xdfff));
+ blk_offset = 0x450;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 1);
+
+ return 0;
+}
+
+static int ad4_mem_init_disable(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 blk_offset;
+ struct drm_msm_ad4_init *init;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_init;
+ return 0;
+ }
+
+ if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_init)) {
+ DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+ sizeof(struct drm_msm_ad4_init), cfg->hw_cfg->len,
+ cfg->hw_cfg->payload);
+ return -EINVAL;
+ }
+
+ init = cfg->hw_cfg->payload;
+ blk_offset = 0x8;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+ (init->init_param_009 | 0x2000));
+ blk_offset = 0x450;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0);
+
+ return 0;
+}
+
+static int ad4_cfg_ipc_resume(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 blk_offset, val;
+
+ /* disable temporal filters */
+ blk_offset = 0x34;
+ val = (0x55 & (BIT(8) - 1));
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+ /* set manual strength */
+ blk_offset = 0x15c;
+ val = (info[dspp->idx].last_str & (BIT(10) - 1));
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+ /* enable manual mode */
+ blk_offset = 0x138;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0);
+
+ return 0;
+}
+
+static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 blk_offset;
+
+ /* enable temporal filters */
+ blk_offset = 0x34;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+ info[dspp->idx].tf_ctrl);
+
+ /* disable manual mode */
+ blk_offset = 0x138;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+ info[dspp->idx].vc_control_0);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 305d45e..b1772ed 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -230,6 +230,12 @@
};
enum {
+ DSPP_TOP_OFF,
+ DSPP_TOP_SIZE,
+ DSPP_TOP_PROP_MAX,
+};
+
+enum {
DSPP_OFF,
DSPP_SIZE,
DSPP_BLOCKS,
@@ -463,6 +469,11 @@
{MIXER_GC_PROP, "qcom,sde-mixer-gc", false, PROP_TYPE_U32_ARRAY},
};
+static struct sde_prop_type dspp_top_prop[] = {
+ {DSPP_TOP_OFF, "qcom,sde-dspp-top-off", true, PROP_TYPE_U32},
+ {DSPP_TOP_SIZE, "qcom,sde-dspp-top-size", false, PROP_TYPE_U32},
+};
+
static struct sde_prop_type dspp_prop[] = {
{DSPP_OFF, "qcom,sde-dspp-off", true, PROP_TYPE_U32_ARRAY},
{DSPP_SIZE, "qcom,sde-dspp-size", false, PROP_TYPE_U32},
@@ -1859,6 +1870,54 @@
return rc;
}
+static int sde_dspp_top_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *sde_cfg)
+{
+ int rc, prop_count[DSPP_TOP_PROP_MAX];
+ bool prop_exists[DSPP_TOP_PROP_MAX];
+ struct sde_prop_value *prop_value = NULL;
+ u32 off_count;
+
+ if (!sde_cfg) {
+ SDE_ERROR("invalid argument\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ prop_value = kzalloc(DSPP_TOP_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = _validate_dt_entry(np, dspp_top_prop, ARRAY_SIZE(dspp_top_prop),
+ prop_count, &off_count);
+ if (rc)
+ goto end;
+
+ rc = _read_dt_entry(np, dspp_top_prop, ARRAY_SIZE(dspp_top_prop),
+ prop_count, prop_exists, prop_value);
+ if (rc)
+ goto end;
+
+ if (off_count != 1) {
+ SDE_ERROR("invalid dspp_top off_count:%d\n", off_count);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ sde_cfg->dspp_top.base =
+ PROP_VALUE_ACCESS(prop_value, DSPP_TOP_OFF, 0);
+ sde_cfg->dspp_top.len =
+ PROP_VALUE_ACCESS(prop_value, DSPP_TOP_SIZE, 0);
+ snprintf(sde_cfg->dspp_top.name, SDE_HW_BLK_NAME_LEN, "dspp_top");
+
+end:
+ kfree(prop_value);
+ return rc;
+}
+
static int sde_dspp_parse_dt(struct device_node *np,
struct sde_mdss_cfg *sde_cfg)
{
@@ -2992,6 +3051,10 @@
if (rc)
goto end;
+ rc = sde_dspp_top_parse_dt(np, sde_cfg);
+ if (rc)
+ goto end;
+
rc = sde_dspp_parse_dt(np, sde_cfg);
if (rc)
goto end;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 29698bc..db5a6b4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -568,6 +568,17 @@
};
/**
* struct sde_dspp_top_cfg - information of DSPP top block
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * supported by this block
+ */
+struct sde_dspp_top_cfg {
+ SDE_HW_BLK_INFO;
+};
+
+/**
* struct sde_dspp_cfg - information of DSPP blocks
* @id enum identifying this block
* @base register offset of this block
@@ -891,6 +902,8 @@
u32 mixer_count;
struct sde_lm_cfg mixer[MAX_BLOCKS];
+ struct sde_dspp_top_cfg dspp_top;
+
u32 dspp_count;
struct sde_dspp_cfg dspp[MAX_BLOCKS];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
index 8f7764d..5cbfe8e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
@@ -37,4 +37,33 @@
#define GC_TBL_NUM 3
#define GC_LUT_SWAP_OFF 0x1c
+#define IGC_TBL_NUM 3
+#define IGC_DITHER_OFF 0x7e0
+#define IGC_OPMODE_OFF 0x0
+#define IGC_C0_OFF 0x0
+#define IGC_DATA_MASK (BIT(12) - 1)
+#define IGC_DSPP_SEL_MASK_MAX (BIT(4) - 1)
+#define IGC_DSPP_SEL_MASK(n) \
+ ((IGC_DSPP_SEL_MASK_MAX & ~(1 << (n))) << 28)
+#define IGC_INDEX_UPDATE BIT(25)
+#define IGC_EN BIT(0)
+#define IGC_DIS 0
+#define IGC_DITHER_DATA_MASK (BIT(4) - 1)
+
+#define PCC_NUM_PLANES 3
+#define PCC_NUM_COEFF 11
+#define PCC_EN BIT(0)
+#define PCC_DIS 0
+#define PCC_C_OFF 0x4
+#define PCC_R_OFF 0x10
+#define PCC_G_OFF 0x1c
+#define PCC_B_OFF 0x28
+#define PCC_RG_OFF 0x34
+#define PCC_RB_OFF 0x40
+#define PCC_GB_OFF 0x4c
+#define PCC_RGB_OFF 0x58
+#define PCC_RR_OFF 0x64
+#define PCC_GG_OFF 0x70
+#define PCC_BB_OFF 0x7c
+
#endif /* _SDE_HW_COLOR_PROC_COMMON_V4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c
index 42d1480..4da0456 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c
@@ -109,3 +109,127 @@
&op_mode);
}
+
+void sde_setup_dspp_igcv3(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct drm_msm_igc_lut *lut_cfg;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ int i = 0, j = 0;
+ u32 *addr = NULL;
+ u32 offset = 0;
+
+ if (!ctx || !cfg) {
+ DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
+ return;
+ }
+
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("disable igc feature\n");
+ SDE_REG_WRITE(&ctx->hw, IGC_OPMODE_OFF, 0);
+ return;
+ }
+
+ if (hw_cfg->len != sizeof(struct drm_msm_igc_lut)) {
+ DRM_ERROR("invalid size of payload len %d exp %zd\n",
+ hw_cfg->len, sizeof(struct drm_msm_igc_lut));
+ return;
+ }
+
+ lut_cfg = hw_cfg->payload;
+
+ for (i = 0; i < IGC_TBL_NUM; i++) {
+ addr = lut_cfg->c0 + (i * ARRAY_SIZE(lut_cfg->c0));
+ offset = IGC_C0_OFF + (i * sizeof(u32));
+
+ for (j = 0; j < IGC_TBL_LEN; j++) {
+ addr[j] &= IGC_DATA_MASK;
+ addr[j] |= IGC_DSPP_SEL_MASK(ctx->idx - 1);
+ if (j == 0)
+ addr[j] |= IGC_INDEX_UPDATE;
+ /* IGC lut registers are part of DSPP Top HW block */
+ SDE_REG_WRITE(&ctx->hw_top, offset, addr[j]);
+ }
+ }
+
+ if (lut_cfg->flags & IGC_DITHER_ENABLE) {
+ SDE_REG_WRITE(&ctx->hw, IGC_DITHER_OFF,
+ lut_cfg->strength & IGC_DITHER_DATA_MASK);
+ }
+
+ SDE_REG_WRITE(&ctx->hw, IGC_OPMODE_OFF, IGC_EN);
+}
+
+void sde_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct drm_msm_pcc *pcc_cfg;
+ struct drm_msm_pcc_coeff *coeffs = NULL;
+ int i = 0;
+ u32 base = 0;
+
+ if (!ctx || !cfg) {
+ DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
+ return;
+ }
+
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("disable pcc feature\n");
+ SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, 0);
+ return;
+ }
+
+ if (hw_cfg->len != sizeof(struct drm_msm_pcc)) {
+ DRM_ERROR("invalid size of payload len %d exp %zd\n",
+ hw_cfg->len, sizeof(struct drm_msm_pcc));
+ return;
+ }
+
+ pcc_cfg = hw_cfg->payload;
+
+ for (i = 0; i < PCC_NUM_PLANES; i++) {
+ base = ctx->cap->sblk->pcc.base + (i * sizeof(u32));
+ switch (i) {
+ case 0:
+ coeffs = &pcc_cfg->r;
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_RR_OFF, pcc_cfg->r_rr);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_GG_OFF, pcc_cfg->r_gg);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_BB_OFF, pcc_cfg->r_bb);
+ break;
+ case 1:
+ coeffs = &pcc_cfg->g;
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_RR_OFF, pcc_cfg->g_rr);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_GG_OFF, pcc_cfg->g_gg);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_BB_OFF, pcc_cfg->g_bb);
+ break;
+ case 2:
+ coeffs = &pcc_cfg->b;
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_RR_OFF, pcc_cfg->b_rr);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_GG_OFF, pcc_cfg->b_gg);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_BB_OFF, pcc_cfg->b_bb);
+ break;
+ default:
+ DRM_ERROR("invalid pcc plane: %d\n", i);
+ return;
+ }
+
+ SDE_REG_WRITE(&ctx->hw, base + PCC_C_OFF, coeffs->c);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_R_OFF, coeffs->r);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_G_OFF, coeffs->g);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_B_OFF, coeffs->b);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_RG_OFF, coeffs->rg);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_RB_OFF, coeffs->rb);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_GB_OFF, coeffs->gb);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_RGB_OFF, coeffs->rgb);
+ }
+
+ SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, PCC_EN);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h
index 250830e..ad4f556 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h
@@ -22,5 +22,19 @@
* @cfg: pointer to sde_hw_cp_cfg
*/
void sde_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg);
+/**
+ * sde_setup_dspp_igcv3 - Function for igc v3 version feature
+ * programming.
+ * @ctx: dspp ctx pointer
+ * @cfg: pointer to sde_hw_cp_cfg
+ */
+void sde_setup_dspp_igcv3(struct sde_hw_dspp *ctx, void *cfg);
+/**
+ * sde_setup_dspp_pccv4 - Function for pcc v4 version feature
+ * programming.
+ * @ctx: dspp ctx pointer
+ * @cfg: pointer to sde_hw_cp_cfg
+ */
+void sde_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg);
#endif /* _SDE_HW_COLOR_PROC_V4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
index 1a346f0..9fd3c25 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
@@ -120,7 +120,6 @@
data |= dsc->max_qp_flatness << 5;
data |= dsc->min_qp_flatness;
SDE_REG_WRITE(dsc_c, DSC_FLATNESS, data);
- SDE_REG_WRITE(dsc_c, DSC_FLATNESS, 0x983);
data = dsc->rc_model_size;
SDE_REG_WRITE(dsc_c, DSC_RC_MODEL_SIZE, data);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index e766cdb..5b3f51e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -58,6 +58,16 @@
if (c->cap->sblk->pcc.version ==
(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
c->ops.setup_pcc = sde_setup_dspp_pcc_v1_7;
+ else if (c->cap->sblk->pcc.version ==
+ (SDE_COLOR_PROCESS_VER(0x4, 0x0))) {
+ ret = reg_dmav1_init_dspp_op_v4(i, c->idx);
+ if (!ret)
+ c->ops.setup_pcc =
+ reg_dmav1_setup_dspp_pccv4;
+ else
+ c->ops.setup_pcc =
+ sde_setup_dspp_pccv4;
+ }
break;
case SDE_DSPP_HSIC:
if (c->cap->sblk->hsic.version ==
@@ -104,6 +114,18 @@
sde_setup_dspp_gc_v1_7;
}
break;
+ case SDE_DSPP_IGC:
+ if (c->cap->sblk->igc.version ==
+ SDE_COLOR_PROCESS_VER(0x3, 0x1)) {
+ ret = reg_dmav1_init_dspp_op_v4(i, c->idx);
+ if (!ret)
+ c->ops.setup_igc =
+ reg_dmav1_setup_dspp_igcv31;
+ else
+ c->ops.setup_igc =
+ sde_setup_dspp_igcv3;
+ }
+ break;
case SDE_DSPP_AD:
if (c->cap->sblk->ad.version ==
SDE_COLOR_PROCESS_VER(4, 0)) {
@@ -145,6 +167,13 @@
return ERR_PTR(-EINVAL);
}
+ /* Populate DSPP Top HW block */
+ c->hw_top.base_off = addr;
+ c->hw_top.blk_off = m->dspp_top.base;
+ c->hw_top.length = m->dspp_top.len;
+ c->hw_top.hwversion = m->hwversion;
+ c->hw_top.log_mask = SDE_DBG_MASK_DSPP;
+
/* Assign ops */
c->idx = idx;
c->cap = cfg;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 0baa970..44b3831 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -170,13 +170,17 @@
* struct sde_hw_dspp - dspp description
* @base: Hardware block base structure
* @hw: Block hardware details
+ * @hw_top: Block hardware top details
* @idx: DSPP index
* @cap: Pointer to layer_cfg
* @ops: Pointer to operations possible for this DSPP
*/
struct sde_hw_dspp {
struct sde_hw_blk base;
- struct sde_hw_blk_reg_map hw;
+ struct sde_hw_blk_reg_map hw;
+
+ /* dspp top */
+ struct sde_hw_blk_reg_map hw_top;
/* dspp */
enum sde_dspp idx;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 8c3d4fc..8eebf89fc 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -710,6 +710,9 @@
return;
SDE_REG_WRITE(&intr->hw, reg_off, mask);
+
+ /* ensure register writes go through */
+ wmb();
}
static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
@@ -731,7 +734,7 @@
* Now need to go through each IRQ status and find matching
* irq lookup index.
*/
- spin_lock_irqsave(&intr->status_lock, irq_flags);
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (reg_idx = 0; reg_idx < ARRAY_SIZE(sde_intr_set); reg_idx++) {
irq_status = intr->save_irq_status[reg_idx];
@@ -766,7 +769,7 @@
if (cbfunc)
cbfunc(arg, irq_idx);
else
- intr->ops.clear_interrupt_status(
+ intr->ops.clear_intr_status_nolock(
intr, irq_idx);
/*
@@ -777,7 +780,7 @@
irq_status &= ~sde_irq_map[irq_idx].irq_mask;
}
}
- spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx)
@@ -801,7 +804,7 @@
reg_idx = irq->reg_idx;
reg = &sde_intr_set[reg_idx];
- spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if (cache_irq_mask & irq->irq_mask) {
dbgstr = "SDE IRQ already set:";
@@ -814,9 +817,12 @@
/* Enabling interrupts with the new mask */
SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+ /* ensure register write goes through */
+ wmb();
+
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
irq->irq_mask, cache_irq_mask);
@@ -845,7 +851,7 @@
reg_idx = irq->reg_idx;
reg = &sde_intr_set[reg_idx];
- spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if ((cache_irq_mask & irq->irq_mask) == 0) {
dbgstr = "SDE IRQ is already cleared:";
@@ -858,9 +864,12 @@
/* Cleaning any pending interrupt */
SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ /* ensure register write goes through */
+ wmb();
+
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
irq->irq_mask, cache_irq_mask);
@@ -878,6 +887,9 @@
for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
+ /* ensure register writes go through */
+ wmb();
+
return 0;
}
@@ -891,6 +903,9 @@
for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
+ /* ensure register writes go through */
+ wmb();
+
return 0;
}
@@ -926,7 +941,7 @@
if (!intr)
return;
- spin_lock_irqsave(&intr->status_lock, irq_flags);
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) {
/* Read interrupt status */
intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
@@ -943,25 +958,68 @@
/* Finally update IRQ status based on enable mask */
intr->save_irq_status[i] &= enable_mask;
}
- spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
-static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
+static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
int irq_idx)
{
int reg_idx;
- unsigned long irq_flags;
if (!intr)
return;
- spin_lock_irqsave(&intr->mask_lock, irq_flags);
-
reg_idx = sde_irq_map[irq_idx].reg_idx;
SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
sde_irq_map[irq_idx].irq_mask);
- spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
+ int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
+ int irq_idx, bool clear)
+{
+ int reg_idx;
+ u32 intr_status;
+
+ if (!intr)
+ return 0;
+
+ if (irq_idx >= ARRAY_SIZE(sde_irq_map) || irq_idx < 0) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return 0;
+ }
+
+ reg_idx = sde_irq_map[irq_idx].reg_idx;
+ intr_status = SDE_REG_READ(&intr->hw,
+ sde_intr_set[reg_idx].status_off) &
+ sde_irq_map[irq_idx].irq_mask;
+ if (intr_status && clear)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+ intr_status);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return intr_status;
}
static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
@@ -979,7 +1037,7 @@
return 0;
}
- spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
reg_idx = sde_irq_map[irq_idx].reg_idx;
intr_status = SDE_REG_READ(&intr->hw,
@@ -989,7 +1047,10 @@
SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
intr_status);
- spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
return intr_status;
}
@@ -1007,7 +1068,9 @@
ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
+ ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
+ ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
}
static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
@@ -1059,8 +1122,7 @@
return ERR_PTR(-ENOMEM);
}
- spin_lock_init(&intr->mask_lock);
- spin_lock_init(&intr->status_lock);
+ spin_lock_init(&intr->irq_lock);
return intr;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
index aaba1be..ced4077 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -187,6 +187,15 @@
int irq_idx);
/**
+ * clear_intr_status_nolock() - clears the HW interrupts without lock
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ */
+ void (*clear_intr_status_nolock)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
* get_interrupt_status - Gets HW interrupt status, and clear if set,
* based on given lookup IRQ index.
* @intr: HW interrupt handle
@@ -199,6 +208,17 @@
bool clear);
/**
+ * get_intr_status_nolock - nolock version of get_interrupt_status
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @clear: True to clear irq after read
+ */
+ u32 (*get_intr_status_nolock)(
+ struct sde_hw_intr *intr,
+ int irq_idx,
+ bool clear);
+
+ /**
* get_valid_interrupts - Gets a mask of all valid interrupt sources
* within SDE. These are actually status bits
* within interrupt registers that specify the
@@ -232,8 +252,7 @@
* @cache_irq_mask: array of IRQ enable masks reg storage created during init
* @save_irq_status: array of IRQ status reg storage created during init
* @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
- * @mask_lock: spinlock for accessing IRQ mask
- * @status_lock: spinlock for accessing IRQ status
+ * @irq_lock: spinlock for accessing IRQ resources
*/
struct sde_hw_intr {
struct sde_hw_blk_reg_map hw;
@@ -241,8 +260,7 @@
u32 *cache_irq_mask;
u32 *save_irq_status;
u32 irq_idx_tbl_size;
- spinlock_t mask_lock;
- spinlock_t status_lock;
+ spinlock_t irq_lock;
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index 582ab5a..f07f5ed 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -82,6 +82,19 @@
#define SDE_BLEND_BG_INV_MOD_ALPHA (1 << 12)
#define SDE_BLEND_BG_TRANSP_EN (1 << 13)
+#define SDE_VSYNC0_SOURCE_GPIO 0
+#define SDE_VSYNC1_SOURCE_GPIO 1
+#define SDE_VSYNC2_SOURCE_GPIO 2
+#define SDE_VSYNC_SOURCE_INTF_0 3
+#define SDE_VSYNC_SOURCE_INTF_1 4
+#define SDE_VSYNC_SOURCE_INTF_2 5
+#define SDE_VSYNC_SOURCE_INTF_3 6
+#define SDE_VSYNC_SOURCE_WD_TIMER_4 11
+#define SDE_VSYNC_SOURCE_WD_TIMER_3 12
+#define SDE_VSYNC_SOURCE_WD_TIMER_2 13
+#define SDE_VSYNC_SOURCE_WD_TIMER_1 14
+#define SDE_VSYNC_SOURCE_WD_TIMER_0 15
+
enum sde_hw_blk_type {
SDE_HW_BLK_TOP = 0,
SDE_HW_BLK_SSPP,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
index 4140a12..9a5035a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
@@ -90,6 +90,8 @@
[GAMUT] = GRP_VIG_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT,
[VLUT] = GRP_DSPP_HW_BLK_SELECT,
[GC] = GRP_DSPP_HW_BLK_SELECT,
+ [IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT,
+ [PCC] = GRP_DSPP_HW_BLK_SELECT,
};
static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
@@ -182,7 +184,7 @@
loc = (u8 *)cfg->dma_buf->vaddr + cfg->dma_buf->index;
memcpy(loc, cfg->data, cfg->data_size);
cfg->dma_buf->index += cfg->data_size;
- cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
+ cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
cfg->dma_buf->ops_completed |= REG_WRITE_OP;
return 0;
@@ -244,7 +246,7 @@
loc[1] = *cfg->data;
cfg->dma_buf->index += ops_mem_size[cfg->ops];
cfg->dma_buf->ops_completed |= REG_WRITE_OP;
- cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
+ cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
index 285ef11..70427ab 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
@@ -30,6 +30,14 @@
#define GC_LUT_MEM_SIZE ((sizeof(struct drm_msm_pgc_lut)) + \
REG_DMA_HEADERS_BUFFER_SZ)
+#define IGC_LUT_MEM_SIZE ((sizeof(struct drm_msm_igc_lut)) + \
+ REG_DMA_HEADERS_BUFFER_SZ)
+
+#define PCC_LUT_ENTRIES (PCC_NUM_PLANES * PCC_NUM_COEFF)
+#define PCC_LEN (PCC_LUT_ENTRIES * sizeof(u32))
+#define PCC_MEM_SIZE (PCC_LEN + \
+ REG_DMA_HEADERS_BUFFER_SZ)
+
#define REG_MASK(n) ((BIT(n)) - 1)
static struct sde_reg_dma_buffer *dspp_buf[REG_DMA_FEATURES_MAX][DSPP_MAX];
@@ -37,8 +45,8 @@
static u32 feature_map[SDE_DSPP_MAX] = {
[SDE_DSPP_VLUT] = VLUT,
[SDE_DSPP_GAMUT] = GAMUT,
- [SDE_DSPP_IGC] = REG_DMA_FEATURES_MAX,
- [SDE_DSPP_PCC] = REG_DMA_FEATURES_MAX,
+ [SDE_DSPP_IGC] = IGC,
+ [SDE_DSPP_PCC] = PCC,
[SDE_DSPP_GC] = GC,
[SDE_DSPP_HSIC] = REG_DMA_FEATURES_MAX,
[SDE_DSPP_MEMCOLOR] = REG_DMA_FEATURES_MAX,
@@ -52,6 +60,8 @@
[SDE_DSPP_VLUT] = VLUT_MEM_SIZE,
[SDE_DSPP_GAMUT] = GAMUT_LUT_MEM_SIZE,
[SDE_DSPP_GC] = GC_LUT_MEM_SIZE,
+ [SDE_DSPP_IGC] = IGC_LUT_MEM_SIZE,
+ [SDE_DSPP_PCC] = PCC_MEM_SIZE,
};
static u32 dspp_mapping[DSPP_MAX] = {
@@ -233,6 +243,7 @@
int rc = -ENOTSUPP;
struct sde_hw_reg_dma_ops *dma_ops;
bool is_supported = false;
+ u32 blk;
if (feature >= SDE_DSPP_MAX || idx >= DSPP_MAX) {
DRM_ERROR("invalid feature %x max %x dspp idx %x max %xd\n",
@@ -250,8 +261,8 @@
if (IS_ERR_OR_NULL(dma_ops))
return -ENOTSUPP;
- rc = dma_ops->check_support(feature_map[feature], dspp_mapping[idx],
- &is_supported);
+ blk = (feature_map[feature] == IGC) ? DSPP_IGC : dspp_mapping[idx];
+ rc = dma_ops->check_support(feature_map[feature], blk, &is_supported);
if (!rc)
rc = (is_supported) ? 0 : -ENOTSUPP;
@@ -379,7 +390,7 @@
{
struct sde_reg_dma_kickoff_cfg kick_off;
struct sde_hw_cp_cfg *hw_cfg = cfg;
- u32 op_mode;
+ u32 op_mode = 0;
struct sde_hw_reg_dma_ops *dma_ops;
struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
int rc;
@@ -519,7 +530,7 @@
int rc, i = 0;
u32 reg;
- rc = reg_dma_dspp_check(ctx, cfg, GAMUT);
+ rc = reg_dma_dspp_check(ctx, cfg, GC);
if (rc)
return;
@@ -603,6 +614,298 @@
}
}
+static void _dspp_igcv31_off(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct sde_reg_dma_kickoff_cfg kick_off;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct sde_hw_reg_dma_ops *dma_ops;
+ struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+ int rc;
+ u32 reg;
+
+ dma_ops = sde_reg_dma_get_ops();
+ dma_ops->reset_reg_dma_buf(dspp_buf[IGC][ctx->idx]);
+
+ REG_DMA_INIT_OPS(dma_write_cfg, dspp_mapping[ctx->idx], IGC,
+ dspp_buf[IGC][ctx->idx]);
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write decode select failed ret %d\n", rc);
+ return;
+ }
+
+ reg = IGC_DIS;
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->igc.base + IGC_OPMODE_OFF,
+ &reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("setting opcode failed ret %d\n", rc);
+ return;
+ }
+
+ REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[IGC][ctx->idx],
+ REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+ rc = dma_ops->kick_off(&kick_off);
+ if (rc)
+ DRM_ERROR("failed to kick off ret %d\n", rc);
+}
+
+void reg_dmav1_setup_dspp_igcv31(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct drm_msm_igc_lut *lut_cfg;
+ struct sde_hw_reg_dma_ops *dma_ops;
+ struct sde_reg_dma_kickoff_cfg kick_off;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+ int rc, i = 0, j = 0;
+ u32 *addr = NULL;
+ u32 offset = 0;
+ u32 reg;
+
+ rc = reg_dma_dspp_check(ctx, cfg, IGC);
+ if (rc)
+ return;
+
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("disable igc feature\n");
+ _dspp_igcv31_off(ctx, cfg);
+ return;
+ }
+
+ if (hw_cfg->len != sizeof(struct drm_msm_igc_lut)) {
+ DRM_ERROR("invalid size of payload len %d exp %zd\n",
+ hw_cfg->len, sizeof(struct drm_msm_igc_lut));
+ return;
+ }
+
+ lut_cfg = hw_cfg->payload;
+
+ dma_ops = sde_reg_dma_get_ops();
+ dma_ops->reset_reg_dma_buf(dspp_buf[IGC][ctx->idx]);
+
+ REG_DMA_INIT_OPS(dma_write_cfg, DSPP_IGC, IGC, dspp_buf[IGC][ctx->idx]);
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write decode select failed ret %d\n", rc);
+ return;
+ }
+
+ for (i = 0; i < IGC_TBL_NUM; i++) {
+ addr = lut_cfg->c0 + (i * ARRAY_SIZE(lut_cfg->c0));
+ offset = IGC_C0_OFF + (i * sizeof(u32));
+
+ for (j = 0; j < IGC_TBL_LEN; j++) {
+ addr[j] &= IGC_DATA_MASK;
+ addr[j] |= IGC_DSPP_SEL_MASK(ctx->idx - 1);
+ if (j == 0)
+ addr[j] |= IGC_INDEX_UPDATE;
+ }
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, offset, addr,
+ IGC_TBL_LEN * sizeof(u32),
+ REG_BLK_WRITE_INC, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("lut write failed ret %d\n", rc);
+ return;
+ }
+ }
+
+ REG_DMA_INIT_OPS(dma_write_cfg, dspp_mapping[ctx->idx], IGC,
+ dspp_buf[IGC][ctx->idx]);
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write decode select failed ret %d\n", rc);
+ return;
+ }
+
+ if (lut_cfg->flags & IGC_DITHER_ENABLE) {
+ reg = lut_cfg->strength & IGC_DITHER_DATA_MASK;
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->igc.base + IGC_DITHER_OFF,
+ &reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("dither strength failed ret %d\n", rc);
+ return;
+ }
+ }
+
+ reg = IGC_EN;
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->igc.base + IGC_OPMODE_OFF,
+ &reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("setting opcode failed ret %d\n", rc);
+ return;
+ }
+
+ REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[IGC][ctx->idx],
+ REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+ rc = dma_ops->kick_off(&kick_off);
+ if (rc)
+ DRM_ERROR("failed to kick off ret %d\n", rc);
+}
+
+static void _dspp_pccv4_off(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct sde_reg_dma_kickoff_cfg kick_off;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct sde_hw_reg_dma_ops *dma_ops;
+ struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+ int rc;
+ u32 reg;
+
+ dma_ops = sde_reg_dma_get_ops();
+ dma_ops->reset_reg_dma_buf(dspp_buf[PCC][ctx->idx]);
+
+ REG_DMA_INIT_OPS(dma_write_cfg, dspp_mapping[ctx->idx], PCC,
+ dspp_buf[PCC][ctx->idx]);
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write decode select failed ret %d\n", rc);
+ return;
+ }
+
+ reg = PCC_DIS;
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->pcc.base,
+ &reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("setting opcode failed ret %d\n", rc);
+ return;
+ }
+
+ REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[PCC][ctx->idx],
+ REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+ rc = dma_ops->kick_off(&kick_off);
+ if (rc)
+ DRM_ERROR("failed to kick off ret %d\n", rc);
+}
+
+void reg_dmav1_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct sde_hw_reg_dma_ops *dma_ops;
+ struct sde_reg_dma_kickoff_cfg kick_off;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+ struct drm_msm_pcc *pcc_cfg;
+ struct drm_msm_pcc_coeff *coeffs = NULL;
+ u32 *data = NULL;
+ int rc, i = 0;
+ u32 reg = 0;
+
+ rc = reg_dma_dspp_check(ctx, cfg, PCC);
+ if (rc)
+ return;
+
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("disable pcc feature\n");
+ _dspp_pccv4_off(ctx, cfg);
+ return;
+ }
+
+ if (hw_cfg->len != sizeof(struct drm_msm_pcc)) {
+ DRM_ERROR("invalid size of payload len %d exp %zd\n",
+ hw_cfg->len, sizeof(struct drm_msm_pcc));
+ return;
+ }
+
+ pcc_cfg = hw_cfg->payload;
+
+ dma_ops = sde_reg_dma_get_ops();
+ dma_ops->reset_reg_dma_buf(dspp_buf[PCC][ctx->idx]);
+
+ REG_DMA_INIT_OPS(dma_write_cfg, dspp_mapping[ctx->idx],
+ PCC, dspp_buf[PCC][ctx->idx]);
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write decode select failed ret %d\n", rc);
+ return;
+ }
+
+ data = kzalloc(PCC_LEN, GFP_KERNEL);
+ if (!data)
+ return;
+
+ for (i = 0; i < PCC_NUM_PLANES; i++) {
+ switch (i) {
+ case 0:
+ coeffs = &pcc_cfg->r;
+ data[i + 24] = pcc_cfg->r_rr;
+ data[i + 27] = pcc_cfg->r_gg;
+ data[i + 30] = pcc_cfg->r_bb;
+ break;
+ case 1:
+ coeffs = &pcc_cfg->g;
+ data[i + 24] = pcc_cfg->g_rr;
+ data[i + 27] = pcc_cfg->g_gg;
+ data[i + 30] = pcc_cfg->g_bb;
+ break;
+ case 2:
+ coeffs = &pcc_cfg->b;
+ data[i + 24] = pcc_cfg->b_rr;
+ data[i + 27] = pcc_cfg->b_gg;
+ data[i + 30] = pcc_cfg->b_bb;
+ break;
+ default:
+ DRM_ERROR("invalid pcc plane: %d\n", i);
+ goto exit;
+ }
+
+ data[i] = coeffs->c;
+ data[i + 3] = coeffs->r;
+ data[i + 6] = coeffs->g;
+ data[i + 9] = coeffs->b;
+ data[i + 12] = coeffs->rg;
+ data[i + 15] = coeffs->rb;
+ data[i + 18] = coeffs->gb;
+ data[i + 21] = coeffs->rgb;
+ }
+
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->pcc.base + PCC_C_OFF,
+ data, PCC_LEN,
+ REG_BLK_WRITE_SINGLE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write pcc lut failed ret %d\n", rc);
+ goto exit;
+ }
+
+ reg = PCC_EN;
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->pcc.base,
+ &reg, sizeof(reg), REG_SINGLE_WRITE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("setting opcode failed ret %d\n", rc);
+ goto exit;
+ }
+
+ REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[PCC][ctx->idx],
+ REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+ rc = dma_ops->kick_off(&kick_off);
+ if (rc)
+ DRM_ERROR("failed to kick off ret %d\n", rc);
+
+exit:
+ kfree(data);
+}
+
int reg_dmav1_deinit_dspp_ops(enum sde_dspp idx)
{
int i;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h
index 94e1a5c..bb72c8f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h
@@ -46,13 +46,27 @@
void reg_dmav1_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg);
/**
- * reg_dmav1_setup_dspp_gc_v18() - gc v18 implementation using reg dma v1.
+ * reg_dmav1_setup_dspp_gcv18() - gc v18 implementation using reg dma v1.
* @ctx: dspp ctx info
* @cfg: pointer to struct sde_hw_cp_cfg
*/
void reg_dmav1_setup_dspp_gcv18(struct sde_hw_dspp *ctx, void *cfg);
/**
+ * reg_dmav1_setup_dspp_igcv31() - igc v31 implementation using reg dma v1.
+ * @ctx: dspp ctx info
+ * @cfg: pointer to struct sde_hw_cp_cfg
+ */
+void reg_dmav1_setup_dspp_igcv31(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * reg_dmav1_setup_dspp_pccv4() - pcc v4 implementation using reg dma v1.
+ * @ctx: dspp ctx info
+ * @cfg: pointer to struct sde_hw_cp_cfg
+ */
+void reg_dmav1_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
* reg_dmav1_deinit_dspp_ops() - deinitialize the dspp feature op for sde v4
* which were initialized.
* @idx: dspp idx
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index b773187..613ac53 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -38,6 +38,18 @@
#define MDP_WD_TIMER_0_CTL 0x380
#define MDP_WD_TIMER_0_CTL2 0x384
#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+#define MDP_WD_TIMER_1_CTL 0x390
+#define MDP_WD_TIMER_1_CTL2 0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
+#define MDP_WD_TIMER_2_CTL 0x420
+#define MDP_WD_TIMER_2_CTL2 0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
+#define MDP_WD_TIMER_3_CTL 0x430
+#define MDP_WD_TIMER_3_CTL2 0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
+#define MDP_WD_TIMER_4_CTL 0x440
+#define MDP_WD_TIMER_4_CTL2 0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
#define MDP_TICK_COUNT 16
#define XO_CLK_RATE 19200
@@ -204,38 +216,74 @@
status->wb[WB_3] = 0;
}
-static void sde_hw_setup_vsync_sel(struct sde_hw_mdp *mdp,
- struct sde_watchdog_te_status *cfg, bool watchdog_te)
+static void sde_hw_setup_vsync_source(struct sde_hw_mdp *mdp,
+ struct sde_vsync_source_cfg *cfg)
{
- struct sde_hw_blk_reg_map *c = &mdp->hw;
- u32 reg = 0;
- int i = 0;
- u32 pp_offset[] = {0xC, 0x8, 0x4, 0x13};
+ struct sde_hw_blk_reg_map *c;
+ u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+ static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
- if (!mdp)
+ if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
return;
+ c = &mdp->hw;
reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
for (i = 0; i < cfg->pp_count; i++) {
int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+ if (pp_idx >= ARRAY_SIZE(pp_offset))
+ continue;
- if (watchdog_te)
- reg |= 0xF << pp_offset[pp_idx];
- else
- reg &= ~(0xF << pp_offset[pp_idx]);
+ reg &= ~(0xf << pp_offset[pp_idx]);
+ reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
}
-
SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
- if (watchdog_te) {
- SDE_REG_WRITE(c, MDP_WD_TIMER_0_LOAD_VALUE,
+ if (cfg->vsync_source >= SDE_VSYNC_SOURCE_WD_TIMER_4 &&
+ cfg->vsync_source <= SDE_VSYNC_SOURCE_WD_TIMER_0) {
+ switch (cfg->vsync_source) {
+ case SDE_VSYNC_SOURCE_WD_TIMER_4:
+ wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_4_CTL;
+ wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+ break;
+ case SDE_VSYNC_SOURCE_WD_TIMER_3:
+ wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_3_CTL;
+ wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+ break;
+ case SDE_VSYNC_SOURCE_WD_TIMER_2:
+ wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_2_CTL;
+ wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+ break;
+ case SDE_VSYNC_SOURCE_WD_TIMER_1:
+ wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_1_CTL;
+ wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+ break;
+ case SDE_VSYNC_SOURCE_WD_TIMER_0:
+ default:
+ wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_0_CTL;
+ wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+ break;
+ }
+
+ if (cfg->is_dummy) {
+ SDE_REG_WRITE(c, wd_ctl2, 0x0);
+ } else {
+ SDE_REG_WRITE(c, wd_load_value,
CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
- SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL, BIT(0)); /* clear timer */
- reg = SDE_REG_READ(c, MDP_WD_TIMER_0_CTL2);
- reg |= BIT(8); /* enable heartbeat timer */
- reg |= BIT(0); /* enable WD timer */
- SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL2, reg);
+ SDE_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+ reg = SDE_REG_READ(c, wd_ctl2);
+ reg |= BIT(8); /* enable heartbeat timer */
+ reg |= BIT(0); /* enable WD timer */
+ SDE_REG_WRITE(c, wd_ctl2, reg);
+ }
+
+ /* make sure that timers are enabled/disabled for vsync state */
+ wmb();
}
}
@@ -308,7 +356,7 @@
ops->setup_cdm_output = sde_hw_setup_cdm_output;
ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
ops->get_danger_status = sde_hw_get_danger_status;
- ops->setup_vsync_sel = sde_hw_setup_vsync_sel;
+ ops->setup_vsync_source = sde_hw_setup_vsync_source;
ops->get_safe_status = sde_hw_get_safe_status;
ops->setup_dce = sde_hw_setup_dce;
ops->reset_ubwc = sde_hw_reset_ubwc;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 573780e..86c4219 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -78,15 +78,21 @@
};
/**
- * struct sde_watchdog_te_status - configure watchdog timer to generate TE
+ * struct sde_vsync_source_cfg - configure vsync source and configure the
+ * watchdog timers if required.
* @pp_count: number of ping pongs active
* @frame_rate: Display frame rate
* @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ * @is_dummy: a dummy source of vsync selection. It must not be selected for
+ * any case other than sde rsc idle request.
*/
-struct sde_watchdog_te_status {
+struct sde_vsync_source_cfg {
u32 pp_count;
u32 frame_rate;
u32 ppnumber[PINGPONG_MAX];
+ u32 vsync_source;
+ bool is_dummy;
};
/**
@@ -155,13 +161,12 @@
struct sde_danger_safe_status *status);
/**
- * setup_vsync_sel - get vsync configuration details
+ * setup_vsync_source - setup vsync source configuration details
* @mdp: mdp top context driver
- * @cfg: watchdog timer configuration
- * @watchdog_te: watchdog timer enable
+ * @cfg: vsync source selection configuration
*/
- void (*setup_vsync_sel)(struct sde_hw_mdp *mdp,
- struct sde_watchdog_te_status *cfg, bool watchdog_te);
+ void (*setup_vsync_source)(struct sde_hw_mdp *mdp,
+ struct sde_vsync_source_cfg *cfg);
/**
* get_safe_status - get safe status
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index b9fbd62..42af245 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -393,9 +393,6 @@
{
struct sde_kms *sde_kms;
struct msm_drm_private *priv;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state;
- int i;
if (!kms || !old_state)
return;
@@ -405,8 +402,6 @@
return;
priv = sde_kms->dev->dev_private;
- for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
- sde_crtc_complete_commit(crtc, old_crtc_state);
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
@@ -519,8 +514,10 @@
}
/* old_state actually contains updated crtc pointers */
- for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
- sde_crtc_prepare_commit(crtc, old_crtc_state);
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ if (crtc->state->active)
+ sde_crtc_prepare_commit(crtc, old_crtc_state);
+ }
}
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 8077756..90e6caf 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -957,15 +957,15 @@
cfg->dir_lut = msm_property_get_blob(
&psde->property_info,
- pstate->property_blobs, &cfg->dir_len,
+ &pstate->property_state, &cfg->dir_len,
PLANE_PROP_SCALER_LUT_ED);
cfg->cir_lut = msm_property_get_blob(
&psde->property_info,
- pstate->property_blobs, &cfg->cir_len,
+ &pstate->property_state, &cfg->cir_len,
PLANE_PROP_SCALER_LUT_CIR);
cfg->sep_lut = msm_property_get_blob(
&psde->property_info,
- pstate->property_blobs, &cfg->sep_len,
+ &pstate->property_state, &cfg->sep_len,
PLANE_PROP_SCALER_LUT_SEP);
if (!cfg->dir_lut || !cfg->cir_lut || !cfg->sep_lut)
ret = -ENODATA;
@@ -1276,7 +1276,7 @@
if (psde->pipe_hw->ops.setup_pa_memcolor) {
/* Skin memory color setup */
memcol = msm_property_get_blob(&psde->property_info,
- pstate->property_blobs,
+ &pstate->property_state,
&memcol_sz,
PLANE_PROP_SKIN_COLOR);
psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
@@ -1284,7 +1284,7 @@
/* Sky memory color setup */
memcol = msm_property_get_blob(&psde->property_info,
- pstate->property_blobs,
+ &pstate->property_state,
&memcol_sz,
PLANE_PROP_SKY_COLOR);
psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
@@ -1292,7 +1292,7 @@
/* Foliage memory color setup */
memcol = msm_property_get_blob(&psde->property_info,
- pstate->property_blobs,
+ &pstate->property_state,
&memcol_sz,
PLANE_PROP_FOLIAGE_COLOR);
psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
@@ -1753,6 +1753,15 @@
drm_rect_height(&rstate->out_rot_rect) >> 16,
rstate->out_rot_rect.x1 >> 16,
rstate->out_rot_rect.y1 >> 16);
+ SDE_EVT32_VERBOSE(DRMID(plane), rstate->sequence_id,
+ rstate->out_xpos, rstate->nplane,
+ in_rot->x1 >> 16, in_rot->y1 >> 16,
+ drm_rect_width(in_rot) >> 16,
+ drm_rect_height(in_rot) >> 16,
+ rstate->out_rot_rect.x1 >> 16,
+ rstate->out_rot_rect.y1 >> 16,
+ drm_rect_width(&rstate->out_rot_rect) >> 16,
+ drm_rect_height(&rstate->out_rot_rect) >> 16);
}
/**
@@ -1840,7 +1849,7 @@
struct sde_hw_fmt_layout layout;
memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
- sde_format_populate_layout(rstate->aspace, state->fb,
+ sde_format_populate_layout(pstate->aspace, state->fb,
&layout);
for (i = 0; i < ARRAY_SIZE(rot_cmd->src_iova); i++) {
rot_cmd->src_iova[i] = layout.plane_addr[i];
@@ -1849,7 +1858,7 @@
rot_cmd->src_planes = layout.num_planes;
memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
- sde_format_populate_layout(rstate->aspace, rstate->out_fb,
+ sde_format_populate_layout(pstate->aspace, rstate->out_fb,
&layout);
for (i = 0; i < ARRAY_SIZE(rot_cmd->dst_iova); i++) {
rot_cmd->dst_iova[i] = layout.plane_addr[i];
@@ -1951,7 +1960,7 @@
struct sde_kms_fbo *fbo;
struct drm_framebuffer *fb;
- if (!plane || !cstate || !rstate)
+ if (!plane || !cstate || !rstate || !rstate->rot_hw)
return;
fbo = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
@@ -2014,27 +2023,6 @@
if (sde_plane_enabled(new_state) && !new_rstate->out_fb)
_sde_plane_rot_get_fb(plane, cstate, new_rstate);
- /* release buffer if output format configuration changes */
- if (new_rstate->out_fb &&
- ((new_rstate->out_fb_height != new_rstate->out_fb->height) ||
- (new_rstate->out_fb_width != new_rstate->out_fb->width) ||
- (new_rstate->out_fb_pixel_format !=
- new_rstate->out_fb->pixel_format) ||
- (new_rstate->out_fb_modifier[0] !=
- new_rstate->out_fb->modifier[0]) ||
- (new_rstate->out_fb_flags != new_rstate->out_fb->flags))) {
-
- SDE_DEBUG("plane%d.%d release fb/fbo\n", plane->base.id,
- new_rstate->sequence_id);
-
- sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
- (u64) &new_rstate->rot_hw->base);
- new_rstate->out_fb = NULL;
- sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
- (u64) &new_rstate->rot_hw->base);
- new_rstate->out_fbo = NULL;
- }
-
/* create new stream buffer if it is not available */
if (sde_plane_enabled(new_state) && !new_rstate->out_fb) {
u32 fb_w = drm_rect_width(&new_rstate->out_rot_rect) >> 16;
@@ -2042,7 +2030,6 @@
SDE_DEBUG("plane%d.%d allocate fb/fbo\n", plane->base.id,
new_rstate->sequence_id);
- new_rstate->aspace = new_pstate->aspace;
/* check if out_fb is already attached to rotator */
new_rstate->out_fbo = sde_kms_fbo_alloc(plane->dev, fb_w, fb_h,
@@ -2070,6 +2057,8 @@
ret = -EINVAL;
goto error_create_fb;
}
+ SDE_EVT32_VERBOSE(DRMID(plane), new_rstate->sequence_id,
+ new_rstate->out_fb->base.id);
ret = sde_crtc_res_add(cstate, SDE_CRTC_RES_ROT_OUT_FB,
(u64) &new_rstate->rot_hw->base,
@@ -2081,9 +2070,9 @@
}
/* prepare rotator input buffer */
- ret = msm_framebuffer_prepare(new_state->fb, new_rstate->aspace);
+ ret = msm_framebuffer_prepare(new_state->fb, new_pstate->aspace);
if (ret) {
- SDE_ERROR("failed to prepare input framebuffer\n");
+ SDE_ERROR("failed to prepare input framebuffer, %d\n", ret);
goto error_prepare_input_buffer;
}
@@ -2093,9 +2082,10 @@
new_rstate->sequence_id);
ret = msm_framebuffer_prepare(new_rstate->out_fb,
- new_rstate->aspace);
+ new_pstate->aspace);
if (ret) {
- SDE_ERROR("failed to prepare inline framebuffer\n");
+ SDE_ERROR("failed to prepare inline framebuffer, %d\n",
+ ret);
goto error_prepare_output_buffer;
}
}
@@ -2103,7 +2093,7 @@
return 0;
error_prepare_output_buffer:
- msm_framebuffer_cleanup(new_state->fb, new_rstate->aspace);
+ msm_framebuffer_cleanup(new_state->fb, new_pstate->aspace);
error_prepare_input_buffer:
sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
(u64) &new_rstate->rot_hw->base);
@@ -2159,7 +2149,7 @@
if (sde_plane_enabled(old_state)) {
if (old_rstate->out_fb) {
msm_framebuffer_cleanup(old_rstate->out_fb,
- old_rstate->aspace);
+ old_pstate->aspace);
sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
(u64) &old_rstate->rot_hw->base);
old_rstate->out_fb = NULL;
@@ -2168,7 +2158,7 @@
old_rstate->out_fbo = NULL;
}
- msm_framebuffer_cleanup(old_state->fb, old_rstate->aspace);
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
}
}
@@ -2222,22 +2212,24 @@
rstate->out_sbuf = psde->sbuf_mode || rstate->rot90;
if (sde_plane_enabled(state) && rstate->out_sbuf) {
- SDE_DEBUG("plane%d.%d acquire rotator\n",
- plane->base.id, rstate->sequence_id);
+ SDE_DEBUG("plane%d.%d acquire rotator, fb %d\n",
+ plane->base.id, rstate->sequence_id,
+ state->fb ? state->fb->base.id : -1);
hw_blk = sde_crtc_res_get(cstate, SDE_HW_BLK_ROT,
(u64) state->fb);
if (!hw_blk) {
- SDE_ERROR("plane%d no available rotator\n",
- plane->base.id);
+ SDE_ERROR("plane%d.%d no available rotator, fb %d\n",
+ plane->base.id, rstate->sequence_id,
+ state->fb ? state->fb->base.id : -1);
return -EINVAL;
}
rstate->rot_hw = to_sde_hw_rot(hw_blk);
if (!rstate->rot_hw->ops.commit) {
- SDE_ERROR("plane%d invalid rotator ops\n",
- plane->base.id);
+ SDE_ERROR("plane%d.%d invalid rotator ops\n",
+ plane->base.id, rstate->sequence_id);
sde_crtc_res_put(cstate,
SDE_HW_BLK_ROT, (u64) state->fb);
rstate->rot_hw = NULL;
@@ -2251,19 +2243,44 @@
}
if (sde_plane_enabled(state) && rstate->out_sbuf && rstate->rot_hw) {
+ uint32_t fb_id;
- SDE_DEBUG("plane%d.%d use rotator\n",
- plane->base.id, rstate->sequence_id);
+ fb_id = state->fb ? state->fb->base.id : -1;
+ SDE_DEBUG("plane%d.%d use rotator, fb %d\n",
+ plane->base.id, rstate->sequence_id, fb_id);
sde_plane_rot_calc_cfg(plane, state);
- /* attempt to reuse stream buffer if already available */
- if (sde_plane_enabled(state))
- _sde_plane_rot_get_fb(plane, cstate, rstate);
-
ret = sde_plane_rot_submit_command(plane, state,
SDE_HW_ROT_CMD_VALIDATE);
+ if (ret)
+ return ret;
+ /* check if stream buffer is already attached to rotator */
+ _sde_plane_rot_get_fb(plane, cstate, rstate);
+
+ /* release buffer if output format configuration changes */
+ if (rstate->out_fb &&
+ ((rstate->out_fb_height != rstate->out_fb->height) ||
+ (rstate->out_fb_width != rstate->out_fb->width) ||
+ (rstate->out_fb_pixel_format !=
+ rstate->out_fb->pixel_format) ||
+ (rstate->out_fb_modifier[0] !=
+ rstate->out_fb->modifier[0]) ||
+ (rstate->out_fb_flags != rstate->out_fb->flags))) {
+
+ SDE_DEBUG("plane%d.%d release fb/fbo\n", plane->base.id,
+ rstate->sequence_id);
+ SDE_EVT32_VERBOSE(DRMID(plane),
+ rstate->sequence_id, fb_id);
+
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+ (u64) &rstate->rot_hw->base);
+ rstate->out_fb = NULL;
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+ (u64) &rstate->rot_hw->base);
+ rstate->out_fbo = NULL;
+ }
} else {
SDE_DEBUG("plane%d.%d bypass rotator\n", plane->base.id,
@@ -2382,8 +2399,6 @@
{
struct sde_plane_state *pstate = to_sde_plane_state(new_state);
struct sde_plane_rot_state *rstate = &pstate->rot;
- struct drm_crtc_state *cstate;
- int ret;
rstate->sequence_id++;
@@ -2391,19 +2406,7 @@
rstate->sequence_id,
!!rstate->out_sbuf, !!rstate->rot_hw);
- cstate = _sde_plane_get_crtc_state(new_state);
- if (IS_ERR(cstate)) {
- ret = PTR_ERR(cstate);
- SDE_ERROR("invalid crtc state %d\n", ret);
- return -EINVAL;
- }
-
- if (rstate->rot_hw && cstate)
- sde_crtc_res_get(cstate, SDE_HW_BLK_ROT, (u64) rstate->in_fb);
- else if (rstate->rot_hw && !cstate)
- SDE_ERROR("plane%d.%d zombie rotator hw\n",
- plane->base.id, rstate->sequence_id);
-
+ rstate->rot_hw = NULL;
rstate->out_fb = NULL;
rstate->out_fbo = NULL;
@@ -2723,16 +2726,19 @@
struct drm_plane_state *old_state)
{
struct sde_plane *psde = to_sde_plane(plane);
+ struct sde_plane_state *old_pstate;
struct sde_plane_rot_state *old_rstate;
- if (!old_state->fb)
+ if (!old_state || !old_state->fb)
return;
+ old_pstate = to_sde_plane_state(old_state);
+
SDE_DEBUG_PLANE(psde, "FB[%u]\n", old_state->fb->base.id);
- old_rstate = &to_sde_plane_state(old_state)->rot;
+ old_rstate = &old_pstate->rot;
- msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->aspace);
+ msm_framebuffer_cleanup(old_rstate->out_fb, old_pstate->aspace);
sde_plane_rot_cleanup_fb(plane, old_state);
}
@@ -3244,7 +3250,8 @@
}
/* determine what needs to be refreshed */
- while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
+ while ((idx = msm_property_pop_dirty(&psde->property_info,
+ &pstate->property_state)) >= 0) {
switch (idx) {
case PLANE_PROP_SCALER_V1:
case PLANE_PROP_SCALER_V2:
@@ -3438,7 +3445,7 @@
}
if (psde->pipe_hw->ops.setup_sys_cache) {
- if (rstate->out_sbuf) {
+ if (rstate->out_sbuf && rstate->rot_hw) {
if (rstate->nplane < 2)
pstate->sc_cfg.op_mode =
SDE_PIPE_SC_OP_MODE_INLINE_SINGLE;
@@ -3902,7 +3909,8 @@
}
/* force property to be dirty, even if the pointer didn't change */
- msm_property_set_dirty(&psde->property_info, PLANE_PROP_SCALER_V1);
+ msm_property_set_dirty(&psde->property_info,
+ &pstate->property_state, PLANE_PROP_SCALER_V1);
/* populate from user space */
pe = &pstate->pixel_ext;
@@ -3968,7 +3976,8 @@
}
/* force property to be dirty, even if the pointer didn't change */
- msm_property_set_dirty(&psde->property_info, PLANE_PROP_SCALER_V2);
+ msm_property_set_dirty(&psde->property_info,
+ &pstate->property_state, PLANE_PROP_SCALER_V2);
/* populate from user space */
pe = &pstate->pixel_ext;
@@ -4085,8 +4094,7 @@
} else {
pstate = to_sde_plane_state(state);
ret = msm_property_atomic_set(&psde->property_info,
- pstate->property_values, pstate->property_blobs,
- property, val);
+ &pstate->property_state, property, val);
if (!ret) {
idx = msm_property_index(&psde->property_info,
property);
@@ -4103,7 +4111,7 @@
break;
case PLANE_PROP_SCALER_V2:
_sde_plane_set_scaler_v2(psde, pstate,
- (void *)val);
+ (void *)val);
break;
case PLANE_PROP_EXCL_RECT_V1:
_sde_plane_set_excl_rect_v1(psde, pstate,
@@ -4148,8 +4156,7 @@
pstate = to_sde_plane_state(state);
sde_plane_rot_install_caps(plane);
ret = msm_property_atomic_get(&psde->property_info,
- pstate->property_values, pstate->property_blobs,
- property, val);
+ &pstate->property_state, property, val);
}
return ret;
@@ -4210,7 +4217,7 @@
/* destroy value helper */
msm_property_destroy_state(&psde->property_info, pstate,
- pstate->property_values, pstate->property_blobs);
+ &pstate->property_state);
}
static struct drm_plane_state *
@@ -4241,13 +4248,14 @@
/* duplicate value helper */
msm_property_duplicate_state(&psde->property_info, old_state, pstate,
- pstate->property_values, pstate->property_blobs);
+ &pstate->property_state, pstate->property_values);
/* clear out any input fence */
pstate->input_fence = 0;
input_fence_default = msm_property_get_default(
&psde->property_info, PLANE_PROP_INPUT_FENCE);
- msm_property_set_property(&psde->property_info, pstate->property_values,
+ msm_property_set_property(&psde->property_info,
+ &pstate->property_state,
PLANE_PROP_INPUT_FENCE, input_fence_default);
pstate->dirty = 0x0;
@@ -4287,7 +4295,8 @@
/* reset value helper */
msm_property_reset_state(&psde->property_info, pstate,
- pstate->property_values, pstate->property_blobs);
+ &pstate->property_state,
+ pstate->property_values);
pstate->base.plane = plane;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index be0ea67..a5599a5 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -34,7 +34,6 @@
* @rot90: true if rotation of 90 degree is required
* @hflip: true if horizontal flip is required
* @vflip: true if vertical flip is required
- * @aspace: pointer address space for input/output buffers
* @rot_cmd: rotator configuration command
* @nplane: total number of drm plane attached to rotator
* @in_fb: input fb attached to rotator
@@ -64,7 +63,6 @@
bool rot90;
bool hflip;
bool vflip;
- struct msm_gem_address_space *aspace;
struct sde_hw_rot_cmd rot_cmd;
int nplane;
/* input */
@@ -121,8 +119,8 @@
/**
* struct sde_plane_state: Define sde extension of drm plane state object
* @base: base drm plane state object
+ * @property_state: Local storage for msm_prop properties
* @property_values: cached plane property values
- * @property_blobs: blob properties
* @aspace: pointer to address space for input/output buffers
* @input_fence: dereferenced input fence pointer
* @stage: assigned by crtc blender
@@ -138,8 +136,8 @@
*/
struct sde_plane_state {
struct drm_plane_state base;
- uint64_t property_values[PLANE_PROP_COUNT];
- struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT];
+ struct msm_property_state property_state;
+ struct msm_property_value property_values[PLANE_PROP_COUNT];
struct msm_gem_address_space *aspace;
void *input_fence;
enum sde_stage stage;
@@ -180,8 +178,8 @@
* @X: Property index, from enum msm_mdp_plane_property
* Returns: Integer value of requested property
*/
-#define sde_plane_get_property(S, X) \
- ((S) && ((X) < PLANE_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+#define sde_plane_get_property(S, X) ((S) && ((X) < PLANE_PROP_COUNT) ? \
+ ((S)->property_values[(X)].value) : 0)
/**
* sde_plane_pipe - return sspp identifier for the given plane
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 0382ed0..be3a8af 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1150,7 +1150,7 @@
(void) msm_property_set_property(
sde_connector_get_propinfo(conn),
- sde_connector_get_property_values(conn->state),
+ sde_connector_get_property_state(conn->state),
CONNECTOR_PROP_TOPOLOGY_NAME,
SDE_RM_TOPOLOGY_NONE);
}
@@ -1170,7 +1170,7 @@
ret = msm_property_set_property(
sde_connector_get_propinfo(conn_state->connector),
- sde_connector_get_property_values(conn_state),
+ sde_connector_get_property_state(conn_state),
CONNECTOR_PROP_TOPOLOGY_NAME,
rsvp->topology);
if (ret) {
@@ -1267,7 +1267,7 @@
(void) msm_property_set_property(
sde_connector_get_propinfo(
conn_state->connector),
- sde_connector_get_property_values(conn_state),
+ sde_connector_get_property_state(conn_state),
CONNECTOR_PROP_TOPOLOGY_NAME,
SDE_RM_TOPOLOGY_NONE);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index e233fc7..47fc39b 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -125,7 +125,7 @@
TP_printk("crtc:%d", __entry->crtc_id)
);
-TRACE_EVENT(sde_mark_write,
+TRACE_EVENT(tracing_mark_write,
TP_PROTO(int pid, const char *name, bool trace_begin),
TP_ARGS(pid, name, trace_begin),
TP_STRUCT__entry(
@@ -230,8 +230,8 @@
__entry->update_clk)
);
-#define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0)
-#define SDE_ATRACE_BEGIN(name) trace_sde_mark_write(current->tgid, name, 1)
+#define SDE_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define SDE_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
#define SDE_ATRACE_FUNC() SDE_ATRACE_BEGIN(__func__)
#define SDE_ATRACE_INT(name, value) \
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index b058bdd..58448ca 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -55,6 +55,12 @@
#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
#define MMSS_VBIF_TEST_BUS_OUT 0x230
+/* Vbif error info */
+#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
+#define MMSS_VBIF_ERR_INFO 0X1a0
+#define MMSS_VBIF_ERR_INFO_1 0x1a4
+#define MMSS_VBIF_CLIENT_NUM 14
+
/* print debug ranges in groups of 4 u32s */
#define REG_DUMP_ALIGN 16
@@ -2366,7 +2372,8 @@
bool in_log, in_mem;
u32 **dump_mem = NULL;
u32 *dump_addr = NULL;
- u32 value;
+ u32 value, d0, d1;
+ unsigned long reg;
struct vbif_debug_bus_entry *head;
phys_addr_t phys = 0;
int i, list_size = 0;
@@ -2439,6 +2446,30 @@
/* make sure that vbif core is on */
wmb();
+ /**
+ * Extract VBIF error info based on XIN halt status.
+ * If the XIN client is not in HALT state, then retrieve the
+ * VBIF error info for it.
+ */
+ reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+ dev_err(sde_dbg_base.dev, "XIN HALT:0x%lX\n", reg);
+ reg >>= 16;
+ for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+ if (!test_bit(0, ®)) {
+ writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+ /* make sure reg write goes through */
+ wmb();
+
+ d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+ d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+ dev_err(sde_dbg_base.dev,
+ "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+ i, d0, d1);
+ }
+ reg >>= 1;
+ }
+
for (i = 0; i < bus_size; i++) {
head = dbg_bus + i;
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 02d46c7..e14d60e 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -27,6 +27,7 @@
#define SDE_EVTLOG_FUNC_CASE5 0x7777
#define SDE_EVTLOG_PANIC 0xdead
#define SDE_EVTLOG_FATAL 0xbad
+#define SDE_EVTLOG_ERROR 0xebad
#define SDE_DBG_DUMP_DATA_LIMITER (NULL)
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index 130bd1f..3c03b92 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -228,10 +228,17 @@
{
u8 cea_mode = 0;
struct drm_display_mode *mode;
+ u32 mode_fmt_flags = 0;
/* Need to add Y420 support flag to the modes */
list_for_each_entry(mode, &connector->probed_modes, head) {
+ /* Cache the format flags before clearing */
+ mode_fmt_flags = mode->flags;
+ /* Clear the RGB/YUV format flags before calling upstream API */
+ mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
cea_mode = drm_match_cea_mode(mode);
+ /* Restore the format flags */
+ mode->flags = mode_fmt_flags;
if ((cea_mode != 0) && (cea_mode == video_format)) {
SDE_EDID_DEBUG("%s found match for %d ", __func__,
video_format);
@@ -245,7 +252,7 @@
const u8 *db)
{
u32 offset = 0;
- u8 len = 0;
+ u8 cmdb_len = 0;
u8 svd_len = 0;
const u8 *svd = NULL;
u32 i = 0, j = 0;
@@ -261,10 +268,8 @@
return;
}
SDE_EDID_DEBUG("%s +\n", __func__);
- len = db[0] & 0x1f;
+ cmdb_len = db[0] & 0x1f;
- if (len < 7)
- return;
/* Byte 3 to L+1 contain SVDs */
offset += 2;
@@ -272,20 +277,24 @@
if (svd) {
/*moving to the next byte as vic info begins there*/
- ++svd;
svd_len = svd[0] & 0x1f;
+ ++svd;
}
for (i = 0; i < svd_len; i++, j++) {
- video_format = *svd & 0x7F;
- if (db[offset] & (1 << j))
+ video_format = *(svd + i) & 0x7F;
+ if (cmdb_len == 1) {
+ /* If cmdb_len is 1, it means all SVDs support YUV */
+ sde_edid_set_y420_support(connector, video_format);
+ } else if (db[offset] & (1 << j)) {
sde_edid_set_y420_support(connector, video_format);
- if (j & 0x80) {
- j = j/8;
- offset++;
- if (offset >= len)
- break;
+ if (j & 0x80) {
+ j = j/8;
+ offset++;
+ if (offset >= cmdb_len)
+ break;
+ }
}
}
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
index eb68439..b58b322 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -33,6 +33,8 @@
#define SDE_CEA_EXT 0x02
#define SDE_EXTENDED_TAG 0x07
+#define SDE_DRM_MODE_FLAG_FMT_MASK (0x3 << 20)
+
enum extended_data_block_types {
VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 9730f0b..54bdd42 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -46,8 +46,9 @@
#define RSC_TIME_SLOT_0_NS ((SINGLE_TCS_EXECUTION_TIME * 2) + 100)
#define DEFAULT_PANEL_FPS 60
-#define DEFAULT_PANEL_JITTER 5
-#define DEFAULT_PANEL_PREFILL_LINES 16
+#define DEFAULT_PANEL_JITTER_NUMERATOR 2
+#define DEFAULT_PANEL_JITTER_DENOMINATOR 1
+#define DEFAULT_PANEL_PREFILL_LINES 25
#define DEFAULT_PANEL_VTOTAL (480 + DEFAULT_PANEL_PREFILL_LINES)
#define TICKS_IN_NANO_SECOND 1000000000
@@ -57,6 +58,13 @@
#define TRY_CLK_MODE_SWITCH 0xFFFE
#define STATE_UPDATE_NOT_ALLOWED 0xFFFD
+/**
+ * Expected primary command mode panel vsync ranges
+ * Note: update if a primary panel is expected to run lower than 60fps
+ */
+#define PRIMARY_VBLANK_MIN_US (18 * 1000)
+#define PRIMARY_VBLANK_MAX_US (20 * 1000)
+
static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
/**
@@ -320,21 +328,25 @@
/* calculate for 640x480 60 fps resolution by default */
if (!rsc->cmd_config.fps)
rsc->cmd_config.fps = DEFAULT_PANEL_FPS;
- if (!rsc->cmd_config.jitter)
- rsc->cmd_config.jitter = DEFAULT_PANEL_JITTER;
+ if (!rsc->cmd_config.jitter_numer)
+ rsc->cmd_config.jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
+ if (!rsc->cmd_config.jitter_denom)
+ rsc->cmd_config.jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR;
if (!rsc->cmd_config.vtotal)
rsc->cmd_config.vtotal = DEFAULT_PANEL_VTOTAL;
if (!rsc->cmd_config.prefill_lines)
rsc->cmd_config.prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
- pr_debug("frame fps:%d jitter:%d vtotal:%d prefill lines:%d\n",
- rsc->cmd_config.fps, rsc->cmd_config.jitter,
- rsc->cmd_config.vtotal, rsc->cmd_config.prefill_lines);
+ pr_debug("frame fps:%d jitter_numer:%d jitter_denom:%d vtotal:%d prefill lines:%d\n",
+ rsc->cmd_config.fps, rsc->cmd_config.jitter_numer,
+ rsc->cmd_config.jitter_denom, rsc->cmd_config.vtotal,
+ rsc->cmd_config.prefill_lines);
/* 1 nano second */
frame_time_ns = TICKS_IN_NANO_SECOND;
frame_time_ns = div_u64(frame_time_ns, rsc->cmd_config.fps);
- frame_jitter = frame_time_ns * rsc->cmd_config.jitter;
+ frame_jitter = frame_time_ns * rsc->cmd_config.jitter_numer;
+ frame_jitter = div_u64(frame_jitter, rsc->cmd_config.jitter_denom);
/* convert it to percentage */
frame_jitter = div_u64(frame_jitter, 100);
@@ -477,8 +489,7 @@
/* wait for vsync for vid to cmd state switch and config update */
if (!rc && (rsc->current_state == SDE_RSC_VID_STATE ||
rsc->current_state == SDE_RSC_CMD_STATE))
- drm_wait_one_vblank(rsc->master_drm,
- rsc->primary_client->crtc_id);
+ usleep_range(PRIMARY_VBLANK_MIN_US, PRIMARY_VBLANK_MAX_US);
end:
return rc;
}
@@ -502,8 +513,7 @@
/* wait for vsync for cmd to clk state switch */
if (!rc && rsc->primary_client &&
(rsc->current_state == SDE_RSC_CMD_STATE))
- drm_wait_one_vblank(rsc->master_drm,
- rsc->primary_client->crtc_id);
+ usleep_range(PRIMARY_VBLANK_MIN_US, PRIMARY_VBLANK_MAX_US);
end:
return rc;
}
@@ -532,8 +542,7 @@
/* wait for vsync for cmd to vid state switch */
if (!rc && rsc->primary_client &&
(rsc->current_state == SDE_RSC_CMD_STATE))
- drm_wait_one_vblank(rsc->master_drm,
- rsc->primary_client->crtc_id);
+ usleep_range(PRIMARY_VBLANK_MIN_US, PRIMARY_VBLANK_MAX_US);
end:
return rc;
@@ -749,8 +758,9 @@
rsc->timer_config.rsc_time_slot_0_ns);
seq_printf(s, "rsc time slot 1(ns):%d\n",
rsc->timer_config.rsc_time_slot_1_ns);
- seq_printf(s, "frame fps:%d jitter:%d vtotal:%d prefill lines:%d\n",
- rsc->cmd_config.fps, rsc->cmd_config.jitter,
+ seq_printf(s, "frame fps:%d jitter_numer:%d jitter_denom:%d vtotal:%d prefill lines:%d\n",
+ rsc->cmd_config.fps, rsc->cmd_config.jitter_numer,
+ rsc->cmd_config.jitter_denom,
rsc->cmd_config.vtotal, rsc->cmd_config.prefill_lines);
seq_puts(s, "\n");
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 87a350e..26a3154 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -18,12 +18,14 @@
#include <linux/delay.h>
#include "sde_rsc_priv.h"
+#include "sde_dbg.h"
/* display rsc offset */
#define SDE_RSCC_PDC_SEQ_START_ADDR_REG_OFFSET_DRV0 0x020
#define SDE_RSCC_PDC_MATCH_VALUE_LO_REG_OFFSET_DRV0 0x024
#define SDE_RSCC_PDC_MATCH_VALUE_HI_REG_OFFSET_DRV0 0x028
#define SDE_RSCC_PDC_SLAVE_ID_DRV0 0x02c
+#define SDE_RSCC_SEQ_PROGRAM_COUNTER 0x408
#define SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0 0x410
#define SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0 0x414
#define SDE_RSCC_SEQ_MEM_0_DRV0 0x600
@@ -299,6 +301,7 @@
{
int rc = -EBUSY;
int count, reg;
+ unsigned long power_status;
rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_RESTORE);
@@ -335,9 +338,14 @@
/* make sure that mode-2 exit before wait*/
wmb();
- /* check for sequence running status before exiting */
+ /* this wait is required to make sure that gdsc is powered on */
for (count = MAX_CHECK_LOOPS; count > 0; count--) {
- if (regulator_is_enabled(rsc->fs)) {
+ power_status = dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_PWR_CTRL, rsc->debug_mode);
+ if (!test_bit(POWER_CTRL_BIT_12, &power_status)) {
+ reg = dss_reg_r(&rsc->drv_io,
+ SDE_RSCC_SEQ_PROGRAM_COUNTER, rsc->debug_mode);
+ SDE_EVT32(count, reg, power_status);
rc = 0;
break;
}
@@ -415,7 +423,7 @@
rc = 0;
break;
}
- usleep_range(1, 2);
+ usleep_range(10, 100);
}
if (rc) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 1483dae..6f66b73 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -81,8 +81,10 @@
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
- if (ret != 0)
+ if (ret != 0) {
+ kfree(bo);
return ret;
+ }
bo->dumb = false;
virtio_gpu_init_ttm_placement(bo, pinned);
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 6f465aa..6426363 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2342,6 +2342,11 @@
return ret;
}
+ /* Clear the busy_data stats - we're starting over from scratch */
+ adreno_dev->busy_data.gpu_busy = 0;
+ adreno_dev->busy_data.vbif_ram_cycles = 0;
+ adreno_dev->busy_data.vbif_starved_ram = 0;
+
/* Set the page table back to the default page table */
adreno_ringbuffer_set_global(adreno_dev, 0);
kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 13c36e6..742da91 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -55,7 +55,7 @@
{ adreno_is_a530, a530_vbif },
{ adreno_is_a512, a540_vbif },
{ adreno_is_a510, a530_vbif },
- { adreno_is_a508, a540_vbif },
+ { adreno_is_a508, a530_vbif },
{ adreno_is_a505, a530_vbif },
{ adreno_is_a506, a530_vbif },
};
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 43302a0..4036530 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -547,13 +547,42 @@
KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
"smmu_info");
}
+
+static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+
+ kgsl_free_global(device, &iommu->smmu_info);
+}
+
#else
static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
return -ENODEV;
}
+
+static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev)
+{
+}
#endif
+static void a5xx_preemption_close(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_preemption *preempt = &adreno_dev->preempt;
+ struct adreno_ringbuffer *rb;
+ unsigned int i;
+
+ del_timer(&preempt->timer);
+ kgsl_free_global(device, &preempt->counters);
+ a5xx_preemption_iommu_close(adreno_dev);
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ kgsl_free_global(device, &rb->preemption_desc);
+ }
+}
+
int a5xx_preemption_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -578,7 +607,7 @@
A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
"preemption_counters");
if (ret)
- return ret;
+ goto err;
addr = preempt->counters.gpuaddr;
@@ -586,10 +615,16 @@
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
ret = a5xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
if (ret)
- return ret;
+ goto err;
addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
}
- return a5xx_preemption_iommu_init(adreno_dev);
+ ret = a5xx_preemption_iommu_init(adreno_dev);
+
+err:
+ if (ret)
+ a5xx_preemption_close(device);
+
+ return ret;
}
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 2e5913d..78b56bc 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -360,8 +360,8 @@
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
- 0x04E0, 0x04F4, 0X04F6, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400,
- 0xF800, 0xF807,
+ 0x04E0, 0x04F4, 0X04F8, 0x0529, 0x0531, 0x0533, 0x0540, 0x0555,
+ 0xF400, 0xF400, 0xF800, 0xF807,
/* CP */
0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
0x0B00, 0x0B12, 0x0B15, 0X0B1C, 0X0B1E, 0x0B28, 0x0B78, 0x0B7F,
@@ -422,8 +422,8 @@
* is the stop offset (inclusive)
*/
static const unsigned int a5xx_pre_crashdumper_registers[] = {
- /* RBBM: RBBM_STATUS */
- 0x04F5, 0x04F5,
+ /* RBBM: RBBM_STATUS - RBBM_STATUS3 */
+ 0x04F5, 0x04F7, 0x0530, 0x0530,
/* CP: CP_STATUS_1 */
0x0B1D, 0x0B1D,
};
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 422c434..0a45d27 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -983,6 +983,13 @@
spin_unlock(&dispatcher->plist_lock);
}
+static inline void _decrement_submit_now(struct kgsl_device *device)
+{
+ spin_lock(&device->submit_lock);
+ device->submit_now--;
+ spin_unlock(&device->submit_lock);
+}
+
/**
* adreno_dispatcher_issuecmds() - Issue commmands from pending contexts
* @adreno_dev: Pointer to the adreno device struct
@@ -992,15 +999,29 @@
static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
{
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ spin_lock(&device->submit_lock);
+ /* If state transition to SLUMBER, schedule the work for later */
+ if (device->slumber == true) {
+ spin_unlock(&device->submit_lock);
+ goto done;
+ }
+ device->submit_now++;
+ spin_unlock(&device->submit_lock);
/* If the dispatcher is busy then schedule the work for later */
if (!mutex_trylock(&dispatcher->mutex)) {
- adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
- return;
+ _decrement_submit_now(device);
+ goto done;
}
_adreno_dispatcher_issuecmds(adreno_dev);
mutex_unlock(&dispatcher->mutex);
+ _decrement_submit_now(device);
+ return;
+done:
+ adreno_dispatcher_schedule(device);
}
/**
@@ -2452,7 +2473,7 @@
mutex_unlock(&device->mutex);
}
-static void adreno_dispatcher_work(struct work_struct *work)
+static void adreno_dispatcher_work(struct kthread_work *work)
{
struct adreno_dispatcher *dispatcher =
container_of(work, struct adreno_dispatcher, work);
@@ -2512,7 +2533,7 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- kgsl_schedule_work(&dispatcher->work);
+ kthread_queue_work(&kgsl_driver.worker, &dispatcher->work);
}
/**
@@ -2808,7 +2829,7 @@
setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
(unsigned long) adreno_dev);
- INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+ kthread_init_work(&dispatcher->work, adreno_dispatcher_work);
init_completion(&dispatcher->idle_gate);
complete_all(&dispatcher->idle_gate);
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 72545db..48f0cdc 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -91,7 +91,7 @@
atomic_t fault;
struct plist_head pending;
spinlock_t plist_lock;
- struct work_struct work;
+ struct kthread_work work;
struct kobject kobj;
struct completion idle_gate;
unsigned int disp_preempt_fair_sched;
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index c6df7bb..0882447 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -548,6 +548,8 @@
mutex_unlock(&device->mutex);
+ debugfs_remove_recursive(drawctxt->debug_root);
+
/* wake threads waiting to submit commands from this context */
wake_up_all(&drawctxt->waiting);
wake_up_all(&drawctxt->wq);
@@ -569,7 +571,6 @@
gpudev->preemption_context_destroy(context);
drawctxt = ADRENO_CONTEXT(context);
- debugfs_remove_recursive(drawctxt->debug_root);
kfree(drawctxt);
}
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 129e99c..f88132f 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4645,6 +4645,7 @@
device->id, device->reg_phys, device->reg_len);
rwlock_init(&device->context_lock);
+ spin_lock_init(&device->submit_lock);
setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
@@ -4788,6 +4789,8 @@
static int __init kgsl_core_init(void)
{
int result = 0;
+ struct sched_param param = { .sched_priority = 2 };
+
/* alloc major and minor device numbers */
result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
"kgsl");
@@ -4854,6 +4857,18 @@
kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ kthread_init_worker(&kgsl_driver.worker);
+
+ kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
+ &kgsl_driver.worker, "kgsl_worker_thread");
+
+ if (IS_ERR(kgsl_driver.worker_thread)) {
+ pr_err("unable to start kgsl thread\n");
+ goto err;
+ }
+
+ sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, ¶m);
+
kgsl_events_init();
result = kgsl_drawobjs_cache_init();
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index c54e51e..f80da79 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -25,6 +25,7 @@
#include <linux/regulator/consumer.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/kthread.h>
#include <asm/cacheflush.h>
/*
@@ -151,6 +152,8 @@
unsigned int full_cache_threshold;
struct workqueue_struct *workqueue;
struct workqueue_struct *mem_workqueue;
+ struct kthread_worker worker;
+ struct task_struct *worker_thread;
};
extern struct kgsl_driver kgsl_driver;
@@ -300,7 +303,7 @@
void *priv;
struct list_head node;
unsigned int created;
- struct work_struct work;
+ struct kthread_work work;
int result;
struct kgsl_event_group *group;
};
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index b621ada..4aaea80 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -269,6 +269,11 @@
struct kgsl_pwrctrl pwrctrl;
int open_count;
+ /* For GPU inline submission */
+ uint32_t submit_now;
+ spinlock_t submit_lock;
+ bool slumber;
+
struct mutex mutex;
uint32_t state;
uint32_t requested_state;
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index d042f05..759a966 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -32,7 +32,7 @@
{
list_del(&event->node);
event->result = result;
- queue_work(device->events_wq, &event->work);
+ kthread_queue_work(&kgsl_driver.worker, &event->work);
}
/**
@@ -42,7 +42,7 @@
* Each event callback has its own work struct and is run on a event specific
* workqeuue. This is the worker that queues up the event callback function.
*/
-static void _kgsl_event_worker(struct work_struct *work)
+static void _kgsl_event_worker(struct kthread_work *work)
{
struct kgsl_event *event = container_of(work, struct kgsl_event, work);
int id = KGSL_CONTEXT_ID(event->context);
@@ -286,7 +286,7 @@
event->created = jiffies;
event->group = group;
- INIT_WORK(&event->work, _kgsl_event_worker);
+ kthread_init_work(&event->work, _kgsl_event_worker);
trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func);
@@ -301,7 +301,7 @@
if (timestamp_cmp(retired, timestamp) >= 0) {
event->result = KGSL_EVENT_RETIRED;
- queue_work(device->events_wq, &event->work);
+ kthread_queue_work(&kgsl_driver.worker, &event->work);
spin_unlock(&group->lock);
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 5c53a05c..6710cd2 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1049,6 +1049,8 @@
if (on) {
switch (flag) {
case KGSL_PWRFLAGS_CLK_ON:
+ /* make sure pwrrail is ON before enabling clocks */
+ kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON,
KGSL_STATE_ACTIVE);
break;
@@ -1854,7 +1856,12 @@
if (kgsl_gmu_isenabled(device))
return 0;
- if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags))
+ /*
+ * Disabling the regulator means also disabling dependent clocks.
+ * Hence don't disable it if force clock ON is set.
+ */
+ if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags) ||
+ test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
return 0;
if (state == KGSL_PWRFLAGS_OFF) {
@@ -2362,9 +2369,24 @@
|| device->state == KGSL_STATE_NAP) {
if (!atomic_read(&device->active_cnt)) {
+ spin_lock(&device->submit_lock);
+ if (device->submit_now) {
+ spin_unlock(&device->submit_lock);
+ goto done;
+ }
+ /* Don't allow GPU inline submission in SLUMBER */
+ if (requested_state == KGSL_STATE_SLUMBER)
+ device->slumber = true;
+ spin_unlock(&device->submit_lock);
+
ret = kgsl_pwrctrl_change_state(device,
device->requested_state);
if (ret == -EBUSY) {
+ if (requested_state == KGSL_STATE_SLUMBER) {
+ spin_lock(&device->submit_lock);
+ device->slumber = false;
+ spin_unlock(&device->submit_lock);
+ }
/*
* If the GPU is currently busy, restore
* the requested state and reschedule
@@ -2375,7 +2397,7 @@
kgsl_schedule_work(&device->idle_check_ws);
}
}
-
+done:
if (!ret)
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
@@ -2835,6 +2857,13 @@
trace_kgsl_pwr_set_state(device, state);
device->state = state;
device->requested_state = KGSL_STATE_NONE;
+
+ spin_lock(&device->submit_lock);
+ if (state == KGSL_STATE_SLUMBER || state == KGSL_STATE_SUSPEND)
+ device->slumber = true;
+ else
+ device->slumber = false;
+ spin_unlock(&device->submit_lock);
}
static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 7636a42..6fb81ee 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -530,7 +530,8 @@
struct kgsl_device *device = dev_get_drvdata(dev);
struct kgsl_pwrctrl *pwr;
struct kgsl_pwrlevel *pwr_level;
- int level, i;
+ int level;
+ unsigned int i;
unsigned long cur_freq, rec_freq;
struct dev_pm_opp *opp;
@@ -574,7 +575,12 @@
/* If the governor recommends a new frequency, update it here */
if (rec_freq != cur_freq) {
level = pwr->max_pwrlevel;
- for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
+ /*
+ * Array index of pwrlevels[] should be within the permitted
+ * power levels, i.e., from max_pwrlevel to min_pwrlevel.
+ */
+ for (i = pwr->min_pwrlevel; (i >= pwr->max_pwrlevel
+ && i <= pwr->min_pwrlevel); i--)
if (rec_freq <= pwr->pwrlevels[i].gpu_freq) {
if (pwr->thermal_cycle == CYCLE_ACTIVE)
level = _thermal_adjust(pwr, i);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index f0f202b..5061f6a 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -741,6 +741,8 @@
*/
memdesc->pages = kgsl_malloc(len_alloc * sizeof(struct page *));
+ memdesc->page_count = 0;
+ memdesc->size = 0;
if (memdesc->pages == NULL) {
ret = -ENOMEM;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d6941ea..202d867 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -171,8 +171,11 @@
if (!used)
kfree(buf);
- if (!ret)
+ if (!ret) {
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
+ }
return ret;
}
@@ -244,6 +247,9 @@
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
+
dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 9e6f443..3234928 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -800,6 +800,8 @@
mutex_unlock(&drvdata->mem_lock);
return ret;
}
+ coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
} else {
drvdata->usbch = usb_qdss_open("qdss", drvdata,
usb_notifier);
@@ -891,6 +893,7 @@
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -911,6 +914,11 @@
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+ }
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC-ETR disabled\n");
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 98fcd01..b97ebb8 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -361,6 +361,9 @@
drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+
tmc_etr_bam_disable(drvdata);
usb_qdss_close(drvdata->usbch);
} else if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB])) {
@@ -381,6 +384,9 @@
drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+
drvdata->usbch = usb_qdss_open("qdss", drvdata,
usb_notifier);
if (IS_ERR(drvdata->usbch)) {
@@ -503,6 +509,7 @@
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
+ struct coresight_cti_data *ctidata;
pdata = of_get_coresight_platform_data(dev, np);
if (IS_ERR(pdata)) {
@@ -554,6 +561,19 @@
pm_runtime_put(&adev->dev);
+ ctidata = of_get_coresight_cti_data(dev, adev->dev.of_node);
+ if (IS_ERR(ctidata)) {
+ dev_err(dev, "invalid cti data\n");
+ } else if (ctidata && ctidata->nr_ctis == 2) {
+ drvdata->cti_flush = coresight_cti_get(ctidata->names[0]);
+ if (IS_ERR(drvdata->cti_flush))
+ dev_err(dev, "failed to get flush cti\n");
+
+ drvdata->cti_reset = coresight_cti_get(ctidata->names[1]);
+ if (IS_ERR(drvdata->cti_reset))
+ dev_err(dev, "failed to get reset cti\n");
+ }
+
desc.pdata = pdata;
desc.dev = dev;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index a9de0e8..6643adc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -25,6 +25,7 @@
#include <linux/amba/bus.h>
#include <linux/usb_bam.h>
#include <linux/usb/usb_qdss.h>
+#include <linux/coresight-cti.h>
#define TMC_RSZ 0x004
#define TMC_STS 0x00c
@@ -184,7 +185,8 @@
struct tmc_etr_bam_data *bamdata;
bool enable_to_bam;
bool sticky_enable;
-
+ struct coresight_cti *cti_flush;
+ struct coresight_cti *cti_reset;
};
/* Generic functions */
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 2492f90..81bbd78 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,7 @@
#include <linux/coresight.h>
#include <linux/cpumask.h>
#include <asm/smp_plat.h>
-
+#include <linux/coresight-cti.h>
static int of_dev_node_match(struct device *dev, void *data)
{
@@ -196,3 +196,45 @@
return pdata;
}
EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
+
+struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node)
+{
+ int i, ret;
+ uint32_t ctis_len;
+ struct device_node *child_node;
+ struct coresight_cti_data *ctidata;
+
+ ctidata = devm_kzalloc(dev, sizeof(*ctidata), GFP_KERNEL);
+ if (!ctidata)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_get_property(node, "coresight-ctis", &ctis_len))
+ ctidata->nr_ctis = ctis_len/sizeof(uint32_t);
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (ctidata->nr_ctis) {
+ ctidata->names = devm_kzalloc(dev, ctidata->nr_ctis *
+ sizeof(*ctidata->names),
+ GFP_KERNEL);
+ if (!ctidata->names)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ctidata->nr_ctis; i++) {
+ child_node = of_parse_phandle(node, "coresight-ctis",
+ i);
+ if (!child_node)
+ return ERR_PTR(-EINVAL);
+
+ ret = of_property_read_string(child_node,
+ "coresight-name",
+ &ctidata->names[i]);
+ of_node_put(child_node);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ }
+ return ctidata;
+}
+EXPORT_SYMBOL(of_get_coresight_cti_data);
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 6aa2e36..946e0ba 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -85,6 +85,7 @@
int cur_rd;
struct device *wrapper_dev;
void *ipcl;
+ int clk_fld_idx;
};
struct geni_i2c_err_log {
@@ -109,15 +110,55 @@
[GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"},
};
-static inline void qcom_geni_i2c_conf(void __iomem *base, int dfs, int div)
+struct geni_i2c_clk_fld {
+ u32 clk_freq_out;
+ u8 clk_div;
+ u8 t_high;
+ u8 t_low;
+ u8 t_cycle;
+};
+
+static struct geni_i2c_clk_fld geni_i2c_clk_map[] = {
+ {KHz(100), 7, 10, 11, 26},
+ {KHz(400), 2, 5, 12, 24},
+ {KHz(1000), 1, 3, 9, 18},
+};
+
+static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
{
- geni_write_reg(dfs, base, SE_GENI_CLK_SEL);
- geni_write_reg((div << 4) | 1, base, GENI_SER_M_CLK_CFG);
- geni_write_reg(((5 << 20) | (0xC << 10) | 0x18),
- base, SE_I2C_SCL_COUNTERS);
+ int i;
+ int ret = 0;
+ bool clk_map_present = false;
+ struct geni_i2c_clk_fld *itr = geni_i2c_clk_map;
+
+ for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) {
+ if (itr->clk_freq_out == gi2c->i2c_rsc.clk_freq_out) {
+ clk_map_present = true;
+ break;
+ }
+ }
+
+ if (clk_map_present)
+ gi2c->clk_fld_idx = i;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static inline void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c, int dfs)
+{
+ struct geni_i2c_clk_fld *itr = geni_i2c_clk_map + gi2c->clk_fld_idx;
+
+ geni_write_reg(dfs, gi2c->base, SE_GENI_CLK_SEL);
+
+ geni_write_reg((itr->clk_div << 4) | 1, gi2c->base, GENI_SER_M_CLK_CFG);
+ geni_write_reg(((itr->t_high << 20) | (itr->t_low << 10) |
+ itr->t_cycle), gi2c->base, SE_I2C_SCL_COUNTERS);
+
/*
- * Ensure Clk config completes before return.
- */
+ * Ensure Clk config completes before return.
+ */
mb();
}
@@ -283,7 +324,7 @@
pm_runtime_set_suspended(gi2c->dev);
return ret;
}
- qcom_geni_i2c_conf(gi2c->base, 0, 2);
+ qcom_geni_i2c_conf(gi2c, 0);
dev_dbg(gi2c->dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
num, msgs[0].len, msgs[0].flags);
for (i = 0; i < num; i++) {
@@ -485,12 +526,26 @@
return ret;
}
+ if (of_property_read_u32(pdev->dev.of_node, "qcom,clk-freq-out",
+ &gi2c->i2c_rsc.clk_freq_out)) {
+ dev_info(&pdev->dev,
+ "Bus frequency not specified, default to 400KHz.\n");
+ gi2c->i2c_rsc.clk_freq_out = KHz(400);
+ }
+
gi2c->irq = platform_get_irq(pdev, 0);
if (gi2c->irq < 0) {
dev_err(gi2c->dev, "IRQ error for i2c-geni\n");
return gi2c->irq;
}
+ ret = geni_i2c_clk_map_idx(gi2c);
+ if (ret) {
+ dev_err(gi2c->dev, "Invalid clk frequency %d KHz: %d\n",
+ gi2c->i2c_rsc.clk_freq_out, ret);
+ return ret;
+ }
+
gi2c->adap.algo = &geni_i2c_algo;
init_completion(&gi2c->xfer);
platform_set_drvdata(pdev, gi2c);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index cb3f515a..01e3a37 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2342,6 +2342,10 @@
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ if (cmd.port_num < rdma_start_port(ib_dev) ||
+ cmd.port_num > rdma_end_port(ib_dev))
+ return -EINVAL;
+
INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
@@ -2882,6 +2886,10 @@
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ if (cmd.attr.port_num < rdma_start_port(ib_dev) ||
+ cmd.attr.port_num > rdma_end_port(ib_dev))
+ return -EINVAL;
+
uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
if (!uobj)
return -ENOMEM;
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index c9ea89d..0dea590 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -1362,10 +1362,12 @@
ret = kstrtou32(buf, 10, &status);
if (ret) {
pr_err("hbtp: ret error: %zd\n", ret);
+ mutex_unlock(&hbtp->mutex);
return ret;
}
if (!hbtp || !hbtp->input_dev) {
pr_err("hbtp: hbtp or hbtp->input_dev not ready!\n");
+ mutex_unlock(&hbtp->mutex);
return ret;
}
if (status) {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1d5c514..142357e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -238,6 +238,7 @@
#define ARM_SMMU_CB_FSYNR0 0x68
#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
+#define ARM_SMMU_CB_S1_TLBIALL 0x618
#define ARM_SMMU_CB_S1_TLBIVAL 0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
@@ -249,6 +250,7 @@
#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
+#define SCTLR_HUPCF (1 << 8)
#define SCTLR_CFIE (1 << 6)
#define SCTLR_CFRE (1 << 5)
#define SCTLR_E (1 << 4)
@@ -415,6 +417,7 @@
#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
+#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
u32 options;
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -534,6 +537,7 @@
{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
+ { ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
{ 0, NULL},
};
@@ -991,12 +995,17 @@
struct arm_smmu_device *smmu = smmu_domain->smmu;
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
void __iomem *base;
+ bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
- if (stage1) {
+ if (stage1 && !use_tlbiall) {
base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
base + ARM_SMMU_CB_S1_TLBIASID);
arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
+ } else if (stage1 && use_tlbiall) {
+ base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
+ arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
} else {
base = ARM_SMMU_GR0(smmu);
writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
@@ -1013,8 +1022,9 @@
struct arm_smmu_device *smmu = smmu_domain->smmu;
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
void __iomem *reg;
+ bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
- if (stage1) {
+ if (stage1 && !use_tlbiall) {
reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
@@ -1033,6 +1043,10 @@
iova += granule >> 12;
} while (size -= granule);
}
+ } else if (stage1 && use_tlbiall) {
+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ reg += ARM_SMMU_CB_S1_TLBIALL;
+ writel_relaxed(0, reg);
} else if (smmu->version == ARM_SMMU_V2) {
reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
@@ -1440,6 +1454,11 @@
/* SCTLR */
reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
+ if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
+ reg &= ~SCTLR_CFCFG;
+ reg |= SCTLR_HUPCF;
+ }
+
if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
!(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
!stage1)
@@ -2567,19 +2586,23 @@
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
int ret = 0;
+ mutex_lock(&smmu_domain->init_mutex);
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
+ ret = 0;
+ break;
case DOMAIN_ATTR_PT_BASE_ADDR:
*((phys_addr_t *)data) =
smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
- return 0;
+ ret = 0;
+ break;
case DOMAIN_ATTR_CONTEXT_BANK:
/* context bank index isn't valid until we are attached */
- if (smmu_domain->smmu == NULL)
- return -ENODEV;
-
+ if (smmu_domain->smmu == NULL) {
+ ret = -ENODEV;
+ break;
+ }
*((unsigned int *) data) = smmu_domain->cfg.cbndx;
ret = 0;
break;
@@ -2587,9 +2610,10 @@
u64 val;
struct arm_smmu_device *smmu = smmu_domain->smmu;
/* not valid until we are attached */
- if (smmu == NULL)
- return -ENODEV;
-
+ if (smmu == NULL) {
+ ret = -ENODEV;
+ break;
+ }
val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
@@ -2600,8 +2624,10 @@
}
case DOMAIN_ATTR_CONTEXTIDR:
/* not valid until attached */
- if (smmu_domain->smmu == NULL)
- return -ENODEV;
+ if (smmu_domain->smmu == NULL) {
+ ret = -ENODEV;
+ break;
+ }
*((u32 *)data) = smmu_domain->cfg.procid;
ret = 0;
break;
@@ -2655,8 +2681,10 @@
ret = 0;
break;
case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
- if (!smmu_domain->smmu)
- return -ENODEV;
+ if (!smmu_domain->smmu) {
+ ret = -ENODEV;
+ break;
+ }
*((int *)data) = is_iommu_pt_coherent(smmu_domain);
ret = 0;
break;
@@ -2665,9 +2693,16 @@
& (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
ret = 0;
break;
+ case DOMAIN_ATTR_CB_STALL_DISABLE:
+ *((int *)data) = !!(smmu_domain->attributes
+ & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
+ ret = 0;
+ break;
default:
- return -ENODEV;
+ ret = -ENODEV;
+ break;
}
+ mutex_unlock(&smmu_domain->init_mutex);
return ret;
}
@@ -2842,6 +2877,12 @@
break;
}
+ case DOMAIN_ATTR_CB_STALL_DISABLE:
+ if (*((int *)data))
+ smmu_domain->attributes |=
+ 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
+ ret = 0;
+ break;
default:
ret = -ENODEV;
}
@@ -3298,14 +3339,23 @@
struct device *dev)
{
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
u32 i, idx;
int cb = -EINVAL;
bool dynamic;
- /* Dynamic domains must set cbndx through domain attribute */
+ /*
+ * Dynamic domains have already set cbndx through domain attribute.
+ * Verify that they picked a valid value.
+ */
dynamic = is_dynamic_domain(domain);
- if (dynamic)
- return INVALID_CBNDX;
+ if (dynamic) {
+ cb = smmu_domain->cfg.cbndx;
+ if (cb < smmu->num_context_banks)
+ return cb;
+ else
+ return -EINVAL;
+ }
mutex_lock(&smmu->stream_map_mutex);
for_each_cfg_sme(fwspec, i, idx) {
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index c98d8c2..56eff61b 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -71,6 +71,8 @@
return "DOMAIN_ATTR_FAST";
case DOMAIN_ATTR_EARLY_MAP:
return "DOMAIN_ATTR_EARLY_MAP";
+ case DOMAIN_ATTR_CB_STALL_DISABLE:
+ return "DOMAIN_ATTR_CB_STALL_DISABLE";
default:
return "Unknown attr!";
}
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index d2cb1e8..3989bc6 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -330,6 +330,16 @@
}
EXPORT_SYMBOL(mbox_controller_is_idle);
+
+void mbox_chan_debug(struct mbox_chan *chan)
+{
+ if (!chan || !chan->cl || !chan->mbox->debug)
+ return;
+
+ return chan->mbox->debug(chan);
+}
+EXPORT_SYMBOL(mbox_chan_debug);
+
/**
* mbox_request_channel - Request a mailbox channel.
* @cl: Identity of the client requesting the channel.
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index be91a65..a1e0908 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -11,12 +11,13 @@
*
*/
-#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -40,6 +41,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/rpmh.h>
+#define TCS_DRV_IPC_LOG_SIZE 2
+
#define MAX_CMDS_PER_TCS 16
#define MAX_TCS_PER_TYPE 3
#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
@@ -140,8 +143,35 @@
u64 tcs_last_recv_ts[MAX_POOL_SIZE];
atomic_t tcs_send_count[MAX_POOL_SIZE];
atomic_t tcs_irq_count[MAX_POOL_SIZE];
+ void *ipc_log_ctx;
};
+/* Log to IPC and Ftrace */
+#define log_send_msg(drv, m, n, i, a, d, c, t) do { \
+ trace_rpmh_send_msg(drv->name, m, n, i, a, d, c, t); \
+ ipc_log_string(drv->ipc_log_ctx, \
+ "send msg: m=%d n=%d msgid=0x%x addr=0x%x data=0x%x cmpl=%d trigger=%d", \
+ m, n, i, a, d, c, t); \
+ } while (0)
+
+#define log_rpmh_notify_irq(drv, m, a, e) do { \
+ trace_rpmh_notify_irq(drv->name, m, a, e); \
+ ipc_log_string(drv->ipc_log_ctx, \
+ "irq response: m=%d addr=0x%x err=%d", m, a, e); \
+ } while (0)
+
+#define log_rpmh_control_msg(drv, d) do { \
+ trace_rpmh_control_msg(drv->name, d); \
+ ipc_log_string(drv->ipc_log_ctx, "ctrlr msg: data=0x%x", d); \
+ } while (0)
+
+#define log_rpmh_notify(drv, m, a, e) do { \
+ trace_rpmh_notify(drv->name, m, a, e); \
+ ipc_log_string(drv->ipc_log_ctx, \
+ "tx done: m=%d addr=0x%x err=%d", m, a, e); \
+ } while (0)
+
+
static int tcs_response_pool_init(struct tcs_drv *drv)
{
struct tcs_response_pool *pool;
@@ -223,7 +253,6 @@
break;
}
pos++;
- udelay(1);
} while (1);
spin_unlock_irqrestore(&pool->lock, flags);
@@ -241,11 +270,11 @@
return;
msg = resp->msg;
- pr_debug("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
+ pr_warn("Response object [idx=%d for-tcs=%d in-use=%d]\n",
resp->idx, resp->m, resp->in_use);
- pr_debug("Msg: state=%d\n", msg->state);
+ pr_warn("Msg: state=%d\n", msg->state);
for (i = 0; i < msg->num_payload; i++)
- pr_debug("addr=0x%x data=0x%x complete=0x%x\n",
+ pr_warn("addr=0x%x data=0x%x complete=0x%x\n",
msg->payload[i].addr,
msg->payload[i].data,
msg->payload[i].complete);
@@ -425,8 +454,10 @@
sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, i);
if ((!(sts & CMD_STATUS_ISSUED)) ||
((resp->msg->is_complete || cmd->complete) &&
- (!(sts & CMD_STATUS_COMPL))))
+ (!(sts & CMD_STATUS_COMPL)))) {
resp->err = -EIO;
+ break;
+ }
}
/* Check for response if this was a read request */
@@ -437,7 +468,7 @@
mbox_chan_received_data(resp->chan, resp->msg);
}
- trace_rpmh_notify_irq(drv->name, m, resp->msg->payload[0].addr,
+ log_rpmh_notify_irq(drv, m, resp->msg->payload[0].addr,
resp->err);
/* Clear the AMC mode for non-ACTIVE TCSes */
@@ -480,7 +511,7 @@
{
struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
- trace_rpmh_notify(drv->name, m, msg->payload[0].addr, err);
+ log_rpmh_notify(drv, m, msg->payload[0].addr, err);
mbox_chan_txdone(chan, err);
}
@@ -546,7 +577,7 @@
write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, msgid);
write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
- trace_rpmh_send_msg(drv->name, m, n + i, msgid, cmd->addr,
+ log_send_msg(drv, m, n + i, msgid, cmd->addr,
cmd->data, cmd->complete, trigger);
}
@@ -667,7 +698,8 @@
int n = 0;
/* For active requests find the first free AMC. */
- if (tcs->type == ACTIVE_TCS)
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE ||
+ msg->state == RPMH_AWAKE_STATE)
return find_free_tcs(tcs);
/* Find if we already have the msg in our TCS */
@@ -780,6 +812,10 @@
spin_lock_irqsave(&tcs->tcs_lock, flags);
for (i = 0; i < tcs->num_tcs; i++) {
m = i + tcs->tcs_offset;
+ if (!tcs_is_free(drv, m)) {
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+ return -EBUSY;
+ }
__tcs_buffer_invalidate(drv->reg_base, m);
}
/* Mark the TCS as free */
@@ -795,7 +831,7 @@
int n;
struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
void __iomem *base = drv->reg_base;
- u32 enable, addr, data, msgid;
+ u32 enable, addr, data, msgid, sts, irq_sts;
if (!tcs || tcs_is_free(drv, m))
return;
@@ -804,15 +840,24 @@
if (!enable)
return;
- pr_debug("TCS-%d contents:\n", m);
+ pr_warn("RSC:%s\n", drv->name);
+
+ sts = read_tcs_reg(base, TCS_DRV_STATUS, m, 0);
+ data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
+ irq_sts = read_tcs_reg(base, TCS_DRV_IRQ_STATUS, 0, 0);
+ pr_warn("TCS=%d [ctrlr-sts:%s amc-mode:0x%x irq-sts:%s]\n",
+ m, sts ? "IDLE" : "BUSY", data,
+ (irq_sts & BIT(m)) ? "COMPLETED" : "PENDING");
+
for (n = 0; n < tcs->ncpt; n++) {
if (!(enable & BIT(n)))
continue;
addr = read_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n);
data = read_tcs_reg(base, TCS_DRV_CMD_DATA, m, n);
msgid = read_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n);
- pr_debug("\tn=%d addr=0x%x data=0x%x hdr=0x%x\n",
- n, addr, data, msgid);
+ sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, n);
+ pr_warn("\tCMD=%d [addr=0x%x data=0x%x hdr=0x%x sts=0x%x]\n",
+ n, addr, data, msgid, sts);
}
}
@@ -824,7 +869,7 @@
for (i = 0; i < drv->num_tcs; i++) {
if (!atomic_read(&drv->tcs_in_use[i]))
continue;
- pr_debug("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
+ pr_warn("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
curr, i,
atomic_read(&drv->tcs_send_count[i]),
drv->tcs_last_sent_ts[i],
@@ -835,6 +880,13 @@
}
}
+static void chan_debug(struct mbox_chan *chan)
+{
+ struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
+
+ dump_tcs_stats(drv);
+}
+
/**
* chan_tcs_write: Validate the incoming message and write to the
* appropriate TCS block.
@@ -859,7 +911,8 @@
goto tx_fail;
}
- if (!msg->payload || msg->num_payload > MAX_RPMH_PAYLOAD) {
+ if (!msg->payload || !msg->num_payload ||
+ msg->num_payload > MAX_RPMH_PAYLOAD) {
dev_err(dev, "Payload error\n");
ret = -EINVAL;
goto tx_fail;
@@ -889,8 +942,11 @@
* Since we are re-purposing the wake TCS, invalidate previous
* contents to avoid confusion.
*/
- if (msg->state == RPMH_AWAKE_STATE)
- tcs_mbox_invalidate(chan);
+ if (msg->state == RPMH_AWAKE_STATE) {
+ ret = tcs_mbox_invalidate(chan);
+ if (ret)
+ goto tx_fail;
+ }
/* Post the message to the TCS and trigger */
ret = tcs_mbox_write(chan, msg, true);
@@ -902,15 +958,16 @@
drv, msg, chan, TCS_M_INIT, ret);
dev_err(dev, "Error sending RPMH message %d\n", ret);
- if (resp)
+ if (!IS_ERR(resp))
send_tcs_response(resp);
+ else
+ dev_err(dev, "No response object %ld\n", PTR_ERR(resp));
ret = 0;
}
/* If we were just busy waiting for TCS, dump the state and return */
if (ret == -EBUSY) {
pr_info_ratelimited("TCS Busy, retrying RPMH message send\n");
- dump_tcs_stats(drv);
ret = -EAGAIN;
}
@@ -926,7 +983,7 @@
for (i = 0; i < msg->num_payload; i++) {
/* Only data is write capable */
writel_relaxed(cpu_to_le32(msg->payload[i].data), addr);
- trace_rpmh_control_msg(drv->name, msg->payload[i].data);
+ log_rpmh_control_msg(drv, msg->payload[i].data);
addr += TCS_HIDDEN_CMD_SHIFT;
}
}
@@ -971,7 +1028,8 @@
goto tx_done;
}
- if (msg->num_payload > MAX_RPMH_PAYLOAD) {
+ if (!msg->payload || (!msg->num_payload && !msg->invalidate) ||
+ msg->num_payload > MAX_RPMH_PAYLOAD) {
dev_err(dev, "Payload error\n");
goto tx_done;
}
@@ -1111,7 +1169,8 @@
if (tcs->num_tcs > MAX_TCS_PER_TYPE)
return -EINVAL;
- if (st > max_tcs)
+ if (st + tcs->num_tcs > max_tcs &&
+ st + tcs->num_tcs >= sizeof(tcs->tcs_mask))
return -EINVAL;
tcs->tcs_mask = ((1 << tcs->num_tcs) - 1) << st;
@@ -1133,10 +1192,12 @@
for (j = 0; j < i; j++) {
ret = of_parse_phandle_with_args(np, "mboxes",
"#mbox-cells", j, &p);
- if (!ret && p.np == pdev->dev.of_node)
+ of_node_put(p.np);
+ if (!ret && p.np == pdev->dev.of_node) {
+ num_chans++;
break;
+ }
}
- num_chans++;
}
if (!num_chans) {
@@ -1162,6 +1223,7 @@
drv->mbox.txdone_irq = true;
drv->mbox.of_xlate = of_tcs_mbox_xlate;
drv->mbox.is_idle = tcs_drv_is_idle;
+ drv->mbox.debug = chan_debug;
drv->num_tcs = st;
drv->pdev = pdev;
INIT_LIST_HEAD(&drv->response_pending);
@@ -1182,7 +1244,7 @@
ret = devm_request_irq(&pdev->dev, irq, tcs_irq_handler,
IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
- "tcs_irq", drv);
+ drv->name, drv);
if (ret)
return ret;
@@ -1193,6 +1255,9 @@
for (i = 0; i < ARRAY_SIZE(drv->tcs_in_use); i++)
atomic_set(&drv->tcs_in_use[i], 0);
+ drv->ipc_log_ctx = ipc_log_context_create(TCS_DRV_IPC_LOG_SIZE,
+ drv->name, 0);
+
ret = mbox_controller_register(&drv->mbox);
if (ret)
return ret;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eddd360..8ebf1b9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1861,7 +1861,7 @@
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
- sb->super_offset = rdev->sb_start;
+ sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
@@ -2270,7 +2270,7 @@
/* Check if any mddev parameters have changed */
if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
(mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
- (mddev->layout != le64_to_cpu(sb->layout)) ||
+ (mddev->layout != le32_to_cpu(sb->layout)) ||
(mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
(mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
return true;
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index c68239e..98b067b 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -468,7 +468,7 @@
while ((entity_err = media_entity_graph_walk_next(graph))) {
/* don't let the stream_count go negative */
- if (entity->stream_count > 0) {
+ if (entity_err->stream_count > 0) {
entity_err->stream_count--;
if (entity_err->stream_count == 0)
entity_err->pipe = NULL;
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 2dac48f..dca0592 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -355,12 +355,43 @@
/* ----------------------------------------------------------- */
+/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+{
+ u8 subaddr = 0x7, dmdregval;
+ u8 data[2];
+ int ret;
+ struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
+ .buf = &subaddr, .len = 1},
+ {.addr = 0x08,
+ .flags = I2C_M_RD,
+ .buf = &dmdregval, .len = 1}
+ };
+ struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
+ .buf = data, .len = 2} };
+
+ ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
+ if ((ret == 2) && (dmdregval & 0x2)) {
+ pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+ dev->name);
+
+ data[0] = subaddr;
+ data[1] = (dmdregval & ~0x2);
+ if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
+ pr_err("%s: EEPROM i2c gate open failure\n",
+ dev->name);
+ }
+}
+
static int
saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
{
unsigned char buf;
int i,err;
+ if (dev->board == SAA7134_BOARD_MD7134)
+ saa7134_i2c_eeprom_md7134_gate(dev);
+
dev->i2c_client.addr = 0xa0 >> 1;
buf = 0;
if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {
diff --git a/drivers/media/platform/msm/broadcast/tspp.c b/drivers/media/platform/msm/broadcast/tspp.c
index 43b426de..44193f5 100644
--- a/drivers/media/platform/msm/broadcast/tspp.c
+++ b/drivers/media/platform/msm/broadcast/tspp.c
@@ -24,6 +24,7 @@
#include <linux/uaccess.h> /* copy_to_user */
#include <linux/slab.h> /* kfree, kzalloc */
#include <linux/ioport.h> /* XXX_ mem_region */
+#include <asm/dma-iommu.h>
#include <linux/dma-mapping.h> /* dma_XXX */
#include <linux/dmapool.h> /* DMA pools */
#include <linux/delay.h> /* msleep */
@@ -46,7 +47,6 @@
#include <linux/msm-bus.h>
#include <linux/interrupt.h> /* tasklet */
#include <asm/arch_timer.h> /* Timer */
-#include <linux/avtimer_kernel.h> /* Timer */
/*
* General defines
@@ -60,6 +60,10 @@
#define TSPP_NUM_KEYS 8
#define INVALID_CHANNEL 0xFFFFFFFF
#define TSPP_BAM_DEFAULT_IPC_LOGLVL 2
+
+#define TSPP_SMMU_IOVA_START (0x10000000)
+#define TSPP_SMMU_IOVA_SIZE (0x40000000)
+
/*
* BAM descriptor FIFO size (in number of descriptors).
* Max number of descriptors allowed by SPS which is 8K-1.
@@ -490,6 +494,8 @@
struct mutex mutex;
struct tspp_pinctrl pinctrl;
unsigned int tts_source; /* Time stamp source type LPASS timer/TCR */
+ struct dma_iommu_mapping *iommu_mapping;
+ bool bypass_s1_smmu;
struct dentry *dent;
struct dentry *debugfs_regs[ARRAY_SIZE(debugfs_tspp_regs)];
@@ -924,8 +930,6 @@
{
int start_hardware = 0;
u32 ctl;
- u32 tts_ctl;
- int retval;
if (tsif_device->ref_count == 0) {
start_hardware = 1;
@@ -978,39 +982,6 @@
pr_warn("tspp: unknown tsif mode 0x%x",
tsif_device->mode);
}
- /* Set 4bytes Time Stamp for TCR */
- if (tsif_device->tts_source == TSIF_TTS_LPASS_TIMER) {
- if (tsif_device->lpass_timer_enable == 0) {
- retval = avcs_core_open();
- if (retval < 0) {
- pr_warn("tspp: avcs open fail:%d\n",
- retval);
- return retval;
- }
- retval = avcs_core_disable_power_collapse(1);
- if (retval < 0) {
- pr_warn("tspp: avcs power enable:%d\n",
- retval);
- return retval;
- }
- tsif_device->lpass_timer_enable = 1;
- }
-
- tts_ctl = readl_relaxed(tsif_device->base +
- TSIF_TTS_CTL_OFF);
- tts_ctl = 0;
- /* Set LPASS Timer TTS source */
- tts_ctl |= TSIF_TTS_CTL_TTS_SOURCE;
- /* Set 4 byte TTS */
- tts_ctl |= TSIF_TTS_CTL_TTS_LENGTH_0;
-
- writel_relaxed(tts_ctl, tsif_device->base +
- TSIF_TTS_CTL_OFF);
- /* write TTS control register */
- wmb();
- tts_ctl = readl_relaxed(tsif_device->base +
- TSIF_TTS_CTL_OFF);
- }
writel_relaxed(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
/* write Status control register */
@@ -1035,13 +1006,8 @@
static void tspp_stop_tsif(struct tspp_tsif_device *tsif_device)
{
- if (tsif_device->ref_count == 0) {
- if (tsif_device->lpass_timer_enable == 1) {
- if (avcs_core_disable_power_collapse(0) == 0)
- tsif_device->lpass_timer_enable = 0;
- }
+ if (tsif_device->ref_count == 0)
return;
- }
tsif_device->ref_count--;
@@ -1099,6 +1065,42 @@
tspp_key_entry &= ~(1 << entry);
}
+static int tspp_iommu_init(struct tspp_device *device)
+{
+ struct dma_iommu_mapping *iommu_map;
+ int s1_bypass = 1;
+
+ iommu_map = arm_iommu_create_mapping(&platform_bus_type,
+ TSPP_SMMU_IOVA_START,
+ TSPP_SMMU_IOVA_SIZE);
+ if (IS_ERR(iommu_map)) {
+ dev_err(&device->pdev->dev, "iommu_create_mapping failure\n");
+ return PTR_ERR(iommu_map);
+ }
+ if (iommu_domain_set_attr(iommu_map->domain,
+ DOMAIN_ATTR_S1_BYPASS, &s1_bypass)) {
+ dev_err(&device->pdev->dev, "Can't bypass s1 translation\n");
+ arm_iommu_release_mapping(iommu_map);
+ return -EIO;
+ }
+ if (arm_iommu_attach_device(&device->pdev->dev, iommu_map)) {
+ dev_err(&device->pdev->dev, "can't arm_iommu_attach_device\n");
+ arm_iommu_release_mapping(iommu_map);
+ return -EIO;
+ }
+
+ device->iommu_mapping = iommu_map;
+ return 0;
+}
+
+static void tspp_iommu_release_iomapping(struct tspp_device *device)
+{
+ if (device->bypass_s1_smmu && device->iommu_mapping)
+ arm_iommu_release_mapping(device->iommu_mapping);
+
+ device->iommu_mapping = NULL;
+}
+
static int tspp_alloc_buffer(u32 channel_id, struct tspp_data_descriptor *desc,
u32 size, struct dma_pool *dma_pool, tspp_allocator *alloc, void *user)
{
@@ -1957,44 +1959,9 @@
int tspp_get_lpass_time_counter(u32 dev, enum tspp_source source,
u64 *lpass_time_counter)
{
- struct tspp_device *pdev;
- struct tspp_tsif_device *tsif_device;
-
- if (!lpass_time_counter)
- return -EINVAL;
-
- pdev = tspp_find_by_id(dev);
- if (!pdev) {
- pr_err("tspp_get_lpass_time_counter: can't find device %i\n",
- dev);
- return -ENODEV;
- }
-
- switch (source) {
- case TSPP_SOURCE_TSIF0:
- tsif_device = &pdev->tsif[0];
- break;
-
- case TSPP_SOURCE_TSIF1:
- tsif_device = &pdev->tsif[1];
- break;
-
- default:
- tsif_device = NULL;
- break;
- }
-
- if (tsif_device && tsif_device->ref_count) {
- if (avcs_core_query_timer(lpass_time_counter) < 0) {
- pr_err("tspp_get_lpass_time_counter: read error\n");
- *lpass_time_counter = 0;
- return -ENETRESET;
- }
- } else
- *lpass_time_counter = 0;
-
- return 0;
+ return -EPERM;
}
+
EXPORT_SYMBOL(tspp_get_lpass_time_counter);
/**
@@ -3035,17 +3002,15 @@
goto err_irq;
device->req_irqs = false;
- /* Check whether AV timer time stamps are enabled */
- if (!of_property_read_u32(pdev->dev.of_node, "qcom,lpass-timer-tts",
- &device->tts_source)) {
- if (device->tts_source == 1)
- device->tts_source = TSIF_TTS_LPASS_TIMER;
- else
- device->tts_source = TSIF_TTS_TCR;
- } else {
- device->tts_source = TSIF_TTS_TCR;
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,smmu-s1-bypass")) {
+ device->bypass_s1_smmu = true;
+ if (tspp_iommu_init(device)) {
+ dev_err(&pdev->dev, "iommu init failed");
+ goto err_iommu;
+ }
}
+ device->tts_source = TSIF_TTS_TCR;
for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
device->tsif[i].tts_source = device->tts_source;
@@ -3115,6 +3080,8 @@
tspp_debugfs_exit(device);
for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
tsif_debugfs_exit(&device->tsif[i]);
+err_iommu:
+ tspp_iommu_release_iomapping(device);
err_irq:
iounmap(device->bam_props.virt_addr);
err_map_bam:
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
index fc7a493..048fe8f 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
@@ -27,18 +27,7 @@
#include "cam_cpas_api.h"
#include "cam_hw_intf.h"
#include "cam_hw.h"
-
-#ifdef CONFIG_CAM_CDM_DBG
-#define CDM_CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDM_CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
-#ifdef CONFIG_CAM_CDM_DUMP_DBG
-#define CDM_DUMP_CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDM_DUMP_CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
+#include "cam_debug_util.h"
#define CAM_MAX_SW_CDM_VERSION_SUPPORTED 1
#define CAM_SW_CDM_INDEX 0
@@ -233,6 +222,7 @@
struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
uint8_t bl_tag;
atomic_t error;
+ atomic_t bl_done;
struct cam_cdm_hw_mem gen_irq;
uint32_t cpas_handle;
};
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
index 341406a..6c8bde1 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-CORE %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -32,7 +30,7 @@
static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
{
mutex_lock(&client->lock);
- CDM_CDBG("CDM client get refcount=%d\n",
+ CAM_DBG(CAM_CDM, "CDM client get refcount=%d",
client->refcount);
client->refcount++;
mutex_unlock(&client->lock);
@@ -41,12 +39,12 @@
static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
{
mutex_lock(&client->lock);
- CDM_CDBG("CDM client put refcount=%d\n",
+ CAM_DBG(CAM_CDM, "CDM client put refcount=%d",
client->refcount);
if (client->refcount > 0) {
client->refcount--;
} else {
- pr_err("Refcount put when zero\n");
+ CAM_ERR(CAM_CDM, "Refcount put when zero");
WARN_ON(1);
}
mutex_unlock(&client->lock);
@@ -63,16 +61,16 @@
cam_version->reserved = 0;
return true;
default:
- pr_err("CDM Version=%x not supported in util\n", ver);
+ CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", ver);
break;
}
return false;
}
-void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
enum cam_camnoc_irq_type evt_type, uint32_t evt_data)
{
- pr_err("CPAS error callback type=%d with data=%x\n", evt_type,
+ CAM_ERR(CAM_CDM, "CPAS error callback type=%d with data=%x", evt_type,
evt_data);
}
@@ -84,13 +82,14 @@
case CAM_CDM170_VERSION:
return &CDM170_ops;
default:
- pr_err("CDM Version=%x not supported in util\n", ver);
+ CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util",
+ ver);
}
} else if (cam_version) {
if ((cam_version->major == 1) && (cam_version->minor == 0) &&
(cam_version->incr == 0))
return &CDM170_ops;
- pr_err("cam_hw_version=%x:%x:%x not supported\n",
+ CAM_ERR(CAM_CDM, "cam_hw_version=%x:%x:%x not supported",
cam_version->major, cam_version->minor,
cam_version->incr);
}
@@ -107,7 +106,7 @@
if (node->bl_tag == tag)
return node;
}
- pr_err("Could not find the bl request for tag=%d\n", tag);
+ CAM_ERR(CAM_CDM, "Could not find the bl request for tag=%x", tag);
return NULL;
}
@@ -135,11 +134,11 @@
for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
if (hw->clients[i] == NULL) {
- CDM_CDBG("Found client slot %d\n", i);
+ CAM_DBG(CAM_CDM, "Found client slot %d", i);
return i;
}
}
- pr_err("No more client slots\n");
+ CAM_ERR(CAM_CDM, "No more client slots");
return -EBUSY;
}
@@ -153,7 +152,7 @@
struct cam_cdm_client *client = NULL;
if (!cdm_hw) {
- pr_err("CDM Notify called with NULL hw info\n");
+ CAM_ERR(CAM_CDM, "CDM Notify called with NULL hw info");
return;
}
core = (struct cam_cdm *)cdm_hw->core_info;
@@ -166,20 +165,21 @@
client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
client = core->clients[client_idx];
if ((!client) || (client->handle != node->client_hdl)) {
- pr_err("Invalid client %pK hdl=%x\n", client,
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
node->client_hdl);
return;
}
cam_cdm_get_client_refcount(client);
if (client->data.cam_cdm_callback) {
- CDM_CDBG("Calling client=%s cb cookie=%d\n",
+ CAM_DBG(CAM_CDM, "Calling client=%s cb cookie=%d",
client->data.identifier, node->cookie);
client->data.cam_cdm_callback(node->client_hdl,
node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
node->cookie);
- CDM_CDBG("Exit client cb cookie=%d\n", node->cookie);
+ CAM_DBG(CAM_CDM, "Exit client cb cookie=%d",
+ node->cookie);
} else {
- pr_err("No cb registered for client hdl=%x\n",
+ CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x",
node->client_hdl);
}
cam_cdm_put_client_refcount(client);
@@ -190,7 +190,7 @@
if (core->clients[i] != NULL) {
client = core->clients[i];
mutex_lock(&client->lock);
- CDM_CDBG("Found client slot %d\n", i);
+ CAM_DBG(CAM_CDM, "Found client slot %d", i);
if (client->data.cam_cdm_callback) {
if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
unsigned long iova =
@@ -203,7 +203,8 @@
(iova & 0xFFFFFFFF));
}
} else {
- pr_err("No cb registered for client hdl=%x\n",
+ CAM_ERR(CAM_CDM,
+ "No cb registered for client hdl=%x",
client->handle);
}
mutex_unlock(&client->lock);
@@ -216,7 +217,7 @@
{
struct cam_hw_info *cdm_hw = hw_priv;
struct cam_cdm *core = NULL;
- int rc = -1;
+ int rc = -EPERM;
int client_idx;
struct cam_cdm_client *client;
uint32_t *handle = start_args;
@@ -228,24 +229,26 @@
client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
client = core->clients[client_idx];
if (!client) {
- pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, *handle);
return -EINVAL;
}
cam_cdm_get_client_refcount(client);
if (*handle != client->handle) {
- pr_err("client id given handle=%x invalid\n", *handle);
+ CAM_ERR(CAM_CDM, "client id given handle=%x invalid", *handle);
cam_cdm_put_client_refcount(client);
return -EINVAL;
}
if (operation == true) {
if (true == client->stream_on) {
- pr_err("Invalid CDM client is already streamed ON\n");
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM client is already streamed ON");
cam_cdm_put_client_refcount(client);
return rc;
}
} else {
if (client->stream_on == false) {
- pr_err("Invalid CDM client is already streamed Off\n");
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM client is already streamed Off");
cam_cdm_put_client_refcount(client);
return rc;
}
@@ -265,26 +268,28 @@
rc = cam_cpas_start(core->cpas_handle,
&ahb_vote, &axi_vote);
if (rc != 0) {
- pr_err("CPAS start failed\n");
+ CAM_ERR(CAM_CDM, "CPAS start failed");
goto end;
}
- CDM_CDBG("CDM init first time\n");
+ CAM_DBG(CAM_CDM, "CDM init first time");
if (core->id == CAM_CDM_VIRTUAL) {
- CDM_CDBG("Virtual CDM HW init first time\n");
+ CAM_DBG(CAM_CDM,
+ "Virtual CDM HW init first time");
rc = 0;
} else {
- CDM_CDBG("CDM HW init first time\n");
+ CAM_DBG(CAM_CDM, "CDM HW init first time");
rc = cam_hw_cdm_init(hw_priv, NULL, 0);
if (rc == 0) {
rc = cam_hw_cdm_alloc_genirq_mem(
hw_priv);
if (rc != 0) {
- pr_err("Genirqalloc failed\n");
+ CAM_ERR(CAM_CDM,
+ "Genirqalloc failed");
cam_hw_cdm_deinit(hw_priv,
NULL, 0);
}
} else {
- pr_err("CDM HW init failed\n");
+ CAM_ERR(CAM_CDM, "CDM HW init failed");
}
}
if (rc == 0) {
@@ -292,11 +297,11 @@
client->stream_on = true;
} else {
if (cam_cpas_stop(core->cpas_handle))
- pr_err("CPAS stop failed\n");
+ CAM_ERR(CAM_CDM, "CPAS stop failed");
}
} else {
cdm_hw->open_count++;
- CDM_CDBG("CDM HW already ON count=%d\n",
+ CAM_DBG(CAM_CDM, "CDM HW already ON count=%d",
cdm_hw->open_count);
rc = 0;
client->stream_on = true;
@@ -304,35 +309,41 @@
} else {
if (cdm_hw->open_count) {
cdm_hw->open_count--;
- CDM_CDBG("stream OFF CDM %d\n", cdm_hw->open_count);
+ CAM_DBG(CAM_CDM, "stream OFF CDM %d",
+ cdm_hw->open_count);
if (!cdm_hw->open_count) {
- CDM_CDBG("CDM Deinit now\n");
+ CAM_DBG(CAM_CDM, "CDM Deinit now");
if (core->id == CAM_CDM_VIRTUAL) {
- CDM_CDBG("Virtual CDM HW Deinit\n");
+ CAM_DBG(CAM_CDM,
+ "Virtual CDM HW Deinit");
rc = 0;
} else {
- CDM_CDBG("CDM HW Deinit now\n");
+ CAM_DBG(CAM_CDM, "CDM HW Deinit now");
rc = cam_hw_cdm_deinit(
hw_priv, NULL, 0);
if (cam_hw_cdm_release_genirq_mem(
hw_priv))
- pr_err("Genirq release failed\n");
+ CAM_ERR(CAM_CDM,
+ "Genirq release fail");
}
if (rc) {
- pr_err("Deinit failed in streamoff\n");
+ CAM_ERR(CAM_CDM,
+ "Deinit failed in streamoff");
} else {
client->stream_on = false;
rc = cam_cpas_stop(core->cpas_handle);
if (rc)
- pr_err("CPAS stop failed\n");
+ CAM_ERR(CAM_CDM,
+ "CPAS stop failed");
}
} else {
client->stream_on = false;
- CDM_CDBG("Client stream off success =%d\n",
+ CAM_DBG(CAM_CDM,
+ "Client stream off success =%d",
cdm_hw->open_count);
}
} else {
- CDM_CDBG("stream OFF CDM Invalid %d\n",
+ CAM_DBG(CAM_CDM, "stream OFF CDM Invalid %d",
cdm_hw->open_count);
rc = -ENXIO;
}
@@ -390,33 +401,35 @@
struct cam_cdm_client *client;
if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
- pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+ CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
arg_size);
break;
}
req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
if ((req->data->type < 0) ||
(req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
- pr_err("Invalid req bl cmd addr type=%d\n",
+ CAM_ERR(CAM_CDM, "Invalid req bl cmd addr type=%d",
req->data->type);
break;
}
idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
client = core->clients[idx];
if ((!client) || (req->handle != client->handle)) {
- pr_err("Invalid client %pK hdl=%x\n", client,
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
req->handle);
break;
}
cam_cdm_get_client_refcount(client);
if ((req->data->flag == true) &&
(!client->data.cam_cdm_callback)) {
- pr_err("CDM request cb without registering cb\n");
+ CAM_ERR(CAM_CDM,
+ "CDM request cb without registering cb");
cam_cdm_put_client_refcount(client);
break;
}
if (client->stream_on != true) {
- pr_err("Invalid CDM needs to be streamed ON first\n");
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM needs to be streamed ON first");
cam_cdm_put_client_refcount(client);
break;
}
@@ -434,19 +447,20 @@
struct cam_cdm_client *client;
if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
- pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+ CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
arg_size);
break;
}
mutex_lock(&cdm_hw->hw_mutex);
data = (struct cam_cdm_acquire_data *)cmd_args;
- CDM_CDBG("Trying to acquire client=%s in hw idx=%d\n",
+ CAM_DBG(CAM_CDM, "Trying to acquire client=%s in hw idx=%d",
data->identifier, core->index);
idx = cam_cdm_find_free_client_slot(core);
if ((idx < 0) || (core->clients[idx])) {
mutex_unlock(&cdm_hw->hw_mutex);
- pr_err("Failed to client slots for client=%s in hw idx=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Fail to client slots, client=%s in hw idx=%d",
data->identifier, core->index);
break;
}
@@ -476,7 +490,8 @@
core->clients[idx] = NULL;
mutex_unlock(
&cdm_hw->hw_mutex);
- rc = -1;
+ rc = -EPERM;
+ CAM_ERR(CAM_CDM, "Invalid ops for virtual cdm");
break;
}
} else {
@@ -492,7 +507,7 @@
idx);
client->stream_on = false;
data->handle = client->handle;
- CDM_CDBG("Acquired client=%s in hwidx=%d\n",
+ CAM_DBG(CAM_CDM, "Acquired client=%s in hwidx=%d",
data->identifier, core->index);
mutex_unlock(&client->lock);
rc = 0;
@@ -504,7 +519,8 @@
struct cam_cdm_client *client;
if (sizeof(uint32_t) != arg_size) {
- pr_err("Invalid CDM cmd %d size=%x for handle=%x\n",
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM cmd %d size=%x for handle=%x",
cmd, arg_size, *handle);
return -EINVAL;
}
@@ -512,16 +528,17 @@
mutex_lock(&cdm_hw->hw_mutex);
client = core->clients[idx];
if ((!client) || (*handle != client->handle)) {
- pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x",
+ client, *handle);
mutex_unlock(&cdm_hw->hw_mutex);
break;
}
cam_cdm_put_client_refcount(client);
mutex_lock(&client->lock);
if (client->refcount != 0) {
- pr_err("CDM Client refcount not zero %d",
+ CAM_ERR(CAM_CDM, "CDM Client refcount not zero %d",
client->refcount);
- rc = -1;
+ rc = -EPERM;
mutex_unlock(&client->lock);
mutex_unlock(&cdm_hw->hw_mutex);
break;
@@ -535,12 +552,12 @@
break;
}
case CAM_CDM_HW_INTF_CMD_RESET_HW: {
- pr_err("CDM HW reset not supported for handle =%x\n",
+ CAM_ERR(CAM_CDM, "CDM HW reset not supported for handle =%x",
*((uint32_t *)cmd_args));
break;
}
default:
- pr_err("CDM HW intf command not valid =%d\n", cmd);
+ CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd);
break;
}
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
index eb75aaa..fa3ae04 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
@@ -32,7 +32,7 @@
uint32_t arg_size);
bool cam_cdm_set_cam_hw_version(
uint32_t ver, struct cam_hw_version *cam_version);
-void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
enum cam_camnoc_irq_type evt_type, uint32_t evt_data);
struct cam_cdm_utils_ops *cam_cdm_get_ops(
uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
index 6009c25..5f6895c 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-HW %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -31,7 +29,6 @@
#include "cam_io_util.h"
#include "cam_hw_cdm170_reg.h"
-
#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0"
#define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm"
#define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm"
@@ -65,20 +62,75 @@
if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
pending_bl)) {
- pr_err("Failed to read CDM pending BL's\n");
- rc = -1;
+ CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
+ rc = -EIO;
}
return rc;
}
+static int cam_hw_cdm_enable_bl_done_irq(struct cam_hw_info *cdm_hw,
+ bool enable)
+{
+ int rc = -EIO;
+ uint32_t irq_mask = 0;
+ struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+ if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_MASK,
+ &irq_mask)) {
+ CAM_ERR(CAM_CDM, "Failed to read CDM IRQ mask");
+ return rc;
+ }
+
+ if (enable == true) {
+ if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
+ (irq_mask | 0x4))) {
+ CAM_ERR(CAM_CDM, "Write failed to enable BL done irq");
+ } else {
+ atomic_inc(&core->bl_done);
+ rc = 0;
+ CAM_DBG(CAM_CDM, "BL done irq enabled =%d",
+ atomic_read(&core->bl_done));
+ }
+ } else {
+ if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
+ (irq_mask & 0x70003))) {
+ CAM_ERR(CAM_CDM, "Write failed to disable BL done irq");
+ } else {
+ atomic_dec(&core->bl_done);
+ rc = 0;
+ CAM_DBG(CAM_CDM, "BL done irq disable =%d",
+ atomic_read(&core->bl_done));
+ }
+ }
+ return rc;
+}
+
+static int cam_hw_cdm_enable_core(struct cam_hw_info *cdm_hw, bool enable)
+{
+ int rc = 0;
+
+ if (enable == true) {
+ if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x01)) {
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW core enable");
+ rc = -EIO;
+ }
+ } else {
+ if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x02)) {
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW core disable");
+ rc = -EIO;
+ }
+ }
+ return rc;
+}
+
int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw)
{
int rc = 0;
if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100)) {
- pr_err("Failed to Write CDM HW core debug\n");
- rc = -1;
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
+ rc = -EIO;
}
return rc;
@@ -89,8 +141,8 @@
int rc = 0;
if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0)) {
- pr_err("Failed to Write CDM HW core debug\n");
- rc = -1;
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
+ rc = -EIO;
}
return rc;
@@ -101,31 +153,31 @@
uint32_t dump_reg = 0;
cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
- pr_err("dump core en=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump core en=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg);
- pr_err("dump scratch0=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch0=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg);
- pr_err("dump scratch1=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch1=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg);
- pr_err("dump scratch2=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch2=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg);
- pr_err("dump scratch3=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch3=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg);
- pr_err("dump scratch4=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch4=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg);
- pr_err("dump scratch5=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch5=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg);
- pr_err("dump scratch6=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch6=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg);
- pr_err("dump scratch7=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch7=%x", dump_reg);
}
@@ -136,67 +188,68 @@
mutex_lock(&cdm_hw->hw_mutex);
cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
- pr_err("CDM HW core status=%x\n", dump_reg);
- /* First pause CDM */
- cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x03);
+ CAM_ERR(CAM_CDM, "CDM HW core status=%x", dump_reg);
+ /* First pause CDM, If it fails still proceed to dump debug info */
+ cam_hw_cdm_enable_core(cdm_hw, false);
cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
- pr_err("CDM HW current pending BL=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
loop_cnt = dump_reg;
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg);
- pr_err("CDM HW Debug status reg=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW Debug status reg=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg);
if (core_dbg & 0x100) {
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg);
- pr_err("AHB dump reglastaddr=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "AHB dump reglastaddr=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg);
- pr_err("AHB dump reglastdata=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "AHB dump reglastdata=%x", dump_reg);
} else {
- pr_err("CDM HW AHB dump not enable\n");
+ CAM_ERR(CAM_CDM, "CDM HW AHB dump not enable");
}
if (core_dbg & 0x10000) {
int i;
- pr_err("CDM HW BL FIFO dump with loop count=%d\n", loop_cnt);
+ CAM_ERR(CAM_CDM, "CDM HW BL FIFO dump with loop count=%d",
+ loop_cnt);
for (i = 0 ; i < loop_cnt ; i++) {
cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i);
cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB,
&dump_reg);
- pr_err("BL(%d) base addr =%x\n", i, dump_reg);
+ CAM_ERR(CAM_CDM, "BL(%d) base addr =%x", i, dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB,
&dump_reg);
- pr_err("BL(%d) len=%d tag=%d\n", i,
+ CAM_ERR(CAM_CDM, "BL(%d) len=%d tag=%d", i,
(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
}
} else {
- pr_err("CDM HW BL FIFO readback not enable\n");
+ CAM_ERR(CAM_CDM, "CDM HW BL FIFO readback not enable");
}
- pr_err("CDM HW default dump\n");
+ CAM_ERR(CAM_CDM, "CDM HW default dump");
cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg);
- pr_err("CDM HW core cfg=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW core cfg=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg);
- pr_err("CDM HW irq status=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW irq status=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg);
- pr_err("CDM HW irq set reg=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW irq set reg=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg);
- pr_err("CDM HW current BL base=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW current BL base=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg);
- pr_err("CDM HW current BL len=%d tag=%d\n", (dump_reg & 0xFFFFF),
- (dump_reg & 0xFF000000));
+ CAM_ERR(CAM_CDM, "CDM HW current BL len=%d tag=%d",
+ (dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg);
- pr_err("CDM HW current AHB base=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW current AHB base=%x", dump_reg);
cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
- pr_err("CDM HW current pending BL=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
/* Enable CDM back */
- cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 1);
+ cam_hw_cdm_enable_core(cdm_hw, true);
mutex_unlock(&cdm_hw->hw_mutex);
}
@@ -206,40 +259,53 @@
{
uint32_t pending_bl = 0;
int32_t available_bl_slots = 0;
- int rc = -1;
+ int rc = -EIO;
long time_left;
struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
do {
if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
&pending_bl)) {
- pr_err("Failed to read CDM pending BL's\n");
- rc = -1;
+ CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
+ rc = -EIO;
break;
}
available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl;
if (available_bl_slots < 0) {
- pr_err("Invalid available slots %d:%d:%d\n",
+ CAM_ERR(CAM_CDM, "Invalid available slots %d:%d:%d",
available_bl_slots, CAM_CDM_HWFIFO_SIZE,
pending_bl);
break;
}
if (bl_count < (available_bl_slots - 1)) {
- CDM_CDBG("BL slot available_cnt=%d requested=%d\n",
+ CAM_DBG(CAM_CDM,
+ "BL slot available_cnt=%d requested=%d",
(available_bl_slots - 1), bl_count);
rc = bl_count;
break;
} else if (0 == (available_bl_slots - 1)) {
+ rc = cam_hw_cdm_enable_bl_done_irq(cdm_hw, true);
+ if (rc) {
+ CAM_ERR(CAM_CDM, "Enable BL done irq failed");
+ break;
+ }
time_left = wait_for_completion_timeout(
&core->bl_complete, msecs_to_jiffies(
CAM_CDM_BL_FIFO_WAIT_TIMEOUT));
if (time_left <= 0) {
- pr_err("CDM HW BL Wait timed out failed\n");
- rc = -1;
+ CAM_ERR(CAM_CDM,
+ "CDM HW BL Wait timed out failed");
+ if (cam_hw_cdm_enable_bl_done_irq(cdm_hw,
+ false))
+ CAM_ERR(CAM_CDM,
+ "Disable BL done irq failed");
+ rc = -EIO;
break;
}
+ if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, false))
+ CAM_ERR(CAM_CDM, "Disable BL done irq failed");
rc = 0;
- CDM_CDBG("CDM HW is ready for data\n");
+ CAM_DBG(CAM_CDM, "CDM HW is ready for data");
} else {
rc = (bl_count - (available_bl_slots - 1));
break;
@@ -253,12 +319,12 @@
uint32_t len, uint32_t tag)
{
if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) {
- pr_err("Failed to write CDM base to BL base\n");
+ CAM_ERR(CAM_CDM, "Failed to write CDM base to BL base");
return true;
}
if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG,
((len & 0xFFFFF) | ((tag & 0xFF) << 20)))) {
- pr_err("Failed to write CDM BL len\n");
+ CAM_ERR(CAM_CDM, "Failed to write CDM BL len");
return true;
}
return false;
@@ -267,7 +333,7 @@
bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw)
{
if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) {
- pr_err("Failed to write CDM commit BL\n");
+ CAM_ERR(CAM_CDM, "Failed to write CDM commit BL");
return true;
}
return false;
@@ -282,12 +348,12 @@
int rc;
if (core->bl_tag > 63) {
- pr_err("bl_tag invalid =%d\n", core->bl_tag);
+ CAM_ERR(CAM_CDM, "bl_tag invalid =%d", core->bl_tag);
rc = -EINVAL;
goto end;
}
- CDM_CDBG("CDM write BL last cmd tag=%d total=%d\n",
- core->bl_tag, req->data->cmd_arrary_count);
+ CAM_DBG(CAM_CDM, "CDM write BL last cmd tag=%x total=%d cookie=%d",
+ core->bl_tag, req->data->cmd_arrary_count, req->data->cookie);
node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry),
GFP_KERNEL);
if (!node) {
@@ -307,20 +373,20 @@
((4 * core->ops->cdm_required_size_genirq()) - 1),
core->bl_tag);
if (rc) {
- pr_err("CDM hw bl write failed for gen irq bltag=%d\n",
+ CAM_ERR(CAM_CDM, "CDM hw bl write failed for gen irq bltag=%d",
core->bl_tag);
list_del_init(&node->entry);
kfree(node);
- rc = -1;
+ rc = -EIO;
goto end;
}
if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
- pr_err("Cannot commit the genirq BL with tag tag=%d\n",
+ CAM_ERR(CAM_CDM, "Cannot commit the genirq BL with tag tag=%d",
core->bl_tag);
list_del_init(&node->entry);
kfree(node);
- rc = -1;
+ rc = -EIO;
}
end:
@@ -331,27 +397,25 @@
struct cam_cdm_hw_intf_cmd_submit_bl *req,
struct cam_cdm_client *client)
{
- int i, rc = -1;
+ int i, rc;
struct cam_cdm_bl_request *cdm_cmd = req->data;
struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
uint32_t pending_bl = 0;
int write_count = 0;
if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) {
- pr_info("requested BL more than max size, cnt=%d max=%d\n",
+ pr_info("requested BL more than max size, cnt=%d max=%d",
req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE);
}
- if (atomic_read(&core->error) != 0) {
- pr_err("HW in error state, cannot trigger transactions now\n");
- return rc;
- }
+ if (atomic_read(&core->error))
+ return -EIO;
mutex_lock(&cdm_hw->hw_mutex);
mutex_lock(&client->lock);
rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl);
if (rc) {
- pr_err("Cannot read the current BL depth\n");
+ CAM_ERR(CAM_CDM, "Cannot read the current BL depth");
mutex_unlock(&client->lock);
mutex_unlock(&cdm_hw->hw_mutex);
return rc;
@@ -363,25 +427,28 @@
if ((!cdm_cmd->cmd[i].len) &&
(cdm_cmd->cmd[i].len > 0x100000)) {
- pr_err("cmd len(%d) is invalid cnt=%d total cnt=%d\n",
+ CAM_ERR(CAM_CDM,
+ "cmd len(%d) is invalid cnt=%d total cnt=%d",
cdm_cmd->cmd[i].len, i,
req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EINVAL;
break;
}
- if (atomic_read(&core->error) != 0) {
- pr_err("HW in error state cmd_count=%d total cnt=%d\n",
+ if (atomic_read(&core->error)) {
+ CAM_ERR_RATE_LIMIT(CAM_CDM,
+ "In error state cnt=%d total cnt=%d\n",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EIO;
break;
}
if (write_count == 0) {
write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw,
(req->data->cmd_arrary_count - i));
if (write_count < 0) {
- pr_err("wait for bl fifo failed %d:%d\n",
+ CAM_ERR(CAM_CDM,
+ "wait for bl fifo failed %d:%d",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EIO;
break;
}
} else {
@@ -395,9 +462,10 @@
&len);
} else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
- pr_err("Hw bl hw_iova is invalid %d:%d\n",
+ CAM_ERR(CAM_CDM,
+ "Hw bl hw_iova is invalid %d:%d",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EINVAL;
break;
}
rc = 0;
@@ -405,48 +473,56 @@
(uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
} else {
- pr_err("Only mem hdl/hw va type is supported %d\n",
+ CAM_ERR(CAM_CDM,
+ "Only mem hdl/hw va type is supported %d",
req->data->type);
- rc = -1;
+ rc = -EINVAL;
break;
}
if ((!rc) && (hw_vaddr_ptr) && (len) &&
(len >= cdm_cmd->cmd[i].offset)) {
- CDM_CDBG("Got the HW VA\n");
+ CAM_DBG(CAM_CDM, "Got the HW VA");
+ if (core->bl_tag >=
+ (CAM_CDM_HWFIFO_SIZE - 1))
+ core->bl_tag = 0;
rc = cam_hw_cdm_bl_write(cdm_hw,
((uint32_t)hw_vaddr_ptr +
cdm_cmd->cmd[i].offset),
(cdm_cmd->cmd[i].len - 1), core->bl_tag);
if (rc) {
- pr_err("Hw bl write failed %d:%d\n",
+ CAM_ERR(CAM_CDM, "Hw bl write failed %d:%d",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EIO;
break;
}
} else {
- pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+ CAM_ERR(CAM_CDM,
+ "Sanity check failed for hdl=%x len=%zu:%d",
cdm_cmd->cmd[i].bl_addr.mem_handle, len,
cdm_cmd->cmd[i].offset);
- pr_err("Sanity check failed for %d:%d\n",
+ CAM_ERR(CAM_CDM, "Sanity check failed for %d:%d",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EINVAL;
break;
}
if (!rc) {
- CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+ CAM_DBG(CAM_CDM,
+ "write BL success for cnt=%d with tag=%d",
i, core->bl_tag);
- core->bl_tag++;
- CDM_CDBG("Now commit the BL\n");
+
+ CAM_DBG(CAM_CDM, "Now commit the BL");
if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
- pr_err("Cannot commit the BL %d tag=%d\n",
- i, (core->bl_tag - 1));
- rc = -1;
+ CAM_ERR(CAM_CDM,
+ "Cannot commit the BL %d tag=%d",
+ i, core->bl_tag);
+ rc = -EIO;
break;
}
- CDM_CDBG("BL commit success BL %d tag=%d\n", i,
- (core->bl_tag - 1));
+ CAM_DBG(CAM_CDM, "BL commit success BL %d tag=%d", i,
+ core->bl_tag);
+ core->bl_tag++;
if ((req->data->flag == true) &&
(i == (req->data->cmd_arrary_count -
1))) {
@@ -455,9 +531,6 @@
if (rc == 0)
core->bl_tag++;
}
- if (!rc && ((CAM_CDM_HWFIFO_SIZE - 1) ==
- core->bl_tag))
- core->bl_tag = 0;
}
}
mutex_unlock(&client->lock);
@@ -477,12 +550,12 @@
cdm_hw = payload->hw;
core = (struct cam_cdm *)cdm_hw->core_info;
- CDM_CDBG("IRQ status=%x\n", payload->irq_status);
+ CAM_DBG(CAM_CDM, "IRQ status=%x", payload->irq_status);
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
struct cam_cdm_bl_cb_request_entry *node;
- CDM_CDBG("inline IRQ data=%x\n",
+ CAM_DBG(CAM_CDM, "inline IRQ data=%x",
payload->irq_data);
mutex_lock(&cdm_hw->hw_mutex);
node = cam_cdm_find_request_by_bl_tag(
@@ -496,47 +569,56 @@
(void *)node);
} else if (node->request_type ==
CAM_HW_CDM_BL_CB_INTERNAL) {
- pr_err("Invalid node=%pK %d\n", node,
+ CAM_ERR(CAM_CDM,
+ "Invalid node=%pK %d", node,
node->request_type);
}
list_del_init(&node->entry);
kfree(node);
} else {
- pr_err("Invalid node for inline irq\n");
+ CAM_ERR(CAM_CDM,
+ "Inval node, inline_irq st=%x data=%x",
+ payload->irq_status, payload->irq_data);
}
mutex_unlock(&cdm_hw->hw_mutex);
}
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) {
- CDM_CDBG("CDM HW reset done IRQ\n");
+ CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
complete(&core->reset_complete);
}
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) {
- pr_err("CDM HW BL done IRQ\n");
- complete(&core->bl_complete);
+ if (atomic_read(&core->bl_done)) {
+ CAM_DBG(CAM_CDM, "CDM HW BL done IRQ");
+ complete(&core->bl_complete);
+ }
}
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
- pr_err("Invalid command IRQ, Need HW reset\n");
+ CAM_ERR_RATE_LIMIT(CAM_CDM,
+ "Invalid command IRQ, Need HW reset\n");
+ atomic_inc(&core->error);
+ cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+ }
+ if (payload->irq_status &
+ CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "AHB Error IRQ\n");
atomic_inc(&core->error);
cam_hw_cdm_dump_core_debug_registers(cdm_hw);
atomic_dec(&core->error);
}
if (payload->irq_status &
- CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
- pr_err("AHB IRQ\n");
- cam_hw_cdm_dump_core_debug_registers(cdm_hw);
- }
- if (payload->irq_status &
CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) {
- pr_err("Overflow IRQ\n");
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "Overflow Error IRQ\n");
+ atomic_inc(&core->error);
cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+ atomic_dec(&core->error);
}
kfree(payload);
} else {
- pr_err("NULL payload\n");
+ CAM_ERR(CAM_CDM, "NULL payload");
}
}
@@ -552,12 +634,13 @@
core = (struct cam_cdm *)cdm_hw->core_info;
atomic_inc(&core->error);
cam_hw_cdm_dump_core_debug_registers(cdm_hw);
- pr_err("Page fault iova addr %pK\n", (void *)iova);
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "Page fault iova addr %pK\n",
+ (void *)iova);
cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
(void *)iova);
atomic_dec(&core->error);
} else {
- pr_err("Invalid token\n");
+ CAM_ERR(CAM_CDM, "Invalid token");
}
}
@@ -569,32 +652,38 @@
struct cam_cdm_work_payload *payload;
bool work_status;
- CDM_CDBG("Got irq\n");
+ CAM_DBG(CAM_CDM, "Got irq");
payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC);
if (payload) {
if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS,
&payload->irq_status)) {
- pr_err("Failed to read CDM HW IRQ status\n");
+ CAM_ERR(CAM_CDM, "Failed to read CDM HW IRQ status");
+ }
+ if (!payload->irq_status) {
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid irq received\n");
+ kfree(payload);
+ return IRQ_HANDLED;
}
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA,
&payload->irq_data)) {
- pr_err("Failed to read CDM HW IRQ data\n");
+ CAM_ERR(CAM_CDM,
+ "Failed to read CDM HW IRQ data");
}
}
- CDM_CDBG("Got payload=%d\n", payload->irq_status);
+ CAM_DBG(CAM_CDM, "Got payload=%d", payload->irq_status);
payload->hw = cdm_hw;
INIT_WORK((struct work_struct *)&payload->work,
cam_hw_cdm_work);
if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR,
payload->irq_status))
- pr_err("Failed to Write CDM HW IRQ Clear\n");
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ Clear");
if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01))
- pr_err("Failed to Write CDM HW IRQ cmd\n");
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ cmd");
work_status = queue_work(cdm_core->work_queue, &payload->work);
if (work_status == false) {
- pr_err("Failed to queue work for irq=%x\n",
+ CAM_ERR(CAM_CDM, "Failed to queue work for irq=%x",
payload->irq_status);
kfree(payload);
}
@@ -618,12 +707,11 @@
genirq_alloc_cmd.align = 0;
genirq_alloc_cmd.size = (8 * CAM_CDM_HWFIFO_SIZE);
genirq_alloc_cmd.smmu_hdl = cdm_core->iommu_hdl.non_secure;
- genirq_alloc_cmd.flags = 0;
- genirq_alloc_cmd.region = CAM_MEM_MGR_REGION_NON_SECURE_IO;
+ genirq_alloc_cmd.flags = CAM_MEM_FLAG_HW_READ_WRITE;
rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd,
&genirq_alloc_out);
if (rc) {
- pr_err("Failed to get genirq cmd space rc=%d\n", rc);
+ CAM_ERR(CAM_CDM, "Failed to get genirq cmd space rc=%d", rc);
goto end;
}
cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle;
@@ -649,7 +737,7 @@
genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle;
rc = cam_mem_mgr_release_mem(&genirq_release_cmd);
if (rc)
- pr_err("Failed to put genirq cmd space for hw\n");
+ CAM_ERR(CAM_CDM, "Failed to put genirq cmd space for hw");
return rc;
}
@@ -672,26 +760,28 @@
rc = cam_soc_util_enable_platform_resource(soc_info, true,
CAM_SVS_VOTE, true);
if (rc) {
- pr_err("Enable platform failed\n");
+ CAM_ERR(CAM_CDM, "Enable platform failed");
goto end;
}
- CDM_CDBG("Enable soc done\n");
+ CAM_DBG(CAM_CDM, "Enable soc done");
/* Before triggering the reset to HW, clear the reset complete */
+ atomic_set(&cdm_core->error, 0);
+ atomic_set(&cdm_core->bl_done, 0);
reinit_completion(&cdm_core->reset_complete);
reinit_completion(&cdm_core->bl_complete);
if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) {
- pr_err("Failed to Write CDM HW IRQ mask\n");
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask");
goto disable_return;
}
if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) {
- pr_err("Failed to Write CDM HW reset\n");
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset");
goto disable_return;
}
- CDM_CDBG("Waiting for CDM HW resetdone\n");
+ CAM_DBG(CAM_CDM, "Waiting for CDM HW resetdone");
time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
@@ -702,24 +792,24 @@
* as a workaround.
*/
if (time_left <= 0) {
- pr_err("CDM HW reset Wait failed time_left=%ld\n", time_left);
+ CAM_ERR(CAM_CDM, "CDM HW reset Wait failed time_left=%ld",
+ time_left);
time_left = 1;
}
if (time_left <= 0) {
- pr_err("CDM HW reset Wait failed rc=%d\n", rc);
+ CAM_ERR(CAM_CDM, "CDM HW reset Wait failed rc=%d", rc);
goto disable_return;
} else {
- CDM_CDBG("CDM Init success\n");
+ CAM_DBG(CAM_CDM, "CDM Init success");
cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003);
- cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CGC_CFG, 0x7);
rc = 0;
goto end;
}
disable_return:
- rc = -1;
+ rc = -EIO;
cam_soc_util_disable_platform_resource(soc_info, true, true);
end:
return rc;
@@ -740,9 +830,9 @@
cdm_core = cdm_hw->core_info;
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc) {
- pr_err("disable platform failed\n");
+ CAM_ERR(CAM_CDM, "disable platform failed");
} else {
- CDM_CDBG("CDM Deinit success\n");
+ CAM_DBG(CAM_CDM, "CDM Deinit success");
cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
}
@@ -757,6 +847,8 @@
struct cam_cdm *cdm_core = NULL;
struct cam_cdm_private_dt_data *soc_private = NULL;
struct cam_cpas_register_params cpas_parms;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
if (!cdm_hw_intf)
@@ -785,10 +877,10 @@
rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match);
if (rc) {
- pr_err("Failed to get dt properties\n");
+ CAM_ERR(CAM_CDM, "Failed to get dt properties");
goto release_mem;
}
- cdm_hw_intf->hw_idx = cdm_hw->soc_info.pdev->id;
+ cdm_hw_intf->hw_idx = cdm_hw->soc_info.index;
cdm_core = (struct cam_cdm *)cdm_hw->core_info;
soc_private = (struct cam_cdm_private_dt_data *)
cdm_hw->soc_info.soc_private;
@@ -798,10 +890,10 @@
cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
cdm_core->bl_tag = 0;
- atomic_set(&cdm_core->error, 0);
cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name);
if (cdm_core->id >= CAM_CDM_MAX) {
- pr_err("Failed to get CDM HW name for %s\n", cdm_core->name);
+ CAM_ERR(CAM_CDM, "Failed to get CDM HW name for %s",
+ cdm_core->name);
goto release_private_mem;
}
INIT_LIST_HEAD(&cdm_core->bl_request_list);
@@ -818,14 +910,14 @@
cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
mutex_lock(&cdm_hw->hw_mutex);
- CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+ CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
cdm_hw_intf->hw_idx);
platform_set_drvdata(pdev, cdm_hw_intf);
rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure);
if (rc < 0) {
- pr_err("cpas-cdm get iommu handle failed\n");
+ CAM_ERR(CAM_CDM, "cpas-cdm get iommu handle failed");
goto unlock_release_mem;
}
cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
@@ -833,7 +925,7 @@
rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
if (rc < 0) {
- pr_err("Attach iommu non secure handle failed\n");
+ CAM_ERR(CAM_CDM, "Attach iommu non secure handle failed");
goto destroy_non_secure_hdl;
}
cdm_core->iommu_hdl.secure = -1;
@@ -845,7 +937,7 @@
rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info,
cam_hw_cdm_irq, cdm_hw);
if (rc) {
- pr_err("Failed to request platform resource\n");
+ CAM_ERR(CAM_CDM, "Failed to request platform resource");
goto destroy_non_secure_hdl;
}
@@ -856,76 +948,98 @@
strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH);
rc = cam_cpas_register_client(&cpas_parms);
if (rc) {
- pr_err("Virtual CDM CPAS registration failed\n");
+ CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
goto release_platform_resource;
}
- CDM_CDBG("CPAS registration successful handle=%d\n",
+ CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
cpas_parms.client_handle);
cdm_core->cpas_handle = cpas_parms.client_handle;
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ rc = cam_cpas_start(cdm_core->cpas_handle, &ahb_vote, &axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_CDM, "CPAS start failed");
+ goto cpas_unregister;
+ }
+
rc = cam_hw_cdm_init(cdm_hw, NULL, 0);
if (rc) {
- pr_err("Failed to Init CDM HW\n");
- goto init_failed;
+ CAM_ERR(CAM_CDM, "Failed to Init CDM HW");
+ goto cpas_stop;
}
cdm_hw->open_count++;
if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
&cdm_core->hw_version)) {
- pr_err("Failed to read CDM HW Version\n");
+ CAM_ERR(CAM_CDM, "Failed to read CDM HW Version");
goto deinit;
}
- if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+ if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_TITAN_VERSION,
&cdm_core->hw_family_version)) {
- pr_err("Failed to read CDM family Version\n");
+ CAM_ERR(CAM_CDM, "Failed to read CDM family Version");
goto deinit;
}
- CDM_CDBG("CDM Hw version read success family =%x hw =%x\n",
+ CAM_DBG(CAM_CDM, "CDM Hw version read success family =%x hw =%x",
cdm_core->hw_family_version, cdm_core->hw_version);
cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL,
false);
if (!cdm_core->ops) {
- pr_err("Failed to util ops for hw\n");
+ CAM_ERR(CAM_CDM, "Failed to util ops for hw");
goto deinit;
}
if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version,
&cdm_core->version)) {
- pr_err("Failed to set cam he version for hw\n");
+ CAM_ERR(CAM_CDM, "Failed to set cam he version for hw");
goto deinit;
}
rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
if (rc) {
- pr_err("Failed to Deinit CDM HW\n");
- goto release_platform_resource;
+ CAM_ERR(CAM_CDM, "Failed to Deinit CDM HW");
+ cdm_hw->open_count--;
+ goto cpas_stop;
+ }
+
+ rc = cam_cpas_stop(cdm_core->cpas_handle);
+ if (rc) {
+ CAM_ERR(CAM_CDM, "CPAS stop failed");
+ cdm_hw->open_count--;
+ goto cpas_unregister;
}
rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
soc_private, CAM_HW_CDM, &cdm_core->index);
if (rc) {
- pr_err("HW CDM Interface registration failed\n");
- goto release_platform_resource;
+ CAM_ERR(CAM_CDM, "HW CDM Interface registration failed");
+ cdm_hw->open_count--;
+ goto cpas_unregister;
}
cdm_hw->open_count--;
mutex_unlock(&cdm_hw->hw_mutex);
- CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+ CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
return rc;
deinit:
if (cam_hw_cdm_deinit(cdm_hw, NULL, 0))
- pr_err("Deinit failed for hw\n");
+ CAM_ERR(CAM_CDM, "Deinit failed for hw");
cdm_hw->open_count--;
-init_failed:
+cpas_stop:
+ if (cam_cpas_stop(cdm_core->cpas_handle))
+ CAM_ERR(CAM_CDM, "CPAS stop failed");
+cpas_unregister:
if (cam_cpas_unregister_client(cdm_core->cpas_handle))
- pr_err("CPAS unregister failed\n");
+ CAM_ERR(CAM_CDM, "CPAS unregister failed");
release_platform_resource:
if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
- pr_err("Release platform resource failed\n");
+ CAM_ERR(CAM_CDM, "Release platform resource failed");
flush_workqueue(cdm_core->work_queue);
destroy_workqueue(cdm_core->work_queue);
@@ -933,7 +1047,7 @@
cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
NULL, cdm_hw);
if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
- pr_err("Release iommu secure hdl failed\n");
+ CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
unlock_release_mem:
mutex_unlock(&cdm_hw->hw_mutex);
release_private_mem:
@@ -955,26 +1069,28 @@
cdm_hw_intf = platform_get_drvdata(pdev);
if (!cdm_hw_intf) {
- pr_err("Failed to get dev private data\n");
+ CAM_ERR(CAM_CDM, "Failed to get dev private data");
return rc;
}
cdm_hw = cdm_hw_intf->hw_priv;
if (!cdm_hw) {
- pr_err("Failed to get hw private data for type=%d idx=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Failed to get hw private data for type=%d idx=%d",
cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
return rc;
}
cdm_core = cdm_hw->core_info;
if (!cdm_core) {
- pr_err("Failed to get hw core data for type=%d idx=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Failed to get hw core data for type=%d idx=%d",
cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
return rc;
}
if (cdm_hw->open_count != 0) {
- pr_err("Hw open count invalid type=%d idx=%d cnt=%d\n",
+ CAM_ERR(CAM_CDM, "Hw open count invalid type=%d idx=%d cnt=%d",
cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx,
cdm_hw->open_count);
return rc;
@@ -982,24 +1098,24 @@
rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
if (rc) {
- pr_err("Deinit failed for hw\n");
+ CAM_ERR(CAM_CDM, "Deinit failed for hw");
return rc;
}
rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
if (rc) {
- pr_err("CPAS unregister failed\n");
+ CAM_ERR(CAM_CDM, "CPAS unregister failed");
return rc;
}
if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
- pr_err("Release platform resource failed\n");
+ CAM_ERR(CAM_CDM, "Release platform resource failed");
flush_workqueue(cdm_core->work_queue);
destroy_workqueue(cdm_core->work_queue);
if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
- pr_err("Release iommu secure hdl failed\n");
+ CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
NULL, cdm_hw);
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
index b1b2117..fa98be2 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-INTF %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -41,10 +39,10 @@
mutex_lock(&cam_cdm_mgr_lock);
if (cdm_mgr.probe_done == false) {
- pr_err("CDM intf mgr not probed yet\n");
- rc = -1;
+ CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
+ rc = -EPERM;
} else {
- CDM_CDBG("CDM intf mgr get refcount=%d\n",
+ CAM_DBG(CAM_CDM, "CDM intf mgr get refcount=%d",
cdm_mgr.refcount);
cdm_mgr.refcount++;
}
@@ -56,14 +54,14 @@
{
mutex_lock(&cam_cdm_mgr_lock);
if (cdm_mgr.probe_done == false) {
- pr_err("CDM intf mgr not probed yet\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
} else {
- CDM_CDBG("CDM intf mgr put refcount=%d\n",
+ CAM_DBG(CAM_CDM, "CDM intf mgr put refcount=%d",
cdm_mgr.refcount);
if (cdm_mgr.refcount > 0) {
cdm_mgr.refcount--;
} else {
- pr_err("Refcount put when zero\n");
+ CAM_ERR(CAM_CDM, "Refcount put when zero");
WARN_ON(1);
}
}
@@ -73,7 +71,7 @@
static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles,
uint32_t hw_idx)
{
- int rc = -1;
+ int rc = -EPERM;
struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device;
if (hw->hw_ops.get_hw_caps) {
@@ -87,23 +85,23 @@
static int get_cdm_index_by_id(char *identifier,
uint32_t cell_index, uint32_t *hw_index)
{
- int rc = -1, i, j;
+ int rc = -EPERM, i, j;
char client_name[128];
- CDM_CDBG("Looking for HW id of =%s and index=%d\n",
+ CAM_DBG(CAM_CDM, "Looking for HW id of =%s and index=%d",
identifier, cell_index);
snprintf(client_name, sizeof(client_name), "%s", identifier);
- CDM_CDBG("Looking for HW id of %s count:%d\n", client_name,
+ CAM_DBG(CAM_CDM, "Looking for HW id of %s count:%d", client_name,
cdm_mgr.cdm_count);
mutex_lock(&cam_cdm_mgr_lock);
for (i = 0; i < cdm_mgr.cdm_count; i++) {
mutex_lock(&cdm_mgr.nodes[i].lock);
- CDM_CDBG("dt_num_supported_clients=%d\n",
+ CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d",
cdm_mgr.nodes[i].data->dt_num_supported_clients);
for (j = 0; j <
cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
- CDM_CDBG("client name:%s\n",
+ CAM_DBG(CAM_CDM, "client name:%s",
cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
if (!strcmp(
cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
@@ -125,16 +123,16 @@
int cam_cdm_get_iommu_handle(char *identifier,
struct cam_iommu_handle *cdm_handles)
{
- int i, j, rc = -1;
+ int i, j, rc = -EPERM;
if ((!identifier) || (!cdm_handles))
return -EINVAL;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
- CDM_CDBG("Looking for Iommu handle of %s\n", identifier);
+ CAM_DBG(CAM_CDM, "Looking for Iommu handle of %s", identifier);
for (i = 0; i < cdm_mgr.cdm_count; i++) {
mutex_lock(&cdm_mgr.nodes[i].lock);
@@ -164,7 +162,7 @@
int cam_cdm_acquire(struct cam_cdm_acquire_data *data)
{
- int rc = -1;
+ int rc = -EPERM;
struct cam_hw_intf *hw;
uint32_t hw_index = 0;
@@ -173,39 +171,41 @@
return -EINVAL;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
if (data->id > CAM_CDM_HW_ANY) {
- pr_err("only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported\n");
- rc = -1;
+ CAM_ERR(CAM_CDM,
+ "only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported");
+ rc = -EPERM;
goto end;
}
rc = get_cdm_index_by_id(data->identifier, data->cell_index,
&hw_index);
if ((rc < 0) && (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) {
- pr_err("Failed to identify associated hw id\n");
+ CAM_ERR(CAM_CDM, "Failed to identify associated hw id");
goto end;
} else {
- CDM_CDBG("hw_index:%d\n", hw_index);
+ CAM_DBG(CAM_CDM, "hw_index:%d", hw_index);
hw = cdm_mgr.nodes[hw_index].device;
if (hw && hw->hw_ops.process_cmd) {
rc = hw->hw_ops.process_cmd(hw->hw_priv,
CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
sizeof(struct cam_cdm_acquire_data));
if (rc < 0) {
- pr_err("CDM hw acquire failed\n");
+ CAM_ERR(CAM_CDM, "CDM hw acquire failed");
goto end;
}
} else {
- pr_err("idx %d doesn't have acquire ops\n", hw_index);
- rc = -1;
+ CAM_ERR(CAM_CDM, "idx %d doesn't have acquire ops",
+ hw_index);
+ rc = -EPERM;
}
}
end:
if (rc < 0) {
- pr_err("CDM acquire failed for id=%d name=%s, idx=%d\n",
+ CAM_ERR(CAM_CDM, "CDM acquire failed for id=%d name=%s, idx=%d",
data->id, data->identifier, data->cell_index);
put_cdm_mgr_refcount();
}
@@ -216,11 +216,11 @@
int cam_cdm_release(uint32_t handle)
{
uint32_t hw_index;
- int rc = -1;
+ int rc = -EPERM;
struct cam_hw_intf *hw;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
@@ -232,10 +232,11 @@
CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
sizeof(handle));
if (rc < 0)
- pr_err("hw release failed for handle=%x\n",
+ CAM_ERR(CAM_CDM,
+ "hw release failed for handle=%x",
handle);
} else
- pr_err("hw idx %d doesn't have release ops\n",
+ CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
hw_index);
}
put_cdm_mgr_refcount();
@@ -250,14 +251,15 @@
int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data)
{
uint32_t hw_index;
- int rc = -1;
+ int rc = -EINVAL;
struct cam_hw_intf *hw;
if (!data)
return rc;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+ rc = -EPERM;
return rc;
}
@@ -273,10 +275,11 @@
CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
if (rc < 0)
- pr_err("hw submit bl failed for handle=%x\n",
+ CAM_ERR(CAM_CDM,
+ "hw submit bl failed for handle=%x",
handle);
} else {
- pr_err("hw idx %d doesn't have submit ops\n",
+ CAM_ERR(CAM_CDM, "hw idx %d doesn't have submit ops",
hw_index);
}
}
@@ -289,11 +292,12 @@
int cam_cdm_stream_on(uint32_t handle)
{
uint32_t hw_index;
- int rc = -1;
+ int rc = -EINVAL;
struct cam_hw_intf *hw;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+ rc = -EPERM;
return rc;
}
@@ -304,10 +308,12 @@
rc = hw->hw_ops.start(hw->hw_priv, &handle,
sizeof(uint32_t));
if (rc < 0)
- pr_err("hw start failed handle=%x\n",
+ CAM_ERR(CAM_CDM,
+ "hw start failed handle=%x",
handle);
} else {
- pr_err("hw idx %d doesn't have start ops\n",
+ CAM_ERR(CAM_CDM,
+ "hw idx %d doesn't have start ops",
hw_index);
}
}
@@ -320,11 +326,12 @@
int cam_cdm_stream_off(uint32_t handle)
{
uint32_t hw_index;
- int rc = -1;
+ int rc = -EINVAL;
struct cam_hw_intf *hw;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+ rc = -EPERM;
return rc;
}
@@ -335,10 +342,10 @@
rc = hw->hw_ops.stop(hw->hw_priv, &handle,
sizeof(uint32_t));
if (rc < 0)
- pr_err("hw stop failed handle=%x\n",
+ CAM_ERR(CAM_CDM, "hw stop failed handle=%x",
handle);
} else {
- pr_err("hw idx %d doesn't have stop ops\n",
+ CAM_ERR(CAM_CDM, "hw idx %d doesn't have stop ops",
hw_index);
}
}
@@ -351,11 +358,12 @@
int cam_cdm_reset_hw(uint32_t handle)
{
uint32_t hw_index;
- int rc = -1;
+ int rc = -EINVAL;
struct cam_hw_intf *hw;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+ rc = -EPERM;
return rc;
}
@@ -367,10 +375,11 @@
CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
sizeof(handle));
if (rc < 0)
- pr_err("CDM hw release failed for handle=%x\n",
+ CAM_ERR(CAM_CDM,
+ "CDM hw release failed for handle=%x",
handle);
} else {
- pr_err("hw idx %d doesn't have release ops\n",
+ CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
hw_index);
}
}
@@ -390,7 +399,7 @@
return rc;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
@@ -413,7 +422,7 @@
cdm_mgr.cdm_count++;
rc = 0;
} else {
- pr_err("CDM registration failed type=%d count=%d\n",
+ CAM_ERR(CAM_CDM, "CDM registration failed type=%d count=%d",
type, cdm_mgr.cdm_count);
}
mutex_unlock(&cam_cdm_mgr_lock);
@@ -426,13 +435,14 @@
struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
uint32_t index)
{
- int rc = -1;
+ int rc = -EINVAL;
if ((!hw) || (!data))
- return -EINVAL;
+ return rc;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+ rc = -EPERM;
return rc;
}
@@ -454,7 +464,7 @@
cdm_mgr.cdm_count--;
rc = 0;
} else {
- pr_err("CDM Deregistration failed type=%d index=%d\n",
+ CAM_ERR(CAM_CDM, "CDM Deregistration failed type=%d index=%d",
type, index);
}
mutex_unlock(&cam_cdm_mgr_lock);
@@ -469,7 +479,7 @@
rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
if (rc) {
- pr_err("Failed to get dt properties\n");
+ CAM_ERR(CAM_CDM, "Failed to get dt properties");
return rc;
}
mutex_lock(&cam_cdm_mgr_lock);
@@ -489,7 +499,8 @@
for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
(cdm_mgr.nodes[i].refcount != 0))
- pr_err("Valid node present in index=%d\n", i);
+ CAM_ERR(CAM_CDM,
+ "Valid node present in index=%d", i);
mutex_destroy(&cdm_mgr.nodes[i].lock);
cdm_mgr.nodes[i].device = NULL;
cdm_mgr.nodes[i].data = NULL;
@@ -506,19 +517,19 @@
int i, rc = -EBUSY;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
if (cam_virtual_cdm_remove(pdev)) {
- pr_err("Virtual CDM remove failed\n");
+ CAM_ERR(CAM_CDM, "Virtual CDM remove failed");
goto end;
}
put_cdm_mgr_refcount();
mutex_lock(&cam_cdm_mgr_lock);
if (cdm_mgr.refcount != 0) {
- pr_err("cdm manger refcount not zero %d\n",
+ CAM_ERR(CAM_CDM, "cdm manger refcount not zero %d",
cdm_mgr.refcount);
goto end;
}
@@ -526,7 +537,7 @@
for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
(cdm_mgr.nodes[i].refcount != 0)) {
- pr_err("Valid node present in index=%d\n", i);
+ CAM_ERR(CAM_CDM, "Valid node present in index=%d", i);
mutex_unlock(&cam_cdm_mgr_lock);
goto end;
}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
index 0f5458c..f8b0d3d 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-SOC %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -40,27 +38,29 @@
resource_size_t mem_len =
cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
- CDM_CDBG("E: b=%pK blen=%d reg=%x off=%x\n", (void *)base,
+ CAM_DBG(CAM_CDM, "E: b=%pK blen=%d reg=%x off=%x", (void __iomem *)base,
(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
reg)));
- CDM_CDBG("E: b=%pK reg=%x off=%x\n", (void *)base,
+ CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x", (void __iomem *)base,
reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)));
if ((reg > cdm->offset_tbl->offset_max_size) ||
(reg > cdm->offset_tbl->last_offset)) {
- pr_err("CDM accessing invalid reg=%d\n", reg);
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid reg=%d\n", reg);
goto permission_error;
} else {
reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG(
cdm->offset_tbl, reg)));
if (reg_addr > (base + mem_len)) {
- pr_err("accessing invalid mapped region %d\n", reg);
+ CAM_ERR_RATE_LIMIT(CAM_CDM,
+ "Invalid mapped region %d", reg);
goto permission_error;
}
*value = cam_io_r_mb(reg_addr);
- CDM_CDBG("X b=%pK reg=%x off=%x val=%x\n",
- (void *)base, reg, (CAM_CDM_OFFSET_FROM_REG(
- cdm->offset_tbl, reg)), *value);
+ CAM_DBG(CAM_CDM, "X b=%pK reg=%x off=%x val=%x",
+ (void __iomem *)base, reg,
+ (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)),
+ *value);
return false;
}
permission_error:
@@ -79,18 +79,20 @@
resource_size_t mem_len =
cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
- CDM_CDBG("E: b=%pK reg=%x off=%x val=%x\n", (void *)base,
+ CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x val=%x", (void __iomem *)base,
reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
if ((reg > cdm->offset_tbl->offset_max_size) ||
(reg > cdm->offset_tbl->last_offset)) {
- pr_err("CDM accessing invalid reg=%d\n", reg);
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "CDM accessing invalid reg=%d\n",
+ reg);
goto permission_error;
} else {
reg_addr = (base + CAM_CDM_OFFSET_FROM_REG(
cdm->offset_tbl, reg));
if (reg_addr > (base + mem_len)) {
- pr_err("Accessing invalid region %d:%d\n",
+ CAM_ERR_RATE_LIMIT(CAM_CDM,
+ "Accessing invalid region %d:%d\n",
reg, (CAM_CDM_OFFSET_FROM_REG(
cdm->offset_tbl, reg)));
goto permission_error;
@@ -106,22 +108,22 @@
int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
struct cam_cdm_private_dt_data *ptr)
{
- int i, rc = -1;
+ int i, rc = -EINVAL;
ptr->dt_num_supported_clients = of_property_count_strings(
pdev->dev.of_node,
"cdm-client-names");
- CDM_CDBG("Num supported cdm_client = %d\n",
+ CAM_DBG(CAM_CDM, "Num supported cdm_client = %d",
ptr->dt_num_supported_clients);
if (ptr->dt_num_supported_clients >
CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
- pr_err("Invalid count of client names count=%d\n",
+ CAM_ERR(CAM_CDM, "Invalid count of client names count=%d",
ptr->dt_num_supported_clients);
rc = -EINVAL;
return rc;
}
if (ptr->dt_num_supported_clients < 0) {
- CDM_CDBG("No cdm client names found\n");
+ CAM_DBG(CAM_CDM, "No cdm client names found");
ptr->dt_num_supported_clients = 0;
ptr->dt_cdm_shared = false;
} else {
@@ -130,10 +132,10 @@
for (i = 0; i < ptr->dt_num_supported_clients; i++) {
rc = of_property_read_string_index(pdev->dev.of_node,
"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
- CDM_CDBG("cdm-client-names[%d] = %s\n", i,
+ CAM_DBG(CAM_CDM, "cdm-client-names[%d] = %s", i,
ptr->dt_cdm_client_name[i]);
if (rc < 0) {
- pr_err("Reading cdm-client-names failed\n");
+ CAM_ERR(CAM_CDM, "Reading cdm-client-names failed");
break;
}
}
@@ -156,7 +158,7 @@
rc = cam_soc_util_get_dt_properties(soc_ptr);
if (rc != 0) {
- pr_err("Failed to retrieve the CDM dt properties\n");
+ CAM_ERR(CAM_CDM, "Failed to retrieve the CDM dt properties");
} else {
soc_ptr->soc_private = kzalloc(
sizeof(struct cam_cdm_private_dt_data),
@@ -167,15 +169,15 @@
rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
soc_ptr->soc_private);
if (rc != 0) {
- pr_err("Failed to load CDM dt private data\n");
+ CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
goto error;
}
id = of_match_node(table, soc_ptr->pdev->dev.of_node);
if ((!id) || !(id->data)) {
- pr_err("Failed to retrieve the CDM id table\n");
+ CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table");
goto error;
}
- CDM_CDBG("CDM Hw Id compatible =%s\n", id->compatible);
+ CAM_DBG(CAM_CDM, "CDM Hw Id compatible =%s", id->compatible);
((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
(struct cam_cdm_reg_offset_table *)id->data;
strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
@@ -186,7 +188,7 @@
return rc;
error:
- rc = -1;
+ rc = -EINVAL;
kfree(soc_ptr->soc_private);
soc_ptr->soc_private = NULL;
return rc;
@@ -199,7 +201,8 @@
rc = of_property_read_u32(pdev->dev.of_node,
"num-hw-cdm", &mgr->dt_supported_hw_cdm);
- CDM_CDBG("Number of HW cdm supported =%d\n", mgr->dt_supported_hw_cdm);
+ CAM_DBG(CAM_CDM, "Number of HW cdm supported =%d",
+ mgr->dt_supported_hw_cdm);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
index 3d258b4..a63031b 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-UTIL %s:%d " fmt, __func__, __LINE__
-
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -323,11 +321,11 @@
struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
void __iomem **device_base)
{
- int ret = -1, i;
+ int ret = -EINVAL, i;
for (i = 0; i < base_array_size; i++) {
if (base_table[i])
- CDM_CDBG("In loop %d ioremap for %x addr=%x\n",
+ CAM_DBG(CAM_CDM, "In loop %d ioremap for %x addr=%x",
i, (base_table[i])->mem_cam_base, hw_base);
if ((base_table[i]) &&
((base_table[i])->mem_cam_base == hw_base)) {
@@ -349,7 +347,7 @@
if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
(!base_addr)) {
- pr_err(" invalid base addr and data length %d %pK\n",
+ CAM_ERR(CAM_CDM, "invalid base addr and data length %d %pK",
cmd_buf_size, base_addr);
return -EINVAL;
}
@@ -359,7 +357,7 @@
(((reg_cont->count * sizeof(uint32_t)) +
cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
cmd_buf_size)) {
- pr_err(" buffer size %d is not sufficient for count%d\n",
+ CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count%d",
cmd_buf_size, reg_cont->count);
return -EINVAL;
}
@@ -381,7 +379,7 @@
uint32_t *data;
if (!base_addr) {
- pr_err("invalid base address\n");
+ CAM_ERR(CAM_CDM, "invalid base address");
return -EINVAL;
}
@@ -390,15 +388,16 @@
(((reg_random->count * (sizeof(uint32_t) * 2)) +
cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
cmd_buf_size)) {
- pr_err("invalid reg_count %d cmd_buf_size %d\n",
+ CAM_ERR(CAM_CDM, "invalid reg_count %d cmd_buf_size %d",
reg_random->count, cmd_buf_size);
return -EINVAL;
}
data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
for (i = 0; i < reg_random->count; i++) {
- CDM_DUMP_CDBG("reg random: offset 0x%llx, value 0x%x\n",
- ((uint64_t) base_addr + data[0]), data[1]);
+ CAM_DBG(CAM_CDM, "reg random: offset %pK, value 0x%x",
+ ((void __iomem *)(base_addr + data[0])),
+ data[1]);
cam_io_w(data[1], base_addr + data[0]);
data += 2;
}
@@ -420,7 +419,8 @@
swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;
if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
- pr_err("invalid CDM_SWD_DMI length %d\n", swd_dmi->length + 1);
+ CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
+ swd_dmi->length + 1);
return -EINVAL;
}
data = cmd_buf + cdm_required_size_dmi();
@@ -457,7 +457,7 @@
total_cmd_buf_size = cmd_buf_size;
while (cmd_buf_size > 0) {
- CDM_CDBG("cmd data=%x\n", *cmd_buf);
+ CAM_DBG(CAM_CDM, "cmd data=%x", *cmd_buf);
cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
switch (cdm_cmd_type) {
case CAM_CDM_CMD_REG_CONT: {
@@ -488,7 +488,8 @@
case CAM_CDM_CMD_SWD_DMI_32:
case CAM_CDM_CMD_SWD_DMI_64: {
if (*current_device_base == 0) {
- pr_err("Got SWI DMI cmd =%d for invalid hw\n",
+ CAM_ERR(CAM_CDM,
+ "Got SWI DMI cmd =%d for invalid hw",
cdm_cmd_type);
ret = -EINVAL;
break;
@@ -513,11 +514,12 @@
change_base_cmd->base, base_array_size,
base_table, current_device_base);
if (ret != 0) {
- pr_err("Get ioremap change base failed %x\n",
+ CAM_ERR(CAM_CDM,
+ "Get ioremap change base failed %x",
change_base_cmd->base);
break;
}
- CDM_CDBG("Got ioremap for %x addr=%pK\n",
+ CAM_DBG(CAM_CDM, "Got ioremap for %x addr=%pK",
change_base_cmd->base,
current_device_base);
cmd_buf_size -= (4 *
@@ -526,7 +528,7 @@
}
break;
default:
- pr_err(" unsupported cdm_cmd_type type 0%x\n",
+ CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type type 0%x",
cdm_cmd_type);
ret = -EINVAL;
break;
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
index e34bfc2..b230d4e 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-VIRTUAL %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -46,7 +44,7 @@
if (payload->irq_status & 0x2) {
struct cam_cdm_bl_cb_request_entry *node;
- CDM_CDBG("CDM HW Gen/inline IRQ with data=%x\n",
+ CAM_DBG(CAM_CDM, "CDM HW Gen/inline IRQ with data=%x",
payload->irq_data);
mutex_lock(&cdm_hw->hw_mutex);
node = cam_cdm_find_request_by_bl_tag(
@@ -60,18 +58,18 @@
(void *)node);
} else if (node->request_type ==
CAM_HW_CDM_BL_CB_INTERNAL) {
- pr_err("Invalid node=%pK %d\n", node,
- node->request_type);
+ CAM_ERR(CAM_CDM, "Invalid node=%pK %d",
+ node, node->request_type);
}
list_del_init(&node->entry);
kfree(node);
} else {
- pr_err("Invalid node for inline irq\n");
+ CAM_ERR(CAM_CDM, "Invalid node for inline irq");
}
mutex_unlock(&cdm_hw->hw_mutex);
}
if (payload->irq_status & 0x1) {
- CDM_CDBG("CDM HW reset done IRQ\n");
+ CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
complete(&core->reset_complete);
}
kfree(payload);
@@ -83,7 +81,7 @@
struct cam_cdm_hw_intf_cmd_submit_bl *req,
struct cam_cdm_client *client)
{
- int i, rc = -1;
+ int i, rc = -EINVAL;
struct cam_cdm_bl_request *cdm_cmd = req->data;
struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
@@ -94,10 +92,11 @@
if ((!cdm_cmd->cmd[i].len) &&
(cdm_cmd->cmd[i].len > 0x100000)) {
- pr_err("len(%d) is invalid count=%d total cnt=%d\n",
+ CAM_ERR(CAM_CDM,
+ "len(%d) is invalid count=%d total cnt=%d",
cdm_cmd->cmd[i].len, i,
req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EINVAL;
break;
}
if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
@@ -111,15 +110,17 @@
(uint64_t)cdm_cmd->cmd[i].bl_addr.kernel_iova;
len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
} else {
- pr_err("Only mem hdl/Kernel va type is supported %d\n",
+ CAM_ERR(CAM_CDM,
+ "Only mem hdl/Kernel va type is supported %d",
req->data->type);
- rc = -1;
+ rc = -EINVAL;
break;
}
if ((!rc) && (vaddr_ptr) && (len) &&
(len >= cdm_cmd->cmd[i].offset)) {
- CDM_CDBG("hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu\n",
+ CAM_DBG(CAM_CDM,
+ "hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu",
cdm_cmd->cmd[i].bl_addr.mem_handle,
(void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
cdm_cmd->cmd[i].len, len);
@@ -130,23 +131,26 @@
cdm_cmd->cmd[i].len, client->data.base_array,
client->data.base_array_cnt, core->bl_tag);
if (rc) {
- pr_err("write failed for cnt=%d:%d\n",
+ CAM_ERR(CAM_CDM, "write failed for cnt=%d:%d",
i, req->data->cmd_arrary_count);
break;
}
} else {
- pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+ CAM_ERR(CAM_CDM,
+ "Sanity check failed for hdl=%x len=%zu:%d",
cdm_cmd->cmd[i].bl_addr.mem_handle, len,
cdm_cmd->cmd[i].offset);
- pr_err("Sanity check failed for cmd_count=%d cnt=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Sanity check failed for cmd_count=%d cnt=%d",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EINVAL;
break;
}
if (!rc) {
struct cam_cdm_work_payload *payload;
- CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+ CAM_DBG(CAM_CDM,
+ "write BL success for cnt=%d with tag=%d",
i, core->bl_tag);
if ((true == req->data->flag) &&
(i == req->data->cmd_arrary_count)) {
@@ -184,7 +188,8 @@
}
}
core->bl_tag++;
- CDM_CDBG("Now commit the BL nothing for virtual\n");
+ CAM_DBG(CAM_CDM,
+ "Now commit the BL nothing for virtual");
if (!rc && (core->bl_tag == 63))
core->bl_tag = 0;
}
@@ -229,9 +234,8 @@
}
rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
- if (rc != 0) {
- pr_err("Failed to load CDM dt private data\n");
- rc = -1;
+ if (rc) {
+ CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
kfree(cdm_hw->soc_info.soc_private);
cdm_hw->soc_info.soc_private = NULL;
goto soc_load_failed;
@@ -258,7 +262,7 @@
cdm_hw_intf->hw_ops.write = NULL;
cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
- CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+ CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
cdm_hw_intf->hw_idx);
platform_set_drvdata(pdev, cdm_hw_intf);
@@ -286,22 +290,23 @@
CAM_HW_IDENTIFIER_LENGTH);
rc = cam_cpas_register_client(&cpas_parms);
if (rc) {
- pr_err("Virtual CDM CPAS registration failed\n");
+ CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
goto cpas_registration_failed;
}
- CDM_CDBG("CPAS registration successful handle=%d\n",
+ CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
cpas_parms.client_handle);
cdm_core->cpas_handle = cpas_parms.client_handle;
- CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+ CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
if (rc) {
- pr_err("Virtual CDM Interface registration failed\n");
+ CAM_ERR(CAM_CDM, "Virtual CDM Interface registration failed");
goto intf_registration_failed;
}
- CDM_CDBG("CDM%d registered to intf successful\n", cdm_hw_intf->hw_idx);
+ CAM_DBG(CAM_CDM, "CDM%d registered to intf successful",
+ cdm_hw_intf->hw_idx);
mutex_unlock(&cdm_hw->hw_mutex);
return 0;
@@ -329,27 +334,29 @@
cdm_hw_intf = platform_get_drvdata(pdev);
if (!cdm_hw_intf) {
- pr_err("Failed to get dev private data\n");
+ CAM_ERR(CAM_CDM, "Failed to get dev private data");
return rc;
}
cdm_hw = cdm_hw_intf->hw_priv;
if (!cdm_hw) {
- pr_err("Failed to get virtual private data for type=%d idx=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Failed to get virtual private data for type=%d idx=%d",
cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
return rc;
}
cdm_core = cdm_hw->core_info;
if (!cdm_core) {
- pr_err("Failed to get virtual core data for type=%d idx=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Failed to get virtual core data for type=%d idx=%d",
cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
return rc;
}
rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
if (rc) {
- pr_err("CPAS unregister failed\n");
+ CAM_ERR(CAM_CDM, "CPAS unregister failed");
return rc;
}
@@ -357,7 +364,8 @@
cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
cdm_core->index);
if (rc) {
- pr_err("Virtual CDM Interface de-registration failed\n");
+ CAM_ERR(CAM_CDM,
+ "Virtual CDM Interface de-registration failed");
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_core/Makefile b/drivers/media/platform/msm/camera/cam_core/Makefile
index 60f94d1..6fb1200 100644
--- a/drivers/media/platform/msm/camera/cam_core/Makefile
+++ b/drivers/media/platform/msm/camera/cam_core/Makefile
@@ -1,4 +1,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_context_utils.o cam_node.o cam_subdev.o
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index 7f0fb7f..d87c984 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -13,6 +13,7 @@
#ifndef _CAM_CONTEXT_H_
#define _CAM_CONTEXT_H_
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include "cam_req_mgr_interface.h"
#include "cam_hw_mgr_intf.h"
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index edd2e11..a430466 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CTXT-UTILS %s:%d " fmt, __func__, __LINE__
-
#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
@@ -19,11 +17,13 @@
#include <media/cam_sync.h>
#include <media/cam_defs.h>
-#include "cam_sync_api.h"
-#include "cam_req_mgr_util.h"
+#include "cam_context.h"
#include "cam_mem_mgr.h"
#include "cam_node.h"
-#include "cam_context.h"
+#include "cam_req_mgr_util.h"
+#include "cam_sync_api.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
int cam_context_buf_done_from_hw(struct cam_context *ctx,
void *done_event_data, uint32_t bubble_state)
@@ -35,21 +35,23 @@
(struct cam_hw_done_event_data *)done_event_data;
if (list_empty(&ctx->active_req_list)) {
- pr_err("Buf done with no active request\n");
+ CAM_ERR(CAM_CTXT, "no active request");
return -EIO;
}
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
+ trace_cam_buf_done("UTILS", ctx, req);
+
if (done->request_id != req->request_id) {
- pr_err("mismatch: done request [%lld], active request [%lld]\n",
+ CAM_ERR(CAM_CTXT, "mismatch: done req[%lld], active req[%lld]",
done->request_id, req->request_id);
return -EIO;
}
if (!req->num_out_map_entries) {
- pr_err("active request with no output fence objects to signal\n");
+ CAM_ERR(CAM_CTXT, "no output fence to signal");
return -EIO;
}
@@ -77,13 +79,13 @@
struct cam_hw_config_args cfg;
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
if (list_empty(&ctx->pending_req_list)) {
- pr_err("No available request for Apply id %lld\n",
+ CAM_ERR(CAM_CTXT, "No available request for Apply id %lld",
apply->request_id);
rc = -EFAULT;
goto end;
@@ -100,7 +102,7 @@
cfg.num_hw_update_entries = req->num_hw_update_entries;
cfg.out_map_entries = req->out_map_entries;
cfg.num_out_map_entries = req->num_out_map_entries;
- cfg.priv = (void *)&req->request_id;
+ cfg.priv = req->req_priv;
list_add_tail(&req->list, &ctx->active_req_list);
rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
@@ -124,7 +126,7 @@
spin_unlock(&ctx->lock);
if (!req) {
- pr_err("No more request obj free\n");
+ CAM_ERR(CAM_CTXT, "No more request obj free");
return;
}
@@ -143,7 +145,7 @@
struct cam_ctx_request *req;
if ((!ctx->hw_mgr_intf) || (!ctx->hw_mgr_intf->hw_release)) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
return -EINVAL;
}
@@ -165,7 +167,7 @@
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
- pr_debug("signal fence in active list. fence num %d\n",
+ CAM_DBG(CAM_CTXT, "signal fence in active list, num %d",
req->num_out_map_entries);
for (i = 0; i < req->num_out_map_entries; i++) {
if (req->out_map_entries[i].sync_id > 0)
@@ -184,7 +186,7 @@
cam_sync_deregister_callback(
cam_context_sync_callback, ctx,
req->in_map_entries[i].sync_id);
- pr_debug("signal out fence in pending list. fence num %d\n",
+ CAM_DBG(CAM_CTXT, "signal fence in pending list, num %d",
req->num_out_map_entries);
for (i = 0; i < req->num_out_map_entries; i++)
if (req->out_map_entries[i].sync_id > 0)
@@ -208,7 +210,7 @@
int32_t i = 0;
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
@@ -222,7 +224,7 @@
spin_unlock(&ctx->lock);
if (!req) {
- pr_err("No more request obj free\n");
+ CAM_ERR(CAM_CTXT, "No more request obj free");
rc = -ENOMEM;
goto end;
}
@@ -236,20 +238,12 @@
(uint64_t *) &packet_addr,
&len);
if (rc != 0) {
- pr_err("Can not get packet address\n");
+ CAM_ERR(CAM_CTXT, "Can not get packet address");
rc = -EINVAL;
goto free_req;
}
packet = (struct cam_packet *) (packet_addr + cmd->offset);
- pr_debug("pack_handle %llx\n", cmd->packet_handle);
- pr_debug("packet address is 0x%llx\n", packet_addr);
- pr_debug("packet with length %zu, offset 0x%llx\n",
- len, cmd->offset);
- pr_debug("Packet request id 0x%llx\n",
- packet->header.request_id);
- pr_debug("Packet size 0x%x\n", packet->header.size);
- pr_debug("packet op %d\n", packet->header.op_code);
/* preprocess the configuration */
memset(&cfg, 0, sizeof(cfg));
@@ -266,7 +260,7 @@
rc = ctx->hw_mgr_intf->hw_prepare_update(
ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
if (rc != 0) {
- pr_err("Prepare config packet failed in HW layer\n");
+ CAM_ERR(CAM_CTXT, "Prepare config packet failed in HW layer");
rc = -EFAULT;
goto free_req;
}
@@ -286,7 +280,7 @@
cam_context_sync_callback,
(void *)ctx,
req->in_map_entries[i].sync_id);
- pr_debug("register in fence callback: %d ret = %d\n",
+ CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d",
req->in_map_entries[i].sync_id, rc);
}
goto end;
@@ -299,7 +293,6 @@
list_add_tail(&req->list, &ctx->free_req_list);
spin_unlock(&ctx->lock);
end:
- pr_debug("Config dev successful\n");
return rc;
}
@@ -312,25 +305,24 @@
struct cam_hw_release_args release;
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
- pr_debug("acquire cmd: session_hdl 0x%x, num_resources %d\n",
- cmd->session_handle, cmd->num_resources);
- pr_debug(" handle type %d, res %lld\n", cmd->handle_type,
+ CAM_DBG(CAM_CTXT, "ses hdl: %x, num_res: %d, type: %d, res: %lld",
+ cmd->session_handle, cmd->num_resources, cmd->handle_type,
cmd->resource_hdl);
if (cmd->num_resources > CAM_CTX_RES_MAX) {
- pr_err("Too much resources in the acquire\n");
+ CAM_ERR(CAM_CTXT, "resource limit exceeded");
rc = -ENOMEM;
goto end;
}
/* for now we only support user pointer */
if (cmd->handle_type != 1) {
- pr_err("Only user pointer is supported");
+ CAM_ERR(CAM_CTXT, "Only user pointer is supported");
rc = -EINVAL;
goto end;
}
@@ -341,15 +333,11 @@
param.num_acq = cmd->num_resources;
param.acquire_info = cmd->resource_hdl;
- pr_debug("ctx %pK: acquire hw resource: hw_intf: 0x%pK, priv 0x%pK",
- ctx, ctx->hw_mgr_intf, ctx->hw_mgr_intf->hw_mgr_priv);
- pr_debug("acquire_hw_func 0x%pK\n", ctx->hw_mgr_intf->hw_acquire);
-
/* call HW manager to reserve the resource */
rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
¶m);
if (rc != 0) {
- pr_err("Acquire device failed\n");
+ CAM_ERR(CAM_CTXT, "Acquire device failed");
goto end;
}
@@ -362,11 +350,10 @@
req_hdl_param.media_entity_flag = 0;
req_hdl_param.priv = ctx;
- pr_debug("get device handle from bridge\n");
ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
if (ctx->dev_hdl <= 0) {
rc = -EFAULT;
- pr_err("Can not create device handle\n");
+ CAM_ERR(CAM_CTXT, "Can not create device handle");
goto free_hw;
}
cmd->dev_handle = ctx->dev_hdl;
@@ -374,7 +361,6 @@
/* store session information */
ctx->session_hdl = cmd->session_handle;
- pr_err("dev_handle = %x\n", cmd->dev_handle);
return rc;
free_hw:
@@ -392,14 +378,14 @@
struct cam_hw_start_args arg;
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
if ((cmd->session_handle != ctx->session_hdl) ||
(cmd->dev_handle != ctx->dev_hdl)) {
- pr_err("Invalid session hdl[%d], dev_handle[%d]\n",
+ CAM_ERR(CAM_CTXT, "Invalid session hdl[%d], dev_handle[%d]",
cmd->session_handle, cmd->dev_handle);
rc = -EPERM;
goto end;
@@ -410,12 +396,11 @@
&arg);
if (rc) {
/* HW failure. user need to clean up the resource */
- pr_err("Start HW failed\n");
+ CAM_ERR(CAM_CTXT, "Start HW failed");
goto end;
}
}
- pr_debug("start device success\n");
end:
return rc;
}
@@ -428,7 +413,7 @@
struct cam_ctx_request *req;
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
@@ -446,7 +431,7 @@
req = list_first_entry(&ctx->pending_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
- pr_debug("signal fence in pending list. fence num %d\n",
+ CAM_DBG(CAM_CTXT, "signal fence in pending list. fence num %d",
req->num_out_map_entries);
for (i = 0; i < req->num_out_map_entries; i++)
if (req->out_map_entries[i].sync_id != -1)
@@ -459,7 +444,7 @@
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
- pr_debug("signal fence in active list. fence num %d\n",
+ CAM_DBG(CAM_CTXT, "signal fence in active list. fence num %d",
req->num_out_map_entries);
for (i = 0; i < req->num_out_map_entries; i++)
if (req->out_map_entries[i].sync_id != -1)
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index ab4c25d..fa26ea0 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -15,6 +15,7 @@
#include <linux/uaccess.h>
#include "cam_node.h"
+#include "cam_trace.h"
static void __cam_node_handle_shutdown(struct cam_node *node)
{
@@ -255,6 +256,8 @@
return -EINVAL;
}
+ trace_cam_apply_req("Node", apply);
+
return cam_context_handle_crm_apply_req(ctx, apply);
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 813f392..82035e9 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -21,22 +21,6 @@
#include "cam_cpas_hw_intf.h"
#include "cam_cpas_soc.h"
-int cam_cpas_util_get_string_index(const char **strings,
- uint32_t num_strings, char *matching_string, uint32_t *index)
-{
- int i;
-
- for (i = 0; i < num_strings; i++) {
- if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
- CPAS_CDBG("matched %s : %d\n", matching_string, i);
- *index = i;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
{
@@ -62,7 +46,7 @@
value = reg_info->value;
}
- CPAS_CDBG("Base[%d] Offset[0x%8x] Value[0x%8x]\n",
+ CAM_DBG(CAM_CPAS, "Base[%d] Offset[0x%8x] Value[0x%8x]",
reg_base, reg_info->offset, value);
cam_io_w_mb(value, soc_info->reg_map[reg_base_index].mem_base +
@@ -75,13 +59,13 @@
struct cam_cpas_bus_client *bus_client, unsigned int level)
{
if (!bus_client->valid || (bus_client->dyn_vote == true)) {
- pr_err("Invalid params %d %d\n", bus_client->valid,
+ CAM_ERR(CAM_CPAS, "Invalid params %d %d", bus_client->valid,
bus_client->dyn_vote);
return -EINVAL;
}
if (level >= bus_client->num_usecases) {
- pr_err("Invalid vote level=%d, usecases=%d\n", level,
+ CAM_ERR(CAM_CPAS, "Invalid vote level=%d, usecases=%d", level,
bus_client->num_usecases);
return -EINVAL;
}
@@ -89,7 +73,8 @@
if (level == bus_client->curr_vote_level)
return 0;
- CPAS_CDBG("Bus client[%d] index[%d]\n", bus_client->client_id, level);
+ CAM_DBG(CAM_CPAS, "Bus client[%d] index[%d]", bus_client->client_id,
+ level);
msm_bus_scale_client_update_request(bus_client->client_id, level);
bus_client->curr_vote_level = level;
@@ -104,14 +89,14 @@
int idx = 0;
if (!bus_client->valid) {
- pr_err("bus client not valid\n");
+ CAM_ERR(CAM_CPAS, "bus client not valid");
return -EINVAL;
}
if ((bus_client->num_usecases != 2) ||
(bus_client->num_paths != 1) ||
(bus_client->dyn_vote != true)) {
- pr_err("dynamic update not allowed %d %d %d\n",
+ CAM_ERR(CAM_CPAS, "dynamic update not allowed %d %d %d",
bus_client->num_usecases, bus_client->num_paths,
bus_client->dyn_vote);
return -EINVAL;
@@ -120,7 +105,7 @@
mutex_lock(&bus_client->lock);
if (bus_client->curr_vote_level > 1) {
- pr_err("curr_vote_level %d cannot be greater than 1\n",
+ CAM_ERR(CAM_CPAS, "curr_vote_level %d cannot be greater than 1",
bus_client->curr_vote_level);
mutex_unlock(&bus_client->lock);
return -EINVAL;
@@ -136,7 +121,7 @@
path->vectors[0].ab = ab;
path->vectors[0].ib = ib;
- CPAS_CDBG("Bus client[%d] :ab[%llu] ib[%llu], index[%d]\n",
+ CAM_DBG(CAM_CPAS, "Bus client[%d] :ab[%llu] ib[%llu], index[%d]",
bus_client->client_id, ab, ib, idx);
msm_bus_scale_client_update_request(bus_client->client_id, idx);
@@ -154,20 +139,20 @@
pdata = msm_bus_pdata_from_node(soc_info->pdev,
dev_node);
if (!pdata) {
- pr_err("failed get_pdata\n");
+ CAM_ERR(CAM_CPAS, "failed get_pdata");
return -EINVAL;
}
if ((pdata->num_usecases == 0) ||
(pdata->usecase[0].num_paths == 0)) {
- pr_err("usecase=%d\n", pdata->num_usecases);
+ CAM_ERR(CAM_CPAS, "usecase=%d", pdata->num_usecases);
rc = -EINVAL;
goto error;
}
client_id = msm_bus_scale_register_client(pdata);
if (!client_id) {
- pr_err("failed in register ahb bus client\n");
+ CAM_ERR(CAM_CPAS, "failed in register ahb bus client");
rc = -EINVAL;
goto error;
}
@@ -176,7 +161,8 @@
"qcom,msm-bus-vector-dyn-vote");
if (bus_client->dyn_vote && (pdata->num_usecases != 2)) {
- pr_err("Excess or less vectors %d\n", pdata->num_usecases);
+ CAM_ERR(CAM_CPAS, "Excess or less vectors %d",
+ pdata->num_usecases);
rc = -EINVAL;
goto fail_unregister_client;
}
@@ -193,7 +179,7 @@
bus_client->valid = true;
mutex_init(&bus_client->lock);
- CPAS_CDBG("Bus Client : src=%d, dst=%d, bus_client=%d\n",
+ CAM_DBG(CAM_CPAS, "Bus Client : src=%d, dst=%d, bus_client=%d",
bus_client->src, bus_client->dst, bus_client->client_id);
return 0;
@@ -268,7 +254,7 @@
axi_port_list_node = of_find_node_by_name(soc_info->pdev->dev.of_node,
"qcom,axi-port-list");
if (!axi_port_list_node) {
- pr_err("Node qcom,axi-port-list not found.\n");
+ CAM_ERR(CAM_CPAS, "Node qcom,axi-port-list not found.");
return -EINVAL;
}
@@ -286,14 +272,15 @@
"qcom,axi-port-name", 0,
(const char **)&axi_port->axi_port_name);
if (rc) {
- pr_err("failed to read qcom,axi-port-name rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS,
+ "failed to read qcom,axi-port-name rc=%d", rc);
goto port_name_fail;
}
axi_port_mnoc_node = of_find_node_by_name(axi_port_node,
"qcom,axi-port-mnoc");
if (!axi_port_mnoc_node) {
- pr_err("Node qcom,axi-port-mnoc not found.\n");
+ CAM_ERR(CAM_CPAS, "Node qcom,axi-port-mnoc not found.");
rc = -EINVAL;
goto mnoc_node_get_fail;
}
@@ -308,7 +295,8 @@
axi_port_camnoc_node = of_find_node_by_name(
axi_port_node, "qcom,axi-port-camnoc");
if (!axi_port_camnoc_node) {
- pr_err("Node qcom,axi-port-camnoc not found\n");
+ CAM_ERR(CAM_CPAS,
+ "Node qcom,axi-port-camnoc not found");
rc = -EINVAL;
goto camnoc_node_get_fail;
}
@@ -358,7 +346,8 @@
rc = cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
(enable == true) ? CAM_SVS_VOTE : CAM_SUSPEND_VOTE);
if (rc) {
- pr_err("Failed in AHB vote, enable=%d, rc=%d\n", enable, rc);
+ CAM_ERR(CAM_CPAS, "Failed in AHB vote, enable=%d, rc=%d",
+ enable, rc);
return rc;
}
@@ -375,7 +364,8 @@
rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
mnoc_bw, 0);
if (rc) {
- pr_err("Failed in mnoc vote, enable=%d, rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed in mnoc vote, enable=%d, rc=%d",
enable, rc);
goto remove_ahb_vote;
}
@@ -384,7 +374,8 @@
cam_cpas_util_vote_bus_client_bw(
&curr_port->camnoc_bus, camnoc_bw, 0);
if (rc) {
- pr_err("Failed in mnoc vote, enable=%d, %d\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed in mnoc vote, enable=%d, %d",
enable, rc);
cam_cpas_util_vote_bus_client_bw(
&curr_port->mnoc_bus, 0, 0);
@@ -446,7 +437,8 @@
int rc = 0;
if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
- pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
reg_base, reg_base_index, soc_info->num_reg_map);
return -EINVAL;
}
@@ -457,7 +449,7 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("client has not started%d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client has not started%d", client_indx);
rc = -EPERM;
goto unlock_client;
}
@@ -489,7 +481,8 @@
return -EINVAL;
if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
- pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
reg_base, reg_base_index, soc_info->num_reg_map);
return -EINVAL;
}
@@ -500,7 +493,7 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("client has not started%d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client has not started%d", client_indx);
rc = -EPERM;
goto unlock_client;
}
@@ -531,7 +524,7 @@
int rc = 0;
if (!axi_port) {
- pr_err("axi port does not exists\n");
+ CAM_ERR(CAM_CPAS, "axi port does not exists");
return -EINVAL;
}
@@ -563,7 +556,8 @@
if ((!soc_private->axi_camnoc_based) && (mnoc_bw < camnoc_bw))
mnoc_bw = camnoc_bw;
- CPAS_CDBG("axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]\n",
+ CAM_DBG(CAM_CPAS,
+ "axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]",
axi_port->mnoc_bus.src, axi_port->mnoc_bus.dst,
axi_port->camnoc_bus.src, axi_port->camnoc_bus.dst,
camnoc_bw, mnoc_bw);
@@ -571,7 +565,8 @@
rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
mnoc_bw, 0);
if (rc) {
- pr_err("Failed in mnoc vote ab[%llu] ib[%llu] rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
mnoc_bw, mnoc_bw, rc);
goto unlock_axi_port;
}
@@ -580,7 +575,8 @@
rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
camnoc_bw, 0);
if (rc) {
- pr_err("Failed camnoc vote ab[%llu] ib[%llu] rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed camnoc vote ab[%llu] ib[%llu] rc=%d",
camnoc_bw, camnoc_bw, rc);
goto unlock_axi_port;
}
@@ -600,7 +596,8 @@
if (!axi_vote || ((axi_vote->compressed_bw == 0) &&
(axi_vote->uncompressed_bw == 0))) {
- pr_err("Invalid vote, client_handle=%d\n", client_handle);
+ CAM_ERR(CAM_CPAS, "Invalid vote, client_handle=%d",
+ client_handle);
return -EINVAL;
}
@@ -610,12 +607,13 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("client has not started %d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
rc = -EPERM;
goto unlock_client;
}
- CPAS_CDBG("Client[%d] Requested compressed[%llu], uncompressed[%llu]\n",
+ CAM_DBG(CAM_CPAS,
+ "Client[%d] Requested compressed[%llu], uncompressed[%llu]",
client_indx, axi_vote->compressed_bw,
axi_vote->uncompressed_bw);
@@ -640,13 +638,14 @@
int i;
if (!dev || !req_level) {
- pr_err("Invalid params %pK, %pK\n", dev, req_level);
+ CAM_ERR(CAM_CPAS, "Invalid params %pK, %pK", dev, req_level);
return -EINVAL;
}
opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
if (IS_ERR(opp)) {
- pr_err("Error on OPP freq :%ld, %pK\n", corner_freq, opp);
+ CAM_ERR(CAM_CPAS, "Error on OPP freq :%ld, %pK",
+ corner_freq, opp);
return -EINVAL;
}
@@ -656,7 +655,8 @@
if (corner == soc_private->vdd_ahb[i].vdd_corner)
level = soc_private->vdd_ahb[i].ahb_level;
- CPAS_CDBG("From OPP table : freq=[%ld][%ld], corner=%d, level=%d\n",
+ CAM_DBG(CAM_CPAS,
+ "From OPP table : freq=[%ld][%ld], corner=%d, level=%d",
freq, corner_freq, corner, level);
*req_level = level;
@@ -675,7 +675,7 @@
int i, rc = 0;
if (!ahb_bus_client->valid) {
- pr_err("AHB Bus client not valid\n");
+ CAM_ERR(CAM_CPAS, "AHB Bus client not valid");
return -EINVAL;
}
@@ -694,7 +694,7 @@
mutex_lock(&ahb_bus_client->lock);
cpas_client->ahb_level = required_level;
- CPAS_CDBG("Clients required level[%d], curr_level[%d]\n",
+ CAM_DBG(CAM_CPAS, "Clients required level[%d], curr_level[%d]",
required_level, ahb_bus_client->curr_vote_level);
if (required_level == ahb_bus_client->curr_vote_level)
@@ -707,19 +707,20 @@
highest_level = cpas_core->cpas_client[i]->ahb_level;
}
- CPAS_CDBG("Required highest_level[%d]\n", highest_level);
+ CAM_DBG(CAM_CPAS, "Required highest_level[%d]", highest_level);
rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
highest_level);
if (rc) {
- pr_err("Failed in ahb vote, level=%d, rc=%d\n",
+ CAM_ERR(CAM_CPAS, "Failed in ahb vote, level=%d, rc=%d",
highest_level, rc);
goto unlock_bus_client;
}
rc = cam_soc_util_set_clk_rate_level(&cpas_hw->soc_info, highest_level);
if (rc) {
- pr_err("Failed in scaling clock rate level %d for AHB\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed in scaling clock rate level %d for AHB",
highest_level);
goto unlock_bus_client;
}
@@ -740,7 +741,7 @@
int rc = 0;
if (!ahb_vote || (ahb_vote->vote.level == 0)) {
- pr_err("Invalid AHB vote, %pK\n", ahb_vote);
+ CAM_ERR(CAM_CPAS, "Invalid AHB vote, %pK", ahb_vote);
return -EINVAL;
}
@@ -750,12 +751,13 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("client has not started %d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
rc = -EPERM;
goto unlock_client;
}
- CPAS_CDBG("client[%d] : type[%d], level[%d], freq[%ld], applied[%d]\n",
+ CAM_DBG(CAM_CPAS,
+ "client[%d] : type[%d], level[%d], freq[%ld], applied[%d]",
client_indx, ahb_vote->type, ahb_vote->vote.level,
ahb_vote->vote.freq,
cpas_core->cpas_client[client_indx]->ahb_level);
@@ -782,12 +784,13 @@
int rc;
if (!hw_priv || !start_args) {
- pr_err("Invalid arguments %pK %pK\n", hw_priv, start_args);
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, start_args);
return -EINVAL;
}
if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
- pr_err("HW_CAPS size mismatch %ld %d\n",
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
sizeof(struct cam_cpas_hw_cmd_start), arg_size);
return -EINVAL;
}
@@ -804,7 +807,7 @@
if ((ahb_vote->vote.level == 0) || ((axi_vote->compressed_bw == 0) &&
(axi_vote->uncompressed_bw == 0))) {
- pr_err("Invalid vote ahb[%d], axi[%llu], [%llu]\n",
+ CAM_ERR(CAM_CPAS, "Invalid vote ahb[%d], axi[%llu], [%llu]",
ahb_vote->vote.level, axi_vote->compressed_bw,
axi_vote->uncompressed_bw);
return -EINVAL;
@@ -817,20 +820,20 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
- pr_err("client is not registered %d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client is not registered %d", client_indx);
rc = -EPERM;
goto done;
}
if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("Client %d is in start state\n", client_indx);
+ CAM_ERR(CAM_CPAS, "Client %d is in start state", client_indx);
rc = -EPERM;
goto done;
}
cpas_client = cpas_core->cpas_client[client_indx];
- CPAS_CDBG("AHB :client[%d] type[%d], level[%d], applied[%d]\n",
+ CAM_DBG(CAM_CPAS, "AHB :client[%d] type[%d], level[%d], applied[%d]",
client_indx, ahb_vote->type, ahb_vote->vote.level,
cpas_client->ahb_level);
rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
@@ -838,7 +841,8 @@
if (rc)
goto done;
- CPAS_CDBG("AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]\n",
+ CAM_DBG(CAM_CPAS,
+ "AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]",
client_indx, axi_vote->compressed_bw,
axi_vote->uncompressed_bw);
rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
@@ -850,7 +854,7 @@
rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info,
applied_level);
if (rc) {
- pr_err("enable_resorce failed, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "enable_resorce failed, rc=%d", rc);
goto done;
}
@@ -859,7 +863,8 @@
if (rc) {
cam_cpas_soc_disable_resources(
&cpas_hw->soc_info);
- pr_err("failed in power_on settings rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "failed in power_on settings rc=%d",
rc);
goto done;
}
@@ -870,7 +875,7 @@
cpas_client->started = true;
cpas_core->streamon_clients++;
- CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+ CAM_DBG(CAM_CPAS, "client_indx=%d, streamon_clients=%d",
client_indx, cpas_core->streamon_clients);
done:
mutex_unlock(&cpas_core->client_mutex[client_indx]);
@@ -892,12 +897,13 @@
int rc = 0;
if (!hw_priv || !stop_args) {
- pr_err("Invalid arguments %pK %pK\n", hw_priv, stop_args);
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, stop_args);
return -EINVAL;
}
if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
- pr_err("HW_CAPS size mismatch %ld %d\n",
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
return -EINVAL;
}
@@ -913,11 +919,11 @@
mutex_lock(&cpas_hw->hw_mutex);
mutex_lock(&cpas_core->client_mutex[client_indx]);
- CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+ CAM_DBG(CAM_CPAS, "client_indx=%d, streamon_clients=%d",
client_indx, cpas_core->streamon_clients);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("Client %d is not started\n", client_indx);
+ CAM_ERR(CAM_CPAS, "Client %d is not started", client_indx);
rc = -EPERM;
goto done;
}
@@ -930,7 +936,8 @@
if (cpas_core->internal_ops.power_off) {
rc = cpas_core->internal_ops.power_off(cpas_hw);
if (rc) {
- pr_err("failed in power_off settings rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "failed in power_off settings rc=%d",
rc);
/* Do not return error, passthrough */
}
@@ -938,7 +945,7 @@
rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
if (rc) {
- pr_err("disable_resorce failed, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "disable_resorce failed, rc=%d", rc);
goto done;
}
cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
@@ -970,12 +977,13 @@
int rc = 0;
if (!hw_priv || !init_hw_args) {
- pr_err("Invalid arguments %pK %pK\n", hw_priv, init_hw_args);
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, init_hw_args);
return -EINVAL;
}
if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
- pr_err("INIT HW size mismatch %ld %d\n",
+ CAM_ERR(CAM_CPAS, "INIT HW size mismatch %ld %d",
sizeof(struct cam_cpas_hw_caps), arg_size);
return -EINVAL;
}
@@ -1002,7 +1010,7 @@
struct cam_cpas_private_soc *soc_private =
(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
- CPAS_CDBG("Register params : identifier=%s, cell_index=%d\n",
+ CAM_DBG(CAM_CPAS, "Register params : identifier=%s, cell_index=%d",
register_params->identifier, register_params->cell_index);
if (soc_private->client_id_based)
@@ -1015,11 +1023,11 @@
mutex_lock(&cpas_hw->hw_mutex);
- rc = cam_cpas_util_get_string_index(soc_private->client_name,
+ rc = cam_common_util_get_string_index(soc_private->client_name,
soc_private->num_clients, client_name, &client_indx);
if (rc || !CAM_CPAS_CLIENT_VALID(client_indx) ||
CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
- pr_err("Invalid Client register : %s %d, %d\n",
+ CAM_ERR(CAM_CPAS, "Invalid Client register : %s %d, %d",
register_params->identifier,
register_params->cell_index, client_indx);
mutex_unlock(&cpas_hw->hw_mutex);
@@ -1035,7 +1043,8 @@
rc = cam_cpas_util_insert_client_to_axi_port(cpas_core, soc_private,
cpas_client, client_indx);
if (rc) {
- pr_err("axi_port_insert failed client_indx=%d, rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "axi_port_insert failed client_indx=%d, rc=%d",
client_indx, rc);
kfree(cpas_client);
mutex_unlock(&cpas_hw->hw_mutex);
@@ -1051,7 +1060,7 @@
mutex_unlock(&cpas_hw->hw_mutex);
- CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+ CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
client_indx, cpas_core->registered_clients);
return 0;
@@ -1071,13 +1080,13 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
- pr_err("client not registered %d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client not registered %d", client_indx);
rc = -EPERM;
goto done;
}
if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("Client %d is not stopped\n", client_indx);
+ CAM_ERR(CAM_CPAS, "Client %d is not stopped", client_indx);
rc = -EPERM;
goto done;
}
@@ -1085,7 +1094,7 @@
cam_cpas_util_remove_client_from_axi_port(
cpas_core->cpas_client[client_indx]);
- CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+ CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
client_indx, cpas_core->registered_clients);
kfree(cpas_core->cpas_client[client_indx]);
@@ -1105,12 +1114,13 @@
struct cam_cpas_hw_caps *hw_caps;
if (!hw_priv || !get_hw_cap_args) {
- pr_err("Invalid arguments %pK %pK\n", hw_priv, get_hw_cap_args);
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, get_hw_cap_args);
return -EINVAL;
}
if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
- pr_err("HW_CAPS size mismatch %ld %d\n",
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
sizeof(struct cam_cpas_hw_caps), arg_size);
return -EINVAL;
}
@@ -1132,8 +1142,8 @@
if (!hw_priv || !cmd_args ||
(cmd_type >= CAM_CPAS_HW_CMD_INVALID)) {
- pr_err("Invalid arguments %pK %pK %d\n", hw_priv, cmd_args,
- cmd_type);
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK %d",
+ hw_priv, cmd_args, cmd_type);
return -EINVAL;
}
@@ -1142,7 +1152,7 @@
struct cam_cpas_register_params *register_params;
if (sizeof(struct cam_cpas_register_params) != arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1155,7 +1165,7 @@
uint32_t *client_handle;
if (sizeof(uint32_t) != arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1169,7 +1179,7 @@
if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1186,7 +1196,7 @@
if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1203,7 +1213,7 @@
struct cam_cpas_hw_cmd_ahb_vote *cmd_ahb_vote;
if (sizeof(struct cam_cpas_hw_cmd_ahb_vote) != arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1217,7 +1227,7 @@
struct cam_cpas_hw_cmd_axi_vote *cmd_axi_vote;
if (sizeof(struct cam_cpas_hw_cmd_axi_vote) != arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1228,7 +1238,7 @@
break;
}
default:
- pr_err("CPAS HW command not valid =%d\n", cmd_type);
+ CAM_ERR(CAM_CPAS, "CPAS HW command not valid =%d", cmd_type);
break;
}
@@ -1274,7 +1284,7 @@
rc = of_property_read_string_index(of_node, "arch-compat", 0,
(const char **)&compat_str);
if (rc) {
- pr_err("failed to get arch-compat rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed to get arch-compat rc=%d", rc);
return -EINVAL;
}
@@ -1285,7 +1295,7 @@
hw_intf->hw_type = CAM_HW_CPASTOP;
rc = cam_cpastop_get_internal_ops(internal_ops);
} else {
- pr_err("arch-compat %s not supported\n", compat_str);
+ CAM_ERR(CAM_CPAS, "arch-compat %s not supported", compat_str);
rc = -EINVAL;
}
@@ -1375,7 +1385,7 @@
rc = cam_cpas_util_client_setup(cpas_hw);
if (rc) {
- pr_err("failed in client setup, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in client setup, rc=%d", rc);
goto deinit_platform_res;
}
@@ -1383,13 +1393,13 @@
cpas_hw->soc_info.pdev->dev.of_node,
&cpas_core->ahb_bus_client);
if (rc) {
- pr_err("failed in ahb setup, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in ahb setup, rc=%d", rc);
goto client_cleanup;
}
rc = cam_cpas_util_axi_setup(cpas_core, &cpas_hw->soc_info);
if (rc) {
- pr_err("failed in axi setup, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in axi setup, rc=%d", rc);
goto ahb_cleanup;
}
@@ -1400,18 +1410,18 @@
rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, CAM_SVS_VOTE);
if (rc) {
- pr_err("failed in soc_enable_resources, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in soc_enable_resources, rc=%d", rc);
goto remove_default_vote;
}
if (internal_ops->get_hw_info) {
rc = internal_ops->get_hw_info(cpas_hw, &cpas_core->hw_caps);
if (rc) {
- pr_err("failed in get_hw_info, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in get_hw_info, rc=%d", rc);
goto disable_soc_res;
}
} else {
- pr_err("Invalid get_hw_info\n");
+ CAM_ERR(CAM_CPAS, "Invalid get_hw_info");
goto disable_soc_res;
}
@@ -1422,7 +1432,7 @@
rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
if (rc) {
- pr_err("failed in soc_disable_resources, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc);
goto remove_default_vote;
}
@@ -1453,7 +1463,7 @@
kfree(cpas_core);
kfree(cpas_hw);
kfree(cpas_hw_intf);
- pr_err("failed in hw probe\n");
+ CAM_ERR(CAM_CPAS, "failed in hw probe");
return rc;
}
@@ -1463,7 +1473,7 @@
struct cam_cpas *cpas_core;
if (!cpas_hw_intf) {
- pr_err("cpas interface not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas interface not initialized");
return -EINVAL;
}
@@ -1471,7 +1481,7 @@
cpas_core = (struct cam_cpas *)cpas_hw->core_info;
if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
- pr_err("cpas hw is in power up state\n");
+ CAM_ERR(CAM_CPAS, "cpas hw is in power up state");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index 6d4fafe..bbc99b7 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -15,6 +15,7 @@
#include "cam_cpas_api.h"
#include "cam_cpas_hw_intf.h"
+#include "cam_common_util.h"
#define CPAS_MAX_CLIENTS 20
#define CAM_CPAS_INFLIGHT_WORKS 5
@@ -192,7 +193,5 @@
int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
-int cam_cpas_util_get_string_index(const char **strings,
- uint32_t num_strings, char *matching_string, uint32_t *index);
#endif /* _CAM_CPAS_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
index 9ee5a43..fa4018e 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
@@ -18,16 +18,7 @@
#include "cam_cpas_api.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
-
-#ifdef CONFIG_CAM_CPAS_DBG
-#define CPAS_CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CPAS_CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-#undef pr_fmt
-#define pr_fmt(fmt) "CAM-CPAS %s:%d " fmt, __func__, __LINE__
-
-#define BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+#include "cam_debug_util.h"
/* Number of times to retry while polling */
#define CAM_CPAS_POLL_RETRY_CNT 5
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index aba0caa..3846784 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -55,12 +55,12 @@
struct cam_hw_version *cpas_version)
{
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
if (!camera_family || !camera_version || !cpas_version) {
- pr_err("invalid input %pK %pK %pK\n", camera_family,
+ CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK", camera_family,
camera_version, cpas_version);
return -EINVAL;
}
@@ -80,7 +80,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -98,9 +98,9 @@
CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write,
sizeof(struct cam_cpas_hw_cmd_reg_read_write));
if (rc)
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -115,12 +115,12 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
if (!value) {
- pr_err("Invalid arg value\n");
+ CAM_ERR(CAM_CPAS, "Invalid arg value");
return -EINVAL;
}
@@ -138,13 +138,13 @@
CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read,
sizeof(struct cam_cpas_hw_cmd_reg_read_write));
if (rc) {
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
return rc;
}
*value = cmd_reg_read.value;
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -158,7 +158,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -173,9 +173,9 @@
CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote,
sizeof(struct cam_cpas_hw_cmd_axi_vote));
if (rc)
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -189,7 +189,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -204,9 +204,9 @@
CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote,
sizeof(struct cam_cpas_hw_cmd_ahb_vote));
if (rc)
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -219,7 +219,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -232,9 +232,9 @@
g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop,
sizeof(struct cam_cpas_hw_cmd_stop));
if (rc)
- pr_err("Failed in stop, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in stop, rc=%d", rc);
} else {
- pr_err("Invalid stop ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid stop ops");
rc = -EINVAL;
}
@@ -248,7 +248,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -263,9 +263,9 @@
g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start,
sizeof(struct cam_cpas_hw_cmd_start));
if (rc)
- pr_err("Failed in start, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in start, rc=%d", rc);
} else {
- pr_err("Invalid start ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid start ops");
rc = -EINVAL;
}
@@ -278,7 +278,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -288,9 +288,9 @@
CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
&client_handle, sizeof(uint32_t));
if (rc)
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -304,7 +304,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -314,9 +314,9 @@
CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params,
sizeof(struct cam_cpas_register_params));
if (rc)
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -330,7 +330,7 @@
int rc = 0;
if (!cmd) {
- pr_err("Invalid input cmd\n");
+ CAM_ERR(CAM_CPAS, "Invalid input cmd");
return -EINVAL;
}
@@ -341,7 +341,8 @@
rc = copy_from_user(&query, (void __user *) cmd->handle,
sizeof(query));
if (rc) {
- pr_err("Failed in copy from user, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d",
+ rc);
break;
}
@@ -353,14 +354,14 @@
rc = copy_to_user((void __user *) cmd->handle, &query,
sizeof(query));
if (rc)
- pr_err("Failed in copy to user, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc);
break;
}
case CAM_SD_SHUTDOWN:
break;
default:
- pr_err("Unknown op code %d for CPAS\n", cmd->op_code);
+ CAM_ERR(CAM_CPAS, "Unknown op code %d for CPAS", cmd->op_code);
rc = -EINVAL;
break;
}
@@ -374,13 +375,13 @@
struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
if (!cpas_intf || !cpas_intf->probe_done) {
- pr_err("CPAS not initialized\n");
+ CAM_ERR(CAM_CPAS, "CPAS not initialized");
return -ENODEV;
}
mutex_lock(&cpas_intf->intf_lock);
cpas_intf->open_cnt++;
- CPAS_CDBG("CPAS Subdev open count %d\n", cpas_intf->open_cnt);
+ CAM_DBG(CAM_CPAS, "CPAS Subdev open count %d", cpas_intf->open_cnt);
mutex_unlock(&cpas_intf->intf_lock);
return 0;
@@ -392,13 +393,13 @@
struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
if (!cpas_intf || !cpas_intf->probe_done) {
- pr_err("CPAS not initialized\n");
+ CAM_ERR(CAM_CPAS, "CPAS not initialized");
return -ENODEV;
}
mutex_lock(&cpas_intf->intf_lock);
cpas_intf->open_cnt--;
- CPAS_CDBG("CPAS Subdev close count %d\n", cpas_intf->open_cnt);
+ CAM_DBG(CAM_CPAS, "CPAS Subdev close count %d", cpas_intf->open_cnt);
mutex_unlock(&cpas_intf->intf_lock);
return 0;
@@ -411,7 +412,7 @@
struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
if (!cpas_intf || !cpas_intf->probe_done) {
- pr_err("CPAS not initialized\n");
+ CAM_ERR(CAM_CPAS, "CPAS not initialized");
return -ENODEV;
}
@@ -420,7 +421,7 @@
rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
break;
default:
- pr_err("Invalid command %d for CPAS!\n", cmd);
+ CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
rc = -EINVAL;
break;
}
@@ -437,13 +438,13 @@
struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
if (!cpas_intf || !cpas_intf->probe_done) {
- pr_err("CPAS not initialized\n");
+ CAM_ERR(CAM_CPAS, "CPAS not initialized");
return -ENODEV;
}
if (copy_from_user(&cmd_data, (void __user *)arg,
sizeof(cmd_data))) {
- pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_CPAS, "Failed to copy from user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
return -EFAULT;
}
@@ -453,7 +454,7 @@
rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
break;
default:
- pr_err("Invalid command %d for CPAS!\n", cmd);
+ CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
rc = -EINVAL;
break;
}
@@ -461,7 +462,8 @@
if (!rc) {
if (copy_to_user((void __user *)arg, &cmd_data,
sizeof(cmd_data))) {
- pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed to copy to user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
rc = -EFAULT;
}
@@ -508,7 +510,8 @@
rc = cam_register_subdev(subdev);
if (rc) {
- pr_err("failed register subdev: %s!\n", CAM_CPAS_DEV_NAME);
+ CAM_ERR(CAM_CPAS, "failed register subdev: %s!",
+ CAM_CPAS_DEV_NAME);
return rc;
}
@@ -523,7 +526,7 @@
int rc;
if (g_cpas_intf) {
- pr_err("cpas dev proble already done\n");
+ CAM_ERR(CAM_CPAS, "cpas dev proble already done");
return -EALREADY;
}
@@ -536,7 +539,7 @@
rc = cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf);
if (rc || (g_cpas_intf->hw_intf == NULL)) {
- pr_err("Failed in hw probe, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in hw probe, rc=%d", rc);
goto error_destroy_mem;
}
@@ -546,11 +549,11 @@
rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
hw_caps, sizeof(struct cam_cpas_hw_caps));
if (rc) {
- pr_err("Failed in get_hw_caps, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in get_hw_caps, rc=%d", rc);
goto error_hw_remove;
}
} else {
- pr_err("Invalid get_hw_caps ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid get_hw_caps ops");
goto error_hw_remove;
}
@@ -559,7 +562,8 @@
goto error_hw_remove;
g_cpas_intf->probe_done = true;
- CPAS_CDBG("CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x\n",
+ CAM_DBG(CAM_CPAS,
+ "CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x",
hw_caps->camera_family, hw_caps->camera_version.major,
hw_caps->camera_version.minor, hw_caps->camera_version.incr,
hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
@@ -573,14 +577,14 @@
mutex_destroy(&g_cpas_intf->intf_lock);
kfree(g_cpas_intf);
g_cpas_intf = NULL;
- pr_err("CPAS probe failed\n");
+ CAM_ERR(CAM_CPAS, "CPAS probe failed");
return rc;
}
static int cam_cpas_dev_remove(struct platform_device *dev)
{
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 09c2ae5..f85f461 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -29,7 +29,8 @@
int count = 0, i = 0, rc = 0;
if (!soc_private || !pdev) {
- pr_err("invalid input arg %pK %pK\n", soc_private, pdev);
+ CAM_ERR(CAM_CPAS, "invalid input arg %pK %pK",
+ soc_private, pdev);
return -EINVAL;
}
@@ -38,7 +39,8 @@
rc = of_property_read_string_index(of_node, "arch-compat", 0,
(const char **)&soc_private->arch_compat);
if (rc) {
- pr_err("device %s failed to read arch-compat\n", pdev->name);
+ CAM_ERR(CAM_CPAS, "device %s failed to read arch-compat",
+ pdev->name);
return rc;
}
@@ -47,12 +49,13 @@
count = of_property_count_strings(of_node, "client-names");
if (count <= 0) {
- pr_err("no client-names found\n");
+ CAM_ERR(CAM_CPAS, "no client-names found");
count = 0;
return -EINVAL;
}
soc_private->num_clients = count;
- CPAS_CDBG("arch-compat=%s, client_id_based = %d, num_clients=%d\n",
+ CAM_DBG(CAM_CPAS,
+ "arch-compat=%s, client_id_based = %d, num_clients=%d",
soc_private->arch_compat, soc_private->client_id_based,
soc_private->num_clients);
@@ -60,15 +63,16 @@
rc = of_property_read_string_index(of_node,
"client-names", i, &soc_private->client_name[i]);
if (rc) {
- pr_err("no client-name at cnt=%d\n", i);
+ CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
return -ENODEV;
}
- CPAS_CDBG("Client[%d] : %s\n", i, soc_private->client_name[i]);
+ CAM_DBG(CAM_CPAS, "Client[%d] : %s", i,
+ soc_private->client_name[i]);
}
count = of_property_count_strings(of_node, "client-axi-port-names");
if ((count <= 0) || (count != soc_private->num_clients)) {
- pr_err("incorrect client-axi-port-names info %d %d\n",
+ CAM_ERR(CAM_CPAS, "incorrect client-axi-port-names info %d %d",
count, soc_private->num_clients);
count = 0;
return -EINVAL;
@@ -79,10 +83,10 @@
"client-axi-port-names", i,
&soc_private->client_axi_port_name[i]);
if (rc) {
- pr_err("no client-name at cnt=%d\n", i);
+ CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
return -ENODEV;
}
- CPAS_CDBG("Client AXI Port[%d] : %s\n", i,
+ CAM_DBG(CAM_CPAS, "Client AXI Port[%d] : %s", i,
soc_private->client_axi_port_name[i]);
}
@@ -99,25 +103,29 @@
rc = of_property_read_u32_index(of_node, "vdd-corners",
i, &soc_private->vdd_ahb[i].vdd_corner);
if (rc) {
- pr_err("vdd-corners failed at index=%d\n", i);
+ CAM_ERR(CAM_CPAS,
+ "vdd-corners failed at index=%d", i);
return -ENODEV;
}
rc = of_property_read_string_index(of_node,
"vdd-corner-ahb-mapping", i, &ahb_string);
if (rc) {
- pr_err("no ahb-mapping at index=%d\n", i);
+ CAM_ERR(CAM_CPAS,
+ "no ahb-mapping at index=%d", i);
return -ENODEV;
}
rc = cam_soc_util_get_level_from_string(ahb_string,
&soc_private->vdd_ahb[i].ahb_level);
if (rc) {
- pr_err("invalid ahb-string at index=%d\n", i);
+ CAM_ERR(CAM_CPAS,
+ "invalid ahb-string at index=%d", i);
return -EINVAL;
}
- CPAS_CDBG("Vdd-AHB mapping [%d] : [%d] [%s] [%d]\n", i,
+ CAM_DBG(CAM_CPAS,
+ "Vdd-AHB mapping [%d] : [%d] [%s] [%d]", i,
soc_private->vdd_ahb[i].vdd_corner,
ahb_string, soc_private->vdd_ahb[i].ahb_level);
}
@@ -135,19 +143,20 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc) {
- pr_err("failed in get_dt_properties, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in get_dt_properties, rc=%d", rc);
return rc;
}
if (soc_info->irq_line && !irq_handler) {
- pr_err("Invalid IRQ handler\n");
+ CAM_ERR(CAM_CPAS, "Invalid IRQ handler");
return -EINVAL;
}
rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
irq_data);
if (rc) {
- pr_err("failed in request_platform_resource, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in request_platform_resource, rc=%d",
+ rc);
return rc;
}
@@ -160,7 +169,7 @@
rc = cam_cpas_get_custom_dt_info(soc_info->pdev, soc_info->soc_private);
if (rc) {
- pr_err("failed in get_custom_info, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in get_custom_info, rc=%d", rc);
goto free_soc_private;
}
@@ -179,7 +188,7 @@
rc = cam_soc_util_release_platform_resource(soc_info);
if (rc)
- pr_err("release platform failed, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "release platform failed, rc=%d", rc);
kfree(soc_info->soc_private);
soc_info->soc_private = NULL;
@@ -195,7 +204,7 @@
rc = cam_soc_util_enable_platform_resource(soc_info, true,
default_level, true);
if (rc)
- pr_err("enable platform resource failed, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "enable platform resource failed, rc=%d", rc);
return rc;
}
@@ -206,7 +215,7 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc)
- pr_err("disable platform failed, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "disable platform failed, rc=%d", rc);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
index 95e26c5..0669070 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -29,13 +29,13 @@
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
hw_caps->camera_version.major =
- BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
hw_caps->camera_version.minor =
- BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
hw_caps->camera_version.incr =
- BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
- CPAS_CDBG("Family %d, version %d.%d.%d\n",
+ CAM_DBG(CAM_FD, "Family %d, version %d.%d.%d",
hw_caps->camera_family, hw_caps->camera_version.major,
hw_caps->camera_version.minor, hw_caps->camera_version.incr);
@@ -49,21 +49,22 @@
int rc;
if (num_reg_map > CAM_CPAS_REG_MAX) {
- pr_err("invalid num_reg_map=%d\n", num_reg_map);
+ CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
return -EINVAL;
}
if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
- pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+ CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+ soc_info->num_mem_block);
return -EINVAL;
}
- rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+ rc = cam_common_util_get_string_index(soc_info->mem_block_name,
soc_info->num_mem_block, "cam_camss", &index);
if ((rc == 0) && (index < num_reg_map)) {
regbase_index[CAM_CPAS_REG_CAMSS] = index;
} else {
- pr_err("regbase not found for CAM_CPAS_REG_CAMSS\n");
+ CAM_ERR(CAM_CPAS, "regbase not found for CAM_CPAS_REG_CAMSS");
return -EINVAL;
}
@@ -73,7 +74,7 @@
int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
{
if (!internal_ops) {
- pr_err("invalid NULL param\n");
+ CAM_ERR(CAM_CPAS, "invalid NULL param");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index b901410..32ef2e4 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -38,24 +38,24 @@
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
hw_caps->camera_version.major =
- BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
hw_caps->camera_version.minor =
- BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
hw_caps->camera_version.incr =
- BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4);
hw_caps->cpas_version.major =
- BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
hw_caps->cpas_version.minor =
- BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
hw_caps->cpas_version.incr =
- BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x8);
hw_caps->camera_capability = reg_value;
- CPAS_CDBG("Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x\n",
+ CAM_DBG(CAM_FD, "Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x",
hw_caps->camera_family, hw_caps->camera_version.major,
hw_caps->camera_version.minor, hw_caps->camera_version.incr,
hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
@@ -71,31 +71,32 @@
int rc;
if (num_reg_map > CAM_CPAS_REG_MAX) {
- pr_err("invalid num_reg_map=%d\n", num_reg_map);
+ CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
return -EINVAL;
}
if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
- pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+ CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+ soc_info->num_mem_block);
return -EINVAL;
}
- rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+ rc = cam_common_util_get_string_index(soc_info->mem_block_name,
soc_info->num_mem_block, "cam_cpas_top", &index);
if ((rc == 0) && (index < num_reg_map)) {
regbase_index[CAM_CPAS_REG_CPASTOP] = index;
} else {
- pr_err("regbase not found for CPASTOP, rc=%d, %d %d\n",
+ CAM_ERR(CAM_CPAS, "regbase not found for CPASTOP, rc=%d, %d %d",
rc, index, num_reg_map);
return -EINVAL;
}
- rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+ rc = cam_common_util_get_string_index(soc_info->mem_block_name,
soc_info->num_mem_block, "cam_camnoc", &index);
if ((rc == 0) && (index < num_reg_map)) {
regbase_index[CAM_CPAS_REG_CAMNOC] = index;
} else {
- pr_err("regbase not found for CAMNOC, rc=%d, %d %d\n",
+ CAM_ERR(CAM_CPAS, "regbase not found for CAMNOC, rc=%d, %d %d",
rc, index, num_reg_map);
return -EINVAL;
}
@@ -124,7 +125,8 @@
reg_value[3] = cam_io_r_mb(
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->error_logger[i + 3]);
- pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]\n",
+ CAM_ERR(CAM_CPAS,
+ "offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]",
camnoc_info->error_logger[i], reg_value[0],
reg_value[1], reg_value[2], reg_value[3]);
}
@@ -139,7 +141,7 @@
reg_value[2] = cam_io_r_mb(
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->error_logger[i + 2]);
- pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x]\n",
+ CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x] [0x%x]",
camnoc_info->error_logger[i], reg_value[0],
reg_value[1], reg_value[2]);
i = i + 3;
@@ -152,7 +154,7 @@
reg_value[1] = cam_io_r_mb(
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->error_logger[i + 1]);
- pr_err("offset[0x%x] values [0x%x] [0x%x]\n",
+ CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x]",
camnoc_info->error_logger[i], reg_value[0],
reg_value[1]);
i = i + 2;
@@ -162,7 +164,7 @@
reg_value[0] = cam_io_r_mb(
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->error_logger[i]);
- pr_err("offset[0x%x] values [0x%x]\n",
+ CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x]",
camnoc_info->error_logger[i], reg_value[0]);
}
@@ -178,7 +180,8 @@
reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->irq_err[i].err_status.offset);
- pr_err("Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]\n",
+ CAM_ERR(CAM_CPAS,
+ "Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]",
i, camnoc_info->irq_err[i].err_status.offset, reg_value);
return reg_value;
@@ -186,7 +189,7 @@
static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
{
- pr_err("ahb timout error\n");
+ CAM_ERR(CAM_CPAS, "ahb timout error");
return 0;
}
@@ -229,7 +232,8 @@
int i;
struct cam_cpas_client *cpas_client;
- CPAS_CDBG("Notify CB : num_clients=%d, registered=%d, started=%d\n",
+ CAM_DBG(CAM_CPAS,
+ "Notify CB : num_clients=%d, registered=%d, started=%d",
cpas_core->num_clients, cpas_core->registered_clients,
cpas_core->streamon_clients);
@@ -237,7 +241,8 @@
if (CAM_CPAS_CLIENT_STARTED(cpas_core, i)) {
cpas_client = cpas_core->cpas_client[i];
if (cpas_client->data.cam_cpas_client_cb) {
- CPAS_CDBG("Calling client CB %d : %d 0x%x\n",
+ CAM_DBG(CAM_CPAS,
+ "Calling client CB %d : %d 0x%x",
i, irq_type, irq_data);
cpas_client->data.cam_cpas_client_cb(
cpas_client->data.client_handle,
@@ -261,7 +266,7 @@
payload = container_of(work, struct cam_cpas_work_payload, work);
if (!payload) {
- pr_err("NULL payload");
+ CAM_ERR(CAM_CPAS, "NULL payload");
return;
}
@@ -273,7 +278,7 @@
if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
(camnoc_info->irq_err[i].enable)) {
irq_type = camnoc_info->irq_err[i].irq_type;
- pr_err("Error occurred, type=%d\n", irq_type);
+ CAM_ERR(CAM_CPAS, "Error occurred, type=%d", irq_type);
irq_data = 0;
switch (irq_type) {
@@ -293,10 +298,10 @@
cpas_hw);
break;
case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
- CPAS_CDBG("TEST IRQ\n");
+ CAM_DBG(CAM_CPAS, "TEST IRQ");
break;
default:
- pr_err("Invalid IRQ type\n");
+ CAM_ERR(CAM_CPAS, "Invalid IRQ type");
break;
}
@@ -309,7 +314,7 @@
}
if (payload->irq_status)
- pr_err("IRQ not handled irq_status=0x%x\n",
+ CAM_ERR(CAM_CPAS, "IRQ not handled irq_status=0x%x",
payload->irq_status);
kfree(payload);
@@ -331,7 +336,7 @@
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->irq_sbm->sbm_status.offset);
- CPAS_CDBG("IRQ callback, irq_status=0x%x\n", payload->irq_status);
+ CAM_DBG(CAM_CPAS, "IRQ callback, irq_status=0x%x", payload->irq_status);
payload->hw = cpas_hw;
INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work);
@@ -396,7 +401,8 @@
CAM_CPAS_POLL_RETRY_CNT,
CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
if (rc) {
- pr_err("camnoc flush slave pending trans failed\n");
+ CAM_ERR(CAM_CPAS,
+ "camnoc flush slave pending trans failed");
/* Do not return error, passthrough */
}
}
@@ -415,14 +421,14 @@
(hw_caps->cpas_version.incr == 0)) {
camnoc_info = &cam170_cpas100_camnoc_info;
} else {
- pr_err("CPAS Version not supported %d.%d.%d\n",
+ CAM_ERR(CAM_CPAS, "CPAS Version not supported %d.%d.%d",
hw_caps->cpas_version.major,
hw_caps->cpas_version.minor,
hw_caps->cpas_version.incr);
return -EINVAL;
}
} else {
- pr_err("Camera Version not supported %d.%d.%d\n",
+ CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d",
hw_caps->camera_version.major,
hw_caps->camera_version.minor,
hw_caps->camera_version.incr);
@@ -435,7 +441,7 @@
int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
{
if (!internal_ops) {
- pr_err("invalid NULL param\n");
+ CAM_ERR(CAM_CPAS, "invalid NULL param");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index 801d09d..3977b68 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -94,7 +94,7 @@
struct device *dev;
void *userdata;
void (*cam_cpas_client_cb)(
- int32_t client_handle,
+ uint32_t client_handle,
void *userdata,
enum cam_camnoc_irq_type event_type,
uint32_t event_data);
diff --git a/drivers/media/platform/msm/camera/cam_icp/Makefile b/drivers/media/platform/msm/camera/cam_icp/Makefile
index b35e4e4..5aba168 100644
--- a/drivers/media/platform/msm/camera/cam_icp/Makefile
+++ b/drivers/media/platform/msm/camera/cam_icp/Makefile
@@ -8,6 +8,7 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
ccflags-y += -Idrivers/media/platform/msm/camera
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_subdev.o cam_icp_context.o hfi.o
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 2311f66..15bd98c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-ICP-CTXT %s:%d " fmt, __func__, __LINE__
-
#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
@@ -25,6 +23,8 @@
#include "cam_icp_context.h"
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
@@ -32,8 +32,10 @@
int rc;
rc = cam_context_acquire_dev_to_hw(ctx, cmd);
- if (!rc)
+ if (!rc) {
ctx->state = CAM_CTX_ACQUIRED;
+ trace_cam_context_state("ICP", ctx);
+ }
return rc;
}
@@ -45,9 +47,10 @@
rc = cam_context_release_dev_to_hw(ctx, cmd);
if (rc)
- pr_err("Unable to release device\n");
+ CAM_ERR(CAM_ICP, "Unable to release device");
ctx->state = CAM_CTX_AVAILABLE;
+ trace_cam_context_state("ICP", ctx);
return rc;
}
@@ -57,8 +60,10 @@
int rc;
rc = cam_context_start_dev_to_hw(ctx, cmd);
- if (!rc)
+ if (!rc) {
ctx->state = CAM_CTX_READY;
+ trace_cam_context_state("ICP", ctx);
+ }
return rc;
}
@@ -70,7 +75,7 @@
rc = cam_context_prepare_dev_to_hw(ctx, cmd);
if (rc)
- pr_err("Unable to prepare device\n");
+ CAM_ERR(CAM_ICP, "Failed to prepare device");
return rc;
}
@@ -82,9 +87,10 @@
rc = cam_context_stop_dev_to_hw(ctx);
if (rc)
- pr_err("Unable to stop device\n");
+ CAM_ERR(CAM_ICP, "Failed to stop device");
ctx->state = CAM_CTX_ACQUIRED;
+ trace_cam_context_state("ICP", ctx);
return rc;
}
@@ -95,11 +101,11 @@
rc = __cam_icp_stop_dev_in_ready(ctx, NULL);
if (rc)
- pr_err("Unable to stop device\n");
+ CAM_ERR(CAM_ICP, "Failed to stop device");
rc = __cam_icp_release_dev_in_acquired(ctx, cmd);
if (rc)
- pr_err("Unable to stop device\n");
+ CAM_ERR(CAM_ICP, "Failed to release device");
return rc;
}
@@ -160,7 +166,7 @@
int rc;
if ((!ctx) || (!ctx->base) || (!hw_intf)) {
- pr_err("Invalid params: %pK %pK\n", ctx, hw_intf);
+ CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", ctx, hw_intf);
rc = -EINVAL;
goto err;
}
@@ -168,7 +174,7 @@
rc = cam_context_init(ctx->base, NULL, hw_intf, ctx->req_base,
CAM_CTX_REQ_MAX);
if (rc) {
- pr_err("Camera Context Base init failed!\n");
+ CAM_ERR(CAM_ICP, "Camera Context Base init failed");
goto err;
}
@@ -183,7 +189,7 @@
int cam_icp_context_deinit(struct cam_icp_context *ctx)
{
if ((!ctx) || (!ctx->base)) {
- pr_err("Invalid params: %pK\n", ctx);
+ CAM_ERR(CAM_ICP, "Invalid params: %pK", ctx);
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
index 69c2e03..bbdff27 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-ICP %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -36,6 +34,7 @@
#include "cam_icp_context.h"
#include "cam_hw_mgr_intf.h"
#include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
#define CAM_ICP_DEV_NAME "cam-icp"
@@ -65,13 +64,13 @@
mutex_lock(&g_icp_dev.icp_lock);
if (g_icp_dev.open_cnt >= 1) {
- pr_err("ICP subdev is already opened\n");
+ CAM_ERR(CAM_ICP, "ICP subdev is already opened");
rc = -EALREADY;
goto end;
}
if (!node) {
- pr_err("Invalid args\n");
+ CAM_ERR(CAM_ICP, "Invalid args");
rc = -EINVAL;
goto end;
}
@@ -79,7 +78,7 @@
hw_mgr_intf = &node->hw_mgr_intf;
rc = hw_mgr_intf->download_fw(hw_mgr_intf->hw_mgr_priv, NULL);
if (rc < 0) {
- pr_err("FW download failed\n");
+ CAM_ERR(CAM_ICP, "FW download failed");
goto end;
}
g_icp_dev.open_cnt++;
@@ -97,27 +96,27 @@
mutex_lock(&g_icp_dev.icp_lock);
if (g_icp_dev.open_cnt <= 0) {
- pr_err("ICP subdev is already closed\n");
+ CAM_ERR(CAM_ICP, "ICP subdev is already closed");
rc = -EINVAL;
goto end;
}
g_icp_dev.open_cnt--;
if (!node) {
- pr_err("Invalid args\n");
+ CAM_ERR(CAM_ICP, "Invalid args");
rc = -EINVAL;
goto end;
}
hw_mgr_intf = &node->hw_mgr_intf;
if (!hw_mgr_intf) {
- pr_err("hw_mgr_intf is not initialized\n");
+ CAM_ERR(CAM_ICP, "hw_mgr_intf is not initialized");
rc = -EINVAL;
goto end;
}
rc = hw_mgr_intf->hw_close(hw_mgr_intf->hw_mgr_priv, NULL);
if (rc < 0) {
- pr_err("HW close failed\n");
+ CAM_ERR(CAM_ICP, "HW close failed");
goto end;
}
@@ -138,7 +137,7 @@
struct cam_hw_mgr_intf *hw_mgr_intf;
if (!pdev) {
- pr_err("pdev is NULL\n");
+ CAM_ERR(CAM_ICP, "pdev is NULL");
return -EINVAL;
}
@@ -147,7 +146,7 @@
rc = cam_subdev_probe(&g_icp_dev.sd, pdev, CAM_ICP_DEV_NAME,
CAM_ICP_DEVICE_TYPE);
if (rc) {
- pr_err("ICP cam_subdev_probe failed!\n");
+ CAM_ERR(CAM_ICP, "ICP cam_subdev_probe failed");
goto probe_fail;
}
@@ -161,26 +160,24 @@
rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf);
if (rc) {
- pr_err("ICP HW manager init failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "ICP HW manager init failed: %d", rc);
goto hw_init_fail;
}
- pr_debug("Initializing the ICP contexts\n");
for (i = 0; i < CAM_CTX_MAX; i++) {
g_icp_dev.ctx_icp[i].base = &g_icp_dev.ctx[i];
rc = cam_icp_context_init(&g_icp_dev.ctx_icp[i],
hw_mgr_intf);
if (rc) {
- pr_err("ICP context init failed!\n");
+ CAM_ERR(CAM_ICP, "ICP context init failed");
goto ctx_fail;
}
}
- pr_debug("Initializing the ICP Node\n");
rc = cam_node_init(node, hw_mgr_intf, g_icp_dev.ctx,
CAM_CTX_MAX, CAM_ICP_DEV_NAME);
if (rc) {
- pr_err("ICP node init failed!\n");
+ CAM_ERR(CAM_ICP, "ICP node init failed");
goto ctx_fail;
}
@@ -207,20 +204,20 @@
struct cam_subdev *subdev;
if (!pdev) {
- pr_err("pdev is NULL\n");
- return -EINVAL;
+ CAM_ERR(CAM_ICP, "pdev is NULL");
+ return -ENODEV;
}
sd = platform_get_drvdata(pdev);
if (!sd) {
- pr_err("V4l2 subdev is NULL\n");
- return -EINVAL;
+ CAM_ERR(CAM_ICP, "V4l2 subdev is NULL");
+ return -ENODEV;
}
subdev = v4l2_get_subdevdata(sd);
if (!subdev) {
- pr_err("cam subdev is NULL\n");
- return -EINVAL;
+ CAM_ERR(CAM_ICP, "cam subdev is NULL");
+ return -ENODEV;
}
for (i = 0; i < CAM_CTX_MAX; i++)
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index b763a39..48e1f1c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "HFI-FW %s:%d " fmt, __func__, __LINE__
-
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -27,6 +25,7 @@
#include "hfi_session_defs.h"
#include "hfi_intf.h"
#include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
#define HFI_VERSION_INFO_MAJOR_VAL 1
#define HFI_VERSION_INFO_MINOR_VAL 1
@@ -39,11 +38,10 @@
#define HFI_VERSION_INFO_STEP_BMSK 0xFF
#define HFI_VERSION_INFO_STEP_SHFT 0
-#undef HFI_DBG
-#define HFI_DBG(fmt, args...) pr_debug(fmt, ##args)
-
static struct hfi_info *g_hfi;
unsigned int g_icp_mmu_hdl;
+static DEFINE_MUTEX(hfi_cmd_q_mutex);
+static DEFINE_MUTEX(hfi_msg_q_mutex);
int hfi_write_cmd(void *cmd_ptr)
{
@@ -52,23 +50,25 @@
struct hfi_qtbl *q_tbl;
struct hfi_q_hdr *q;
int rc = 0;
- int i = 0;
if (!cmd_ptr) {
- pr_err("Invalid args\n");
+ CAM_ERR(CAM_HFI, "command is null");
return -EINVAL;
}
- if (!g_hfi || (g_hfi->hfi_state != HFI_READY)) {
- pr_err("HFI interface not ready yet\n");
- return -EIO;
+ mutex_lock(&hfi_cmd_q_mutex);
+ if (!g_hfi) {
+ CAM_ERR(CAM_HFI, "HFI interface not setup");
+ rc = -ENODEV;
+ goto err;
}
- mutex_lock(&g_hfi->cmd_q_lock);
- if (!g_hfi->cmd_q_state) {
- pr_err("HFI command interface not ready yet\n");
- mutex_unlock(&g_hfi->cmd_q_lock);
- return -EIO;
+ if (g_hfi->hfi_state != HFI_READY ||
+ !g_hfi->cmd_q_state) {
+ CAM_ERR(CAM_HFI, "HFI state: %u, cmd q state: %u",
+ g_hfi->hfi_state, g_hfi->cmd_q_state);
+ rc = -ENODEV;
+ goto err;
}
q_tbl = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
@@ -78,24 +78,20 @@
size_in_words = (*(uint32_t *)cmd_ptr) >> BYTE_WORD_SHIFT;
if (!size_in_words) {
- pr_debug("failed");
+ CAM_DBG(CAM_HFI, "failed");
rc = -EINVAL;
goto err;
}
- HFI_DBG("size_in_words : %u, q->qhdr_write_idx %x\n", size_in_words,
- q->qhdr_write_idx);
-
read_idx = q->qhdr_read_idx;
empty_space = (q->qhdr_write_idx >= read_idx) ?
(q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) :
(read_idx - q->qhdr_write_idx);
if (empty_space <= size_in_words) {
- pr_err("failed");
+ CAM_ERR(CAM_HFI, "failed");
rc = -EIO;
goto err;
}
- HFI_DBG("empty_space : %u\n", empty_space);
new_write_idx = q->qhdr_write_idx + size_in_words;
write_ptr = (uint32_t *)(write_q + q->qhdr_write_idx);
@@ -110,15 +106,12 @@
memcpy(write_q, (uint8_t *)cmd_ptr + temp,
new_write_idx << BYTE_WORD_SHIFT);
}
- for (i = 0; i < size_in_words; i++)
- pr_debug("%x\n", write_ptr[i]);
q->qhdr_write_idx = new_write_idx;
- HFI_DBG("q->qhdr_write_idx %x\n", q->qhdr_write_idx);
cam_io_w((uint32_t)INTR_ENABLE,
g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
err:
- mutex_unlock(&g_hfi->cmd_q_lock);
+ mutex_unlock(&hfi_cmd_q_mutex);
return rc;
}
@@ -129,32 +122,40 @@
uint32_t new_read_idx, size_in_words, temp;
uint32_t *read_q, *read_ptr;
int rc = 0;
- int i = 0;
- if (!pmsg || q_id > Q_DBG) {
- pr_err("Inavlid args\n");
+ if (!pmsg) {
+ CAM_ERR(CAM_HFI, "Invalid msg");
return -EINVAL;
}
- if (!g_hfi || (g_hfi->hfi_state != HFI_READY)) {
- pr_err("HFI interface not ready yet\n");
- return -EIO;
+ if (q_id > Q_DBG) {
+ CAM_ERR(CAM_HFI, "Inavlid q :%u", q_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hfi_msg_q_mutex);
+ if (!g_hfi) {
+ CAM_ERR(CAM_HFI, "hfi not set up yet");
+ rc = -ENODEV;
+ goto err;
+ }
+
+ if ((g_hfi->hfi_state != HFI_READY) ||
+ !g_hfi->msg_q_state) {
+ CAM_ERR(CAM_HFI, "hfi state: %u, msg q state: %u",
+ g_hfi->hfi_state, g_hfi->msg_q_state);
+ rc = -ENODEV;
+ goto err;
}
q_tbl_ptr = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
q = &q_tbl_ptr->q_hdr[q_id];
if (q->qhdr_read_idx == q->qhdr_write_idx) {
- pr_debug("FW or Q not ready, hfi state : %u, r idx : %u, w idx : %u\n",
+ CAM_DBG(CAM_HFI, "Q not ready, state:%u, r idx:%u, w idx:%u",
g_hfi->hfi_state, q->qhdr_read_idx, q->qhdr_write_idx);
- return -EIO;
- }
-
- mutex_lock(&g_hfi->msg_q_lock);
- if (!g_hfi->msg_q_state) {
- pr_err("HFI message interface not ready yet\n");
- mutex_unlock(&g_hfi->msg_q_lock);
- return -EIO;
+ rc = -EIO;
+ goto err;
}
if (q_id == Q_MSG)
@@ -165,12 +166,9 @@
read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
size_in_words = (*read_ptr) >> BYTE_WORD_SHIFT;
- HFI_DBG("size_in_words : %u, read_ptr : %pK\n", size_in_words,
- (void *)read_ptr);
-
if ((size_in_words == 0) ||
(size_in_words > ICP_HFI_MAX_MSG_SIZE_IN_WORDS)) {
- pr_err("invalid HFI message packet size - 0x%08x\n",
+ CAM_ERR(CAM_HFI, "invalid HFI message packet size - 0x%08x",
size_in_words << BYTE_WORD_SHIFT);
q->qhdr_read_idx = q->qhdr_write_idx;
rc = -EIO;
@@ -178,7 +176,6 @@
}
new_read_idx = q->qhdr_read_idx + size_in_words;
- HFI_DBG("new_read_idx : %u\n", new_read_idx);
if (new_read_idx < q->qhdr_q_size) {
memcpy(pmsg, read_ptr, size_in_words << BYTE_WORD_SHIFT);
@@ -190,12 +187,9 @@
new_read_idx << BYTE_WORD_SHIFT);
}
- for (i = 0; i < size_in_words; i++)
- HFI_DBG("%x\n", read_ptr[i]);
-
q->qhdr_read_idx = new_read_idx;
err:
- mutex_unlock(&g_hfi->msg_q_lock);
+ mutex_unlock(&hfi_msg_q_mutex);
return rc;
}
@@ -265,7 +259,7 @@
case HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT:
break;
default:
- pr_err("command not supported :%d\n", type);
+ CAM_ERR(CAM_HFI, "command not supported :%d", type);
break;
}
}
@@ -277,7 +271,7 @@
struct cam_icp_query_cap_cmd *query_cmd = NULL;
if (!query_buf) {
- pr_err("%s: query buf is NULL\n", __func__);
+ CAM_ERR(CAM_HFI, "query buf is NULL");
return -EINVAL;
}
@@ -332,6 +326,9 @@
struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
uint32_t hw_version, fw_version, status = 0;
+ mutex_lock(&hfi_cmd_q_mutex);
+ mutex_lock(&hfi_msg_q_mutex);
+
if (!g_hfi) {
g_hfi = kzalloc(sizeof(struct hfi_info), GFP_KERNEL);
if (!g_hfi) {
@@ -340,13 +337,13 @@
}
}
- HFI_DBG("g_hfi: %pK\n", (void *)g_hfi);
if (g_hfi->hfi_state != HFI_DEINIT) {
- pr_err("hfi_init: invalid state\n");
+ CAM_ERR(CAM_HFI, "hfi_init: invalid state");
return -EINVAL;
}
memcpy(&g_hfi->map, hfi_mem, sizeof(g_hfi->map));
+ g_hfi->hfi_state = HFI_DEINIT;
if (debug) {
cam_io_w_mb(
@@ -373,7 +370,6 @@
qtbl_hdr->qtbl_num_active_q = ICP_HFI_NUMBER_OF_QS;
/* setup host-to-firmware command queue */
- pr_debug("updating the command queue info\n");
cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
cmd_q_hdr->qhdr_status = QHDR_ACTIVE;
cmd_q_hdr->qhdr_start_addr = hfi_mem->cmd_q.iova;
@@ -384,7 +380,6 @@
cmd_q_hdr->qhdr_write_idx = RESET;
/* setup firmware-to-Host message queue */
- pr_debug("updating the message queue info\n");
msg_q_hdr = &qtbl->q_hdr[Q_MSG];
msg_q_hdr->qhdr_status = QHDR_ACTIVE;
msg_q_hdr->qhdr_start_addr = hfi_mem->msg_q.iova;
@@ -395,7 +390,6 @@
msg_q_hdr->qhdr_write_idx = RESET;
/* setup firmware-to-Host message queue */
- pr_debug("updating the debug queue info\n");
dbg_q_hdr = &qtbl->q_hdr[Q_DBG];
dbg_q_hdr->qhdr_status = QHDR_ACTIVE;
dbg_q_hdr->qhdr_start_addr = hfi_mem->dbg_q.iova;
@@ -404,7 +398,6 @@
dbg_q_hdr->qhdr_pkt_drop_cnt = RESET;
dbg_q_hdr->qhdr_read_idx = RESET;
dbg_q_hdr->qhdr_write_idx = RESET;
- pr_debug("Done updating the debug queue info\n");
switch (event_driven_mode) {
case INTR_MODE:
@@ -473,7 +466,8 @@
break;
default:
- pr_err("Invalid event driven mode :%u", event_driven_mode);
+ CAM_ERR(CAM_HFI, "Invalid event driven mode :%u",
+ event_driven_mode);
break;
}
@@ -490,56 +484,58 @@
icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
- HFI_DBG("hw version : [%x]\n", hw_version);
rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
status, status != ICP_INIT_RESP_SUCCESS, 15, 200);
if (rc) {
- pr_err("timed out , status = %u\n", status);
+ CAM_ERR(CAM_HFI, "timed out , status = %u", status);
goto regions_fail;
}
fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
- HFI_DBG("fw version : %u[%x]\n", fw_version, fw_version);
+ CAM_DBG(CAM_HFI, "hw version : : [%x], fw version : [%x]",
+ hw_version, fw_version);
g_hfi->csr_base = icp_base;
g_hfi->hfi_state = HFI_READY;
g_hfi->cmd_q_state = true;
g_hfi->msg_q_state = true;
- mutex_init(&g_hfi->cmd_q_lock);
- mutex_init(&g_hfi->msg_q_lock);
cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+ mutex_unlock(&hfi_cmd_q_mutex);
+ mutex_unlock(&hfi_msg_q_mutex);
+
return rc;
regions_fail:
kfree(g_hfi);
alloc_fail:
+ mutex_unlock(&hfi_cmd_q_mutex);
+ mutex_unlock(&hfi_msg_q_mutex);
return rc;
}
void cam_hfi_deinit(void)
{
+ mutex_lock(&hfi_cmd_q_mutex);
+ mutex_lock(&hfi_msg_q_mutex);
+
if (!g_hfi) {
- pr_err("hfi path not established yet\n");
- return;
+ CAM_ERR(CAM_HFI, "hfi path not established yet");
+ goto err;
}
+
+ g_hfi->cmd_q_state = false;
+ g_hfi->msg_q_state = false;
+
cam_io_w((uint32_t)INTR_DISABLE,
g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN);
-
- mutex_lock(&g_hfi->cmd_q_lock);
- g_hfi->cmd_q_state = false;
- mutex_unlock(&g_hfi->cmd_q_lock);
-
- mutex_lock(&g_hfi->msg_q_lock);
- g_hfi->msg_q_state = false;
- mutex_unlock(&g_hfi->msg_q_lock);
-
- mutex_destroy(&g_hfi->cmd_q_lock);
- mutex_destroy(&g_hfi->msg_q_lock);
-
- kfree(g_hfi);
+ kzfree(g_hfi);
g_hfi = NULL;
+
+err:
+ mutex_unlock(&hfi_cmd_q_mutex);
+ mutex_unlock(&hfi_msg_q_mutex);
}
void icp_enable_fw_debug(void)
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 9f6f940..e200f6f 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "A5-CORE %s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/debugfs.h>
@@ -35,6 +33,7 @@
#include "hfi_sys_defs.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
static int cam_a5_cpas_vote(struct cam_a5_device_core_info *core_info,
struct cam_icp_cpas_vote *cpas_vote)
@@ -50,7 +49,7 @@
&cpas_vote->axi_vote);
if (rc)
- pr_err("cpas vote is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
return rc;
}
@@ -60,26 +59,26 @@
struct elf32_hdr *elf_hdr;
if (!elf) {
- pr_err("Invalid params\n");
+ CAM_ERR(CAM_ICP, "Invalid params");
return -EINVAL;
}
elf_hdr = (struct elf32_hdr *)elf;
if (memcmp(elf_hdr->e_ident, ELFMAG, SELFMAG)) {
- pr_err("ICP elf identifier is failed\n");
+ CAM_ERR(CAM_ICP, "ICP elf identifier is failed");
return -EINVAL;
}
/* check architecture */
if (elf_hdr->e_machine != EM_ARM) {
- pr_err("unsupported arch\n");
+ CAM_ERR(CAM_ICP, "unsupported arch");
return -EINVAL;
}
/* check elf bit format */
if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
- pr_err("elf doesn't support 32 bit format\n");
+ CAM_ERR(CAM_ICP, "elf doesn't support 32 bit format");
return -EINVAL;
}
@@ -97,7 +96,7 @@
struct elf32_phdr *prg_hdr;
if (!elf || !fw_size) {
- pr_err("invalid args\n");
+ CAM_ERR(CAM_ICP, "invalid args");
return -EINVAL;
}
@@ -109,11 +108,11 @@
prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
if (!prg_hdr) {
- pr_err("failed to get elf program header attr\n");
+ CAM_ERR(CAM_ICP, "failed to get elf program header attr");
return -EINVAL;
}
- pr_debug("num_prg_hdrs = %d\n", num_prg_hdrs);
+ CAM_DBG(CAM_ICP, "num_prg_hdrs = %d", num_prg_hdrs);
for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
if (prg_hdr->p_flags == 0)
continue;
@@ -121,7 +120,7 @@
seg_mem_size = (prg_hdr->p_memsz + prg_hdr->p_align - 1) &
~(prg_hdr->p_align - 1);
seg_mem_size += prg_hdr->p_vaddr;
- pr_debug("p_memsz = %x p_align = %x p_vaddr = %x seg_mem_size = %x\n",
+ CAM_DBG(CAM_ICP, "memsz:%x align:%x addr:%x seg_mem_size:%x",
(int)prg_hdr->p_memsz, (int)prg_hdr->p_align,
(int)prg_hdr->p_vaddr, (int)seg_mem_size);
if (*fw_size < seg_mem_size)
@@ -130,7 +129,7 @@
}
if (*fw_size == 0) {
- pr_err("invalid elf fw file\n");
+ CAM_ERR(CAM_ICP, "invalid elf fw file");
return -EINVAL;
}
@@ -155,7 +154,7 @@
prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
if (!prg_hdr) {
- pr_err("failed to get elf program header attr\n");
+ CAM_ERR(CAM_ICP, "failed to get elf program header attr");
return -EINVAL;
}
@@ -163,15 +162,14 @@
if (prg_hdr->p_flags == 0)
continue;
- pr_debug("Loading FW header size: %u\n", prg_hdr->p_filesz);
+ CAM_DBG(CAM_ICP, "Loading FW header size: %u",
+ prg_hdr->p_filesz);
if (prg_hdr->p_filesz != 0) {
src = (u8 *)((u8 *)elf + prg_hdr->p_offset);
dest = (u8 *)(((u8 *)core_info->fw_kva_addr) +
prg_hdr->p_vaddr);
memcpy_toio(dest, src, prg_hdr->p_filesz);
- pr_debug("fw kva: %pK, p_vaddr: 0x%x\n",
- dest, prg_hdr->p_vaddr);
}
}
@@ -191,7 +189,7 @@
struct a5_soc_info *cam_a5_soc_info = NULL;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
@@ -202,44 +200,38 @@
cam_a5_soc_info = soc_info->soc_private;
rc = request_firmware(&core_info->fw_elf, "CAMERA_ICP.elf", &pdev->dev);
- pr_debug("request_firmware: %d\n", rc);
- if (rc < 0) {
- pr_err("Failed to locate fw\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc);
return rc;
}
if (!core_info->fw_elf) {
- pr_err("request_firmware is failed\n");
+ CAM_ERR(CAM_ICP, "Invalid elf size");
return -EINVAL;
}
fw_start = core_info->fw_elf->data;
rc = cam_icp_validate_fw(fw_start);
- if (rc < 0) {
- pr_err("fw elf validation failed\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "fw elf validation failed");
return -EINVAL;
}
rc = cam_icp_get_fw_size(fw_start, &fw_size);
- if (rc < 0) {
- pr_err("unable to get fw file size\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "unable to get fw size");
return rc;
}
- pr_debug("cam_icp_get_fw_size: %u\n", fw_size);
-
- /* Check FW firmware memory allocation is OK or not */
- pr_debug("cam_icp_get_fw_size: %u %llu\n",
- fw_size, core_info->fw_buf_len);
if (core_info->fw_buf_len < fw_size) {
- pr_err("fw allocation failed\n");
+ CAM_ERR(CAM_ICP, "mismatch in fw size: %u %llu",
+ fw_size, core_info->fw_buf_len);
goto fw_alloc_failed;
}
- /* download fw */
rc = cam_icp_program_fw(fw_start, core_info);
- if (rc < 0) {
- pr_err("fw program is failed\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "fw program is failed");
goto fw_program_failed;
}
@@ -259,7 +251,7 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
@@ -267,7 +259,8 @@
core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info: %pK core_info: %pK",
+ soc_info, core_info);
return -EINVAL;
}
@@ -279,16 +272,16 @@
rc = cam_cpas_start(core_info->cpas_handle,
&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
if (rc) {
- pr_err("cpass start failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
return rc;
}
core_info->cpas_start = true;
rc = cam_a5_enable_soc_resources(soc_info);
if (rc) {
- pr_err("soc enable is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc);
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -305,24 +298,25 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
soc_info = &a5_dev->soc_info;
core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
return -EINVAL;
}
rc = cam_a5_disable_soc_resources(soc_info);
if (rc)
- pr_err("soc disable is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
if (core_info->cpas_start) {
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -339,7 +333,7 @@
uint32_t irq_status = 0;
if (!data) {
- pr_err("Invalid cam_dev_info or query_cap args\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info or query_cap args");
return IRQ_HANDLED;
}
@@ -354,18 +348,15 @@
soc_info->reg_map[A5_SIERRA_BASE].mem_base +
core_info->a5_hw_info->a5_host_int_clr);
- pr_debug("irq_status = %x\n", irq_status);
- if (irq_status & A5_HOST_INT)
- pr_debug("A5 to Host interrupt, read msg Q\n");
-
if ((irq_status & A5_WDT_0) ||
(irq_status & A5_WDT_1)) {
- pr_err_ratelimited("watch dog interrupt from A5\n");
+ CAM_ERR_RATE_LIMIT(CAM_ICP, "watch dog interrupt from A5");
}
if (core_info->irq_cb.icp_hw_mgr_cb)
core_info->irq_cb.icp_hw_mgr_cb(irq_status,
core_info->irq_cb.data);
+
return IRQ_HANDLED;
}
@@ -379,12 +370,12 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ICP, "Invalid arguments");
return -EINVAL;
}
if (cmd_type >= CAM_ICP_A5_CMD_MAX) {
- pr_err("Invalid command : %x\n", cmd_type);
+ CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
return -EINVAL;
}
@@ -401,7 +392,7 @@
struct cam_icp_a5_set_fw_buf_info *fw_buf_info = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
@@ -409,15 +400,16 @@
core_info->fw_kva_addr = fw_buf_info->kva;
core_info->fw_buf_len = fw_buf_info->len;
- pr_debug("fw buf info = %x %llx %lld\n", core_info->fw_buf,
- core_info->fw_kva_addr, core_info->fw_buf_len);
+ CAM_DBG(CAM_ICP, "fw buf info = %x %llx %lld",
+ core_info->fw_buf, core_info->fw_kva_addr,
+ core_info->fw_buf_len);
break;
}
case CAM_ICP_A5_SET_IRQ_CB: {
struct cam_icp_a5_set_irq_cb *irq_cb = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
@@ -433,7 +425,7 @@
struct cam_icp_cpas_vote *cpas_vote = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
@@ -445,7 +437,7 @@
struct cam_icp_cpas_vote *cpas_vote = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
index f649c3b..08b934e 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
@@ -23,6 +23,7 @@
#include "cam_a5_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
struct a5_soc_info cam_a5_soc_info;
EXPORT_SYMBOL(cam_a5_soc_info);
@@ -64,7 +65,7 @@
rc = cam_cpas_register_client(&cpas_register_params);
if (rc < 0) {
- pr_err("cam_cpas_register_client is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "failed: %d", rc);
return rc;
}
@@ -101,7 +102,7 @@
a5_dev_intf->hw_ops.process_cmd = cam_a5_process_cmd;
a5_dev_intf->hw_type = CAM_ICP_DEV_A5;
- pr_debug("%s: type %d index %d\n", __func__,
+ CAM_DBG(CAM_ICP, "type %d index %d",
a5_dev_intf->hw_type,
a5_dev_intf->hw_idx);
@@ -118,9 +119,9 @@
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
- pr_err("%s: No a5 hardware info\n", __func__);
+ CAM_ERR(CAM_ICP, "No a5 hardware info");
rc = -EINVAL;
- goto pr_err;
+ goto match_err;
}
hw_info = (struct cam_a5_device_hw_info *)match_dev->data;
core_info->a5_hw_info = hw_info;
@@ -130,16 +131,16 @@
rc = cam_a5_init_soc_resources(&a5_dev->soc_info, cam_a5_irq,
a5_dev);
if (rc < 0) {
- pr_err("%s: failed to init_soc\n", __func__);
+ CAM_ERR(CAM_ICP, "failed to init_soc");
goto init_soc_failure;
}
- pr_debug("cam_a5_init_soc_resources : %pK\n",
+ CAM_DBG(CAM_ICP, "soc info : %pK",
(void *)&a5_dev->soc_info);
rc = cam_a5_register_cpas(&a5_dev->soc_info,
core_info, a5_dev_intf->hw_idx);
if (rc < 0) {
- pr_err("a5 cpas registration failed\n");
+ CAM_ERR(CAM_ICP, "a5 cpas registration failed");
goto cpas_reg_failed;
}
a5_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
@@ -147,13 +148,13 @@
spin_lock_init(&a5_dev->hw_lock);
init_completion(&a5_dev->hw_complete);
- pr_debug("%s: A5%d probe successful\n", __func__,
+ CAM_DBG(CAM_ICP, "A5%d probe successful",
a5_dev_intf->hw_idx);
return 0;
cpas_reg_failed:
init_soc_failure:
-pr_err:
+match_err:
kfree(a5_dev->core_info);
core_info_alloc_failure:
kfree(a5_dev);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
index a98f01f..f252931 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
@@ -18,9 +18,7 @@
#include <media/cam_icp.h>
#include "a5_soc.h"
#include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
@@ -35,7 +33,7 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("%s: get a5 dt prop is failed\n", __func__);
+ CAM_ERR(CAM_ICP, "get a5 dt prop is failed");
return rc;
}
@@ -44,7 +42,7 @@
rc = of_property_read_string(of_node, "fw_name", &fw_name);
if (rc < 0)
- pr_err("%s: fw_name read failed\n", __func__);
+ CAM_ERR(CAM_ICP, "fw_name read failed");
return rc;
}
@@ -85,7 +83,7 @@
rc = cam_soc_util_enable_platform_resource(soc_info, true,
CAM_TURBO_VOTE, true);
if (rc)
- pr_err("%s: enable platform failed\n", __func__);
+ CAM_ERR(CAM_ICP, "enable platform failed");
return rc;
}
@@ -96,7 +94,7 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc)
- pr_err("%s: disable platform failed\n", __func__);
+ CAM_ERR(CAM_ICP, "disable platform failed");
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index cabdc8a..557eaf1 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "BPS-CORE %s:%d " fmt, __func__, __LINE__
-
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/videodev2.h>
@@ -31,6 +29,7 @@
#include "cam_icp_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
static int cam_bps_cpas_vote(struct cam_bps_device_core_info *core_info,
struct cam_icp_cpas_vote *cpas_vote)
@@ -45,7 +44,7 @@
&cpas_vote->axi_vote);
if (rc < 0)
- pr_err("cpas vote is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
return rc;
}
@@ -61,7 +60,7 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
@@ -69,7 +68,8 @@
core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
return -EINVAL;
}
@@ -81,16 +81,16 @@
rc = cam_cpas_start(core_info->cpas_handle,
&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
if (rc) {
- pr_err("cpass start failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
return rc;
}
core_info->cpas_start = true;
rc = cam_bps_enable_soc_resources(soc_info);
if (rc) {
- pr_err("soc enable is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc);
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -107,24 +107,25 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
soc_info = &bps_dev->soc_info;
core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
return -EINVAL;
}
rc = cam_bps_disable_soc_resources(soc_info);
if (rc)
- pr_err("soc disable is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
if (core_info->cpas_start) {
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -142,12 +143,12 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ICP, "Invalid arguments");
return -EINVAL;
}
if (cmd_type >= CAM_ICP_BPS_CMD_MAX) {
- pr_err("Invalid command : %x\n", cmd_type);
+ CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
return -EINVAL;
}
@@ -160,7 +161,7 @@
struct cam_icp_cpas_vote *cpas_vote = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
@@ -172,7 +173,7 @@
struct cam_icp_cpas_vote *cpas_vote = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
index c3477ee..ddff677 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
@@ -23,9 +23,7 @@
#include "cam_icp_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
struct cam_bps_device_hw_info cam_bps_hw_info = {
.reserved = 0,
@@ -47,7 +45,7 @@
rc = cam_cpas_register_client(&cpas_register_params);
if (rc < 0) {
- pr_err("cam_cpas_register_client is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "failed: %d", rc);
return rc;
}
core_info->cpas_handle = cpas_register_params.client_handle;
@@ -95,7 +93,7 @@
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
- pr_err("%s: No bps hardware info\n", __func__);
+ CAM_ERR(CAM_ICP, "No bps hardware info");
kfree(bps_dev->core_info);
kfree(bps_dev);
kfree(bps_dev_intf);
@@ -108,13 +106,13 @@
rc = cam_bps_init_soc_resources(&bps_dev->soc_info, cam_bps_irq,
bps_dev);
if (rc < 0) {
- pr_err("%s: failed to init_soc\n", __func__);
+ CAM_ERR(CAM_ICP, "failed to init_soc");
kfree(bps_dev->core_info);
kfree(bps_dev);
kfree(bps_dev_intf);
return rc;
}
- pr_debug("cam_bps_init_soc_resources : %pK\n",
+ CAM_DBG(CAM_ICP, "soc info : %pK",
(void *)&bps_dev->soc_info);
rc = cam_bps_register_cpas(&bps_dev->soc_info,
@@ -129,7 +127,7 @@
mutex_init(&bps_dev->hw_mutex);
spin_lock_init(&bps_dev->hw_lock);
init_completion(&bps_dev->hw_complete);
- pr_debug("%s: BPS%d probe successful\n", __func__,
+ CAM_DBG(CAM_ICP, "BPS%d probe successful",
bps_dev_intf->hw_idx);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index 8a3c7ac..54e898c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -18,9 +18,7 @@
#include <media/cam_icp.h>
#include "bps_soc.h"
#include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static int cam_bps_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
@@ -28,7 +26,7 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0)
- pr_err("get bps dt prop is failed\n");
+ CAM_ERR(CAM_ICP, "get bps dt prop is failed");
return rc;
}
@@ -69,7 +67,7 @@
rc = cam_soc_util_enable_platform_resource(soc_info, true,
CAM_TURBO_VOTE, false);
if (rc)
- pr_err("%s: enable platform failed\n", __func__);
+ CAM_ERR(CAM_ICP, "enable platform failed");
return rc;
}
@@ -80,7 +78,7 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
if (rc)
- pr_err("%s: disable platform failed\n", __func__);
+ CAM_ERR(CAM_ICP, "disable platform failed");
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index fe719c7..1b3afc0 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "ICP-HW-MGR %s:%d " fmt, __func__, __LINE__
-
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/of.h>
@@ -46,9 +44,7 @@
#include "cam_mem_mgr.h"
#include "a5_core.h"
#include "hfi_sys_defs.h"
-
-#undef ICP_DBG
-#define ICP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
#define ICP_WORKQ_NUM_TASK 30
#define ICP_WORKQ_TASK_CMD_TYPE 1
@@ -80,7 +76,7 @@
struct cam_icp_hw_mgr *hw_mgr;
if (!data || !priv) {
- pr_err("Invalid params%pK %pK\n", data, priv);
+ CAM_ERR(CAM_ICP, "Invalid params%pK %pK", data, priv);
return -EINVAL;
}
@@ -88,7 +84,6 @@
task_data = (struct hfi_cmd_work_data *)data;
rc = hfi_write_cmd(task_data->data);
- ICP_DBG("task type : %u, rc : %d\n", task_data->type, rc);
return rc;
}
@@ -106,14 +101,15 @@
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
if (ioconfig_ack->err_type != HFI_ERR_SYS_NONE) {
- pr_err("failed with error : %u\n", ioconfig_ack->err_type);
+ CAM_ERR(CAM_ICP, "failed with error : %u",
+ ioconfig_ack->err_type);
return -EIO;
}
frame_done =
(struct hfi_msg_frame_process_done *)ioconfig_ack->msg_data;
if (frame_done->result) {
- pr_err("result : %u\n", frame_done->result);
+ CAM_ERR(CAM_ICP, "result : %u", frame_done->result);
return -EIO;
}
@@ -126,7 +122,7 @@
break;
if (i >= CAM_FRAME_CMD_MAX) {
- pr_err("unable to find pkt in ctx data for req_id =%lld\n",
+ CAM_ERR(CAM_ICP, "pkt not found in ctx data for req_id =%lld",
request_id);
return -EINVAL;
}
@@ -136,7 +132,7 @@
ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);
/* now release memory for hfi frame process command */
- ICP_DBG("matching request id: %lld\n",
+ CAM_DBG(CAM_ICP, "matching request id: %lld",
hfi_frame_process->request_id[idx]);
mutex_lock(&ctx_data->hfi_frame_process.lock);
hfi_frame_process->request_id[idx] = 0;
@@ -153,40 +149,37 @@
struct hfi_msg_bps_common *bps_config_ack = NULL;
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
- ICP_DBG("opcode : %u\n", ioconfig_ack->opcode);
if (ioconfig_ack->opcode == HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO) {
ipe_config_ack =
(struct hfi_msg_ipe_config *)(ioconfig_ack->msg_data);
if (ipe_config_ack->rc) {
- pr_err("rc = %d err = %u\n",
+ CAM_ERR(CAM_ICP, "rc = %d err = %u",
ipe_config_ack->rc, ioconfig_ack->err_type);
return -EIO;
}
ctx_data =
(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
if (!ctx_data) {
- pr_err("wrong ctx data from IPE response\n");
+ CAM_ERR(CAM_ICP, "wrong ctx data from IPE response");
return -EINVAL;
}
mutex_lock(&ctx_data->ctx_mutex);
ctx_data->scratch_mem_size = ipe_config_ack->scratch_mem_size;
mutex_unlock(&ctx_data->ctx_mutex);
- ICP_DBG("scratch_mem_size = %u\n",
- ipe_config_ack->scratch_mem_size);
} else {
bps_config_ack =
(struct hfi_msg_bps_common *)(ioconfig_ack->msg_data);
if (bps_config_ack->rc) {
- pr_err("rc : %u, opcode :%u\n",
+ CAM_ERR(CAM_ICP, "rc : %u, opcode :%u",
bps_config_ack->rc, ioconfig_ack->opcode);
return -EIO;
}
ctx_data =
(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
if (!ctx_data) {
- pr_err("wrong ctx data from BPS response\n");
+ CAM_ERR(CAM_ICP, "wrong ctx data from BPS response");
return -EINVAL;
}
}
@@ -202,22 +195,20 @@
create_handle_ack = (struct hfi_msg_create_handle_ack *)msg_ptr;
if (!create_handle_ack) {
- pr_err("Invalid create_handle_ack\n");
+ CAM_ERR(CAM_ICP, "Invalid create_handle_ack");
return -EINVAL;
}
- ICP_DBG("err type : %u\n", create_handle_ack->err_type);
-
ctx_data = (struct cam_icp_hw_ctx_data *)create_handle_ack->user_data1;
if (!ctx_data) {
- pr_err("Invalid ctx_data\n");
+ CAM_ERR(CAM_ICP, "Invalid ctx_data");
return -EINVAL;
}
mutex_lock(&ctx_data->ctx_mutex);
ctx_data->fw_handle = create_handle_ack->fw_handle;
mutex_unlock(&ctx_data->ctx_mutex);
- ICP_DBG("fw_handle = %x\n", ctx_data->fw_handle);
+ CAM_DBG(CAM_ICP, "fw_handle = %x", ctx_data->fw_handle);
complete(&ctx_data->wait_complete);
return 0;
@@ -230,18 +221,16 @@
ping_ack = (struct hfi_msg_ping_ack *)msg_ptr;
if (!ping_ack) {
- pr_err("Empty ping ack message\n");
+ CAM_ERR(CAM_ICP, "Empty ping ack message");
return -EINVAL;
}
ctx_data = (struct cam_icp_hw_ctx_data *)ping_ack->user_data;
if (!ctx_data) {
- pr_err("Invalid ctx_data\n");
+ CAM_ERR(CAM_ICP, "Invalid ctx_data");
return -EINVAL;
}
- ICP_DBG("%x %x %pK\n", ping_ack->size, ping_ack->pkt_type,
- (void *)ping_ack->user_data);
complete(&ctx_data->wait_complete);
return 0;
@@ -254,7 +243,7 @@
switch (msg_ptr[ICP_PACKET_OPCODE]) {
case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
- ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_CONFIG_IO:\n");
+ CAM_DBG(CAM_ICP, "received IPE/BPS_CONFIG_IO:");
rc = cam_icp_mgr_process_msg_config_io(msg_ptr);
if (rc)
return rc;
@@ -262,13 +251,12 @@
case HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS:
case HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS:
- ICP_DBG("received OPCODE_IPE/BPS_FRAME_PROCESS:\n");
rc = cam_icp_mgr_process_msg_frame_process(msg_ptr);
if (rc)
return rc;
break;
default:
- pr_err("Invalid opcode : %u\n",
+ CAM_ERR(CAM_ICP, "Invalid opcode : %u",
msg_ptr[ICP_PACKET_OPCODE]);
break;
}
@@ -287,14 +275,14 @@
case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
- ICP_DBG("received IPE/BPS_DESTROY/ABORT:\n");
+ CAM_DBG(CAM_ICP, "received IPE/BPS_DESTROY/ABORT:");
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
ctx_data =
(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
complete(&ctx_data->wait_complete);
break;
default:
- pr_err("Invalid opcode : %u\n",
+ CAM_ERR(CAM_ICP, "Invalid opcode : %u",
msg_ptr[ICP_PACKET_OPCODE]);
rc = -EINVAL;
break;
@@ -312,37 +300,35 @@
int read_len;
if (!data || !priv) {
- pr_err("Invalid data\n");
+ CAM_ERR(CAM_ICP, "Invalid data");
return -EINVAL;
}
task_data = data;
hw_mgr = priv;
- ICP_DBG("irq status : %u\n", task_data->irq_status);
read_len = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG);
if (read_len < 0) {
- ICP_DBG("Unable to read msg q\n");
+ CAM_DBG(CAM_ICP, "Unable to read msg q");
return read_len;
}
msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
- ICP_DBG("packet type: %x\n", msg_ptr[ICP_PACKET_TYPE]);
mutex_lock(&hw_mgr->hw_mgr_mutex);
switch (msg_ptr[ICP_PACKET_TYPE]) {
case HFI_MSG_SYS_INIT_DONE:
- ICP_DBG("received HFI_MSG_SYS_INIT_DONE\n");
+ CAM_DBG(CAM_ICP, "received SYS_INIT_DONE");
complete(&hw_mgr->a5_complete);
break;
case HFI_MSG_SYS_PING_ACK:
- ICP_DBG("received HFI_MSG_SYS_PING_ACK\n");
+ CAM_DBG(CAM_ICP, "received SYS_PING_ACK");
rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
break;
case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
- ICP_DBG("received HFI_MSG_IPEBPS_CREATE_HANDLE_ACK\n");
+ CAM_DBG(CAM_ICP, "received IPEBPS_CREATE_HANDLE_ACK");
rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
break;
@@ -355,11 +341,12 @@
break;
case HFI_MSG_EVENT_NOTIFY:
- ICP_DBG("received HFI_MSG_EVENT_NOTIFY\n");
+ CAM_DBG(CAM_ICP, "received EVENT_NOTIFY");
break;
default:
- pr_err("invalid msg : %u\n", msg_ptr[ICP_PACKET_TYPE]);
+ CAM_ERR(CAM_ICP, "invalid msg : %u",
+ msg_ptr[ICP_PACKET_TYPE]);
break;
}
@@ -379,7 +366,7 @@
spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
if (!task) {
- pr_err("no empty task\n");
+ CAM_ERR(CAM_ICP, "no empty task");
spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
return -ENOMEM;
}
@@ -416,19 +403,16 @@
memset(&out, 0, sizeof(out));
alloc.size = SZ_1M;
alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
+ alloc.flags = CAM_MEM_FLAG_HW_READ_WRITE |
+ CAM_MEM_FLAG_HW_SHARED_ACCESS;
alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
rc = cam_mem_mgr_request_mem(&alloc, &out);
if (rc)
return rc;
*qtbl = out;
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("qtbl IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
+ CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+ out.kva, out.iova, out.mem_handle, out.len);
return rc;
}
@@ -450,9 +434,8 @@
icp_hw_mgr.hfi_mem.fw_buf.iova = iova;
icp_hw_mgr.hfi_mem.fw_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
- ICP_DBG("kva = %llX\n", kvaddr);
- ICP_DBG("IOVA = %llX\n", iova);
- ICP_DBG("length = %zu\n", len);
+ CAM_DBG(CAM_ICP, "kva: %llX, iova: %llx, len: %zu",
+ kvaddr, iova, len);
return rc;
}
@@ -462,46 +445,46 @@
int rc;
rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
- CAM_MEM_MGR_REGION_SHARED,
+ CAM_SMMU_REGION_SHARED,
&icp_hw_mgr.hfi_mem.shmem);
if (rc) {
- pr_err("Unable to get shared memory info\n");
+ CAM_ERR(CAM_ICP, "Unable to get shared memory info");
return rc;
}
rc = cam_icp_allocate_fw_mem();
if (rc) {
- pr_err("Unable to allocate FW memory\n");
+ CAM_ERR(CAM_ICP, "Unable to allocate FW memory");
return rc;
}
rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
if (rc) {
- pr_err("Unable to allocate qtbl memory\n");
+ CAM_ERR(CAM_ICP, "Unable to allocate qtbl memory");
goto qtbl_alloc_failed;
}
rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
if (rc) {
- pr_err("Unable to allocate cmd q memory\n");
+ CAM_ERR(CAM_ICP, "Unable to allocate cmd q memory");
goto cmd_q_alloc_failed;
}
rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
if (rc) {
- pr_err("Unable to allocate msg q memory\n");
+ CAM_ERR(CAM_ICP, "Unable to allocate msg q memory");
goto msg_q_alloc_failed;
}
rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
if (rc) {
- pr_err("Unable to allocate dbg q memory\n");
+ CAM_ERR(CAM_ICP, "Unable to allocate dbg q memory");
goto dbg_q_alloc_failed;
}
rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.sec_heap);
if (rc) {
- pr_err("Unable to allocate sec heap q memory\n");
+ CAM_ERR(CAM_ICP, "Unable to allocate sec heap q memory");
goto sec_heap_alloc_failed;
}
@@ -585,13 +568,13 @@
if (rc)
return rc;
- ICP_DBG("fw_handle = %x ctx_data = %pK\n",
+ CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
ctx_data->fw_handle, ctx_data);
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- ICP_DBG("FW timeout/err in abort handle command\n");
+ CAM_DBG(CAM_ICP, "FW timeout/err in abort handle command");
}
return rc;
@@ -639,13 +622,13 @@
if (rc)
return rc;
- ICP_DBG("fw_handle = %x ctx_data = %pK\n",
+ CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
ctx_data->fw_handle, ctx_data);
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- ICP_DBG("FW response timeout: %d\n", rc);
+ CAM_ERR(CAM_ICP, "FW response timeout: %d", rc);
}
return rc;
@@ -656,13 +639,12 @@
int i = 0;
if (ctx_id >= CAM_ICP_CTX_MAX) {
- pr_err("ctx_id is wrong: %d\n", ctx_id);
+ CAM_ERR(CAM_ICP, "ctx_id is wrong: %d", ctx_id);
return -EINVAL;
}
mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
if (!hw_mgr->ctx_data[ctx_id].in_use) {
- ICP_DBG("ctx is not in use: %d\n", ctx_id);
mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
return 0;
}
@@ -703,7 +685,7 @@
bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
- pr_err("dev intfs are wrong, failed to close\n");
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
return;
}
@@ -724,14 +706,14 @@
mutex_lock(&hw_mgr->hw_mgr_mutex);
if ((hw_mgr->fw_download == false) && (!hw_mgr->ctxt_cnt)) {
- ICP_DBG("hw mgr is already closed\n");
+ CAM_DBG(CAM_ICP, "hw mgr is already closed");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return 0;
}
a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
if (!a5_dev_intf) {
- pr_err("a5_dev_intf is NULL\n");
+ CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return -EINVAL;
}
@@ -743,7 +725,7 @@
CAM_ICP_A5_SET_IRQ_CB,
&irq_cb, sizeof(irq_cb));
if (rc)
- pr_err("deregister irq call back failed\n");
+ CAM_ERR(CAM_ICP, "deregister irq call back failed");
fw_buf_info.kva = 0;
fw_buf_info.iova = 0;
@@ -754,7 +736,7 @@
&fw_buf_info,
sizeof(fw_buf_info));
if (rc)
- pr_err("nullify the fw buf failed\n");
+ CAM_ERR(CAM_ICP, "nullify the fw buf failed");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
for (i = 0; i < CAM_ICP_CTX_MAX; i++)
@@ -783,7 +765,7 @@
bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
- pr_err("dev intfs are wrong\n");
+ CAM_ERR(CAM_ICP, "dev intfs are wrong");
return -EINVAL;
}
@@ -876,30 +858,18 @@
hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
- ICP_DBG("kva = %llX\n", hfi_mem.qtbl.kva);
- ICP_DBG("IOVA = %X\n", hfi_mem.qtbl.iova);
- ICP_DBG("length = %lld\n", hfi_mem.qtbl.len);
hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
- ICP_DBG("kva = %llX\n", hfi_mem.cmd_q.kva);
- ICP_DBG("IOVA = %X\n", hfi_mem.cmd_q.iova);
- ICP_DBG("length = %lld\n", hfi_mem.cmd_q.len);
hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
- ICP_DBG("kva = %llX\n", hfi_mem.msg_q.kva);
- ICP_DBG("IOVA = %X\n", hfi_mem.msg_q.iova);
- ICP_DBG("length = %lld\n", hfi_mem.msg_q.len);
hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
- ICP_DBG("kva = %llX\n", hfi_mem.dbg_q.kva);
- ICP_DBG("IOVA = %X\n", hfi_mem.dbg_q.iova);
- ICP_DBG("length = %lld\n", hfi_mem.dbg_q.len);
hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
@@ -907,6 +877,7 @@
hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
return cam_hfi_init(0, &hfi_mem,
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
hw_mgr->a5_debug);
@@ -921,7 +892,7 @@
a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
reinit_completion(&hw_mgr->a5_complete);
- ICP_DBG("Sending HFI init command\n");
+ CAM_DBG(CAM_ICP, "Sending HFI init command");
rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_SEND_INIT,
@@ -929,14 +900,13 @@
if (rc)
return rc;
- ICP_DBG("Wait for INIT DONE Message\n");
rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- ICP_DBG("FW response timed out %d\n", rc);
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
}
- ICP_DBG("Done Waiting for INIT DONE Message\n");
+ CAM_DBG(CAM_ICP, "Done Waiting for INIT DONE Message");
return rc;
}
@@ -949,13 +919,13 @@
int rc = 0;
if (!hw_mgr) {
- pr_err("hw_mgr is NULL\n");
+ CAM_ERR(CAM_ICP, "hw_mgr is NULL");
return -EINVAL;
}
mutex_lock(&hw_mgr->hw_mgr_mutex);
if (hw_mgr->fw_download) {
- ICP_DBG("FW already downloaded\n");
+ CAM_DBG(CAM_ICP, "FW already downloaded");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return rc;
}
@@ -997,7 +967,7 @@
NULL, 0);
hw_mgr->fw_download = true;
hw_mgr->ctxt_cnt = 0;
- ICP_DBG("FW download done successfully\n");
+ CAM_DBG(CAM_ICP, "FW download done successfully");
if (!download_fw_args)
cam_icp_mgr_hw_close(hw_mgr, NULL);
return rc;
@@ -1039,19 +1009,17 @@
request_id = *(uint64_t *)config_args->priv;
hw_update_entries = config_args->hw_update_entries;
- ICP_DBG("req_id = %lld %pK\n", request_id, config_args->priv);
+ CAM_DBG(CAM_ICP, "req_id = %lld %pK", request_id, config_args->priv);
task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
if (!task) {
- pr_err("no empty task\n");
+ CAM_ERR(CAM_ICP, "no empty task");
return -ENOMEM;
}
task_data = (struct hfi_cmd_work_data *)task->payload;
task_data->data = (void *)hw_update_entries->addr;
hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
- ICP_DBG("request from hfi_cmd :%llu, hfi_cmd: %pK\n",
- hfi_cmd->user_data2, hfi_cmd);
task_data->request_id = request_id;
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
@@ -1069,19 +1037,20 @@
struct cam_icp_hw_ctx_data *ctx_data = NULL;
if (!hw_mgr || !config_args) {
- pr_err("Invalid arguments %pK %pK\n", hw_mgr, config_args);
+ CAM_ERR(CAM_ICP, "Invalid arguments %pK %pK",
+ hw_mgr, config_args);
return -EINVAL;
}
if (!config_args->num_hw_update_entries) {
- pr_err("No hw update enteries are available\n");
+ CAM_ERR(CAM_ICP, "No hw update enteries are available");
return -EINVAL;
}
mutex_lock(&hw_mgr->hw_mgr_mutex);
ctx_data = config_args->ctxt_to_hw_map;
if (!ctx_data->in_use) {
- pr_err("ctx is not in use\n");
+ CAM_ERR(CAM_ICP, "ctx is not in use");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
rc = -EINVAL;
goto config_err;
@@ -1116,7 +1085,7 @@
hfi_cmd->user_data1 = (uint64_t)ctx_data;
hfi_cmd->user_data2 = request_id;
- ICP_DBG("ctx_data : %pK, request_id :%lld cmd_buf %x\n",
+ CAM_DBG(CAM_ICP, "ctx_data : %pK, request_id :%lld cmd_buf %x",
(void *)ctx_data->context_priv, request_id,
fw_cmd_buf_iova_addr);
@@ -1125,32 +1094,18 @@
static int cam_icp_mgr_pkt_validation(struct cam_packet *packet)
{
- ICP_DBG("packet header : opcode = %x size = %x",
- packet->header.op_code, packet->header.size);
-
- ICP_DBG(" req_id = %x flags = %x\n",
- (uint32_t)packet->header.request_id, packet->header.flags);
-
- ICP_DBG("packet data : c_off = %x c_num = %x\n",
- packet->cmd_buf_offset, packet->num_cmd_buf);
-
- ICP_DBG("io_off = %x io_num = %x p_off = %x p_num = %x %x %x\n",
- packet->io_configs_offset, packet->num_io_configs,
- packet->patch_offset, packet->num_patches,
- packet->kmd_cmd_buf_index, packet->kmd_cmd_buf_offset);
-
if (((packet->header.op_code & 0xff) !=
CAM_ICP_OPCODE_IPE_UPDATE) &&
((packet->header.op_code & 0xff) !=
CAM_ICP_OPCODE_BPS_UPDATE)) {
- pr_err("Invalid Opcode in pkt: %d\n",
+ CAM_ERR(CAM_ICP, "Invalid Opcode in pkt: %d",
packet->header.op_code & 0xff);
return -EINVAL;
}
if ((packet->num_cmd_buf > 1) || (!packet->num_patches) ||
(!packet->num_io_configs)) {
- pr_err("wrong number of cmd/patch info: %u %u\n",
+ CAM_ERR(CAM_ICP, "wrong number of cmd/patch info: %u %u",
packet->num_cmd_buf, packet->num_patches);
return -EINVAL;
}
@@ -1169,18 +1124,15 @@
cmd_desc = (struct cam_cmd_buf_desc *)
((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
- ICP_DBG("packet = %pK cmd_desc = %pK size = %lu\n",
- (void *)packet, (void *)cmd_desc,
- sizeof(struct cam_cmd_buf_desc));
rc = cam_mem_get_io_buf(cmd_desc->mem_handle,
hw_mgr->iommu_hdl, &iova_addr, &fw_cmd_buf_len);
if (rc) {
- pr_err("unable to get src buf info for cmd buf: %x\n",
+ CAM_ERR(CAM_ICP, "unable to get src buf info for cmd buf: %x",
hw_mgr->iommu_hdl);
return rc;
}
- ICP_DBG("cmd_buf desc cpu and iova address: %pK %zu\n",
+ CAM_DBG(CAM_ICP, "cmd_buf desc cpu and iova address: %pK %zu",
(void *)iova_addr, fw_cmd_buf_len);
*fw_cmd_buf_iova_addr = iova_addr;
@@ -1189,65 +1141,32 @@
return rc;
}
-static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
+static void cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
struct cam_icp_hw_ctx_data *ctx_data,
struct cam_packet *packet,
struct cam_hw_prepare_update_args *prepare_args)
{
- int rc = 0, i, j;
- int32_t sync_in_obj[CAM_ICP_IPE_IMAGE_MAX];
- int32_t merged_sync_in_obj;
+ int i, j, k;
struct cam_buf_io_cfg *io_cfg_ptr = NULL;
io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
packet->io_configs_offset/4);
- ICP_DBG("packet = %pK io_cfg_ptr = %pK size = %lu\n",
- (void *)packet, (void *)io_cfg_ptr,
- sizeof(struct cam_buf_io_cfg));
-
prepare_args->num_out_map_entries = 0;
- for (i = 0, j = 0; i < packet->num_io_configs; i++) {
+ prepare_args->num_in_map_entries = 0;
+
+ for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
- ICP_DBG("direction is i : %d :%u\n",
- i, io_cfg_ptr[i].direction);
- ICP_DBG("fence is i : %d :%d\n",
- i, io_cfg_ptr[i].fence);
- continue;
+ prepare_args->in_map_entries[j++].sync_id =
+ io_cfg_ptr[i].fence;
+ prepare_args->num_in_map_entries++;
+ } else {
+ prepare_args->out_map_entries[k++].sync_id =
+ io_cfg_ptr[i].fence;
+ prepare_args->num_out_map_entries++;
}
-
- prepare_args->out_map_entries[j++].sync_id =
- io_cfg_ptr[i].fence;
- prepare_args->num_out_map_entries++;
- ICP_DBG(" out fence = %x index = %d\n", io_cfg_ptr[i].fence, i);
+ CAM_DBG(CAM_ICP, "dir[%d]: %u, fence: %u",
+ i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
}
-
- for (i = 0, j = 0; i < packet->num_io_configs; i++) {
- if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
- sync_in_obj[j++] = io_cfg_ptr[i].fence;
- ICP_DBG(" in fence = %x index = %d\n",
- io_cfg_ptr[i].fence, i);
- }
- }
-
- if (j == 1) {
- merged_sync_in_obj = sync_in_obj[j - 1];
- } else if (j > 1) {
- rc = cam_sync_merge(&sync_in_obj[0], j, &merged_sync_in_obj);
- if (rc) {
- pr_err("unable to create in merged object: %d\n", rc);
- return rc;
- }
- } else {
- pr_err("no input fence provided %u\n", j);
- return -EINVAL;
- }
-
- prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
- prepare_args->in_map_entries[0].resource_handle =
- ctx_data->icp_dev_acquire_info->dev_type;
- prepare_args->num_in_map_entries = 1;
-
- return rc;
}
static int cam_icp_mgr_update_hfi_frame_process(
@@ -1262,7 +1181,7 @@
index = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
ctx_data->hfi_frame_process.bits);
if (index < 0 || index >= CAM_FRAME_CMD_MAX) {
- pr_err("request idx is wrong: %d\n", index);
+ CAM_ERR(CAM_ICP, "request idx is wrong: %d", index);
mutex_unlock(&ctx_data->hfi_frame_process.lock);
return -EINVAL;
}
@@ -1271,8 +1190,7 @@
ctx_data->hfi_frame_process.request_id[index] =
packet->header.request_id;
- ICP_DBG("slot[%d]: %lld\n", index,
- ctx_data->hfi_frame_process.request_id[index]);
+
*idx = index;
return 0;
@@ -1292,7 +1210,7 @@
prepare_hw_update_args;
if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
- pr_err("Invalid args\n");
+ CAM_ERR(CAM_ICP, "Invalid args");
return -EINVAL;
}
@@ -1300,7 +1218,7 @@
mutex_lock(&hw_mgr->hw_mgr_mutex);
if (!ctx_data->in_use) {
mutex_unlock(&hw_mgr->hw_mgr_mutex);
- pr_err("ctx is not in use\n");
+ CAM_ERR(CAM_ICP, "ctx is not in use");
return -EINVAL;
}
mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -1321,10 +1239,8 @@
if (rc)
return rc;
- rc = cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
+ cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
packet, prepare_args);
- if (rc)
- return rc;
rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet,
prepare_args, &idx);
@@ -1345,9 +1261,6 @@
prepare_args->num_hw_update_entries = 1;
prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
prepare_args->priv = &ctx_data->hfi_frame_process.request_id[idx];
- ICP_DBG("slot : %d, hfi_cmd : %pK, request : %lld\n", idx,
- (void *)hfi_cmd,
- ctx_data->hfi_frame_process.request_id[idx]);
return rc;
}
@@ -1384,14 +1297,14 @@
struct cam_icp_hw_ctx_data *ctx_data = NULL;
if (!release_hw || !hw_mgr) {
- pr_err("Invalid args: %pK %pK\n", release_hw, hw_mgr);
+ CAM_ERR(CAM_ICP, "Invalid args: %pK %pK", release_hw, hw_mgr);
return -EINVAL;
}
ctx_data = release_hw->ctxt_to_hw_map;
ctx_id = ctx_data->ctx_id;
if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
- pr_err("Invalid ctx id: %d\n", ctx_id);
+ CAM_ERR(CAM_ICP, "Invalid ctx id: %d", ctx_id);
return -EINVAL;
}
@@ -1429,8 +1342,7 @@
ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
reinit_completion(&ctx_data->wait_complete);
- ICP_DBG("Sending HFI_CMD_IPEBPS_ASYNC_COMMAND: opcode :%u\n",
- ioconfig_cmd.opcode);
+
ioconfig_cmd.num_fw_handles = 1;
ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
ioconfig_cmd.payload.indirect = io_buf_addr;
@@ -1446,13 +1358,11 @@
if (rc)
return rc;
- ICP_DBG("fw_hdl = %x ctx_data = %pK\n", ctx_data->fw_handle, ctx_data);
-
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- ICP_DBG("FW response timed out %d\n", rc);
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
}
return rc;
@@ -1476,9 +1386,6 @@
create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
create_handle.handle_type = dev_type;
create_handle.user_data1 = (uint64_t)ctx_data;
- ICP_DBG("%x %x %x %pK\n", create_handle.size, create_handle.pkt_type,
- create_handle.handle_type, (void *)create_handle.user_data1);
- ICP_DBG("Sending HFI_CMD_IPEBPS_CREATE_HANDLE\n");
reinit_completion(&ctx_data->wait_complete);
task_data = (struct hfi_cmd_work_data *)task->payload;
@@ -1495,7 +1402,7 @@
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- ICP_DBG("FW response timed out %d\n", rc);
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
}
return rc;
@@ -1512,16 +1419,13 @@
task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
if (!task) {
- pr_err("No free task to send ping command\n");
+ CAM_ERR(CAM_ICP, "No free task to send ping command");
return -ENOMEM;
}
ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
ping_pkt.pkt_type = HFI_CMD_SYS_PING;
ping_pkt.user_data = (uint64_t)ctx_data;
- ICP_DBG("Sending HFI_CMD_SYS_PING\n");
- ICP_DBG("%x %x %pK\n", ping_pkt.size, ping_pkt.pkt_type,
- (void *)ping_pkt.user_data);
init_completion(&ctx_data->wait_complete);
task_data = (struct hfi_cmd_work_data *)task->payload;
@@ -1539,7 +1443,7 @@
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- ICP_DBG("FW response timed out %d\n", rc);
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
}
return rc;
@@ -1560,7 +1464,7 @@
return -EFAULT;
if (icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) {
- pr_err("num of out resources exceeding : %u\n",
+ CAM_ERR(CAM_ICP, "num of out resources exceeding : %u",
icp_dev_acquire_info.num_out_res);
return -EINVAL;
}
@@ -1579,7 +1483,7 @@
return -EFAULT;
}
- ICP_DBG("%x %x %x %x %x %x %x\n",
+ CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x",
ctx_data->icp_dev_acquire_info->dev_type,
ctx_data->icp_dev_acquire_info->in_res.format,
ctx_data->icp_dev_acquire_info->in_res.width,
@@ -1590,7 +1494,7 @@
p_icp_out = ctx_data->icp_dev_acquire_info->out_res;
for (i = 0; i < ctx_data->icp_dev_acquire_info->num_out_res; i++)
- ICP_DBG("out[i] %x %x %x %x\n",
+ CAM_DBG(CAM_ICP, "out[i] %x %x %x %x",
p_icp_out[i].format,
p_icp_out[i].width,
p_icp_out[i].height,
@@ -1611,20 +1515,21 @@
struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
if ((!hw_mgr_priv) || (!acquire_hw_args)) {
- pr_err("Invalid params: %pK %pK\n", hw_mgr_priv,
+ CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", hw_mgr_priv,
acquire_hw_args);
return -EINVAL;
}
if (args->num_acq > 1) {
- pr_err("number of resources are wrong: %u\n", args->num_acq);
+ CAM_ERR(CAM_ICP, "number of resources are wrong: %u",
+ args->num_acq);
return -EINVAL;
}
mutex_lock(&hw_mgr->hw_mgr_mutex);
ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
if (ctx_id >= CAM_ICP_CTX_MAX) {
- pr_err("No free ctx space in hw_mgr\n");
+ CAM_ERR(CAM_ICP, "No free ctx space in hw_mgr");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return -ENOSPC;
}
@@ -1654,13 +1559,13 @@
&io_buf_addr, &io_buf_size);
if (rc) {
- pr_err("unable to get src buf info from io desc\n");
+ CAM_ERR(CAM_ICP, "unable to get src buf info from io desc");
goto get_io_buf_failed;
}
- ICP_DBG("io_config_cmd_handle : %d\n",
- icp_dev_acquire_info->io_config_cmd_handle);
- ICP_DBG("io_buf_addr : %pK\n", (void *)io_buf_addr);
- ICP_DBG("io_buf_size : %zu\n", io_buf_size);
+
+ CAM_DBG(CAM_ICP, "hdl: %d, addr: %pK, size: %zu",
+ icp_dev_acquire_info->io_config_cmd_handle,
+ (void *)io_buf_addr, io_buf_size);
mutex_lock(&hw_mgr->hw_mgr_mutex);
if (!hw_mgr->ctxt_cnt) {
@@ -1674,20 +1579,20 @@
rc = cam_icp_mgr_send_ping(ctx_data);
if (rc) {
- pr_err("ping ack not received\n");
+ CAM_ERR(CAM_ICP, "ping ack not received");
goto send_ping_failed;
}
rc = cam_icp_mgr_create_handle(icp_dev_acquire_info->dev_type,
ctx_data);
if (rc) {
- pr_err("create handle failed\n");
+ CAM_ERR(CAM_ICP, "create handle failed");
goto create_handle_failed;
}
rc = cam_icp_mgr_send_config_io(ctx_data, io_buf_addr);
if (rc) {
- pr_err("IO Config command failed\n");
+ CAM_ERR(CAM_ICP, "IO Config command failed");
goto ioconfig_failed;
}
@@ -1710,7 +1615,7 @@
icp_dev_acquire_info, sizeof(struct cam_icp_acquire_dev_info)))
goto copy_to_user_failed;
- ICP_DBG("scratch mem size = %x fw_handle = %x\n",
+ CAM_DBG(CAM_ICP, "scratch size = %x fw_handle = %x",
(unsigned int)icp_dev_acquire_info->scratch_mem_size,
(unsigned int)ctx_data->fw_handle);
mutex_lock(&hw_mgr->hw_mgr_mutex);
@@ -1743,14 +1648,15 @@
struct cam_query_cap_cmd *query_cap = hw_caps_args;
if ((!hw_mgr_priv) || (!hw_caps_args)) {
- pr_err("Invalid params: %pK %pK\n", hw_mgr_priv, hw_caps_args);
+ CAM_ERR(CAM_ICP, "Invalid params: %pK %pK",
+ hw_mgr_priv, hw_caps_args);
return -EINVAL;
}
if (copy_from_user(&icp_hw_mgr.icp_caps,
(void __user *)query_cap->caps_handle,
sizeof(struct cam_icp_query_cap_cmd))) {
- pr_err("copy_from_user failed\n");
+ CAM_ERR(CAM_ICP, "copy_from_user failed");
return -EFAULT;
}
@@ -1764,7 +1670,7 @@
if (copy_to_user((void __user *)query_cap->caps_handle,
&icp_hw_mgr.icp_caps, sizeof(struct cam_icp_query_cap_cmd))) {
- pr_err("copy_to_user failed\n");
+ CAM_ERR(CAM_ICP, "copy_to_user failed");
rc = -EFAULT;
goto hfi_get_caps_fail;
}
@@ -1787,7 +1693,7 @@
hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
if (!of_node || !hw_mgr_intf) {
- pr_err("Invalid args of_node %pK hw_mgr %pK\n",
+ CAM_ERR(CAM_ICP, "Invalid args of_node %pK hw_mgr %pK",
of_node, hw_mgr_intf);
return -EINVAL;
}
@@ -1810,7 +1716,7 @@
/* Get number of device objects */
count = of_property_count_strings(of_node, "compat-hw-name");
if (!count) {
- pr_err("no compat hw found in dev tree, count = %d\n", count);
+ CAM_ERR(CAM_ICP, "no compat hw found, count = %d", count);
rc = -EINVAL;
goto num_dev_failed;
}
@@ -1818,7 +1724,7 @@
/* Get number of a5 device nodes and a5 mem allocation */
rc = of_property_read_u32(of_node, "num-a5", &num_dev);
if (rc) {
- pr_err("getting num of a5 failed\n");
+ CAM_ERR(CAM_ICP, "getting num of a5 failed");
goto num_dev_failed;
}
@@ -1832,7 +1738,7 @@
/* Get number of ipe device nodes and ipe mem allocation */
rc = of_property_read_u32(of_node, "num-ipe", &num_dev);
if (rc) {
- pr_err("getting number of ipe dev nodes failed\n");
+ CAM_ERR(CAM_ICP, "getting number of ipe dev nodes failed");
goto num_ipe_failed;
}
@@ -1846,7 +1752,7 @@
/* Get number of bps device nodes and bps mem allocation */
rc = of_property_read_u32(of_node, "num-bps", &num_dev);
if (rc) {
- pr_err("read num bps devices failed\n");
+ CAM_ERR(CAM_ICP, "read num bps devices failed");
goto num_bps_failed;
}
icp_hw_mgr.devices[CAM_ICP_DEV_BPS] = kzalloc(
@@ -1860,20 +1766,20 @@
rc = of_property_read_string_index(of_node, "compat-hw-name",
i, &name);
if (rc) {
- pr_err("getting dev object name failed\n");
+ CAM_ERR(CAM_ICP, "getting dev object name failed");
goto compat_hw_name_failed;
}
child_node = of_find_node_by_name(NULL, name);
if (!child_node) {
- pr_err("error! Cannot find node in dtsi %s\n", name);
+ CAM_ERR(CAM_ICP, "Cannot find node in dtsi %s", name);
rc = -ENODEV;
goto compat_hw_name_failed;
}
child_pdev = of_find_device_by_node(child_node);
if (!child_pdev) {
- pr_err("failed to find device on bus %s\n",
+ CAM_ERR(CAM_ICP, "failed to find device on bus %s",
child_node->name);
rc = -ENODEV;
of_node_put(child_node);
@@ -1883,13 +1789,10 @@
child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
child_pdev);
if (!child_dev_intf) {
- pr_err("no child device\n");
+ CAM_ERR(CAM_ICP, "no child device");
of_node_put(child_node);
goto compat_hw_name_failed;
}
- ICP_DBG("child_intf %pK\n", child_dev_intf);
- ICP_DBG("child type %d index %d\n", child_dev_intf->hw_type,
- child_dev_intf->hw_idx);
icp_hw_mgr.devices[child_dev_intf->hw_type]
[child_dev_intf->hw_idx] = child_dev_intf;
@@ -1899,27 +1802,27 @@
rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
if (rc) {
- pr_err("icp get iommu handle failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "icp get iommu handle failed: %d", rc);
goto compat_hw_name_failed;
}
rc = cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
if (rc) {
- pr_err("icp attach failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "icp attach failed: %d", rc);
goto icp_attach_failed;
}
rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ);
if (rc) {
- pr_err("unable to create a worker\n");
+ CAM_ERR(CAM_ICP, "unable to create a worker");
goto cmd_work_failed;
}
rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ);
if (rc) {
- pr_err("unable to create a worker\n");
+ CAM_ERR(CAM_ICP, "unable to create a worker");
goto msg_work_failed;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 99b45aa..b7b3d7b 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "IPE-CORE %s:%d " fmt, __func__, __LINE__
-
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/videodev2.h>
@@ -30,6 +28,7 @@
#include "cam_ipe_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
static int cam_ipe_caps_vote(struct cam_ipe_device_core_info *core_info,
struct cam_icp_cpas_vote *cpas_vote)
@@ -44,7 +43,7 @@
&cpas_vote->axi_vote);
if (rc)
- pr_err("cpas vote is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
return rc;
}
@@ -59,7 +58,7 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
@@ -67,7 +66,8 @@
core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
return -EINVAL;
}
@@ -79,16 +79,16 @@
rc = cam_cpas_start(core_info->cpas_handle,
&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
if (rc) {
- pr_err("cpass start failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "cpas start failed: %d", rc);
return rc;
}
core_info->cpas_start = true;
rc = cam_ipe_enable_soc_resources(soc_info);
if (rc) {
- pr_err("soc enable is failed : %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc enable is failed : %d", rc);
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -105,24 +105,25 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
soc_info = &ipe_dev->soc_info;
core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
return -EINVAL;
}
rc = cam_ipe_disable_soc_resources(soc_info);
if (rc)
- pr_err("soc disable is failed : %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc disable is failed : %d", rc);
if (core_info->cpas_start) {
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -140,12 +141,12 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ICP, "Invalid arguments");
return -EINVAL;
}
if (cmd_type >= CAM_ICP_IPE_CMD_MAX) {
- pr_err("Invalid command : %x\n", cmd_type);
+ CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
index 0efb1de..d95246f 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
@@ -23,9 +23,7 @@
#include "cam_icp_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
struct cam_ipe_device_hw_info cam_ipe_hw_info = {
.reserved = 0,
@@ -47,7 +45,7 @@
rc = cam_cpas_register_client(&cpas_register_params);
if (rc < 0) {
- pr_err("cam_cpas_register_client is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "failed: %d", rc);
return rc;
}
core_info->cpas_handle = cpas_register_params.client_handle;
@@ -83,7 +81,7 @@
ipe_dev_intf->hw_ops.process_cmd = cam_ipe_process_cmd;
ipe_dev_intf->hw_type = CAM_ICP_DEV_IPE;
- pr_debug("%s: type %d index %d\n", __func__,
+ CAM_DBG(CAM_ICP, "type %d index %d",
ipe_dev_intf->hw_type,
ipe_dev_intf->hw_idx);
@@ -101,7 +99,7 @@
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
- pr_debug("%s: No ipe hardware info\n", __func__);
+ CAM_DBG(CAM_ICP, "No ipe hardware info");
kfree(ipe_dev->core_info);
kfree(ipe_dev);
kfree(ipe_dev_intf);
@@ -114,14 +112,14 @@
rc = cam_ipe_init_soc_resources(&ipe_dev->soc_info, cam_ipe_irq,
ipe_dev);
if (rc < 0) {
- pr_err("%s: failed to init_soc\n", __func__);
+ CAM_ERR(CAM_ICP, "failed to init_soc");
kfree(ipe_dev->core_info);
kfree(ipe_dev);
kfree(ipe_dev_intf);
return rc;
}
- pr_debug("cam_ipe_init_soc_resources : %pK\n",
+ CAM_DBG(CAM_ICP, "cam_ipe_init_soc_resources : %pK",
(void *)&ipe_dev->soc_info);
rc = cam_ipe_register_cpas(&ipe_dev->soc_info,
core_info, ipe_dev_intf->hw_idx);
@@ -136,7 +134,7 @@
spin_lock_init(&ipe_dev->hw_lock);
init_completion(&ipe_dev->hw_complete);
- pr_debug("%s: IPE%d probe successful\n", __func__,
+ CAM_DBG(CAM_ICP, "IPE%d probe successful",
ipe_dev_intf->hw_idx);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index e691dad..26dd6d2 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -18,9 +18,7 @@
#include <media/cam_icp.h>
#include "ipe_soc.h"
#include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static int cam_ipe_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
@@ -28,7 +26,7 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0)
- pr_err("get ipe dt prop is failed\n");
+ CAM_ERR(CAM_ICP, "get ipe dt prop is failed");
return rc;
}
@@ -69,7 +67,7 @@
rc = cam_soc_util_enable_platform_resource(soc_info, true,
CAM_TURBO_VOTE, false);
if (rc) {
- pr_err("%s: enable platform failed\n", __func__);
+ CAM_ERR(CAM_ICP, "enable platform failed");
return rc;
}
@@ -82,7 +80,7 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
if (rc)
- pr_err("%s: enable platform failed\n", __func__);
+		CAM_ERR(CAM_ICP, "disable platform failed");
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/Makefile b/drivers/media/platform/msm/camera/cam_isp/Makefile
index 77ad6fc..4851535 100644
--- a/drivers/media/platform/msm/camera/cam_isp/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/Makefile
@@ -1,8 +1,9 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += isp_hw_mgr/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_dev.o cam_isp_context.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 83009d2..a6f60f5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -21,9 +21,45 @@
#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
#include "cam_req_mgr_dev.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
+{
+ uint64_t ts = 0;
+
+ if (!evt_data)
+ return 0;
+
+ switch (evt_id) {
+ case CAM_ISP_HW_EVENT_ERROR:
+ ts = ((struct cam_isp_hw_error_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_SOF:
+ ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_REG_UPDATE:
+ ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_EPOCH:
+ ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_EOF:
+ ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_DONE:
+ break;
+ default:
+ CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
+ }
+
+ return ts;
+}
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
struct cam_isp_context *ctx_isp,
@@ -37,14 +73,17 @@
struct cam_context *ctx = ctx_isp->base;
if (list_empty(&ctx->active_req_list)) {
- CDBG("Buf done with no active request!\n");
+ CAM_DBG(CAM_ISP, "Buf done with no active request!");
goto end;
}
- CDBG("%s: Enter with bubble_state %d\n", __func__, bubble_state);
+ CAM_DBG(CAM_ISP, "Enter with bubble_state %d", bubble_state);
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
+
+ trace_cam_buf_done("ISP", ctx, req);
+
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
for (i = 0; i < done->num_handles; i++) {
for (j = 0; j < req_isp->num_fence_map_out; j++) {
@@ -54,29 +93,30 @@
}
if (j == req_isp->num_fence_map_out) {
- pr_err("Can not find matching lane handle 0x%x!\n",
+ CAM_ERR(CAM_ISP,
+ "Can not find matching lane handle 0x%x!",
done->resource_handle[i]);
rc = -EINVAL;
continue;
}
if (!bubble_state) {
- CDBG("%s: Sync with success: fd 0x%x\n", __func__,
+ CAM_DBG(CAM_ISP, "Sync with success: fd 0x%x",
req_isp->fence_map_out[j].sync_id);
rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
CAM_SYNC_STATE_SIGNALED_SUCCESS);
if (rc)
- pr_err("%s: Sync failed with rc = %d\n",
- __func__, rc);
+ CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
+ rc);
} else if (!req_isp->bubble_report) {
- CDBG("%s: Sync with failure: fd 0x%x\n", __func__,
+ CAM_DBG(CAM_ISP, "Sync with failure: fd 0x%x",
req_isp->fence_map_out[j].sync_id);
rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
CAM_SYNC_STATE_SIGNALED_ERROR);
if (rc)
- pr_err("%s: Sync failed with rc = %d\n",
- __func__, rc);
+ CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
+ rc);
} else {
/*
* Ignore the buffer done if bubble detect is on
@@ -89,7 +129,7 @@
continue;
}
- CDBG("%s: req %lld, reset sync id 0x%x\n", __func__,
+ CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x",
req->request_id,
req_isp->fence_map_out[j].sync_id);
req_isp->num_acked++;
@@ -99,12 +139,42 @@
if (req_isp->num_acked == req_isp->num_fence_map_out) {
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->free_req_list);
+ ctx_isp->active_req_cnt--;
+ CAM_DBG(CAM_ISP,
+ "Move active request %lld to free list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
}
end:
return rc;
}
+static void __cam_isp_ctx_send_sof_timestamp(
+ struct cam_isp_context *ctx_isp, uint64_t request_id,
+ uint32_t sof_event_status)
+{
+ struct cam_req_mgr_message req_msg;
+
+ req_msg.session_hdl = ctx_isp->base->session_hdl;
+ req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
+ req_msg.u.frame_msg.request_id = request_id;
+ req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
+ req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
+ req_msg.u.frame_msg.sof_status = sof_event_status;
+
+ CAM_DBG(CAM_ISP,
+ "request id:%lld frame number:%lld SOF time stamp:0x%llx",
+ request_id, ctx_isp->frame_id,
+ ctx_isp->sof_timestamp_val);
+ CAM_DBG(CAM_ISP, " sof status:%d", sof_event_status);
+
+ if (cam_req_mgr_notify_frame_message(&req_msg,
+ V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
+ CAM_ERR(CAM_ISP,
+ "Error in notifying the sof time for req id:%lld",
+ request_id);
+}
+
static int __cam_isp_ctx_reg_upd_in_activated_state(
struct cam_isp_context *ctx_isp, void *evt_data)
{
@@ -114,7 +184,7 @@
struct cam_isp_ctx_req *req_isp;
if (list_empty(&ctx->pending_req_list)) {
- pr_err("Reg upd ack with no pending request\n");
+ CAM_ERR(CAM_ISP, "Reg upd ack with no pending request");
goto end;
}
req = list_first_entry(&ctx->pending_req_list,
@@ -123,12 +193,16 @@
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
if (req_isp->num_fence_map_out != 0) {
- CDBG("%s: move request %lld to active list\n", __func__,
- req->request_id);
list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
} else {
/* no io config, so the request is completed. */
list_add_tail(&req->list, &ctx->free_req_list);
+ CAM_DBG(CAM_ISP,
+ "move active request %lld to free list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
}
/*
@@ -136,7 +210,7 @@
* state so change substate here.
*/
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
- CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
end:
return rc;
@@ -145,34 +219,61 @@
static int __cam_isp_ctx_notify_sof_in_actived_state(
struct cam_isp_context *ctx_isp, void *evt_data)
{
- int rc = 0;
struct cam_req_mgr_sof_notify notify;
struct cam_context *ctx = ctx_isp->base;
+ struct cam_ctx_request *req;
+ uint64_t request_id = 0;
- /* notify reqmgr with sof signal */
- if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+ /*
+ * notify reqmgr with sof signal. Note, due to scheduling delay
+ * we can run into situation that two active requests has already
+ * be in the active queue while we try to do the notification.
+ * In this case, we need to skip the current notification. This
+ * helps the state machine to catch up the delay.
+ */
+ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof &&
+ ctx_isp->active_req_cnt <= 2) {
notify.link_hdl = ctx->link_hdl;
notify.dev_hdl = ctx->dev_hdl;
notify.frame_id = ctx_isp->frame_id;
ctx->ctx_crm_intf->notify_sof(¬ify);
- CDBG("%s: Notify CRM SOF frame %lld\n", __func__,
+ CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
ctx_isp->frame_id);
+
+ list_for_each_entry(req, &ctx->active_req_list, list) {
+ if (req->request_id > ctx_isp->reported_req_id) {
+ request_id = req->request_id;
+ ctx_isp->reported_req_id = request_id;
+ break;
+ }
+ }
+
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
} else {
- pr_err("%s: Can not notify SOF to CRM\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
}
- return rc;
+ return 0;
}
-static int __cam_isp_ctx_sof_in_sof(struct cam_isp_context *ctx_isp,
- void *evt_data)
+static int __cam_isp_ctx_sof_in_activated_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
{
int rc = 0;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
- CDBG("%s: Enter\n", __func__);
+ if (!evt_data) {
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+ return -EINVAL;
+ }
+
ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
return rc;
}
@@ -186,7 +287,7 @@
struct cam_context *ctx = ctx_isp->base;
if (ctx->state != CAM_CTX_ACTIVATED) {
- CDBG("%s: invalid RUP\n", __func__);
+ CAM_DBG(CAM_ISP, "invalid RUP");
goto end;
}
@@ -199,11 +300,16 @@
struct cam_ctx_request, list);
list_del_init(&req->list);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- if (req_isp->num_fence_map_out == req_isp->num_acked)
+ if (req_isp->num_fence_map_out == req_isp->num_acked) {
list_add_tail(&req->list, &ctx->free_req_list);
- else {
+ } else {
/* need to handle the buf done */
list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP,
+ "move request %lld to active list(cnt = %d)",
+ req->request_id,
+ ctx_isp->active_req_cnt);
ctx_isp->substate_activated =
CAM_ISP_CTX_ACTIVATED_EPOCH;
}
@@ -215,18 +321,23 @@
static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
void *evt_data)
{
- int rc = 0;
struct cam_ctx_request *req;
struct cam_isp_ctx_req *req_isp;
struct cam_context *ctx = ctx_isp->base;
+ uint64_t request_id = 0;
if (list_empty(&ctx->pending_req_list)) {
/*
* If no pending req in epoch, this is an error case.
* The recovery is to go back to sof state
*/
- pr_err("%s: No pending request\n", __func__);
+ CAM_ERR(CAM_ISP, "No pending request");
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+ /* Send SOF event as empty frame*/
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
goto end;
}
@@ -234,7 +345,7 @@
list);
req_isp = (struct cam_isp_ctx_req *)req->req_priv;
- CDBG("Report Bubble flag %d\n", req_isp->bubble_report);
+ CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
if (req_isp->bubble_report && ctx->ctx_crm_intf &&
ctx->ctx_crm_intf->notify_err) {
struct cam_req_mgr_error_notify notify;
@@ -244,7 +355,7 @@
notify.req_id = req->request_id;
notify.error = CRM_KMD_ERR_BUBBLE;
ctx->ctx_crm_intf->notify_err(¬ify);
- CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
+ CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
ctx_isp->frame_id);
} else {
/*
@@ -253,14 +364,21 @@
*/
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
req_isp->bubble_report = 0;
}
+ request_id = req->request_id;
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_ERROR);
+
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
- CDBG("%s: next substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "next substate %d",
ctx_isp->substate_activated);
end:
- return rc;
+ return 0;
}
@@ -280,15 +398,23 @@
void *evt_data)
{
int rc = 0;
- struct cam_context *ctx = ctx_isp->base;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+ if (!evt_data) {
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+ return -EINVAL;
+ }
ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+
if (list_empty(&ctx->active_req_list))
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
else
- CDBG("%s: Still need to wait for the buf done\n", __func__);
- CDBG("%s: next substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
+
+ CAM_DBG(CAM_ISP, "next substate %d",
ctx_isp->substate_activated);
return rc;
@@ -305,14 +431,6 @@
return rc;
}
-
-static int __cam_isp_ctx_sof_in_bubble(struct cam_isp_context *ctx_isp,
- void *evt_data)
-{
- ctx_isp->frame_id++;
- return 0;
-}
-
static int __cam_isp_ctx_buf_done_in_bubble(
struct cam_isp_context *ctx_isp, void *evt_data)
{
@@ -324,20 +442,13 @@
return rc;
}
-static int __cam_isp_ctx_sof_in_bubble_applied(
- struct cam_isp_context *ctx_isp, void *evt_data)
-{
- ctx_isp->frame_id++;
- return 0;
-}
-
-
static int __cam_isp_ctx_epoch_in_bubble_applied(
struct cam_isp_context *ctx_isp, void *evt_data)
{
struct cam_ctx_request *req;
struct cam_isp_ctx_req *req_isp;
struct cam_context *ctx = ctx_isp->base;
+ uint64_t request_id = 0;
/*
* This means we missed the reg upd ack. So we need to
@@ -349,7 +460,10 @@
* If no pending req in epoch, this is an error case.
* Just go back to the bubble state.
*/
- pr_err("%s: No pending request.\n", __func__);
+ CAM_ERR(CAM_ISP, "No pending request.");
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
goto end;
}
@@ -367,7 +481,7 @@
notify.req_id = req->request_id;
notify.error = CRM_KMD_ERR_BUBBLE;
ctx->ctx_crm_intf->notify_err(¬ify);
- CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
+ CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
ctx_isp->frame_id);
} else {
/*
@@ -376,11 +490,18 @@
*/
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
req_isp->bubble_report = 0;
}
+ request_id = req->request_id;
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_ERROR);
+
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
- CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
end:
return 0;
}
@@ -409,7 +530,7 @@
uint32_t error_type = error_event_data->error_type;
- CDBG("%s: Enter error_type = %d\n", __func__, error_type);
+ CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);
if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
notify.error = CRM_KMD_ERR_FATAL;
@@ -421,7 +542,7 @@
*/
if (list_empty(&ctx->active_req_list)) {
- pr_err("handling error with no active request!\n");
+ CAM_ERR(CAM_ISP, "handling error with no active request");
rc = -EINVAL;
goto end;
}
@@ -435,10 +556,10 @@
notify.req_id = req->request_id;
ctx->ctx_crm_intf->notify_err(¬ify);
- pr_err("%s: Notify CRM about ERROR frame %lld\n", __func__,
+ CAM_ERR(CAM_ISP, "Notify CRM about ERROR frame %lld",
ctx_isp->frame_id);
} else {
- pr_err("%s: Can not notify ERRROR to CRM\n", __func__);
+		CAM_ERR(CAM_ISP, "Can not notify ERROR to CRM");
rc = -EFAULT;
}
@@ -447,7 +568,7 @@
/* might need to check if active list is empty */
end:
- CDBG("%s: Exit\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -457,7 +578,7 @@
{
.irq_ops = {
NULL,
- __cam_isp_ctx_sof_in_sof,
+ __cam_isp_ctx_sof_in_activated_state,
__cam_isp_ctx_reg_upd_in_sof,
__cam_isp_ctx_notify_sof_in_actived_state,
NULL,
@@ -468,7 +589,7 @@
{
.irq_ops = {
__cam_isp_ctx_handle_error,
- __cam_isp_ctx_sof_in_sof,
+ __cam_isp_ctx_sof_in_activated_state,
__cam_isp_ctx_reg_upd_in_activated_state,
__cam_isp_ctx_epoch_in_applied,
NULL,
@@ -490,7 +611,7 @@
{
.irq_ops = {
NULL,
- __cam_isp_ctx_sof_in_bubble,
+ __cam_isp_ctx_sof_in_activated_state,
NULL,
__cam_isp_ctx_notify_sof_in_actived_state,
NULL,
@@ -501,7 +622,7 @@
{
.irq_ops = {
NULL,
- __cam_isp_ctx_sof_in_bubble_applied,
+ __cam_isp_ctx_sof_in_activated_state,
__cam_isp_ctx_reg_upd_in_activated_state,
__cam_isp_ctx_epoch_in_bubble_applied,
NULL,
@@ -518,15 +639,14 @@
uint32_t next_state)
{
int rc = 0;
- int cnt = 0;
struct cam_ctx_request *req;
struct cam_isp_ctx_req *req_isp;
struct cam_isp_context *ctx_isp;
struct cam_hw_config_args cfg;
if (list_empty(&ctx->pending_req_list)) {
- pr_err("%s: No available request for Apply id %lld\n",
- __func__, apply->request_id);
+ CAM_ERR(CAM_ISP, "No available request for Apply id %lld",
+ apply->request_id);
rc = -EFAULT;
goto end;
}
@@ -537,13 +657,13 @@
* The maximum number of request allowed to be outstanding is 2.
*
*/
- list_for_each_entry(req, &ctx->active_req_list, list) {
- if (++cnt > 2) {
- pr_err_ratelimited("%s: Apply failed due to congest\n",
- __func__);
- rc = -EFAULT;
- goto end;
- }
+ ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+ if (ctx_isp->active_req_cnt >= 2) {
+ CAM_DBG(CAM_ISP,
+ "Reject apply request due to congestion(cnt = %d)",
+ ctx_isp->active_req_cnt);
+ rc = -EFAULT;
+ goto end;
}
req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
@@ -558,9 +678,8 @@
goto end;
}
- CDBG("%s: Apply request %lld\n", __func__, req->request_id);
+ CAM_DBG(CAM_ISP, "Apply request %lld", req->request_id);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
req_isp->bubble_report = apply->report_if_bubble;
@@ -570,11 +689,11 @@
rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
if (rc) {
- pr_err("%s: Can not apply the configuration\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not apply the configuration");
} else {
spin_lock_bh(&ctx->lock);
ctx_isp->substate_activated = next_state;
- CDBG("%s: new state %d\n", __func__, next_state);
+ CAM_DBG(CAM_ISP, "new state %d", next_state);
spin_unlock_bh(&ctx->lock);
}
end:
@@ -588,11 +707,11 @@
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- CDBG("%s: current substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "current substate %d",
ctx_isp->substate_activated);
rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
CAM_ISP_CTX_ACTIVATED_APPLIED);
- CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
return rc;
}
@@ -604,11 +723,11 @@
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- CDBG("%s: current substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "current substate %d",
ctx_isp->substate_activated);
rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
CAM_ISP_CTX_ACTIVATED_APPLIED);
- CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
return rc;
}
@@ -620,11 +739,11 @@
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- CDBG("%s: current substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "current substate %d",
ctx_isp->substate_activated);
rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
- CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
return rc;
}
@@ -641,7 +760,7 @@
spin_lock(&ctx->lock);
if (list_empty(req_list)) {
spin_unlock(&ctx->lock);
- CDBG("%s: request list is empty\n", __func__);
+ CAM_DBG(CAM_ISP, "request list is empty");
return 0;
}
@@ -654,15 +773,15 @@
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
for (i = 0; i < req_isp->num_fence_map_out; i++) {
if (req_isp->fence_map_out[i].sync_id != -1) {
- CDBG("%s: Flush req 0x%llx, fence %d\n",
- __func__, req->request_id,
+ CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
+ req->request_id,
req_isp->fence_map_out[i].sync_id);
rc = cam_sync_signal(
req_isp->fence_map_out[i].sync_id,
CAM_SYNC_STATE_SIGNALED_ERROR);
if (rc)
- pr_err_ratelimited("%s: signal fence failed\n",
- __func__);
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+					"signal fence failed");
req_isp->fence_map_out[i].sync_id = -1;
}
}
@@ -678,8 +797,9 @@
if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
!cancel_req_id_found)
- CDBG("%s:Flush request id:%lld is not found in the list\n",
- __func__, flush_req->req_id);
+ CAM_DBG(CAM_ISP,
+ "Flush request id:%lld is not found in the list",
+ flush_req->req_id);
return 0;
}
@@ -690,10 +810,10 @@
{
int rc = 0;
- CDBG("%s: try to flush pending list\n", __func__);
+ CAM_DBG(CAM_ISP, "try to flush pending list");
rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
- CDBG("%s: Flush request in top state %d\n",
- __func__, ctx->state);
+ CAM_DBG(CAM_ISP, "Flush request in top state %d",
+ ctx->state);
return rc;
}
@@ -703,7 +823,7 @@
{
int rc = 0;
- CDBG("%s: try to flush pending list\n", __func__);
+ CAM_DBG(CAM_ISP, "try to flush pending list");
rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
/* if nothing is in pending req list, change state to acquire*/
@@ -712,8 +832,10 @@
ctx->state = CAM_CTX_ACQUIRED;
spin_unlock(&ctx->lock);
- CDBG("%s: Flush request in ready state. next state %d\n",
- __func__, ctx->state);
+ trace_cam_context_state("ISP", ctx);
+
+ CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
+ ctx->state);
return rc;
}
@@ -763,6 +885,412 @@
},
};
+static int __cam_isp_ctx_rdi_only_sof_in_top_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ int rc = 0;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_req_mgr_sof_notify notify;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+ uint64_t request_id = 0;
+
+ if (!evt_data) {
+ CAM_ERR(CAM_ISP, "invalid sof event data");
+ return -EINVAL;
+ }
+
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+ /*
+ * notify reqmgr with sof signal. Note, due to scheduling delay
+ * we can run into situation that two active requests has already
+ * be in the active queue while we try to do the notification.
+ * In this case, we need to skip the current notification. This
+ * helps the state machine to catch up the delay.
+ */
+ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof &&
+ ctx_isp->active_req_cnt <= 2) {
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.frame_id = ctx_isp->frame_id;
+
+ ctx->ctx_crm_intf->notify_sof(¬ify);
+ CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
+ ctx_isp->frame_id);
+
+ /*
+ * It is an idle frame without any applied request id, send
+ * request id as zero
+ */
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+ } else {
+ CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+ }
+
+ if (list_empty(&ctx->active_req_list))
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+ else
+ CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
+
+ CAM_DBG(CAM_ISP, "next substate %d",
+ ctx_isp->substate_activated);
+ return rc;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+
+ if (!evt_data) {
+ CAM_ERR(CAM_ISP, "invalid sof event data");
+ return -EINVAL;
+ }
+
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+ return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ struct cam_ctx_request *req;
+ struct cam_isp_ctx_req *req_isp;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+ uint64_t request_id = 0;
+
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+ if (list_empty(&ctx->pending_req_list)) {
+ /*
+ * If no pending req in epoch, this is an error case.
+ * The recovery is to go back to sof state
+ */
+ CAM_ERR(CAM_ISP, "No pending request");
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+ /* Send SOF event as empty frame*/
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+ goto end;
+ }
+
+ req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+ list);
+ req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+ CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
+ if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+ ctx->ctx_crm_intf->notify_err) {
+ struct cam_req_mgr_error_notify notify;
+
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.req_id = req->request_id;
+ notify.error = CRM_KMD_ERR_BUBBLE;
+ ctx->ctx_crm_intf->notify_err(¬ify);
+ CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
+ ctx_isp->frame_id);
+ } else {
+ /*
+ * Since the bubble cannot be reported, always move the request to
+ * active list.
+ */
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
+ req_isp->bubble_report = 0;
+ }
+
+ request_id = req->request_id;
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_ERROR);
+
+ /* change the state to bubble, as reg update has not come */
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+end:
+ return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ uint32_t i;
+ struct cam_ctx_request *req;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_req_mgr_sof_notify notify;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+ struct cam_isp_ctx_req *req_isp;
+ uint64_t request_id = 0;
+
+ if (!evt_data) {
+ CAM_ERR(CAM_ISP, "invalid sof event data");
+ return -EINVAL;
+ }
+
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+ /*
+ * Signal all active requests with error and move all the active
+ * requests to the free list
+ */
+ while (!list_empty(&ctx->active_req_list)) {
+ req = list_first_entry(&ctx->active_req_list,
+ struct cam_ctx_request, list);
+ list_del_init(&req->list);
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
+ req_isp->num_fence_map_out);
+ for (i = 0; i < req_isp->num_fence_map_out; i++)
+ if (req_isp->fence_map_out[i].sync_id != -1) {
+ cam_sync_signal(
+ req_isp->fence_map_out[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ }
+ list_add_tail(&req->list, &ctx->free_req_list);
+ }
+
+ /* notify reqmgr with sof signal */
+ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.frame_id = ctx_isp->frame_id;
+
+ ctx->ctx_crm_intf->notify_sof(¬ify);
+ CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
+ ctx_isp->frame_id);
+
+ } else {
+ CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+ }
+
+ /*
+ * It is an idle frame without any applied request id, send
+ * request id as zero
+ */
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+ CAM_DBG(CAM_ISP, "next substate %d",
+ ctx_isp->substate_activated);
+
+ return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ struct cam_ctx_request *req;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_isp_ctx_req *req_isp;
+ struct cam_req_mgr_sof_notify notify;
+ uint64_t request_id = 0;
+
+ /* notify reqmgr with sof signal*/
+ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+ if (list_empty(&ctx->pending_req_list)) {
+ CAM_ERR(CAM_ISP, "Reg upd ack with no pending request");
+ goto error;
+ }
+ req = list_first_entry(&ctx->pending_req_list,
+ struct cam_ctx_request, list);
+ list_del_init(&req->list);
+
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ request_id = req->request_id;
+ if (req_isp->num_fence_map_out != 0) {
+ list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP,
+ "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
+ } else {
+ /* no io config, so the request is completed. */
+ list_add_tail(&req->list, &ctx->free_req_list);
+ CAM_DBG(CAM_ISP,
+ "move active req %lld to free list(cnt=%d)",
+ req->request_id, ctx_isp->active_req_cnt);
+ }
+
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.frame_id = ctx_isp->frame_id;
+
+ ctx->ctx_crm_intf->notify_sof(¬ify);
+ CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
+ ctx_isp->frame_id);
+ } else {
+ CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+ }
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+ return 0;
+error:
+ /* Send SOF event as idle frame*/
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+ /*
+ * There is no request in the pending list, move the sub state machine
+ * to SOF sub state
+ */
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+ return 0;
+}
+
+static struct cam_isp_ctx_irq_ops
+ cam_isp_ctx_rdi_only_activated_state_machine_irq
+ [CAM_ISP_CTX_ACTIVATED_MAX] = {
+ /* SOF */
+ {
+ .irq_ops = {
+ NULL,
+ __cam_isp_ctx_rdi_only_sof_in_top_state,
+ __cam_isp_ctx_reg_upd_in_sof,
+ NULL,
+ NULL,
+ NULL,
+ },
+ },
+ /* APPLIED */
+ {
+ .irq_ops = {
+ __cam_isp_ctx_handle_error,
+ __cam_isp_ctx_rdi_only_sof_in_applied_state,
+ NULL,
+ NULL,
+ NULL,
+ __cam_isp_ctx_buf_done_in_applied,
+ },
+ },
+ /* EPOCH */
+ {
+ .irq_ops = {
+ __cam_isp_ctx_handle_error,
+ __cam_isp_ctx_rdi_only_sof_in_top_state,
+ NULL,
+ NULL,
+ NULL,
+ __cam_isp_ctx_buf_done_in_epoch,
+ },
+ },
+ /* BUBBLE*/
+ {
+ .irq_ops = {
+ __cam_isp_ctx_handle_error,
+ __cam_isp_ctx_rdi_only_sof_in_bubble_state,
+ NULL,
+ NULL,
+ NULL,
+ __cam_isp_ctx_buf_done_in_bubble,
+ },
+ },
+ /* BUBBLE APPLIED ie PRE_BUBBLE */
+ {
+ .irq_ops = {
+ __cam_isp_ctx_handle_error,
+ __cam_isp_ctx_rdi_only_sof_in_bubble_applied,
+ __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
+ NULL,
+ NULL,
+ __cam_isp_ctx_buf_done_in_bubble_applied,
+ },
+ },
+
+ /* HALT */
+ {
+ },
+};
+
+static int __cam_isp_ctx_rdi_only_apply_req_top_state(
+ struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+ int rc = 0;
+ struct cam_isp_context *ctx_isp =
+ (struct cam_isp_context *) ctx->ctx_priv;
+
+ CAM_DBG(CAM_ISP, "current substate %d",
+ ctx_isp->substate_activated);
+ rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+ CAM_ISP_CTX_ACTIVATED_APPLIED);
+ CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
+
+ return rc;
+}
+
+static struct cam_ctx_ops
+ cam_isp_ctx_rdi_only_activated_state_machine
+ [CAM_ISP_CTX_ACTIVATED_MAX] = {
+ /* SOF */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {
+ .apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
+ },
+ .irq_ops = NULL,
+ },
+ /* APPLIED */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* EPOCH */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {
+ .apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
+ },
+ .irq_ops = NULL,
+ },
+ /* PRE BUBBLE */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* BUBBLE */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* HALT */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+};
+
/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
@@ -786,24 +1314,28 @@
ctx->link_hdl = 0;
ctx->ctx_crm_intf = NULL;
ctx_isp->frame_id = 0;
+ ctx_isp->active_req_cnt = 0;
+ ctx_isp->reported_req_id = 0;
/*
* Ideally, we should never have any active request here.
* But we still add some sanity check code here to help the debug
*/
if (!list_empty(&ctx->active_req_list))
- pr_err("%s: Active list is not empty\n", __func__);
+ CAM_ERR(CAM_ISP, "Active list is not empty");
/* Flush all the pending request list */
flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
flush_req.link_hdl = ctx->link_hdl;
flush_req.dev_hdl = ctx->dev_hdl;
- CDBG("%s: try to flush pending list\n", __func__);
+ CAM_DBG(CAM_ISP, "try to flush pending list");
rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
ctx->state = CAM_CTX_AVAILABLE;
- CDBG("%s: next state %d\n", __func__, ctx->state);
+
+ trace_cam_context_state("ISP", ctx);
+ CAM_DBG(CAM_ISP, "next state %d", ctx->state);
return rc;
}
@@ -821,7 +1353,7 @@
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- CDBG("%s: get free request object......\n", __func__);
+ CAM_DBG(CAM_ISP, "get free request object......");
/* get free request */
spin_lock_bh(&ctx->lock);
@@ -833,7 +1365,7 @@
spin_unlock_bh(&ctx->lock);
if (!req) {
- pr_err("%s: No more request obj free\n", __func__);
+ CAM_ERR(CAM_ISP, "No more request obj free");
rc = -ENOMEM;
goto end;
}
@@ -845,20 +1377,20 @@
rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
(uint64_t *) &packet_addr, &len);
if (rc != 0) {
- pr_err("%s: Can not get packet address\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not get packet address");
rc = -EINVAL;
goto free_req;
}
packet = (struct cam_packet *) (packet_addr + cmd->offset);
- CDBG("%s: pack_handle %llx\n", __func__, cmd->packet_handle);
- CDBG("%s: packet address is 0x%llx\n", __func__, packet_addr);
- CDBG("%s: packet with length %zu, offset 0x%llx\n", __func__,
+ CAM_DBG(CAM_ISP, "pack_handle %llx", cmd->packet_handle);
+ CAM_DBG(CAM_ISP, "packet address is 0x%llx", packet_addr);
+ CAM_DBG(CAM_ISP, "packet with length %zu, offset 0x%llx",
len, cmd->offset);
- CDBG("%s: Packet request id 0x%llx\n", __func__,
+ CAM_DBG(CAM_ISP, "Packet request id %lld",
packet->header.request_id);
- CDBG("%s: Packet size 0x%x\n", __func__, packet->header.size);
- CDBG("%s: packet op %d\n", __func__, packet->header.op_code);
+ CAM_DBG(CAM_ISP, "Packet size 0x%x", packet->header.size);
+ CAM_DBG(CAM_ISP, "packet op %d", packet->header.op_code);
/* preprocess the configuration */
memset(&cfg, 0, sizeof(cfg));
@@ -871,13 +1403,12 @@
cfg.out_map_entries = req_isp->fence_map_out;
cfg.in_map_entries = req_isp->fence_map_in;
- CDBG("%s: try to prepare config packet......\n", __func__);
+ CAM_DBG(CAM_ISP, "try to prepare config packet......");
rc = ctx->hw_mgr_intf->hw_prepare_update(
ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
if (rc != 0) {
- pr_err("%s: Prepare config packet failed in HW layer\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
rc = -EFAULT;
goto free_req;
}
@@ -886,8 +1417,8 @@
req_isp->num_fence_map_in = cfg.num_in_map_entries;
req_isp->num_acked = 0;
- CDBG("%s: num_entry: %d, num fence out: %d, num fence in: %d\n",
- __func__, req_isp->num_cfg, req_isp->num_fence_map_out,
+ CAM_DBG(CAM_ISP, "num_entry: %d, num fence out: %d, num fence in: %d",
+ req_isp->num_cfg, req_isp->num_fence_map_out,
req_isp->num_fence_map_in);
req->request_id = packet->header.request_id;
@@ -899,20 +1430,20 @@
add_req.req_id = req->request_id;
rc = ctx->ctx_crm_intf->add_req(&add_req);
if (rc) {
- pr_err("%s: Error: Adding request id=%llu\n", __func__,
+ CAM_ERR(CAM_ISP, "Error: Adding request id=%llu",
req->request_id);
goto free_req;
}
}
- CDBG("%s: Packet request id 0x%llx\n", __func__,
+ CAM_DBG(CAM_ISP, "Packet request id 0x%llx",
packet->header.request_id);
spin_lock_bh(&ctx->lock);
list_add_tail(&req->list, &ctx->pending_req_list);
spin_unlock_bh(&ctx->lock);
- CDBG("%s: Preprocessing Config %lld successful\n", __func__,
+ CAM_DBG(CAM_ISP, "Preprocessing Config %lld successful",
req->request_id);
return rc;
@@ -935,26 +1466,28 @@
struct cam_hw_release_args release;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
+ struct cam_isp_hw_cmd_args hw_cmd_args;
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready!\n");
+ CAM_ERR(CAM_ISP, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
- CDBG("%s: session_hdl 0x%x, num_resources %d, hdl type %d, res %lld\n",
- __func__, cmd->session_handle, cmd->num_resources,
+ CAM_DBG(CAM_ISP,
+ "session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
+ cmd->session_handle, cmd->num_resources,
cmd->handle_type, cmd->resource_hdl);
if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
- pr_err("Too much resources in the acquire!\n");
+ CAM_ERR(CAM_ISP, "Too many resources in the acquire");
rc = -ENOMEM;
goto end;
}
/* for now we only support user pointer */
if (cmd->handle_type != 1) {
- pr_err("%s: Only user pointer is supported!", __func__);
+ CAM_ERR(CAM_ISP, "Only user pointer is supported");
rc = -EINVAL;
goto end;
}
@@ -966,8 +1499,8 @@
goto end;
}
- CDBG("%s: start copy %d resources from user\n",
- __func__, cmd->num_resources);
+ CAM_DBG(CAM_ISP, "start copy %d resources from user",
+ cmd->num_resources);
if (copy_from_user(isp_res, (void __user *)cmd->resource_hdl,
sizeof(*isp_res)*cmd->num_resources)) {
@@ -984,10 +1517,39 @@
rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
¶m);
if (rc != 0) {
- pr_err("Acquire device failed\n");
+ CAM_ERR(CAM_ISP, "Acquire device failed");
goto free_res;
}
+ /* Query the context has rdi only resource */
+ hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
+ hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+ rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+ &hw_cmd_args);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "HW command failed");
+ goto free_hw;
+ }
+
+ if (hw_cmd_args.u.is_rdi_only_context) {
+ /*
+ * this context has rdi only resource assign rdi only
+ * state machine
+ */
+ CAM_DBG(CAM_ISP, "RDI only session Context");
+
+ ctx_isp->substate_machine_irq =
+ cam_isp_ctx_rdi_only_activated_state_machine_irq;
+ ctx_isp->substate_machine =
+ cam_isp_ctx_rdi_only_activated_state_machine;
+ } else {
+ CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
+ ctx_isp->substate_machine_irq =
+ cam_isp_ctx_activated_state_machine_irq;
+ ctx_isp->substate_machine =
+ cam_isp_ctx_activated_state_machine;
+ }
+
ctx_isp->hw_ctx = param.ctxt_to_hw_map;
req_hdl_param.session_hdl = cmd->session_handle;
@@ -997,11 +1559,11 @@
req_hdl_param.ops = ctx->crm_ctx_intf;
req_hdl_param.priv = ctx;
- CDBG("%s: get device handle form bridge\n", __func__);
+ CAM_DBG(CAM_ISP, "get device handle from bridge");
ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
if (ctx->dev_hdl <= 0) {
rc = -EFAULT;
- pr_err("Can not create device handle\n");
+ CAM_ERR(CAM_ISP, "Can not create device handle");
goto free_hw;
}
cmd->dev_handle = ctx->dev_hdl;
@@ -1011,7 +1573,8 @@
ctx->state = CAM_CTX_ACQUIRED;
- CDBG("%s:%d: Acquire success.\n", __func__, __LINE__);
+ trace_cam_context_state("ISP", ctx);
+ CAM_DBG(CAM_ISP, "Acquire success.");
kfree(isp_res);
return rc;
@@ -1032,10 +1595,12 @@
rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
- if (!rc && ctx->link_hdl)
+ if (!rc && ctx->link_hdl) {
ctx->state = CAM_CTX_READY;
+ trace_cam_context_state("ISP", ctx);
+ }
- CDBG("%s: next state %d\n", __func__, ctx->state);
+ CAM_DBG(CAM_ISP, "next state %d", ctx->state);
return rc;
}
@@ -1044,16 +1609,18 @@
{
int rc = 0;
- CDBG("%s:%d: Enter.........\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "Enter.........");
ctx->link_hdl = link->link_hdl;
ctx->ctx_crm_intf = link->crm_cb;
/* change state only if we had the init config */
- if (!list_empty(&ctx->pending_req_list))
+ if (!list_empty(&ctx->pending_req_list)) {
ctx->state = CAM_CTX_READY;
+ trace_cam_context_state("ISP", ctx);
+ }
- CDBG("%s: next state %d\n", __func__, ctx->state);
+ CAM_DBG(CAM_ISP, "next state %d", ctx->state);
return rc;
}
@@ -1100,8 +1667,7 @@
if (list_empty(&ctx->pending_req_list)) {
/* should never happen */
- pr_err("%s: Start device with empty configuration\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Start device with empty configuration");
rc = -EFAULT;
goto end;
} else {
@@ -1111,8 +1677,7 @@
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
if (!ctx_isp->hw_ctx) {
- pr_err("%s:%d: Wrong hw context pointer.\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
rc = -EFAULT;
goto end;
}
@@ -1121,6 +1686,8 @@
arg.num_hw_update_entries = req_isp->num_cfg;
ctx_isp->frame_id = 0;
+ ctx_isp->active_req_cnt = 0;
+ ctx_isp->reported_req_id = 0;
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
/*
@@ -1129,14 +1696,16 @@
* irq handling comes early
*/
ctx->state = CAM_CTX_ACTIVATED;
+ trace_cam_context_state("ISP", ctx);
rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
if (rc) {
/* HW failure. user need to clean up the resource */
- pr_err("Start HW failed\n");
+ CAM_ERR(CAM_ISP, "Start HW failed");
ctx->state = CAM_CTX_READY;
+ trace_cam_context_state("ISP", ctx);
goto end;
}
- CDBG("%s: start device success\n", __func__);
+ CAM_DBG(CAM_ISP, "start device success");
end:
return rc;
}
@@ -1149,6 +1718,7 @@
ctx->link_hdl = 0;
ctx->ctx_crm_intf = NULL;
ctx->state = CAM_CTX_ACQUIRED;
+ trace_cam_context_state("ISP", ctx);
return rc;
}
@@ -1168,7 +1738,7 @@
spin_lock_bh(&ctx->lock);
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
spin_unlock_bh(&ctx->lock);
- CDBG("%s: next substate %d", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
/* stop hw first */
if (ctx_isp->hw_ctx) {
@@ -1182,8 +1752,8 @@
struct cam_ctx_request, list);
list_del_init(&req->list);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- CDBG("%s: signal fence in pending list. fence num %d\n",
- __func__, req_isp->num_fence_map_out);
+ CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
+ req_isp->num_fence_map_out);
for (i = 0; i < req_isp->num_fence_map_out; i++)
if (req_isp->fence_map_out[i].sync_id != -1) {
cam_sync_signal(
@@ -1198,8 +1768,8 @@
struct cam_ctx_request, list);
list_del_init(&req->list);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- CDBG("%s: signal fence in active list. fence num %d\n",
- __func__, req_isp->num_fence_map_out);
+ CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
+ req_isp->num_fence_map_out);
for (i = 0; i < req_isp->num_fence_map_out; i++)
if (req_isp->fence_map_out[i].sync_id != -1) {
cam_sync_signal(
@@ -1209,8 +1779,10 @@
list_add_tail(&req->list, &ctx->free_req_list);
}
ctx_isp->frame_id = 0;
+ ctx_isp->active_req_cnt = 0;
+ ctx_isp->reported_req_id = 0;
- CDBG("%s: next state %d", __func__, ctx->state);
+ CAM_DBG(CAM_ISP, "next state %d", ctx->state);
return rc;
}
@@ -1221,6 +1793,7 @@
__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
ctx->state = CAM_CTX_ACQUIRED;
+ trace_cam_context_state("ISP", ctx);
return rc;
}
@@ -1248,6 +1821,7 @@
ctx->ctx_crm_intf = NULL;
ctx->state = CAM_CTX_AVAILABLE;
+ trace_cam_context_state("ISP", ctx);
return rc;
}
@@ -1259,21 +1833,22 @@
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- CDBG("%s: Enter: apply req in Substate %d\n",
- __func__, ctx_isp->substate_activated);
+ trace_cam_apply_req("ISP", apply);
+ CAM_DBG(CAM_ISP, "Enter: apply req in Substate %d request _id:%lld",
+ ctx_isp->substate_activated, apply->request_id);
if (ctx_isp->substate_machine[ctx_isp->substate_activated].
crm_ops.apply_req) {
rc = ctx_isp->substate_machine[ctx_isp->substate_activated].
crm_ops.apply_req(ctx, apply);
} else {
- pr_err("%s: No handle function in activated substate %d\n",
- __func__, ctx_isp->substate_activated);
+ CAM_ERR(CAM_ISP, "No handle function in activated substate %d",
+ ctx_isp->substate_activated);
rc = -EFAULT;
}
if (rc)
- pr_err("%s: Apply failed in active substate %d\n",
- __func__, ctx_isp->substate_activated);
+ CAM_ERR(CAM_ISP, "Apply failed in active substate %d",
+ ctx_isp->substate_activated);
return rc;
}
@@ -1288,18 +1863,22 @@
(struct cam_isp_context *)ctx->ctx_priv;
spin_lock_bh(&ctx->lock);
- CDBG("%s: Enter: State %d, Substate %d, evt id %d\n",
- __func__, ctx->state, ctx_isp->substate_activated, evt_id);
+
+ trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
+ __cam_isp_ctx_get_event_ts(evt_id, evt_data));
+
+ CAM_DBG(CAM_ISP, "Enter: State %d, Substate %d, evt id %d",
+ ctx->state, ctx_isp->substate_activated, evt_id);
if (ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
irq_ops[evt_id]) {
rc = ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
irq_ops[evt_id](ctx_isp, evt_data);
} else {
- CDBG("%s: No handle function for substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "No handle function for substate %d",
ctx_isp->substate_activated);
}
- CDBG("%s: Exit: State %d Substate %d\n",
- __func__, ctx->state, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "Exit: State %d Substate %d",
+ ctx->state, ctx_isp->substate_activated);
spin_unlock_bh(&ctx->lock);
return rc;
}
@@ -1374,7 +1953,7 @@
int i;
if (!ctx || !ctx_base) {
- pr_err("%s: Invalid Context\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid Context");
goto err;
}
@@ -1383,6 +1962,8 @@
ctx->base = ctx_base;
ctx->frame_id = 0;
+ ctx->active_req_cnt = 0;
+ ctx->reported_req_id = 0;
ctx->hw_ctx = NULL;
ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
ctx->substate_machine = cam_isp_ctx_activated_state_machine;
@@ -1397,7 +1978,7 @@
rc = cam_context_init(ctx_base, crm_node_intf, hw_intf, ctx->req_base,
CAM_CTX_REQ_MAX);
if (rc) {
- pr_err("%s: Camera Context Base init failed\n", __func__);
+ CAM_ERR(CAM_ISP, "Camera Context Base init failed");
goto err;
}
@@ -1417,7 +1998,7 @@
cam_context_deinit(ctx->base);
if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
- pr_err("%s: ISP context substate is invalid\n", __func__);
+ CAM_ERR(CAM_ISP, "ISP context substate is invalid");
memset(ctx, 0, sizeof(*ctx));
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index dae1dda..b0b883c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -106,7 +106,9 @@
* @req_base: Common request object storage
* @req_isp: ISP private request object storage
* @hw_ctx: HW object returned by the acquire device command
- *
+ * @sof_timestamp_val: Captured time stamp value at sof hw event
+ * @active_req_cnt: Counter for the active request
+ * @reported_req_id: Last reported request id
*/
struct cam_isp_context {
struct cam_context *base;
@@ -120,6 +122,9 @@
struct cam_isp_ctx_req req_isp[CAM_CTX_REQ_MAX];
void *hw_ctx;
+ uint64_t sof_timestamp_val;
+ int32_t active_req_cnt;
+ int64_t reported_req_id;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
index 4c819cf..2bf7795 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
@@ -25,6 +25,7 @@
#include "cam_hw_mgr_intf.h"
#include "cam_isp_hw_mgr_intf.h"
#include "cam_node.h"
+#include "cam_debug_util.h"
static struct cam_isp_dev g_isp_dev;
@@ -44,13 +45,13 @@
for (i = 0; i < CAM_CTX_MAX; i++) {
rc = cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]);
if (rc)
- pr_err("%s: ISP context %d deinit failed\n",
- __func__, i);
+ CAM_ERR(CAM_ISP, "ISP context %d deinit failed",
+ i);
}
rc = cam_subdev_remove(&g_isp_dev.sd);
if (rc)
- pr_err("%s: Unregister failed\n", __func__);
+ CAM_ERR(CAM_ISP, "Unregister failed");
memset(&g_isp_dev, 0, sizeof(g_isp_dev));
return 0;
@@ -67,7 +68,7 @@
rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
CAM_IFE_DEVICE_TYPE);
if (rc) {
- pr_err("%s: ISP cam_subdev_probe failed!\n", __func__);
+ CAM_ERR(CAM_ISP, "ISP cam_subdev_probe failed!");
goto err;
}
node = (struct cam_node *) g_isp_dev.sd.token;
@@ -75,7 +76,7 @@
memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
if (rc != 0) {
- pr_err("%s: Can not initialized ISP HW manager!\n", __func__);
+ CAM_ERR(CAM_ISP, "Cannot initialize ISP HW manager!");
goto unregister;
}
@@ -85,7 +86,7 @@
&node->crm_node_intf,
&node->hw_mgr_intf);
if (rc) {
- pr_err("%s: ISP context init failed!\n", __func__);
+ CAM_ERR(CAM_ISP, "ISP context init failed!");
goto unregister;
}
}
@@ -93,11 +94,11 @@
rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, CAM_CTX_MAX,
CAM_ISP_DEV_NAME);
if (rc) {
- pr_err("%s: ISP node init failed!\n", __func__);
+ CAM_ERR(CAM_ISP, "ISP node init failed!");
goto unregister;
}
- pr_info("%s: Camera ISP probe complete\n", __func__);
+ CAM_INFO(CAM_ISP, "Camera ISP probe complete");
return 0;
unregister:
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
index 2c6eaba..7e3c353 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
@@ -7,6 +7,7 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += hw_utils/ isp_hw/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_hw_mgr.o cam_ife_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 2bc4b00..4a5b1c3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -24,9 +24,7 @@
#include "cam_ife_hw_mgr.h"
#include "cam_cdm_intf_api.h"
#include "cam_packet_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
#define CAM_IFE_HW_ENTRIES_MAX 20
@@ -41,7 +39,7 @@
struct cam_query_cap_cmd *query = hw_caps_args;
struct cam_isp_query_cap_cmd query_isp;
- CDBG("%s: enter\n", __func__);
+ CAM_DBG(CAM_ISP, "enter");
if (copy_from_user(&query_isp, (void __user *)query->caps_handle,
sizeof(struct cam_isp_query_cap_cmd))) {
@@ -66,7 +64,7 @@
sizeof(struct cam_isp_query_cap_cmd)))
rc = -EFAULT;
- CDBG("%s: exit rc :%d !\n", __func__, rc);
+ CAM_DBG(CAM_ISP, "exit rc :%d !", rc);
return rc;
}
@@ -100,7 +98,7 @@
if (!isp_hw_res->hw_res[i])
continue;
hw_intf = isp_hw_res->hw_res[i]->hw_intf;
- CDBG("%s: enabled vfe hardware %d\n", __func__,
+ CAM_DBG(CAM_ISP, "enabled vfe hardware %d",
hw_intf->hw_idx);
if (hw_intf->hw_ops.init) {
rc = hw_intf->hw_ops.init(hw_intf->hw_priv,
@@ -113,7 +111,7 @@
return 0;
err:
- pr_err("%s: INIT HW res failed! (type:%d, id:%d)", __func__,
+ CAM_ERR(CAM_ISP, "INIT HW res failed! (type:%d, id:%d)",
isp_hw_res->res_type, isp_hw_res->res_id);
return rc;
}
@@ -134,19 +132,18 @@
isp_hw_res->hw_res[i],
sizeof(struct cam_isp_resource_node));
if (rc) {
- pr_err("%s: Can not start HW resources!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Can not start HW resources!");
goto err;
}
} else {
- pr_err("%s:function null\n", __func__);
+ CAM_ERR(CAM_ISP, "function null");
goto err;
}
}
return 0;
err:
- pr_err("%s: Start hw res failed! (type:%d, id:%d)", __func__,
+ CAM_ERR(CAM_ISP, "Start hw res failed! (type:%d, id:%d)",
isp_hw_res->res_type, isp_hw_res->res_id);
return rc;
}
@@ -166,7 +163,7 @@
isp_hw_res->hw_res[i],
sizeof(struct cam_isp_resource_node));
else
- pr_err("%s:stop null\n", __func__);
+ CAM_ERR(CAM_ISP, "stop null");
}
}
@@ -213,7 +210,7 @@
struct cam_ife_hw_mgr_res, list);
list_del_init(&res_ptr->list);
} else {
- pr_err("No more free ife hw mgr ctx!\n");
+ CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx!");
rc = -1;
}
*res = res_ptr;
@@ -237,11 +234,12 @@
isp_hw_res->hw_res[i],
sizeof(struct cam_isp_resource_node));
if (rc)
- pr_err("%s:Release hw resrouce id %d failed!\n",
- __func__, isp_hw_res->res_id);
+ CAM_ERR(CAM_ISP,
+ "Release hw resource id %d failed!",
+ isp_hw_res->res_id);
isp_hw_res->hw_res[i] = NULL;
} else
- pr_err("%s:Release null\n", __func__);
+ CAM_ERR(CAM_ISP, "Release null");
}
/* caller should make sure the resource is in a list */
list_del_init(&isp_hw_res->list);
@@ -328,8 +326,8 @@
ife_ctx->common.cb_priv = NULL;
memset(ife_ctx->common.event_cb, 0, sizeof(ife_ctx->common.event_cb));
- CDBG("%s:%d: release context completed ctx id:%d\n",
- __func__, __LINE__, ife_ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "release context completed ctx id:%d",
+ ife_ctx->ctx_index);
return 0;
}
@@ -364,7 +362,7 @@
struct cam_ife_hw_mgr_ctx, list);
list_del_init(&ctx_ptr->list);
} else {
- pr_err("No more free ife hw mgr ctx!\n");
+ CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx!");
rc = -1;
}
*ife_ctx = ctx_ptr;
@@ -381,7 +379,7 @@
uint32_t i;
if (!ctx->num_base) {
- CDBG("%s: Add split id = %d for base idx = %d\n", __func__,
+ CAM_DBG(CAM_ISP, "Add split id = %d for base idx = %d",
split_id, base_idx);
ctx->base[0].split_id = split_id;
ctx->base[0].idx = base_idx;
@@ -400,8 +398,8 @@
}
if (i == CAM_IFE_HW_NUM_MAX) {
- CDBG("%s: Add split id = %d for base idx = %d\n",
- __func__, split_id, base_idx);
+ CAM_DBG(CAM_ISP, "Add split id = %d for base idx = %d",
+ split_id, base_idx);
ctx->base[ctx->num_base].split_id = split_id;
ctx->base[ctx->num_base].idx = base_idx;
ctx->num_base++;
@@ -417,7 +415,7 @@
uint32_t i;
if (list_empty(&ctx->res_list_ife_src)) {
- pr_err("%s: Error! Mux List empty\n", __func__);
+ CAM_ERR(CAM_ISP, "Error! Mux List empty");
return -ENODEV;
}
@@ -440,7 +438,7 @@
res->hw_intf->hw_idx);
}
}
- CDBG("%s: ctx base num = %d\n", __func__, ctx->num_base);
+ CAM_DBG(CAM_ISP, "ctx base num = %d", ctx->num_base);
return 0;
}
@@ -474,7 +472,7 @@
vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_3;
break;
default:
- pr_err("%s: invalid resource type\n", __func__);
+ CAM_ERR(CAM_ISP, "invalid resource type");
goto err;
}
@@ -489,6 +487,7 @@
continue;
vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
+ vfe_acquire.vfe_out.ctx = ife_ctx;
vfe_acquire.vfe_out.out_port_info = out_port;
vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT;
vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
@@ -497,15 +496,15 @@
&vfe_acquire,
sizeof(struct cam_vfe_acquire_args));
if (rc) {
- pr_err("%s: Can not acquire out resource 0x%x\n",
- __func__, out_port->res_type);
+ CAM_ERR(CAM_ISP, "Can not acquire out resource 0x%x",
+ out_port->res_type);
goto err;
}
break;
}
if (i == in_port->num_out_res) {
- pr_err("%s: Can not acquire out resource\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not acquire out resource");
goto err;
}
@@ -535,16 +534,16 @@
out_port = &in_port->data[i];
k = out_port->res_type & 0xFF;
if (k >= CAM_IFE_HW_OUT_RES_MAX) {
- pr_err("%s: invalid output resource type 0x%x\n",
- __func__, out_port->res_type);
+ CAM_ERR(CAM_ISP, "invalid output resource type 0x%x",
+ out_port->res_type);
continue;
}
if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
continue;
- CDBG("%s: res_type 0x%x\n",
- __func__, out_port->res_type);
+ CAM_DBG(CAM_ISP, "res_type 0x%x",
+ out_port->res_type);
ife_out_res = &ife_ctx->res_list_ife_out[k];
ife_out_res->is_dual_vfe = in_port->usage_type;
@@ -552,6 +551,7 @@
vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
+ vfe_acquire.vfe_out.ctx = ife_ctx;
vfe_acquire.vfe_out.out_port_info = out_port;
vfe_acquire.vfe_out.is_dual = ife_src_res->is_dual_vfe;
vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
@@ -585,15 +585,16 @@
&vfe_acquire,
sizeof(struct cam_vfe_acquire_args));
if (rc) {
- pr_err("%s:Can not acquire out resource 0x%x\n",
- __func__, out_port->res_type);
+ CAM_ERR(CAM_ISP,
+ "Can not acquire out resource 0x%x",
+ out_port->res_type);
goto err;
}
ife_out_res->hw_res[j] =
vfe_acquire.vfe_out.rsrc_node;
- CDBG("%s: resource type :0x%x res id:0x%x\n",
- __func__, ife_out_res->hw_res[j]->res_type,
+ CAM_DBG(CAM_ISP, "resource type :0x%x res id:0x%x",
+ ife_out_res->hw_res[j]->res_type,
ife_out_res->hw_res[j]->res_id);
}
@@ -633,8 +634,7 @@
ife_src_res, in_port);
break;
default:
- pr_err("%s: Fatal: Unknown IFE SRC resource!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Fatal: Unknown IFE SRC resource!");
break;
}
if (rc)
@@ -668,7 +668,7 @@
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
&ife_src_res);
if (rc) {
- pr_err("%s: No more free hw mgr resource!\n", __func__);
+ CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
goto err;
}
cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src,
@@ -708,8 +708,7 @@
vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
break;
default:
- pr_err("%s: Wrong IFE CSID Resource Node!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node!");
goto err;
}
ife_src_res->res_type = vfe_acquire.rsrc_type;
@@ -734,13 +733,15 @@
&vfe_acquire,
sizeof(struct cam_vfe_acquire_args));
if (rc) {
- pr_err("%s:Can not acquire IFE HW res %d!\n",
- __func__, csid_res->res_id);
+ CAM_ERR(CAM_ISP,
+ "Can not acquire IFE HW res %d",
+ csid_res->res_id);
goto err;
}
ife_src_res->hw_res[i] = vfe_acquire.vfe_in.rsrc_node;
- CDBG("%s:acquire success res type :0x%x res id:0x%x\n",
- __func__, ife_src_res->hw_res[i]->res_type,
+ CAM_DBG(CAM_ISP,
+ "acquire success res type :0x%x res id:0x%x",
+ ife_src_res->hw_res[i]->res_type,
ife_src_res->hw_res[i]->res_id);
}
@@ -777,7 +778,7 @@
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
if (rc) {
- pr_err("%s: No more free hw mgr resource!\n", __func__);
+ CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
goto err;
}
cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
@@ -808,13 +809,12 @@
}
if (i == CAM_IFE_CSID_HW_NUM_MAX) {
- pr_err("%s: Can not acquire ife csid ipp resrouce!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Can not acquire ife csid ipp resrouce!");
goto err;
}
- CDBG("%s: acquired csid(%d) left ipp resrouce successfully!\n",
- __func__, i);
+ CAM_DBG(CAM_ISP, "acquired csid(%d) left ipp resrouce successfully!",
+ i);
csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
csid_res->res_id = CAM_IFE_PIX_PATH_RES_IPP;
@@ -839,14 +839,14 @@
}
if (j == CAM_IFE_CSID_HW_NUM_MAX) {
- pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
- __func__);
+ CAM_ERR(CAM_ISP,
+ "Can not acquire ife csid rdi resrouce!");
goto err;
}
csid_res->hw_res[1] = csid_acquire.node_res;
- CDBG("%s:acquired csid(%d)right ipp resrouce successfully!\n",
- __func__, j);
+ CAM_DBG(CAM_ISP,
+ "acquired csid(%d)right ipp resrouce successfully!", j);
}
csid_res->parent = &ife_ctx->res_list_ife_in;
@@ -879,7 +879,7 @@
break;
default:
path_id = CAM_IFE_PIX_PATH_RES_MAX;
- CDBG("%s: maximum rdi output type exceeded\n", __func__);
+ CAM_DBG(CAM_ISP, "maximum rdi output type exceeded");
break;
}
@@ -910,7 +910,7 @@
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
&csid_res);
if (rc) {
- pr_err("%s: No more free hw mgr resource!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
goto err;
}
@@ -928,6 +928,7 @@
csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
csid_acquire.cid = cid_res_id;
csid_acquire.in_port = in_port;
+ csid_acquire.out_port = out_port;
csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
for (j = 0; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
@@ -944,8 +945,8 @@
}
if (j == CAM_IFE_CSID_HW_NUM_MAX) {
- pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
- __func__);
+ CAM_ERR(CAM_ISP,
+ "Can not acquire ife csid rdi resrouce!");
goto err;
}
@@ -978,7 +979,7 @@
ife_ctx->res_list_ife_in.res_id = in_port->res_type;
ife_ctx->res_list_ife_in.is_dual_vfe = in_port->usage_type;
} else if (ife_ctx->res_list_ife_in.res_id != in_port->res_type) {
- pr_err("%s: No Free resource for this context!\n", __func__);
+ CAM_ERR(CAM_ISP, "No Free resource for this context!");
goto err;
} else {
/* else do nothing */
@@ -1032,7 +1033,7 @@
/* no dual vfe for TPG */
if ((in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) &&
(in_port->usage_type != 0)) {
- pr_err("%s: No Dual VFE on TPG input!\n", __func__);
+ CAM_ERR(CAM_ISP, "No Dual VFE on TPG input!");
goto err;
}
@@ -1040,7 +1041,7 @@
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
if (rc) {
- pr_err("%s: No more free hw mgr resource!\n", __func__);
+ CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
goto err;
}
cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
@@ -1062,8 +1063,7 @@
}
if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
- pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resrouce!");
goto err;
}
@@ -1093,8 +1093,8 @@
}
if (j == CAM_IFE_CSID_HW_NUM_MAX) {
- pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
- __func__);
+ CAM_ERR(CAM_ISP,
+ "Can not acquire ife csid rdi resrouce!");
goto err;
}
cid_res->hw_res[1] = csid_acquire.node_res;
@@ -1110,7 +1110,8 @@
}
static int cam_ife_mgr_acquire_hw_for_ctx(
struct cam_ife_hw_mgr_ctx *ife_ctx,
- struct cam_isp_in_port_info *in_port)
+ struct cam_isp_in_port_info *in_port,
+ uint32_t *num_pix_port, uint32_t *num_rdi_port)
{
int rc = -1;
int is_dual_vfe = 0;
@@ -1123,16 +1124,14 @@
/* get root node resource */
rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port);
if (rc) {
- pr_err("%s:%d: Can not acquire csid rx resource!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Can not acquire csid rx resource!");
goto err;
}
/* get cid resource */
rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id);
if (rc) {
- pr_err("%s%d: Acquire IFE CID resource Failed!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed!");
goto err;
}
@@ -1140,7 +1139,7 @@
&pixel_count, &rdi_count);
if (!pixel_count && !rdi_count) {
- pr_err("%s: Error! no PIX or RDI resource\n", __func__);
+ CAM_ERR(CAM_ISP, "Error! no PIX or RDI resource");
return -EINVAL;
}
@@ -1149,8 +1148,8 @@
rc = cam_ife_hw_mgr_acquire_res_ife_csid_ipp(ife_ctx, in_port,
cid_res_id);
if (rc) {
- pr_err("%s%d: Acquire IFE CSID IPP resource Failed!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP,
+ "Acquire IFE CSID IPP resource Failed!");
goto err;
}
}
@@ -1160,8 +1159,8 @@
rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port,
cid_res_id);
if (rc) {
- pr_err("%s%d: Acquire IFE CSID RDI resource Failed!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP,
+ "Acquire IFE CSID RDI resource Failed!");
goto err;
}
}
@@ -1169,18 +1168,19 @@
/* get ife src resource */
rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
if (rc) {
- pr_err("%s%d: Acquire IFE SRC resource Failed!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed!");
goto err;
}
rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port);
if (rc) {
- pr_err("%s%d: Acquire IFE OUT resource Failed!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed!");
goto err;
}
+ *num_pix_port += pixel_count;
+ *num_rdi_port += rdi_count;
+
return 0;
err:
/* release resource at the acquire entry funciton */
@@ -1190,11 +1190,11 @@
void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata,
enum cam_cdm_cb_status status, uint32_t cookie)
{
- CDBG("%s: Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%d\n",
- __func__, handle, userdata, status, cookie);
+ CAM_DBG(CAM_ISP,
+ "Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%d",
+ handle, userdata, status, cookie);
}
-
/* entry function: acquire_hw */
static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
void *acquire_hw_args)
@@ -1206,19 +1206,21 @@
struct cam_ife_hw_mgr_ctx *ife_ctx;
struct cam_isp_in_port_info *in_port = NULL;
struct cam_isp_resource *isp_resource = NULL;
- struct cam_cdm_acquire_data cdm_acquire;
+ struct cam_cdm_acquire_data cdm_acquire;
+ uint32_t num_pix_port = 0;
+ uint32_t num_rdi_port = 0;
- CDBG("%s: Enter...\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter...");
if (!acquire_args || acquire_args->num_acq <= 0) {
- pr_err("%s: Nothing to acquire. Seems like error\n", __func__);
+ CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
return -EINVAL;
}
/* get the ife ctx */
rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
if (rc || !ife_ctx) {
- pr_err("Get ife hw context failed!\n");
+ CAM_ERR(CAM_ISP, "Get ife hw context failed!");
goto err;
}
@@ -1245,12 +1247,12 @@
cdm_acquire.id = CAM_CDM_VIRTUAL;
cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
if (!cam_cdm_acquire(&cdm_acquire)) {
- CDBG("Successfully acquired the CDM HW hdl=%x\n",
+ CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
cdm_acquire.handle);
ife_ctx->cdm_handle = cdm_acquire.handle;
ife_ctx->cdm_ops = cdm_acquire.ops;
} else {
- pr_err("Failed to acquire the CDM HW\n");
+ CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
goto err;
}
@@ -1261,32 +1263,38 @@
if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT)
continue;
- CDBG("%s: start copy from user handle %lld with len = %d\n",
- __func__, isp_resource[i].res_hdl,
+ CAM_DBG(CAM_ISP,
+ "start copy from user handle %lld with len = %d",
+ isp_resource[i].res_hdl,
isp_resource[i].length);
in_port = memdup_user((void __user *)isp_resource[i].res_hdl,
isp_resource[i].length);
if (in_port > 0) {
- rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port);
+ rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
+ &num_pix_port, &num_rdi_port);
kfree(in_port);
if (rc) {
- pr_err("%s: can not acquire resource!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "can not acquire resource");
goto free_res;
}
} else {
- pr_err("%s: copy from user failed with in_port = %pK",
- __func__, in_port);
+ CAM_ERR(CAM_ISP,
+ "copy from user failed with in_port = %pK",
+ in_port);
rc = -EFAULT;
goto free_res;
}
}
+
+ /* Check whether context has only RDI resource */
+ if (!num_pix_port)
+ ife_ctx->is_rdi_only_context = 1;
+
/* Process base info */
rc = cam_ife_mgr_process_base_info(ife_ctx);
if (rc) {
- pr_err("%s: Error process) base info!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Error process) base info!");
return -EINVAL;
}
@@ -1295,14 +1303,14 @@
cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
- CDBG("%s: Exit...(success)!\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit...(success)!");
return 0;
free_res:
cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
err:
- CDBG("%s: Exit...(rc=%d)!\n", __func__, rc);
+ CAM_DBG(CAM_ISP, "Exit...(rc=%d)!", rc);
return rc;
}
@@ -1316,25 +1324,25 @@
struct cam_cdm_bl_request *cdm_cmd;
struct cam_ife_hw_mgr_ctx *ctx;
- CDBG("%s: Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
if (!hw_mgr_priv || !config_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
cfg = config_hw_args;
ctx = (struct cam_ife_hw_mgr_ctx *)cfg->ctxt_to_hw_map;
if (!ctx) {
- pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
return -EPERM;
}
if (!ctx->ctx_in_use || !ctx->cdm_cmd) {
- pr_err("%s: Invalid context parameters !\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid context parameters !");
return -EPERM;
}
- CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__, ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "Enter ctx id:%d", ctx->ctx_index);
if (cfg->num_hw_update_entries > 0) {
cdm_cmd = ctx->cdm_cmd;
@@ -1353,11 +1361,11 @@
rc = cam_cdm_submit_bls(ctx->cdm_handle, cdm_cmd);
if (rc)
- pr_err("Failed to apply the configs\n");
+ CAM_ERR(CAM_ISP, "Failed to apply the configs");
} else {
- pr_err("No commands to config\n");
+ CAM_ERR(CAM_ISP, "No commands to config");
}
- CDBG("%s: Exit\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -1372,22 +1380,21 @@
uint32_t i, master_base_idx = 0;
if (!hw_mgr_priv || !stop_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
- pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
return -EPERM;
}
- CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
ctx->ctx_index);
/* stop resource will remove the irq mask from the hardware */
if (!ctx->num_base) {
- pr_err("%s%d: error number of bases are zero\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "error number of bases are zero");
return -EINVAL;
}
@@ -1443,7 +1450,7 @@
/* update vote bandwidth should be done at the HW layer */
- CDBG("%s%d Exit...ctx id:%d rc :%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
ctx->ctx_index, rc);
return rc;
@@ -1459,23 +1466,22 @@
uint32_t i, master_base_idx = 0;
if (!hw_mgr_priv || !stop_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
- pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
return -EPERM;
}
- CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
ctx->ctx_index);
/* Note:stop resource will remove the irq mask from the hardware */
if (!ctx->num_base) {
- pr_err("%s%d: error number of bases are zero\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "error number of bases are zero");
return -EINVAL;
}
@@ -1520,8 +1526,8 @@
}
if (cam_cdm_stream_off(ctx->cdm_handle))
- pr_err("%s%d: CDM stream off failed %d\n",
- __func__, __LINE__, ctx->cdm_handle);
+ CAM_ERR(CAM_ISP, "CDM stream off failed %d",
+ ctx->cdm_handle);
/* IFE mux in resources */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
@@ -1557,8 +1563,7 @@
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_ife_out[i]);
- CDBG("%s%d Exit...ctx id:%d rc :%d\n", __func__, __LINE__,
- ctx->ctx_index, rc);
+ CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d", ctx->ctx_index, rc);
return rc;
}
@@ -1572,7 +1577,7 @@
struct cam_csid_reset_cfg_args csid_reset_args;
if (!hw_mgr) {
- CDBG("%s: Invalid arguments\n", __func__);
+ CAM_DBG(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -1594,13 +1599,13 @@
for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
if (hw_idx != hw_mgr->ife_devices[i]->hw_idx)
continue;
- CDBG("%d:VFE (id = %d) reset\n", __LINE__, hw_idx);
+ CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx);
vfe_hw_intf = hw_mgr->ife_devices[i];
vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv, NULL, 0);
break;
}
- CDBG("%d: Exit Successfully\n", __LINE__);
+ CAM_DBG(CAM_ISP, "Exit Successfully");
return 0;
}
@@ -1614,74 +1619,68 @@
uint32_t i;
if (!hw_mgr_priv || !start_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
- pr_err("%s: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid context is used!");
return -EPERM;
}
- CDBG("%s%d Enter... ctx id:%d\n", __func__, __LINE__,
- ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "Enter... ctx id:%d", ctx->ctx_index);
- CDBG("%s%d START IFE OUT ... in ctx id:%d\n", __func__, __LINE__,
- ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d", ctx->ctx_index);
/* start the IFE out devices */
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
if (rc) {
- pr_err("%s: Can not start IFE OUT (%d)!\n",
- __func__, i);
+ CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)!", i);
goto err;
}
}
- CDBG("%s%d START IFE SRC ... in ctx id:%d\n", __func__, __LINE__,
- ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d", ctx->ctx_index);
/* Start the IFE mux in devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE MUX (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
- CDBG("%s:%d: START CSID HW ... in ctx id:%d\n", __func__, __LINE__,
- ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d", ctx->ctx_index);
/* Start the IFE CSID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE CSID (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
- CDBG("%s%d START CID SRC ... in ctx id:%d\n", __func__, __LINE__,
- ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d", ctx->ctx_index);
/* Start the IFE CID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE CSID (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
/* Start IFE root node: do nothing */
- CDBG("%s: Exit...(success)\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit...(success)");
return 0;
err:
cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
- CDBG("%s: Exit...(rc=%d)\n", __func__, rc);
+ CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
return rc;
}
@@ -1694,17 +1693,17 @@
uint32_t i;
if (!hw_mgr_priv || !start_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
- pr_err("%s: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid context is used!");
return -EPERM;
}
- CDBG("%s%d Enter... ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Enter... ctx id:%d",
ctx->ctx_index);
/* update Bandwidth should be done at the hw layer */
@@ -1713,127 +1712,127 @@
/* INIT IFE Root: do nothing */
- CDBG("%s%d INIT IFE CID ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "INIT IFE CID ... in ctx id:%d",
ctx->ctx_index);
/* INIT IFE CID */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not INIT IFE CID.(id :%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not INIT IFE CID.(id :%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
- CDBG("%s%d INIT IFE csid ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "INIT IFE csid ... in ctx id:%d",
ctx->ctx_index);
/* INIT IFE csid */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not INIT IFE CSID.(id :%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not INIT IFE CSID.(id :%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
/* INIT IFE SRC */
- CDBG("%s%d INIT IFE SRC in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "INIT IFE SRC in ctx id:%d",
ctx->ctx_index);
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not INIT IFE SRC (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
/* INIT IFE OUT */
- CDBG("%s%d INIT IFE OUT RESOURCES in ctx id:%d\n", __func__,
- __LINE__, ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "INIT IFE OUT RESOURCES in ctx id:%d",
+ ctx->ctx_index);
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
if (rc) {
- pr_err("%s: Can not INIT IFE OUT (%d)!\n",
- __func__, ctx->res_list_ife_out[i].res_id);
+ CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)!",
+ ctx->res_list_ife_out[i].res_id);
goto err;
}
}
- CDBG("%s: start cdm interface\n", __func__);
+ CAM_DBG(CAM_ISP, "start cdm interface");
rc = cam_cdm_stream_on(ctx->cdm_handle);
if (rc) {
- pr_err("%s: Can not start cdm (%d)!\n",
- __func__, ctx->cdm_handle);
+ CAM_ERR(CAM_ISP, "Can not start cdm (%d)!",
+ ctx->cdm_handle);
goto err;
}
/* Apply initial configuration */
- CDBG("%s: Config HW\n", __func__);
+ CAM_DBG(CAM_ISP, "Config HW");
rc = cam_ife_mgr_config_hw(hw_mgr_priv, start_hw_args);
if (rc) {
- pr_err("%s: Config HW failed\n", __func__);
+ CAM_ERR(CAM_ISP, "Config HW failed");
goto err;
}
- CDBG("%s%d START IFE OUT ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d",
ctx->ctx_index);
/* start the IFE out devices */
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
if (rc) {
- pr_err("%s: Can not start IFE OUT (%d)!\n",
- __func__, i);
+ CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)!",
+ i);
goto err;
}
}
- CDBG("%s%d START IFE SRC ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d",
ctx->ctx_index);
/* Start the IFE mux in devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE MUX (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
- CDBG("%s:%d: START CSID HW ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d",
ctx->ctx_index);
/* Start the IFE CSID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE CSID (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
- CDBG("%s%d START CID SRC ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d",
ctx->ctx_index);
/* Start the IFE CID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE CSID (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
/* Start IFE root node: do nothing */
- CDBG("%s: Exit...(success)\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit...(success)");
return 0;
err:
cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
- CDBG("%s: Exit...(rc=%d)\n", __func__, rc);
+ CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
return rc;
}
@@ -1856,17 +1855,17 @@
struct cam_ife_hw_mgr_ctx *ctx;
if (!hw_mgr_priv || !release_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
ctx = (struct cam_ife_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
- pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
return -EPERM;
}
- CDBG("%s%d Enter...ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
ctx->ctx_index);
/* we should called the stop hw before this already */
@@ -1882,7 +1881,7 @@
/* clean context */
list_del_init(&ctx->list);
ctx->ctx_in_use = 0;
- CDBG("%s%d Exit...ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Exit...ctx id:%d",
ctx->ctx_index);
cam_ife_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx);
return rc;
@@ -1896,34 +1895,34 @@
(struct cam_hw_prepare_update_args *) prepare_hw_update_args;
struct cam_ife_hw_mgr_ctx *ctx;
struct cam_ife_hw_mgr *hw_mgr;
- struct cam_isp_kmd_buf_info kmd_buf;
+ struct cam_kmd_buf_info kmd_buf;
uint32_t i;
bool fill_fence = true;
if (!hw_mgr_priv || !prepare_hw_update_args) {
- pr_err("%s: Invalid args\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid args");
return -EINVAL;
}
- CDBG("%s:%d enter\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "enter");
ctx = (struct cam_ife_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
hw_mgr = (struct cam_ife_hw_mgr *)hw_mgr_priv;
- rc = cam_isp_validate_packet(prepare->packet);
+ rc = cam_packet_util_validate_packet(prepare->packet);
if (rc)
return rc;
- CDBG("%s:%d enter\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "enter");
/* Pre parse the packet*/
- rc = cam_isp_get_kmd_buffer(prepare->packet, &kmd_buf);
+ rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
if (rc)
return rc;
rc = cam_packet_util_process_patches(prepare->packet,
hw_mgr->mgr_common.cmd_iommu_hdl);
if (rc) {
- pr_err("%s: Patch ISP packet failed.\n", __func__);
+ CAM_ERR(CAM_ISP, "Patch ISP packet failed.");
return rc;
}
@@ -1932,7 +1931,7 @@
prepare->num_out_map_entries = 0;
for (i = 0; i < ctx->num_base; i++) {
- CDBG("%s: process cmd buffer for device %d\n", __func__, i);
+ CAM_DBG(CAM_ISP, "process cmd buffer for device %d", i);
/* Add change base */
rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
@@ -1980,6 +1979,92 @@
return rc;
}
+/*
+ * cam_ife_mgr_cmd() - HW-manager command entry point for an IFE context.
+ *
+ * @hw_mgr_priv: HW manager private data; only checked for non-NULL here.
+ * @cmd_args:    pointer to struct cam_isp_hw_cmd_args carrying the command
+ *               type and, on return, the result union.
+ *
+ * Currently handles only CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT, which
+ * reports (in hw_cmd_args->u.is_rdi_only_context) whether the context was
+ * acquired with no pixel path, i.e. RDI resources only.
+ *
+ * Returns 0 on success, -EINVAL for NULL arguments or an unknown command
+ * type, -EPERM if the context pointer is NULL or not in use.
+ */
+static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+ int rc = 0;
+ struct cam_isp_hw_cmd_args *hw_cmd_args = cmd_args;
+ struct cam_ife_hw_mgr_ctx *ctx;
+
+ if (!hw_mgr_priv || !cmd_args) {
+ CAM_ERR(CAM_ISP, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map;
+ if (!ctx || !ctx->ctx_in_use) {
+ CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+ return -EPERM;
+ }
+
+ switch (hw_cmd_args->cmd_type) {
+ case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT:
+ if (ctx->is_rdi_only_context)
+ hw_cmd_args->u.is_rdi_only_context = 1;
+ else
+ hw_cmd_args->u.is_rdi_only_context = 0;
+
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
+ hw_cmd_args->cmd_type);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * cam_ife_mgr_cmd_get_sof_timestamp() - fetch the SOF timestamp for a ctx.
+ *
+ * @ife_ctx:    IFE HW manager context whose CSID resource list is walked.
+ * @time_stamp: out parameter; written with csid_get_time.time_stamp_val
+ *              when the CSID process_cmd succeeds.
+ *
+ * Queries only the first left-split CSID resource found (right split is
+ * skipped), via CAM_IFE_CSID_CMD_GET_TIME_STAMP, then exits the search.
+ *
+ * Returns 0 on success; -EINVAL (or the process_cmd error) if no CSID
+ * resource with a process_cmd op was found or the query failed.
+ */
+static int cam_ife_mgr_cmd_get_sof_timestamp(
+ struct cam_ife_hw_mgr_ctx *ife_ctx,
+ uint64_t *time_stamp)
+{
+ int rc = -EINVAL;
+ uint32_t i;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_csid_get_time_stamp_args csid_get_time;
+
+ list_for_each_entry(hw_mgr_res, &ife_ctx->res_list_ife_csid, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i] ||
+ (i == CAM_ISP_HW_SPLIT_RIGHT))
+ continue;
+ /*
+ * Get the SOF time stamp from left resource only.
+ * Left resource is master for dual vfe case and
+ * Rdi only context case left resource only hold
+ * the RDI resource
+ */
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf->hw_ops.process_cmd) {
+ csid_get_time.node_res =
+ hw_mgr_res->hw_res[i];
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_IFE_CSID_CMD_GET_TIME_STAMP,
+ &csid_get_time,
+ sizeof(
+ struct cam_csid_get_time_stamp_args));
+ if (!rc)
+ *time_stamp =
+ csid_get_time.time_stamp_val;
+ /*
+ * Single VFE case, Get the time stamp from available
+ * one csid hw in the context
+ * Dual VFE case, get the time stamp from master(left)
+ * would be sufficient
+ */
+ goto end;
+ }
+ }
+ }
+end:
+ if (rc)
+ CAM_ERR(CAM_ISP, "error in getting sof time stamp");
+
+ return rc;
+}
+
static int cam_ife_mgr_process_recovery_cb(void *priv, void *data)
{
int32_t rc = 0;
@@ -1993,13 +2078,14 @@
struct cam_ife_hw_mgr_ctx *ctx = NULL;
/* Here recovery is performed */
- CDBG("%s:Enter: ErrorType = %d\n", __func__, error_type);
+ CAM_DBG(CAM_ISP, "Enter: ErrorType = %d", error_type);
switch (error_type) {
case CAM_ISP_HW_ERROR_OVERFLOW:
case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
if (!recovery_data->affected_ctx[0]) {
- pr_err("No context is affected but recovery called\n");
+ CAM_ERR(CAM_ISP,
+ "No context is affected but recovery called");
kfree(recovery_data);
return 0;
}
@@ -2027,9 +2113,9 @@
break;
default:
- pr_err("%s: Invalid Error\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid Error");
}
- CDBG("%s:Exit: ErrorType = %d\n", __func__, error_type);
+ CAM_DBG(CAM_ISP, "Exit: ErrorType = %d", error_type);
kfree(recovery_data);
return rc;
@@ -2052,12 +2138,11 @@
memcpy(recovery_data, ife_mgr_recovery_data,
sizeof(struct cam_hw_event_recovery_data));
- CDBG("%s: Enter: error_type (%d)\n", __func__,
- recovery_data->error_type);
+ CAM_DBG(CAM_ISP, "Enter: error_type (%d)", recovery_data->error_type);
task = cam_req_mgr_workq_get_task(g_ife_hw_mgr.workq);
if (!task) {
- pr_err("%s: No empty task frame\n", __func__);
+ CAM_ERR(CAM_ISP, "No empty task frame");
kfree(recovery_data);
return -ENOMEM;
}
@@ -2086,7 +2171,7 @@
uint32_t max_idx = ife_hwr_mgr_ctx->num_base;
uint32_t ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
- CDBG("%s:Enter:max_idx = %d\n", __func__, max_idx);
+ CAM_DBG(CAM_ISP, "Enter:max_idx = %d", max_idx);
while (i < max_idx) {
if (affected_core[ife_hwr_mgr_ctx->base[i].idx])
@@ -2107,7 +2192,7 @@
j = j - 1;
}
}
- CDBG("%s:Exit\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -2133,11 +2218,11 @@
struct cam_hw_stop_args stop_args;
uint32_t i = 0;
- CDBG("%s:Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
return 0;
if (!recovery_data) {
- pr_err("%s: recovery_data parameter is NULL\n",
+ CAM_ERR(CAM_ISP, "recovery_data parameter is NULL",
__func__);
return -EINVAL;
}
@@ -2154,7 +2239,7 @@
* Check if current core_idx matches the HW associated
* with this context
*/
- CDBG("%s:Calling match Hw idx\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling match Hw idx");
if (cam_ife_hw_mgr_match_hw_idx(ife_hwr_mgr_ctx, affected_core))
continue;
@@ -2164,7 +2249,7 @@
stop_args.ctxt_to_hw_map = ife_hwr_mgr_ctx;
/* Add affected_context in list of recovery data*/
- CDBG("%s:Add new entry in affected_ctx_list\n", __func__);
+ CAM_DBG(CAM_ISP, "Add new entry in affected_ctx_list");
if (recovery_data->no_of_context < CAM_CTX_MAX)
recovery_data->affected_ctx[
recovery_data->no_of_context++] =
@@ -2177,7 +2262,7 @@
*/
if (!cam_ife_mgr_stop_hw_in_overflow(&hw_mgr_priv,
&stop_args)) {
- CDBG("%s:Calling Error handler CB\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling Error handler CB");
ife_hwr_irq_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
CAM_ISP_HW_EVENT_ERROR, error_event_data);
}
@@ -2185,10 +2270,10 @@
/* fill the affected_core in recovery data */
for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
recovery_data->affected_core[i] = affected_core[i];
- CDBG("%s: Vfe core %d is affected (%d)\n",
- __func__, i, recovery_data->affected_core[i]);
+ CAM_DBG(CAM_ISP, "Vfe core %d is affected (%d)",
+ i, recovery_data->affected_core[i]);
}
- CDBG("%s:Exit\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit");
return 0;
}
@@ -2208,7 +2293,7 @@
core_idx = evt_payload->core_index;
rc = evt_payload->error_type;
- CDBG("%s: Enter: error_type (%d)\n", __func__, evt_payload->error_type);
+ CAM_DBG(CAM_ISP, "Enter: error_type (%d)", evt_payload->error_type);
switch (evt_payload->error_type) {
case CAM_ISP_HW_ERROR_OVERFLOW:
case CAM_ISP_HW_ERROR_P2I_ERROR:
@@ -2227,11 +2312,11 @@
cam_ife_hw_mgr_do_error_recovery(&recovery_data);
break;
default:
- CDBG("%s: None error. Error type (%d)\n", __func__,
+ CAM_DBG(CAM_ISP, "None error. Error type (%d)",
evt_payload->error_type);
}
- CDBG("%s: Exit (%d)\n", __func__, rc);
+ CAM_DBG(CAM_ISP, "Exit (%d)", rc);
return rc;
}
@@ -2241,26 +2326,26 @@
* of dual VFE.
* RDI path does not support DUAl VFE
*/
-static int cam_ife_hw_mgr_handle_rup_for_camif_hw_res(
+static int cam_ife_hw_mgr_handle_reg_update(
void *handler_priv,
void *payload)
{
struct cam_isp_resource_node *hw_res;
struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
struct cam_vfe_top_irq_evt_payload *evt_payload;
- struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
+ struct cam_ife_hw_mgr_res *ife_src_res = NULL;
cam_hw_event_cb_func ife_hwr_irq_rup_cb;
struct cam_isp_hw_reg_update_event_data rup_event_data;
uint32_t core_idx;
uint32_t rup_status = -EINVAL;
- CDBG("%s: Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
ife_hwr_mgr_ctx = handler_priv;
evt_payload = payload;
if (!handler_priv || !payload) {
- pr_err("%s: Invalid Parameter\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid Parameter");
return -EPERM;
}
@@ -2269,64 +2354,83 @@
ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
- list_for_each_entry(isp_ife_camif_res,
+ list_for_each_entry(ife_src_res,
&ife_hwr_mgr_ctx->res_list_ife_src, list) {
- if (isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+ if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
continue;
- CDBG("%s: camif resource id = %d, curr_core_idx = %d\n",
- __func__, isp_ife_camif_res->res_id, core_idx);
- switch (isp_ife_camif_res->res_id) {
+ CAM_DBG(CAM_ISP, "resource id = %d, curr_core_idx = %d",
+ ife_src_res->res_id, core_idx);
+ switch (ife_src_res->res_id) {
case CAM_ISP_HW_VFE_IN_CAMIF:
- if (isp_ife_camif_res->is_dual_vfe)
+ if (ife_src_res->is_dual_vfe)
/* It checks for slave core RUP ACK*/
- hw_res = isp_ife_camif_res->hw_res[1];
+ hw_res = ife_src_res->hw_res[1];
else
- hw_res = isp_ife_camif_res->hw_res[0];
+ hw_res = ife_src_res->hw_res[0];
if (!hw_res) {
- pr_err("%s: CAMIF device is NULL\n", __func__);
+ CAM_ERR(CAM_ISP, "CAMIF device is NULL");
break;
}
- CDBG("%s: current_core_id = %d , core_idx res = %d\n",
- __func__, core_idx,
- hw_res->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP,
+ "current_core_id = %d , core_idx res = %d",
+ core_idx, hw_res->hw_intf->hw_idx);
if (core_idx == hw_res->hw_intf->hw_idx) {
rup_status = hw_res->bottom_half_handler(
hw_res, evt_payload);
}
+
+ if (!rup_status) {
+ ife_hwr_irq_rup_cb(
+ ife_hwr_mgr_ctx->common.cb_priv,
+ CAM_ISP_HW_EVENT_REG_UPDATE,
+ &rup_event_data);
+ }
break;
case CAM_ISP_HW_VFE_IN_RDI0:
case CAM_ISP_HW_VFE_IN_RDI1:
case CAM_ISP_HW_VFE_IN_RDI2:
- hw_res = isp_ife_camif_res->hw_res[0];
+ case CAM_ISP_HW_VFE_IN_RDI3:
+ if (!ife_hwr_mgr_ctx->is_rdi_only_context)
+ continue;
+
+ /*
+ * This is RDI only context, send Reg update and epoch
+ * HW event to cam context
+ */
+ hw_res = ife_src_res->hw_res[0];
if (!hw_res) {
- pr_err("%s: RDI Device is NULL\n", __func__);
+ CAM_ERR(CAM_ISP, "RDI Device is NULL");
break;
}
+
if (core_idx == hw_res->hw_intf->hw_idx)
- /* Need to process rdi reg update */
- rup_status = -EINVAL;
+ rup_status = hw_res->bottom_half_handler(
+ hw_res, evt_payload);
+
+ if (!rup_status) {
+ /* Send the Reg update hw event */
+ ife_hwr_irq_rup_cb(
+ ife_hwr_mgr_ctx->common.cb_priv,
+ CAM_ISP_HW_EVENT_REG_UPDATE,
+ &rup_event_data);
+ }
break;
default:
- pr_err("%s: invalid resource id (%d)", __func__,
- isp_ife_camif_res->res_id);
- }
-
- /* only do callback for pixel reg update for now */
- if (!rup_status && (isp_ife_camif_res->res_id ==
- CAM_ISP_HW_VFE_IN_CAMIF)) {
- ife_hwr_irq_rup_cb(ife_hwr_mgr_ctx->common.cb_priv,
- CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+ CAM_ERR(CAM_ISP, "Invalid resource id (%d)",
+ ife_src_res->res_id);
}
}
- CDBG("%s: Exit (rup_status = %d)!\n", __func__, rup_status);
+ if (!rup_status)
+ CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
+
return 0;
}
@@ -2351,13 +2455,13 @@
if ((epoch_cnt[core_idx0] - epoch_cnt[core_idx1] > 1) ||
(epoch_cnt[core_idx1] - epoch_cnt[core_idx0] > 1)) {
- pr_warn("%s:One of the VFE of dual VFE cound not generate error\n",
- __func__);
+ CAM_WARN(CAM_ISP,
+ "One of the VFE of dual VFE cound not generate error");
rc = -1;
return rc;
}
- CDBG("Only one core_index has given EPOCH\n");
+ CAM_DBG(CAM_ISP, "Only one core_index has given EPOCH");
return rc;
}
@@ -2379,7 +2483,7 @@
uint32_t core_index0;
uint32_t core_index1;
- CDBG("%s:Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
ife_hwr_mgr_ctx = handler_priv;
evt_payload = payload;
@@ -2404,8 +2508,7 @@
case 0:
/* EPOCH check for Left side VFE */
if (!hw_res_l) {
- pr_err("%s: Left Device is NULL\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Left Device is NULL");
break;
}
@@ -2426,8 +2529,7 @@
/* SOF check for Left side VFE (Master)*/
if ((!hw_res_l) || (!hw_res_r)) {
- pr_err("%s: Dual VFE Device is NULL\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
break;
}
if (core_idx == hw_res_l->hw_intf->hw_idx) {
@@ -2465,12 +2567,14 @@
/* Error */
default:
- pr_err("%s: error with hw_res\n", __func__);
+ CAM_ERR(CAM_ISP, "error with hw_res");
}
}
- CDBG("%s: Exit (epoch_status = %d)!\n", __func__, epoch_status);
+ if (!epoch_status)
+ CAM_DBG(CAM_ISP, "Exit epoch_status = %d", epoch_status);
+
return 0;
}
@@ -2495,142 +2599,197 @@
if ((sof_cnt[core_idx0] - sof_cnt[core_idx1] > 1) ||
(sof_cnt[core_idx1] - sof_cnt[core_idx0] > 1)) {
- pr_err("%s: One VFE of dual VFE cound not generate SOF\n",
- __func__);
+ CAM_ERR(CAM_ISP, "One VFE of dual VFE cound not generate SOF");
rc = -1;
return rc;
}
- pr_info("Only one core_index has given SOF\n");
+ CAM_INFO(CAM_ISP, "Only one core_index has given SOF");
return rc;
}
-static int cam_ife_hw_mgr_handle_sof_for_camif_hw_res(
- void *handler_priv,
- void *payload)
+static int cam_ife_hw_mgr_process_camif_sof(
+ struct cam_ife_hw_mgr_res *isp_ife_camif_res,
+ struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx,
+ struct cam_vfe_top_irq_evt_payload *evt_payload)
{
- int32_t rc = -1;
+ struct cam_isp_hw_sof_event_data sof_done_event_data;
+ cam_hw_event_cb_func ife_hwr_irq_sof_cb;
struct cam_isp_resource_node *hw_res_l = NULL;
struct cam_isp_resource_node *hw_res_r = NULL;
- struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
- struct cam_vfe_top_irq_evt_payload *evt_payload;
- struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
- cam_hw_event_cb_func ife_hwr_irq_sof_cb;
- struct cam_isp_hw_sof_event_data sof_done_event_data;
+ int32_t rc = -EINVAL;
uint32_t core_idx;
uint32_t sof_status = 0;
uint32_t core_index0;
uint32_t core_index1;
- CDBG("%s:Enter\n", __func__);
-
- ife_hwr_mgr_ctx = handler_priv;
- evt_payload = payload;
- if (!evt_payload) {
- pr_err("%s: no payload\n", __func__);
- return IRQ_HANDLED;
- }
+ CAM_DBG(CAM_ISP, "Enter");
core_idx = evt_payload->core_index;
+ hw_res_l = isp_ife_camif_res->hw_res[0];
+ hw_res_r = isp_ife_camif_res->hw_res[1];
+ CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
+ isp_ife_camif_res->is_dual_vfe);
+
ife_hwr_irq_sof_cb =
ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
- evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
-
- list_for_each_entry(isp_ife_camif_res,
- &ife_hwr_mgr_ctx->res_list_ife_src, list) {
-
- if ((isp_ife_camif_res->res_type ==
- CAM_IFE_HW_MGR_RES_UNINIT) ||
- (isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
- continue;
-
- hw_res_l = isp_ife_camif_res->hw_res[0];
- hw_res_r = isp_ife_camif_res->hw_res[1];
-
- CDBG("%s:is_dual_vfe ? = %d\n", __func__,
- isp_ife_camif_res->is_dual_vfe);
- switch (isp_ife_camif_res->is_dual_vfe) {
- /* Handling Single VFE Scenario */
- case 0:
- /* SOF check for Left side VFE */
- if (!hw_res_l) {
- pr_err("%s: VFE Device is NULL\n",
- __func__);
- break;
- }
- CDBG("%s: curr_core_idx = %d, core idx hw = %d\n",
- __func__, core_idx,
- hw_res_l->hw_intf->hw_idx);
-
- if (core_idx == hw_res_l->hw_intf->hw_idx) {
- sof_status = hw_res_l->bottom_half_handler(
- hw_res_l, evt_payload);
- if (!sof_status)
- ife_hwr_irq_sof_cb(
- ife_hwr_mgr_ctx->common.cb_priv,
- CAM_ISP_HW_EVENT_SOF,
- &sof_done_event_data);
- }
-
+ switch (isp_ife_camif_res->is_dual_vfe) {
+ /* Handling Single VFE Scenario */
+ case 0:
+ /* SOF check for Left side VFE */
+ if (!hw_res_l) {
+ CAM_ERR(CAM_ISP, "VFE Device is NULL");
break;
+ }
+ CAM_DBG(CAM_ISP, "curr_core_idx = %d,core idx hw = %d",
+ core_idx, hw_res_l->hw_intf->hw_idx);
- /* Handling Dual VFE Scenario */
- case 1:
- /* SOF check for Left side VFE */
+ if (core_idx == hw_res_l->hw_intf->hw_idx) {
+ sof_status = hw_res_l->bottom_half_handler(hw_res_l,
+ evt_payload);
+ if (!sof_status) {
+ cam_ife_mgr_cmd_get_sof_timestamp(
+ ife_hwr_mgr_ctx,
+ &sof_done_event_data.timestamp);
- if (!hw_res_l) {
- pr_err("%s: VFE Device is NULL\n",
- __func__);
- break;
- }
- CDBG("%s: curr_core_idx = %d, idx associated hw = %d\n",
- __func__, core_idx,
- hw_res_l->hw_intf->hw_idx);
-
- if (core_idx == hw_res_l->hw_intf->hw_idx) {
- sof_status = hw_res_l->bottom_half_handler(
- hw_res_l, evt_payload);
- if (!sof_status)
- ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
- }
-
- /* SOF check for Right side VFE */
- if (!hw_res_r) {
- pr_err("%s: VFE Device is NULL\n",
- __func__);
- break;
- }
- CDBG("%s: curr_core_idx = %d, idx associated hw = %d\n",
- __func__, core_idx,
- hw_res_r->hw_intf->hw_idx);
- if (core_idx == hw_res_r->hw_intf->hw_idx) {
- sof_status = hw_res_r->bottom_half_handler(
- hw_res_r, evt_payload);
- if (!sof_status)
- ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
- }
-
- core_index0 = hw_res_l->hw_intf->hw_idx;
- core_index1 = hw_res_r->hw_intf->hw_idx;
-
- rc = cam_ife_hw_mgr_check_sof_for_dual_vfe(
- ife_hwr_mgr_ctx, core_index0, core_index1);
-
- if (!rc)
ife_hwr_irq_sof_cb(
ife_hwr_mgr_ctx->common.cb_priv,
CAM_ISP_HW_EVENT_SOF,
&sof_done_event_data);
+ }
+ }
+ break;
+
+ /* Handling Dual VFE Scenario */
+ case 1:
+ /* SOF check for Left side VFE */
+
+ if (!hw_res_l) {
+ CAM_ERR(CAM_ISP, "VFE Device is NULL");
+ break;
+ }
+ CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx= %d",
+ core_idx,
+ hw_res_l->hw_intf->hw_idx);
+
+ if (core_idx == hw_res_l->hw_intf->hw_idx) {
+ sof_status = hw_res_l->bottom_half_handler(
+ hw_res_l, evt_payload);
+ if (!sof_status)
+ ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+ }
+
+ /* SOF check for Right side VFE */
+ if (!hw_res_r) {
+ CAM_ERR(CAM_ISP, "VFE Device is NULL");
+ break;
+ }
+ CAM_DBG(CAM_ISP, "curr_core_idx = %d, ews hw idx= %d",
+ core_idx,
+ hw_res_r->hw_intf->hw_idx);
+ if (core_idx == hw_res_r->hw_intf->hw_idx) {
+ sof_status = hw_res_r->bottom_half_handler(hw_res_r,
+ evt_payload);
+ if (!sof_status)
+ ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+ }
+
+ core_index0 = hw_res_l->hw_intf->hw_idx;
+ core_index1 = hw_res_r->hw_intf->hw_idx;
+
+ rc = cam_ife_hw_mgr_check_sof_for_dual_vfe(ife_hwr_mgr_ctx,
+ core_index0, core_index1);
+
+ if (!rc)
+ ife_hwr_irq_sof_cb(ife_hwr_mgr_ctx->common.cb_priv,
+ CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+
+ break;
+
+ default:
+ CAM_ERR(CAM_ISP, "error with hw_res");
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "Exit (sof_status = %d)!", sof_status);
+
+ return 0;
+}
+
+static int cam_ife_hw_mgr_handle_sof(
+ void *handler_priv,
+ void *payload)
+{
+ int32_t rc = -EINVAL;
+ struct cam_isp_resource_node *hw_res = NULL;
+ struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx;
+ struct cam_vfe_top_irq_evt_payload *evt_payload;
+ struct cam_ife_hw_mgr_res *ife_src_res = NULL;
+ cam_hw_event_cb_func ife_hw_irq_sof_cb;
+ struct cam_isp_hw_sof_event_data sof_done_event_data;
+ uint32_t sof_status = 0;
+
+ CAM_DBG(CAM_ISP, "Enter");
+
+ ife_hw_mgr_ctx = handler_priv;
+ evt_payload = payload;
+ if (!evt_payload) {
+ CAM_ERR(CAM_ISP, "no payload");
+ return IRQ_HANDLED;
+ }
+ ife_hw_irq_sof_cb =
+ ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
+
+ evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
+
+ list_for_each_entry(ife_src_res,
+ &ife_hw_mgr_ctx->res_list_ife_src, list) {
+
+ if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+ continue;
+
+ switch (ife_src_res->res_id) {
+ case CAM_ISP_HW_VFE_IN_RDI0:
+ case CAM_ISP_HW_VFE_IN_RDI1:
+ case CAM_ISP_HW_VFE_IN_RDI2:
+ case CAM_ISP_HW_VFE_IN_RDI3:
+ /* check if it is rdi only context */
+ if (ife_hw_mgr_ctx->is_rdi_only_context) {
+ hw_res = ife_src_res->hw_res[0];
+ sof_status = hw_res->bottom_half_handler(
+ hw_res, evt_payload);
+
+ if (!sof_status) {
+ cam_ife_mgr_cmd_get_sof_timestamp(
+ ife_hw_mgr_ctx,
+ &sof_done_event_data.timestamp);
+
+ ife_hw_irq_sof_cb(
+ ife_hw_mgr_ctx->common.cb_priv,
+ CAM_ISP_HW_EVENT_SOF,
+ &sof_done_event_data);
+ }
+
+ CAM_DBG(CAM_ISP, "sof_status = %d", sof_status);
+
+ /* this is RDI only context so exit from here */
+ return 0;
+ }
break;
+ case CAM_ISP_HW_VFE_IN_CAMIF:
+ rc = cam_ife_hw_mgr_process_camif_sof(ife_src_res,
+ ife_hw_mgr_ctx, evt_payload);
+ break;
default:
- pr_err("%s: error with hw_res\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid resource id :%d",
+ ife_src_res->res_id);
+ break;
}
}
- CDBG("%s: Exit (sof_status = %d)!\n", __func__, sof_status);
return 0;
}
@@ -2640,11 +2799,11 @@
{
int32_t buf_done_status = 0;
- int32_t i = 0;
+ int32_t i;
int32_t rc = 0;
cam_hw_event_cb_func ife_hwr_irq_wm_done_cb;
struct cam_isp_resource_node *hw_res_l = NULL;
- struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = handler_priv;
+ struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = NULL;
struct cam_vfe_bus_irq_evt_payload *evt_payload = payload;
struct cam_ife_hw_mgr_res *isp_ife_out_res = NULL;
struct cam_hw_event_recovery_data recovery_data;
@@ -2653,8 +2812,9 @@
uint32_t error_resc_handle[CAM_IFE_HW_OUT_RES_MAX];
uint32_t num_of_error_handles = 0;
- CDBG("%s:Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
+ ife_hwr_mgr_ctx = evt_payload->ctx;
ife_hwr_irq_wm_done_cb =
ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
@@ -2715,7 +2875,7 @@
/* Report for Successful buf_done event if any */
if (buf_done_event_data.num_handles > 0 &&
ife_hwr_irq_wm_done_cb) {
- CDBG("%s: notify isp context\n", __func__);
+ CAM_DBG(CAM_ISP, "notify isp context");
ife_hwr_irq_wm_done_cb(
ife_hwr_mgr_ctx->common.cb_priv,
CAM_ISP_HW_EVENT_DONE,
@@ -2734,13 +2894,12 @@
}
break;
}
- CDBG("%s:buf_done status:(%d),isp_ife_out_res->res_id : 0x%x\n",
- __func__, buf_done_status, isp_ife_out_res->res_id);
+ if (!buf_done_status)
+ CAM_DBG(CAM_ISP,
+ "buf_done status:(%d),out_res->res_id: 0x%x",
+ buf_done_status, isp_ife_out_res->res_id);
}
-
- CDBG("%s: Exit (buf_done_status (Success) = %d)!\n", __func__,
- buf_done_status);
return rc;
err:
@@ -2759,8 +2918,8 @@
* for the first phase, we are going to reset entire HW.
*/
- CDBG("%s: Exit (buf_done_status (Error) = %d)!\n", __func__,
- buf_done_status);
+ CAM_DBG(CAM_ISP, "Exit (buf_done_status (Error) = %d)!",
+ buf_done_status);
return rc;
}
@@ -2775,31 +2934,21 @@
return rc;
evt_payload = evt_payload_priv;
- ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
+ ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
- CDBG("addr of evt_payload = %llx\n", (uint64_t)evt_payload);
- CDBG("bus_irq_status_0: = %x\n", evt_payload->irq_reg_val[0]);
- CDBG("bus_irq_status_1: = %x\n", evt_payload->irq_reg_val[1]);
- CDBG("bus_irq_status_2: = %x\n", evt_payload->irq_reg_val[2]);
- CDBG("bus_irq_comp_err: = %x\n", evt_payload->irq_reg_val[3]);
- CDBG("bus_irq_comp_owrt: = %x\n", evt_payload->irq_reg_val[4]);
- CDBG("bus_irq_dual_comp_err: = %x\n", evt_payload->irq_reg_val[5]);
- CDBG("bus_irq_dual_comp_owrt: = %x\n", evt_payload->irq_reg_val[6]);
+ CAM_DBG(CAM_ISP, "addr of evt_payload = %llx", (uint64_t)evt_payload);
+ CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+ CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+ CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
+ CAM_DBG(CAM_ISP, "bus_irq_comp_err: = %x", evt_payload->irq_reg_val[3]);
+ CAM_DBG(CAM_ISP, "bus_irq_comp_owrt: = %x",
+ evt_payload->irq_reg_val[4]);
+ CAM_DBG(CAM_ISP, "bus_irq_dual_comp_err: = %x",
+ evt_payload->irq_reg_val[5]);
+ CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
+ evt_payload->irq_reg_val[6]);
- /*
- * If overflow/overwrite/error/violation are pending
- * for this context it needs to be handled remaining
- * interrupts are ignored.
- */
- rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
- evt_payload_priv);
- if (rc) {
- pr_err("%s: Encountered Error (%d), ignoring other irqs\n",
- __func__, rc);
- return IRQ_HANDLED;
- }
-
- CDBG("%s: Calling Buf_done\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling Buf_done");
/* WM Done */
return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
evt_payload_priv);
@@ -2817,10 +2966,11 @@
evt_payload = evt_payload_priv;
ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
- CDBG("addr of evt_payload = %llx\n", (uint64_t)evt_payload);
- CDBG("irq_status_0: = %x\n", evt_payload->irq_reg_val[0]);
- CDBG("irq_status_1: = %x\n", evt_payload->irq_reg_val[1]);
- CDBG("Violation register: = %x\n", evt_payload->irq_reg_val[2]);
+ CAM_DBG(CAM_ISP, "addr of evt_payload = %llx", (uint64_t)evt_payload);
+ CAM_DBG(CAM_ISP, "irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+ CAM_DBG(CAM_ISP, "irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+ CAM_DBG(CAM_ISP, "Violation register: = %x",
+ evt_payload->irq_reg_val[2]);
/*
* If overflow/overwrite/error/violation are pending
@@ -2830,22 +2980,22 @@
rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
evt_payload_priv);
if (rc) {
- pr_err("%s: Encountered Error (%d), ignoring other irqs\n",
- __func__, rc);
+ CAM_ERR(CAM_ISP, "Encountered Error (%d), ignoring other irqs",
+ rc);
return IRQ_HANDLED;
}
- CDBG("%s: Calling SOF\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling SOF");
/* SOF IRQ */
- cam_ife_hw_mgr_handle_sof_for_camif_hw_res(ife_hwr_mgr_ctx,
+ cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
evt_payload_priv);
- CDBG("%s: Calling RUP\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling RUP");
/* REG UPDATE */
- cam_ife_hw_mgr_handle_rup_for_camif_hw_res(ife_hwr_mgr_ctx,
+ cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
evt_payload_priv);
- CDBG("%s: Calling EPOCH\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling EPOCH");
/* EPOCH IRQ */
cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
evt_payload_priv);
@@ -2891,15 +3041,14 @@
int i, j;
struct cam_iommu_handle cdm_handles;
- CDBG("%s: Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
mutex_init(&g_ife_hw_mgr.ctx_mutex);
if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) {
- pr_err("%s: Fatal, CSID num is different then IFE num!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Fatal, CSID num is different then IFE num!");
goto end;
}
@@ -2915,15 +3064,16 @@
j++;
g_ife_hw_mgr.cdm_reg_map[i] = &soc_info->reg_map[0];
- CDBG("reg_map: mem base = 0x%llx, cam_base = 0x%llx\n",
- (uint64_t) soc_info->reg_map[0].mem_base,
+ CAM_DBG(CAM_ISP,
+ "reg_map: mem base = %pK cam_base = 0x%llx",
+ (void __iomem *)soc_info->reg_map[0].mem_base,
(uint64_t) soc_info->reg_map[0].mem_cam_base);
} else {
g_ife_hw_mgr.cdm_reg_map[i] = NULL;
}
}
if (j == 0) {
- pr_err("%s: no valid IFE HW!\n", __func__);
+ CAM_ERR(CAM_ISP, "no valid IFE HW!");
goto end;
}
@@ -2934,7 +3084,7 @@
j++;
}
if (!j) {
- pr_err("%s: no valid IFE CSID HW!\n", __func__);
+ CAM_ERR(CAM_ISP, "no valid IFE CSID HW!");
goto end;
}
@@ -2953,26 +3103,27 @@
*/
if (cam_smmu_get_handle("ife",
&g_ife_hw_mgr.mgr_common.img_iommu_hdl)) {
- pr_err("%s: Can not get iommu handle.\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not get iommu handle.");
goto end;
}
if (cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
CAM_SMMU_ATTACH)) {
- pr_err("%s: Attach iommu handle failed.\n", __func__);
+ CAM_ERR(CAM_ISP, "Attach iommu handle failed.");
goto end;
}
- CDBG("got iommu_handle=%d\n", g_ife_hw_mgr.mgr_common.img_iommu_hdl);
+ CAM_DBG(CAM_ISP, "got iommu_handle=%d",
+ g_ife_hw_mgr.mgr_common.img_iommu_hdl);
g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
if (!cam_cdm_get_iommu_handle("ife", &cdm_handles)) {
- CDBG("Successfully acquired the CDM iommu handles\n");
+ CAM_DBG(CAM_ISP, "Successfully acquired the CDM iommu handles");
g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = cdm_handles.non_secure;
g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure =
cdm_handles.secure;
} else {
- CDBG("Failed to acquire the CDM iommu handles\n");
+ CAM_DBG(CAM_ISP, "Failed to acquire the CDM iommu handles");
g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = -1;
g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1;
}
@@ -3007,7 +3158,7 @@
sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
if (!g_ife_hw_mgr.ctx_pool[i].cdm_cmd) {
rc = -ENOMEM;
- pr_err("Allocation Failed for cdm command\n");
+ CAM_ERR(CAM_ISP, "Allocation Failed for cdm command");
goto end;
}
@@ -3028,7 +3179,7 @@
&g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ);
if (rc < 0) {
- pr_err("%s: Unable to create worker\n", __func__);
+ CAM_ERR(CAM_ISP, "Unable to create worker");
goto end;
}
@@ -3043,8 +3194,9 @@
hw_mgr_intf->hw_release = cam_ife_mgr_release_hw;
hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
+ hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
- CDBG("%s: Exit\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit");
return 0;
end:
if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 174d2ce..6dfdb21 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -108,6 +108,7 @@
* @epoch_cnt epoch count value per core, used for dual VFE
* @overflow_pending flat to specify the overflow is pending for the
* context
+ * @is_rdi_only_context flag to specify the context has only rdi resource
*/
struct cam_ife_hw_mgr_ctx {
struct list_head list;
@@ -138,6 +139,7 @@
uint32_t sof_cnt[CAM_IFE_HW_NUM_MAX];
uint32_t epoch_cnt[CAM_IFE_HW_NUM_MAX];
atomic_t overflow_pending;
+ uint32_t is_rdi_only_context;
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
index 2e23222..2f18895 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
@@ -12,6 +12,7 @@
#include "cam_isp_hw_mgr_intf.h"
#include "cam_ife_hw_mgr.h"
+#include "cam_debug_util.h"
int cam_isp_hw_mgr_init(struct device_node *of_node,
@@ -26,7 +27,7 @@
if (strnstr(compat_str, "ife", strlen(compat_str)))
rc = cam_ife_hw_mgr_init(hw_mgr);
else {
- pr_err("%s: Invalid ISP hw type\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid ISP hw type");
rc = -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile
index 19da180..b60e7de 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile
@@ -6,6 +6,7 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_tasklet_util.o cam_isp_packet_parser.o
obj-$(CONFIG_SPECTRA_CAMERA) += irq_controller/
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index 3c72279..0a0eecb 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -15,130 +15,13 @@
#include "cam_mem_mgr.h"
#include "cam_vfe_hw_intf.h"
#include "cam_isp_packet_parser.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
-static int cam_isp_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
- size_t *len)
-{
- int rc = 0;
- uint64_t kmd_buf_addr = 0;
-
- rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
- if (rc) {
- pr_err("%s:%d Unable to get the virtual address rc:%d\n",
- __func__, __LINE__, rc);
- rc = -ENOMEM;
- } else {
- if (kmd_buf_addr && *len)
- *buf_addr = (uint32_t *)kmd_buf_addr;
- else {
- pr_err("%s:%d Invalid addr and length :%ld\n",
- __func__, __LINE__, *len);
- rc = -ENOMEM;
- }
- }
- return rc;
-}
-
-static int cam_isp_validate_cmd_desc(
- struct cam_cmd_buf_desc *cmd_desc)
-{
- if (cmd_desc->length > cmd_desc->size ||
- (cmd_desc->mem_handle <= 0)) {
- pr_err("%s:%d invalid cmd arg %d %d %d %d\n",
- __func__, __LINE__, cmd_desc->offset,
- cmd_desc->length, cmd_desc->mem_handle,
- cmd_desc->size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int cam_isp_validate_packet(struct cam_packet *packet)
-{
- if (!packet)
- return -EINVAL;
-
- CDBG("%s:%d num cmd buf:%d num of io config:%d kmd buf index:%d\n",
- __func__, __LINE__, packet->num_cmd_buf,
- packet->num_io_configs, packet->kmd_cmd_buf_index);
-
- if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf ||
- (!packet->header.size) ||
- packet->cmd_buf_offset > packet->header.size ||
- packet->io_configs_offset > packet->header.size) {
- pr_err("%s:%d invalid packet:%d %d %d %d %d\n",
- __func__, __LINE__, packet->kmd_cmd_buf_index,
- packet->num_cmd_buf, packet->cmd_buf_offset,
- packet->io_configs_offset, packet->header.size);
- return -EINVAL;
- }
-
- CDBG("%s:%d exit\n", __func__, __LINE__);
- return 0;
-}
-
-int cam_isp_get_kmd_buffer(struct cam_packet *packet,
- struct cam_isp_kmd_buf_info *kmd_buf)
-{
- int rc = 0;
- size_t len = 0;
- struct cam_cmd_buf_desc *cmd_desc;
- uint32_t *cpu_addr;
-
- if (!packet || !kmd_buf) {
- pr_err("%s:%d Invalid arg\n", __func__, __LINE__);
- rc = -EINVAL;
- return rc;
- }
-
- /* Take first command descriptor and add offset to it for kmd*/
- cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)
- &packet->payload + packet->cmd_buf_offset);
- cmd_desc += packet->kmd_cmd_buf_index;
-
- CDBG("%s:%d enter\n", __func__, __LINE__);
- rc = cam_isp_validate_cmd_desc(cmd_desc);
- if (rc)
- return rc;
-
- CDBG("%s:%d enter\n", __func__, __LINE__);
- rc = cam_isp_get_cmd_mem_addr(cmd_desc->mem_handle, &cpu_addr,
- &len);
- if (rc)
- return rc;
-
- if (len < cmd_desc->size) {
- pr_err("%s:%d invalid memory len:%ld and cmd desc size:%d\n",
- __func__, __LINE__, len, cmd_desc->size);
- return -EINVAL;
- }
-
- cpu_addr += cmd_desc->offset/4 + packet->kmd_cmd_buf_offset/4;
- CDBG("%s:%d total size %d, cmd size: %d, KMD buffer size: %d\n",
- __func__, __LINE__, cmd_desc->size, cmd_desc->length,
- cmd_desc->size - cmd_desc->length);
- CDBG("%s:%d: handle 0x%x, cmd offset %d, kmd offset %d, addr 0x%pK\n",
- __func__, __LINE__, cmd_desc->mem_handle, cmd_desc->offset,
- packet->kmd_cmd_buf_offset, cpu_addr);
-
- kmd_buf->cpu_addr = cpu_addr;
- kmd_buf->handle = cmd_desc->mem_handle;
- kmd_buf->offset = cmd_desc->offset + packet->kmd_cmd_buf_offset;
- kmd_buf->size = cmd_desc->size - cmd_desc->length;
- kmd_buf->used_bytes = 0;
-
- return rc;
-}
+#include "cam_debug_util.h"
int cam_isp_add_change_base(
struct cam_hw_prepare_update_args *prepare,
struct list_head *res_list_isp_src,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info)
+ struct cam_kmd_buf_info *kmd_buf_info)
{
int rc = -EINVAL;
struct cam_ife_hw_mgr_res *hw_mgr_res;
@@ -152,9 +35,8 @@
/* Max one hw entries required for each base */
if (num_ent + 1 >= prepare->max_hw_update_entries) {
- pr_err("%s:%d Insufficient HW entries :%d %d\n",
- __func__, __LINE__, num_ent,
- prepare->max_hw_update_entries);
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ num_ent, prepare->max_hw_update_entries);
return -EINVAL;
}
@@ -220,8 +102,8 @@
((uint8_t *)&prepare->packet->payload +
prepare->packet->cmd_buf_offset);
- CDBG("%s:%d split id = %d, number of command buffers:%d\n", __func__,
- __LINE__, split_id, prepare->packet->num_cmd_buf);
+ CAM_DBG(CAM_ISP, "split id = %d, number of command buffers:%d",
+ split_id, prepare->packet->num_cmd_buf);
for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
if (!cmd_desc[i].length)
@@ -229,19 +111,18 @@
/* One hw entry space required for left or right or common */
if (num_ent + 1 >= prepare->max_hw_update_entries) {
- pr_err("%s:%d Insufficient HW entries :%d %d\n",
- __func__, __LINE__, num_ent,
- prepare->max_hw_update_entries);
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ num_ent, prepare->max_hw_update_entries);
return -EINVAL;
}
- rc = cam_isp_validate_cmd_desc(&cmd_desc[i]);
+ rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
if (rc)
return rc;
cmd_meta_data = cmd_desc[i].meta_data;
- CDBG("%s:%d meta type: %d, split_id: %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "meta type: %d, split_id: %d",
cmd_meta_data, split_id);
switch (cmd_meta_data) {
@@ -288,8 +169,8 @@
num_ent++;
break;
default:
- pr_err("%s:%d invalid cdm command meta data %d\n",
- __func__, __LINE__, cmd_meta_data);
+ CAM_ERR(CAM_ISP, "invalid cdm command meta data %d",
+ cmd_meta_data);
return -EINVAL;
}
}
@@ -304,7 +185,7 @@
int iommu_hdl,
struct cam_hw_prepare_update_args *prepare,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info,
+ struct cam_kmd_buf_info *kmd_buf_info,
struct cam_ife_hw_mgr_res *res_list_isp_out,
uint32_t size_isp_out,
bool fill_fence)
@@ -331,33 +212,31 @@
/* Max one hw entries required for each base */
if (prepare->num_hw_update_entries + 1 >=
prepare->max_hw_update_entries) {
- pr_err("%s:%d Insufficient HW entries :%d %d\n",
- __func__, __LINE__, prepare->num_hw_update_entries,
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ prepare->num_hw_update_entries,
prepare->max_hw_update_entries);
return -EINVAL;
}
for (i = 0; i < prepare->packet->num_io_configs; i++) {
- CDBG("%s:%d ======= io config idx %d ============\n",
- __func__, __LINE__, i);
- CDBG("%s:%d resource_type:%d fence:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "======= io config idx %d ============", i);
+ CAM_DBG(CAM_ISP, "resource_type:%d fence:%d",
io_cfg[i].resource_type, io_cfg[i].fence);
- CDBG("%s:%d format: %d\n", __func__, __LINE__,
- io_cfg[i].format);
- CDBG("%s:%d direction %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "format: %d", io_cfg[i].format);
+ CAM_DBG(CAM_ISP, "direction %d",
io_cfg[i].direction);
if (io_cfg[i].direction == CAM_BUF_OUTPUT) {
res_id_out = io_cfg[i].resource_type & 0xFF;
if (res_id_out >= size_isp_out) {
- pr_err("%s:%d invalid out restype:%x\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "invalid out restype:%x",
io_cfg[i].resource_type);
return -EINVAL;
}
- CDBG("%s:%d configure output io with fill fence %d\n",
- __func__, __LINE__, fill_fence);
+ CAM_DBG(CAM_ISP,
+ "configure output io with fill fence %d",
+ fill_fence);
if (fill_fence) {
if (num_out_buf <
prepare->max_out_map_entries) {
@@ -368,8 +247,7 @@
sync_id = io_cfg[i].fence;
num_out_buf++;
} else {
- pr_err("%s:%d ln_out:%d max_ln:%d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "ln_out:%d max_ln:%d",
num_out_buf,
prepare->max_out_map_entries);
return -EINVAL;
@@ -378,15 +256,15 @@
hw_mgr_res = &res_list_isp_out[res_id_out];
if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
- pr_err("%s:%d io res id:%d not valid\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "io res id:%d not valid",
io_cfg[i].resource_type);
return -EINVAL;
}
} else if (io_cfg[i].direction == CAM_BUF_INPUT) {
res_id_in = io_cfg[i].resource_type & 0xFF;
- CDBG("%s:%d configure input io with fill fence %d\n",
- __func__, __LINE__, fill_fence);
+ CAM_DBG(CAM_ISP,
+ "configure input io with fill fence %d",
+ fill_fence);
if (fill_fence) {
if (num_in_buf < prepare->max_in_map_entries) {
prepare->in_map_entries[num_in_buf].
@@ -397,8 +275,7 @@
io_cfg[i].fence;
num_in_buf++;
} else {
- pr_err("%s:%d ln_in:%d imax_ln:%d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "ln_in:%d imax_ln:%d",
num_in_buf,
prepare->max_in_map_entries);
return -EINVAL;
@@ -406,13 +283,12 @@
}
continue;
} else {
- pr_err("%s:%d Invalid io config direction :%d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "Invalid io config direction :%d",
io_cfg[i].direction);
return -EINVAL;
}
- CDBG("%s:%d setup mem io\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "setup mem io");
for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
if (!hw_mgr_res->hw_res[j])
continue;
@@ -422,9 +298,9 @@
res = hw_mgr_res->hw_res[j];
if (res->res_id != io_cfg[i].resource_type) {
- pr_err("%s:%d wm err res id:%d io res id:%d\n",
- __func__, __LINE__, res->res_id,
- io_cfg[i].resource_type);
+ CAM_ERR(CAM_ISP,
+ "wm err res id:%d io res id:%d",
+ res->res_id, io_cfg[i].resource_type);
return -EINVAL;
}
@@ -439,14 +315,16 @@
io_cfg[i].mem_handle[plane_id],
iommu_hdl, &io_addr[plane_id], &size);
if (rc) {
- pr_err("%s:%d no io addr for plane%d\n",
- __func__, __LINE__, plane_id);
+ CAM_ERR(CAM_ISP,
+ "no io addr for plane%d",
+ plane_id);
rc = -ENOMEM;
return rc;
}
if (io_addr[plane_id] >> 32) {
- pr_err("Invalid mapped address\n");
+ CAM_ERR(CAM_ISP,
+ "Invalid mapped address");
rc = -EINVAL;
return rc;
}
@@ -454,13 +332,13 @@
/* need to update with offset */
io_addr[plane_id] +=
io_cfg[i].offsets[plane_id];
- CDBG("%s: get io_addr for plane %d: 0x%llx\n",
- __func__, plane_id,
- io_addr[plane_id]);
+ CAM_DBG(CAM_ISP,
+ "get io_addr for plane %d: 0x%llx",
+ plane_id, io_addr[plane_id]);
}
if (!plane_id) {
- pr_err("%s:%d No valid planes for res%d\n",
- __func__, __LINE__, res->res_id);
+ CAM_ERR(CAM_ISP, "No valid planes for res%d",
+ res->res_id);
rc = -ENOMEM;
return rc;
}
@@ -471,8 +349,9 @@
(kmd_buf_info->used_bytes +
io_cfg_used_bytes);
} else {
- pr_err("%s:%d no free kmd memory for base %d\n",
- __func__, __LINE__, base_idx);
+ CAM_ERR(CAM_ISP,
+ "no free kmd memory for base %d",
+ base_idx);
rc = -ENOMEM;
return rc;
}
@@ -485,8 +364,8 @@
update_buf.num_buf = plane_id;
update_buf.io_cfg = &io_cfg[i];
- CDBG("%s:%d: cmd buffer 0x%pK, size %d\n", __func__,
- __LINE__, update_buf.cdm.cmd_buf_addr,
+ CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
+ update_buf.cdm.cmd_buf_addr,
update_buf.cdm.size);
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
@@ -494,8 +373,8 @@
sizeof(struct cam_isp_hw_get_buf_update));
if (rc) {
- pr_err("%s:%d get buf cmd error:%d\n",
- __func__, __LINE__, res->res_id);
+ CAM_ERR(CAM_ISP, "get buf cmd error:%d",
+ res->res_id);
rc = -ENOMEM;
return rc;
}
@@ -503,7 +382,7 @@
}
}
- CDBG("%s: io_cfg_used_bytes %d, fill_fence %d\n", __func__,
+ CAM_DBG(CAM_ISP, "io_cfg_used_bytes %d, fill_fence %d",
io_cfg_used_bytes, fill_fence);
if (io_cfg_used_bytes) {
/* Update the HW entries */
@@ -533,7 +412,7 @@
struct cam_hw_prepare_update_args *prepare,
struct list_head *res_list_isp_src,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info)
+ struct cam_kmd_buf_info *kmd_buf_info)
{
int rc = -EINVAL;
struct cam_isp_resource_node *res;
@@ -546,8 +425,7 @@
/* Max one hw entries required for each base */
if (prepare->num_hw_update_entries + 1 >=
prepare->max_hw_update_entries) {
- pr_err("%s:%d Insufficient HW entries :%d %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
prepare->num_hw_update_entries,
prepare->max_hw_update_entries);
return -EINVAL;
@@ -572,9 +450,8 @@
(kmd_buf_info->used_bytes +
reg_update_size);
} else {
- pr_err("%s:%d no free mem %d %d %d\n",
- __func__, __LINE__, base_idx,
- kmd_buf_info->size,
+ CAM_ERR(CAM_ISP, "no free mem %d %d %d",
+ base_idx, kmd_buf_info->size,
kmd_buf_info->used_bytes +
reg_update_size);
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
index ecc71b3..4a7eff8 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -19,9 +17,7 @@
#include <linux/ratelimit.h>
#include "cam_tasklet_util.h"
#include "cam_irq_controller.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
#define CAM_TASKLETQ_SIZE 256
@@ -95,14 +91,14 @@
*tasklet_cmd = NULL;
if (!atomic_read(&tasklet->tasklet_active)) {
- pr_err_ratelimited("Tasklet is not active!\n");
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Tasklet is not active!\n");
rc = -EPIPE;
return rc;
}
spin_lock_irqsave(&tasklet->tasklet_lock, flags);
if (list_empty(&tasklet->free_cmd_list)) {
- pr_err_ratelimited("No more free tasklet cmd!\n");
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "No more free tasklet cmd!\n");
rc = -ENODEV;
goto spin_unlock;
} else {
@@ -162,22 +158,22 @@
*tasklet_cmd = NULL;
if (!atomic_read(&tasklet->tasklet_active)) {
- pr_err("Tasklet is not active!\n");
+ CAM_ERR(CAM_ISP, "Tasklet is not active!");
rc = -EPIPE;
return rc;
}
- CDBG("Dequeue before lock.\n");
+ CAM_DBG(CAM_ISP, "Dequeue before lock.");
spin_lock_irqsave(&tasklet->tasklet_lock, flags);
if (list_empty(&tasklet->used_cmd_list)) {
- CDBG("End of list reached. Exit\n");
+ CAM_DBG(CAM_ISP, "End of list reached. Exit");
rc = -ENODEV;
goto spin_unlock;
} else {
*tasklet_cmd = list_first_entry(&tasklet->used_cmd_list,
struct cam_tasklet_queue_cmd, list);
list_del_init(&(*tasklet_cmd)->list);
- CDBG("Dequeue Successful\n");
+ CAM_DBG(CAM_ISP, "Dequeue Successful");
}
spin_unlock:
@@ -197,14 +193,14 @@
int rc;
if (!bottom_half) {
- pr_err("NULL bottom half\n");
+ CAM_ERR(CAM_ISP, "NULL bottom half");
return -EINVAL;
}
rc = cam_tasklet_get_cmd(tasklet, &tasklet_cmd);
if (tasklet_cmd) {
- CDBG("%s: Enqueue tasklet cmd\n", __func__);
+ CAM_DBG(CAM_ISP, "Enqueue tasklet cmd");
tasklet_cmd->bottom_half_handler = bottom_half_handler;
tasklet_cmd->payload = evt_payload_priv;
spin_lock_irqsave(&tasklet->tasklet_lock, flags);
@@ -213,7 +209,7 @@
spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
tasklet_schedule(&tasklet->tasklet);
} else {
- pr_err("%s: tasklet cmd is NULL!\n", __func__);
+ CAM_ERR(CAM_ISP, "tasklet cmd is NULL!");
}
return rc;
@@ -229,7 +225,8 @@
tasklet = kzalloc(sizeof(struct cam_tasklet_info), GFP_KERNEL);
if (!tasklet) {
- CDBG("Error! Unable to allocate memory for tasklet");
+ CAM_DBG(CAM_ISP,
+ "Error! Unable to allocate memory for tasklet");
*tasklet_info = NULL;
return -ENOMEM;
}
@@ -271,7 +268,8 @@
struct cam_tasklet_queue_cmd *tasklet_cmd_temp;
if (atomic_read(&tasklet->tasklet_active)) {
- pr_err("Tasklet already active. idx = %d\n", tasklet->index);
+ CAM_ERR(CAM_ISP, "Tasklet already active. idx = %d",
+ tasklet->index);
return -EBUSY;
}
atomic_set(&tasklet->tasklet_active, 1);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
index 9730fc2..7ac729f 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -18,47 +18,7 @@
#include "cam_isp_hw_mgr_intf.h"
#include "cam_ife_hw_mgr.h"
#include "cam_hw_intf.h"
-
-/**
- * @brief KMD scratch buffer information
- *
- * @handle: Memory handle
- * @cpu_addr: Cpu address
- * @offset: Offset from the start of the buffer
- * @size: Size of the buffer
- * @used_bytes: Used memory in bytes
- *
- */
-struct cam_isp_kmd_buf_info {
- int handle;
- uint32_t *cpu_addr;
- uint32_t offset;
- uint32_t size;
- uint32_t used_bytes;
-};
-
-
-/**
- * @brief Validate the packet
- *
- * @packet: Packet to be validated
- *
- * @return: 0 for success
- * -EINVAL for Fail
- */
-int cam_isp_validate_packet(struct cam_packet *packet);
-
-/**
- * @brief Get the kmd buffer from the packet command descriptor
- *
- * @packet: Packet data
- * @kmd_buf: Extracted the KMD buffer information
- *
- * @return: 0 for success
- * -EINVAL for Fail
- */
-int cam_isp_get_kmd_buffer(struct cam_packet *packet,
- struct cam_isp_kmd_buf_info *kmd_buf_info);
+#include "cam_packet_util.h"
/**
* @brief Add change base in the hw entries list
@@ -77,7 +37,7 @@
struct cam_hw_prepare_update_args *prepare,
struct list_head *res_list_isp_src,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info);
+ struct cam_kmd_buf_info *kmd_buf_info);
/**
* @brief Add command buffer in the HW entries list for given
@@ -112,7 +72,7 @@
int cam_isp_add_io_buffers(int iommu_hdl,
struct cam_hw_prepare_update_args *prepare,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info,
+ struct cam_kmd_buf_info *kmd_buf_info,
struct cam_ife_hw_mgr_res *res_list_isp_out,
uint32_t size_isp_out,
bool fill_fence);
@@ -134,7 +94,7 @@
struct cam_hw_prepare_update_args *prepare,
struct list_head *res_list_isp_src,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info);
+ struct cam_kmd_buf_info *kmd_buf_info);
#endif /*_CAM_ISP_HW_PARSER_H */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
index bf4d174..2341b38 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -10,16 +10,12 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include "cam_io_util.h"
#include "cam_irq_controller.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
/**
* struct cam_irq_evt_handler:
@@ -143,21 +139,21 @@
if (!register_info->num_registers || !register_info->irq_reg_set ||
!name || !mem_base) {
- pr_err("Invalid parameters\n");
+ CAM_ERR(CAM_ISP, "Invalid parameters");
rc = -EINVAL;
return rc;
}
controller = kzalloc(sizeof(struct cam_irq_controller), GFP_KERNEL);
if (!controller) {
- CDBG("Failed to allocate IRQ Controller\n");
+ CAM_DBG(CAM_ISP, "Failed to allocate IRQ Controller");
return -ENOMEM;
}
controller->irq_register_arr = kzalloc(register_info->num_registers *
sizeof(struct cam_irq_register_obj), GFP_KERNEL);
if (!controller->irq_register_arr) {
- CDBG("Failed to allocate IRQ register Arr\n");
+ CAM_DBG(CAM_ISP, "Failed to allocate IRQ register Arr");
rc = -ENOMEM;
goto reg_alloc_error;
}
@@ -165,7 +161,7 @@
controller->irq_status_arr = kzalloc(register_info->num_registers *
sizeof(uint32_t), GFP_KERNEL);
if (!controller->irq_status_arr) {
- CDBG("Failed to allocate IRQ status Arr\n");
+ CAM_DBG(CAM_ISP, "Failed to allocate IRQ status Arr");
rc = -ENOMEM;
goto status_alloc_error;
}
@@ -174,14 +170,14 @@
kzalloc(register_info->num_registers * sizeof(uint32_t),
GFP_KERNEL);
if (!controller->th_payload.evt_status_arr) {
- CDBG("Failed to allocate BH payload bit mask Arr\n");
+ CAM_DBG(CAM_ISP, "Failed to allocate BH payload bit mask Arr");
rc = -ENOMEM;
goto evt_mask_alloc_error;
}
controller->name = name;
- CDBG("num_registers: %d\n", register_info->num_registers);
+ CAM_DBG(CAM_ISP, "num_registers: %d", register_info->num_registers);
for (i = 0; i < register_info->num_registers; i++) {
controller->irq_register_arr[i].index = i;
controller->irq_register_arr[i].mask_reg_offset =
@@ -190,11 +186,11 @@
register_info->irq_reg_set[i].clear_reg_offset;
controller->irq_register_arr[i].status_reg_offset =
register_info->irq_reg_set[i].status_reg_offset;
- CDBG("i %d mask_reg_offset: 0x%x\n", i,
+ CAM_DBG(CAM_ISP, "i %d mask_reg_offset: 0x%x", i,
controller->irq_register_arr[i].mask_reg_offset);
- CDBG("i %d clear_reg_offset: 0x%x\n", i,
+ CAM_DBG(CAM_ISP, "i %d clear_reg_offset: 0x%x", i,
controller->irq_register_arr[i].clear_reg_offset);
- CDBG("i %d status_reg_offset: 0x%x\n", i,
+ CAM_DBG(CAM_ISP, "i %d status_reg_offset: 0x%x", i,
controller->irq_register_arr[i].status_reg_offset);
}
controller->num_registers = register_info->num_registers;
@@ -202,11 +198,11 @@
controller->global_clear_offset = register_info->global_clear_offset;
controller->mem_base = mem_base;
- CDBG("global_clear_bitmask: 0x%x\n",
+ CAM_DBG(CAM_ISP, "global_clear_bitmask: 0x%x",
controller->global_clear_bitmask);
- CDBG("global_clear_offset: 0x%x\n",
+ CAM_DBG(CAM_ISP, "global_clear_offset: 0x%x",
controller->global_clear_offset);
- CDBG("mem_base: 0x%llx\n", (uint64_t)controller->mem_base);
+ CAM_DBG(CAM_ISP, "mem_base: %pK", (void __iomem *)controller->mem_base);
INIT_LIST_HEAD(&controller->evt_handler_list_head);
for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++)
@@ -246,19 +242,21 @@
unsigned long flags;
if (!controller || !handler_priv || !evt_bit_mask_arr) {
- pr_err("Invalid params: ctlr=%pK handler_priv=%pK bit_mask_arr = %pK\n",
+ CAM_ERR(CAM_ISP,
+ "Inval params: ctlr=%pK hdl_priv=%pK bit_mask_arr=%pK",
controller, handler_priv, evt_bit_mask_arr);
return -EINVAL;
}
if (!top_half_handler) {
- pr_err("Missing top half handler\n");
+ CAM_ERR(CAM_ISP, "Missing top half handler");
return -EINVAL;
}
if (bottom_half_handler &&
(!bottom_half || !bottom_half_enqueue_func)) {
- pr_err("Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK\n",
+ CAM_ERR(CAM_ISP,
+ "Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK",
bottom_half_handler,
bottom_half,
bottom_half_enqueue_func);
@@ -266,29 +264,21 @@
}
if (priority >= CAM_IRQ_PRIORITY_MAX) {
- pr_err("Invalid priority=%u, max=%u\n", priority,
+ CAM_ERR(CAM_ISP, "Invalid priority=%u, max=%u", priority,
CAM_IRQ_PRIORITY_MAX);
return -EINVAL;
}
- if (sizeof(evt_bit_mask_arr) !=
- sizeof(uint32_t) * controller->num_registers) {
- pr_err("Invalid evt_mask size = %lu expected = %lu\n",
- sizeof(evt_bit_mask_arr),
- sizeof(uint32_t) * controller->num_registers);
- return -EINVAL;
- }
-
evt_handler = kzalloc(sizeof(struct cam_irq_evt_handler), GFP_KERNEL);
if (!evt_handler) {
- CDBG("Error allocating hlist_node\n");
+ CAM_DBG(CAM_ISP, "Error allocating hlist_node");
return -ENOMEM;
}
evt_handler->evt_bit_mask_arr = kzalloc(sizeof(uint32_t) *
controller->num_registers, GFP_KERNEL);
if (!evt_handler->evt_bit_mask_arr) {
- CDBG("Error allocating hlist_node\n");
+ CAM_DBG(CAM_ISP, "Error allocating hlist_node");
rc = -ENOMEM;
goto free_evt_handler;
}
@@ -306,6 +296,8 @@
evt_handler->bottom_half = bottom_half;
evt_handler->bottom_half_enqueue_func = bottom_half_enqueue_func;
evt_handler->index = controller->hdl_idx++;
+
+ /* Avoid rollover to negative values */
if (controller->hdl_idx > 0x3FFFFFFF)
controller->hdl_idx = 1;
@@ -352,7 +344,7 @@
list_for_each_entry_safe(evt_handler, evt_handler_temp,
&controller->evt_handler_list_head, list_node) {
if (evt_handler->index == handle) {
- CDBG("unsubscribe item %d\n", handle);
+ CAM_DBG(CAM_ISP, "unsubscribe item %d", handle);
list_del_init(&evt_handler->list_node);
list_del_init(&evt_handler->th_list_node);
found = 1;
@@ -435,7 +427,7 @@
int rc = -EINVAL;
int i;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
if (list_empty(th_list_head))
return;
@@ -447,7 +439,7 @@
if (!is_irq_match)
continue;
- CDBG("match found\n");
+ CAM_DBG(CAM_ISP, "match found");
cam_irq_th_payload_init(th_payload);
th_payload->handler_priv = evt_handler->handler_priv;
@@ -468,7 +460,8 @@
(void *)th_payload);
if (!rc && evt_handler->bottom_half_handler) {
- CDBG("Enqueuing bottom half\n");
+ CAM_DBG(CAM_ISP, "Enqueuing bottom half for %s",
+ controller->name);
if (evt_handler->bottom_half_enqueue_func) {
evt_handler->bottom_half_enqueue_func(
evt_handler->bottom_half,
@@ -479,7 +472,7 @@
}
}
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
}
irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv)
@@ -492,6 +485,8 @@
if (!controller)
return IRQ_NONE;
+ CAM_DBG(CAM_ISP, "locking controller %pK name %s rw_lock %pK",
+ controller, controller->name, &controller->rw_lock);
read_lock(&controller->rw_lock);
for (i = 0; i < controller->num_registers; i++) {
controller->irq_status_arr[i] = cam_io_r_mb(
@@ -500,30 +495,34 @@
cam_io_w_mb(controller->irq_status_arr[i],
controller->mem_base +
controller->irq_register_arr[i].clear_reg_offset);
- CDBG("Read irq status%d = 0x%x\n", i,
+ CAM_DBG(CAM_ISP, "Read irq status%d (0x%x) = 0x%x", i,
+ controller->irq_register_arr[i].status_reg_offset,
controller->irq_status_arr[i]);
for (j = 0; j < CAM_IRQ_PRIORITY_MAX; j++) {
if (controller->irq_register_arr[i].
top_half_enable_mask[j] &
controller->irq_status_arr[i])
need_th_processing[j] = true;
- CDBG("i %d j %d need_th_processing = %d\n",
+ CAM_DBG(CAM_ISP,
+ "i %d j %d need_th_processing = %d",
i, j, need_th_processing[j]);
}
}
read_unlock(&controller->rw_lock);
+ CAM_DBG(CAM_ISP, "unlocked controller %pK name %s rw_lock %pK",
+ controller, controller->name, &controller->rw_lock);
- CDBG("Status Registers read Successful\n");
+ CAM_DBG(CAM_ISP, "Status Registers read Successful");
if (controller->global_clear_offset)
cam_io_w_mb(controller->global_clear_bitmask,
controller->mem_base + controller->global_clear_offset);
- CDBG("Status Clear done\n");
+ CAM_DBG(CAM_ISP, "Status Clear done");
for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++) {
if (need_th_processing[i]) {
- CDBG("%s: Invoke TH processing\n", __func__);
+ CAM_DBG(CAM_ISP, "Invoke TH processing");
cam_irq_controller_th_processing(controller,
&controller->th_list_head[i]);
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
index 9f2204b4..0480cd3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -50,32 +50,32 @@
/**
* struct cam_isp_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF
*
- * @timestamp: Timestamp for the buf done event
+ * @timestamp: Time stamp for the sof event
*
*/
struct cam_isp_hw_sof_event_data {
- struct timeval timestamp;
+ uint64_t timestamp;
};
/**
* struct cam_isp_hw_reg_update_event_data - Event payload for
* CAM_HW_EVENT_REG_UPDATE
*
- * @timestamp: Timestamp for the buf done event
+ * @timestamp: Time stamp for the reg update event
*
*/
struct cam_isp_hw_reg_update_event_data {
- struct timeval timestamp;
+ uint64_t timestamp;
};
/**
* struct cam_isp_hw_epoch_event_data - Event payload for CAM_HW_EVENT_EPOCH
*
- * @timestamp: Timestamp for the buf done event
+ * @timestamp: Time stamp for the epoch event
*
*/
struct cam_isp_hw_epoch_event_data {
- struct timeval timestamp;
+ uint64_t timestamp;
};
/**
@@ -90,31 +90,53 @@
uint32_t num_handles;
uint32_t resource_handle[
CAM_NUM_OUT_PER_COMP_IRQ_MAX];
- struct timeval timestamp;
+ uint64_t timestamp;
};
/**
* struct cam_isp_hw_eof_event_data - Event payload for CAM_HW_EVENT_EOF
*
- * @timestamp: Timestamp for the buf done event
+ * @timestamp: Timestamp for the eof event
*
*/
struct cam_isp_hw_eof_event_data {
- struct timeval timestamp;
+ uint64_t timestamp;
};
/**
* struct cam_isp_hw_error_event_data - Event payload for CAM_HW_EVENT_ERROR
*
- * @error_type: error type for the error event
- * @timestamp: Timestamp for the buf done event
+ * @error_type: Error type for the error event
+ * @timestamp: Timestamp for the error event
*
*/
struct cam_isp_hw_error_event_data {
uint32_t error_type;
- struct timeval timestamp;
+ uint64_t timestamp;
};
+/* enum cam_isp_hw_mgr_command - Hardware manager command type */
+enum cam_isp_hw_mgr_command {
+ CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT,
+ CAM_ISP_HW_MGR_CMD_MAX,
+};
+
+/**
+ * struct cam_isp_hw_cmd_args - Payload for hw manager command
+ *
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @cmd_type HW command type
+ * @get_context Get context type information
+ */
+struct cam_isp_hw_cmd_args {
+ void *ctxt_to_hw_map;
+ uint32_t cmd_type;
+ union {
+ uint32_t is_rdi_only_context;
+ } u;
+};
+
+
/**
* cam_isp_hw_mgr_init()
*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
index 1615d21f..4c6745c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
@@ -1,5 +1,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index f09fdc7..a2f773e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -19,10 +19,7 @@
#include "cam_isp_hw.h"
#include "cam_soc_util.h"
#include "cam_io_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
+#include "cam_debug_util.h"
/* Timeout value in msec */
#define IFE_CSID_TIMEOUT 1000
@@ -62,79 +59,127 @@
return rc;
}
-static int cam_ife_csid_get_format(uint32_t res_id,
- uint32_t decode_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
+static int cam_ife_csid_get_format(uint32_t input_fmt,
+ uint32_t *path_fmt)
{
int rc = 0;
- if (res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 &&
- res_id <= CAM_IFE_PIX_PATH_RES_RDI_3) {
- *path_fmt = 0xf;
- return 0;
- }
-
- switch (decode_fmt) {
+ switch (input_fmt) {
case CAM_FORMAT_MIPI_RAW_6:
*path_fmt = 0;
- *plain_fmt = 0;
break;
case CAM_FORMAT_MIPI_RAW_8:
*path_fmt = 1;
- *plain_fmt = 0;
break;
case CAM_FORMAT_MIPI_RAW_10:
*path_fmt = 2;
- *plain_fmt = 1;
break;
case CAM_FORMAT_MIPI_RAW_12:
*path_fmt = 3;
- *plain_fmt = 1;
break;
case CAM_FORMAT_MIPI_RAW_14:
*path_fmt = 4;
- *plain_fmt = 1;
break;
case CAM_FORMAT_MIPI_RAW_16:
*path_fmt = 5;
- *plain_fmt = 1;
break;
case CAM_FORMAT_MIPI_RAW_20:
*path_fmt = 6;
- *plain_fmt = 2;
break;
case CAM_FORMAT_DPCM_10_6_10:
*path_fmt = 7;
- *plain_fmt = 1;
break;
case CAM_FORMAT_DPCM_10_8_10:
*path_fmt = 8;
- *plain_fmt = 1;
break;
case CAM_FORMAT_DPCM_12_6_12:
*path_fmt = 9;
- *plain_fmt = 1;
break;
case CAM_FORMAT_DPCM_12_8_12:
*path_fmt = 0xA;
- *plain_fmt = 1;
break;
case CAM_FORMAT_DPCM_14_8_14:
*path_fmt = 0xB;
- *plain_fmt = 1;
break;
case CAM_FORMAT_DPCM_14_10_14:
*path_fmt = 0xC;
- *plain_fmt = 1;
break;
default:
- pr_err("%s:%d:CSID:%d un supported format\n",
- __func__, __LINE__, decode_fmt);
+ CAM_ERR(CAM_ISP, "CSID:%d un supported format",
+ input_fmt);
rc = -EINVAL;
}
return rc;
}
+static int cam_ife_csid_get_rdi_format(uint32_t input_fmt,
+ uint32_t output_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_ISP, "input format:%d output format:%d",
+ input_fmt, output_fmt);
+
+ switch (output_fmt) {
+ case CAM_FORMAT_MIPI_RAW_6:
+ case CAM_FORMAT_MIPI_RAW_8:
+ case CAM_FORMAT_MIPI_RAW_10:
+ case CAM_FORMAT_MIPI_RAW_12:
+ case CAM_FORMAT_MIPI_RAW_14:
+ case CAM_FORMAT_MIPI_RAW_16:
+ case CAM_FORMAT_MIPI_RAW_20:
+ case CAM_FORMAT_DPCM_10_6_10:
+ case CAM_FORMAT_DPCM_10_8_10:
+ case CAM_FORMAT_DPCM_12_6_12:
+ case CAM_FORMAT_DPCM_12_8_12:
+ case CAM_FORMAT_DPCM_14_8_14:
+ case CAM_FORMAT_DPCM_14_10_14:
+ *path_fmt = 0xF;
+ *plain_fmt = 0;
+ break;
+
+ case CAM_FORMAT_PLAIN8:
+ rc = cam_ife_csid_get_format(input_fmt, path_fmt);
+ if (rc)
+ goto error;
+
+ *plain_fmt = 0;
+ break;
+ case CAM_FORMAT_PLAIN16_8:
+ case CAM_FORMAT_PLAIN16_10:
+ case CAM_FORMAT_PLAIN16_12:
+ case CAM_FORMAT_PLAIN16_14:
+ case CAM_FORMAT_PLAIN16_16:
+ rc = cam_ife_csid_get_format(input_fmt, path_fmt);
+ if (rc)
+ goto error;
+
+ *plain_fmt = 1;
+ break;
+ case CAM_FORMAT_PLAIN32_20:
+ rc = cam_ife_csid_get_format(input_fmt, path_fmt);
+ if (rc)
+ goto error;
+
+ *plain_fmt = 2;
+ break;
+ default:
+ *path_fmt = 0xF;
+ *plain_fmt = 0;
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "path format value:%d plain format value:%d",
+ *path_fmt, *plain_fmt);
+
+ return 0;
+error:
+ return rc;
+
+}
+
+
static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
uint32_t res_type)
@@ -166,8 +211,8 @@
if (i == CAM_IFE_CSID_CID_RES_MAX) {
if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
- pr_err("%s:%d:CSID:%d TPG CID not available\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d TPG CID not available",
+ csid_hw->hw_intf->hw_idx);
rc = -EINVAL;
}
@@ -182,8 +227,7 @@
csid_hw->cid_res[j].res_state =
CAM_ISP_RESOURCE_STATE_RESERVED;
*res = &csid_hw->cid_res[j];
- CDBG("%s:%d:CSID:%d CID %d allocated\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated",
csid_hw->hw_intf->hw_idx,
csid_hw->cid_res[j].res_id);
break;
@@ -191,8 +235,8 @@
}
if (j == CAM_IFE_CSID_CID_RES_MAX) {
- pr_err("%s:%d:CSID:%d Free cid is not available\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d Free cid is not available",
+ csid_hw->hw_intf->hw_idx);
rc = -EINVAL;
}
}
@@ -213,13 +257,13 @@
csid_reg = csid_hw->csid_info->csid_reg;
if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
- pr_err("%s:%d:CSID:%d Invalid HW State:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid HW State:%d",
+ csid_hw->hw_intf->hw_idx,
csid_hw->hw_info->hw_state);
return -EINVAL;
}
- CDBG("%s:%d:CSID:%d Csid reset\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d Csid reset",
csid_hw->hw_intf->hw_idx);
init_completion(&csid_hw->csid_top_complete);
@@ -287,14 +331,12 @@
soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_rst_strobes_addr);
- CDBG("%s:%d: Waiting for reset complete from irq handler\n",
- __func__, __LINE__);
-
+ CAM_DBG(CAM_ISP, " Waiting for reset complete from irq handler");
rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
msecs_to_jiffies(IFE_CSID_TIMEOUT));
if (rc <= 0) {
- pr_err("%s:%d:CSID:%d reset completion in fail rc = %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+ CAM_ERR(CAM_ISP, "CSID:%d reset completion in fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
if (rc == 0)
rc = -ETIMEDOUT;
} else {
@@ -331,26 +373,26 @@
res = reset->node_res;
if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
- pr_err("%s:%d:CSID:%d Invalid hw state :%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid hw state :%d",
+ csid_hw->hw_intf->hw_idx,
csid_hw->hw_info->hw_state);
return -EINVAL;
}
if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
- CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
rc = -EINVAL;
goto end;
}
- CDBG("%s:%d:CSID:%d resource:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d resource:%d",
csid_hw->hw_intf->hw_idx, res->res_id);
if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
if (!csid_reg->ipp_reg) {
- pr_err("%s:%d:CSID:%d IPP not supported :%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d IPP not supported :%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
return -EINVAL;
}
@@ -368,8 +410,8 @@
} else {
id = res->res_id;
if (!csid_reg->rdi_reg[id]) {
- pr_err("%s:%d:CSID:%d RDI res not supported :%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d RDI res not supported :%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
return -EINVAL;
}
@@ -401,8 +443,8 @@
rc = wait_for_completion_timeout(complete,
msecs_to_jiffies(IFE_CSID_TIMEOUT));
if (rc <= 0) {
- pr_err("%s:%d CSID:%d Res id %d fail rc = %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Res id %d fail rc = %d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, rc);
if (rc == 0)
rc = -ETIMEDOUT;
@@ -423,8 +465,9 @@
int rc = 0;
struct cam_ife_csid_cid_data *cid_data;
- CDBG("%s:%d CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->res_type,
cid_reserv->in_port->lane_type,
cid_reserv->in_port->lane_num,
@@ -432,8 +475,8 @@
cid_reserv->in_port->vc);
if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
- pr_err("%s:%d:CSID:%d Invalid phy sel %d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid phy sel %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->res_type);
rc = -EINVAL;
goto end;
@@ -441,8 +484,8 @@
if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
- pr_err("%s:%d:CSID:%d Invalid lane type %d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid lane type %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->lane_type);
rc = -EINVAL;
goto end;
@@ -451,8 +494,8 @@
if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_DPHY &&
cid_reserv->in_port->lane_num > 4) &&
cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
- pr_err("%s:%d:CSID:%d Invalid lane num %d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid lane num %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->lane_num);
rc = -EINVAL;
goto end;
@@ -460,8 +503,8 @@
if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
cid_reserv->in_port->lane_num > 3) &&
cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
- pr_err("%s:%d: CSID:%d Invalid lane type %d & num %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, " CSID:%d Invalid lane type %d & num %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->lane_type,
cid_reserv->in_port->lane_num);
rc = -EINVAL;
@@ -471,8 +514,8 @@
/* CSID CSI2 v2.0 supports 31 vc */
if (cid_reserv->in_port->dt > 0x3f ||
cid_reserv->in_port->vc > 0x1f) {
- pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->vc, cid_reserv->in_port->dt);
rc = -EINVAL;
goto end;
@@ -481,8 +524,8 @@
if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG && (
(cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 &&
cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16))) {
- pr_err("%s:%d: CSID:%d Invalid tpg decode fmt %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, " CSID:%d Invalid tpg decode fmt %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->format);
rc = -EINVAL;
goto end;
@@ -538,8 +581,7 @@
csid_hw->csi2_rx_cfg.phy_sel = 0;
if (cid_reserv->in_port->format >
CAM_FORMAT_MIPI_RAW_16) {
- pr_err("%s:%d: Wrong TPG format\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_ISP, " Wrong TPG format");
rc = -EINVAL;
goto end;
}
@@ -562,8 +604,9 @@
cid_reserv->node_res = &csid_hw->cid_res[0];
csid_hw->csi2_reserve_cnt++;
- CDBG("%s:%d:CSID:%d CID :%d resource acquired successfully\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d CID :%d resource acquired successfully",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->node_res->res_id);
} else {
rc = cam_ife_csid_cid_get(csid_hw, &cid_reserv->node_res,
@@ -572,14 +615,13 @@
/* if success then increment the reserve count */
if (!rc) {
if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
- pr_err("%s:%d:CSID%d reserve cnt reached max\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP,
+ "CSID%d reserve cnt reached max",
csid_hw->hw_intf->hw_idx);
rc = -EINVAL;
} else {
csid_hw->csi2_reserve_cnt++;
- CDBG("%s:%d:CSID:%d CID:%d acquired\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired",
csid_hw->hw_intf->hw_idx,
cid_reserv->node_res->res_id);
}
@@ -601,8 +643,8 @@
/* CSID CSI2 v2.0 supports 31 vc */
if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
- pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d mode:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d mode:%d",
+ csid_hw->hw_intf->hw_idx,
reserve->in_port->vc, reserve->in_port->dt,
reserve->sync_mode);
rc = -EINVAL;
@@ -613,8 +655,9 @@
case CAM_IFE_PIX_PATH_RES_IPP:
if (csid_hw->ipp_res.res_state !=
CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- CDBG("%s:%d:CSID:%d IPP resource not available %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d IPP resource not available %d",
+ csid_hw->hw_intf->hw_idx,
csid_hw->ipp_res.res_state);
rc = -EINVAL;
goto end;
@@ -622,8 +665,8 @@
if (cam_ife_csid_is_ipp_format_supported(
reserve->in_port->format)) {
- pr_err("%s:%d:CSID:%d res id:%d un support format %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP,
+ "CSID:%d res id:%d un support format %d",
csid_hw->hw_intf->hw_idx, reserve->res_id,
reserve->in_port->format);
rc = -EINVAL;
@@ -632,8 +675,8 @@
/* assign the IPP resource */
res = &csid_hw->ipp_res;
- CDBG("%s:%d:CSID:%d IPP resource:%d acquired successfully\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d IPP resource:%d acquired successfully",
csid_hw->hw_intf->hw_idx, res->res_id);
break;
@@ -643,23 +686,24 @@
case CAM_IFE_PIX_PATH_RES_RDI_3:
if (csid_hw->rdi_res[reserve->res_id].res_state !=
CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- CDBG("%s:%d:CSID:%d RDI:%d resource not available %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d RDI:%d resource not available %d",
+ csid_hw->hw_intf->hw_idx,
reserve->res_id,
csid_hw->rdi_res[reserve->res_id].res_state);
rc = -EINVAL;
goto end;
} else {
res = &csid_hw->rdi_res[reserve->res_id];
- CDBG("%s:%d:CSID:%d RDI resource:%d acquire success\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d RDI resource:%d acquire success",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
}
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res id:%d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res id:%d",
csid_hw->hw_intf->hw_idx, reserve->res_id);
rc = -EINVAL;
goto end;
@@ -668,6 +712,18 @@
res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
path_data = (struct cam_ife_csid_path_cfg *)res->res_priv;
+ /* store the output format for RDI */
+ switch (reserve->res_id) {
+ case CAM_IFE_PIX_PATH_RES_RDI_0:
+ case CAM_IFE_PIX_PATH_RES_RDI_1:
+ case CAM_IFE_PIX_PATH_RES_RDI_2:
+ case CAM_IFE_PIX_PATH_RES_RDI_3:
+ path_data->output_fmt = reserve->out_port->format;
+ break;
+ default:
+ break;
+ }
+
path_data->cid = reserve->cid;
path_data->decode_fmt = reserve->in_port->format;
path_data->master_idx = reserve->master_idx;
@@ -711,31 +767,30 @@
/* overflow check before increment */
if (csid_hw->hw_info->open_count == UINT_MAX) {
- pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+ csid_hw->hw_intf->hw_idx);
return -EINVAL;
}
/* Increment ref Count */
csid_hw->hw_info->open_count++;
if (csid_hw->hw_info->open_count > 1) {
- CDBG("%s:%d: CSID hw has already been enabled\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "CSID hw has already been enabled");
return rc;
}
- CDBG("%s:%d:CSID:%d init CSID HW\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d init CSID HW",
csid_hw->hw_intf->hw_idx);
rc = cam_ife_csid_enable_soc_resources(soc_info);
if (rc) {
- pr_err("%s:%d:CSID:%d Enable SOC failed\n", __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "CSID:%d Enable SOC failed",
csid_hw->hw_intf->hw_idx);
goto err;
}
- CDBG("%s:%d:CSID:%d enable top irq interrupt\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d enable top irq interrupt",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
@@ -745,8 +800,8 @@
rc = cam_ife_csid_global_reset(csid_hw);
if (rc) {
- pr_err("%s:%d CSID:%d csid_reset fail rc = %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+ CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
rc = -ETIMEDOUT;
goto disable_soc;
}
@@ -756,7 +811,7 @@
* SW register reset also reset the mask irq, so poll the irq status
* to check the reset complete.
*/
- CDBG("%s:%d:CSID:%d Reset Software registers\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d Reset Software registers",
csid_hw->hw_intf->hw_idx);
cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
@@ -768,8 +823,7 @@
status, (status & 0x1) == 0x1,
CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
if (rc < 0) {
- pr_err("%s:%d: software register reset timeout.....\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "software register reset timeout.....");
rc = -ETIMEDOUT;
goto disable_soc;
}
@@ -801,7 +855,7 @@
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_hw_version_addr);
- CDBG("%s:%d:CSID:%d CSID HW version: 0x%x\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d CSID HW version: 0x%x",
csid_hw->hw_intf->hw_idx, val);
return 0;
@@ -830,7 +884,7 @@
soc_info = &csid_hw->hw_info->soc_info;
csid_reg = csid_hw->csid_info->csid_reg;
- CDBG("%s:%d:CSID:%d De-init CSID HW\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d De-init CSID HW",
csid_hw->hw_intf->hw_idx);
/*disable the top IRQ interrupt */
@@ -839,8 +893,8 @@
rc = cam_ife_csid_disable_soc_resources(soc_info);
if (rc)
- pr_err("%s:%d:CSID:%d Disable CSID SOC failed\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",
+ csid_hw->hw_intf->hw_idx);
csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
return rc;
@@ -856,8 +910,8 @@
csid_hw->tpg_start_cnt++;
if (csid_hw->tpg_start_cnt == 1) {
/*Enable the TPG */
- CDBG("%s:%d CSID:%d start CSID TPG\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "CSID:%d start CSID TPG",
+ csid_hw->hw_intf->hw_idx);
soc_info = &csid_hw->hw_info->soc_info;
{
@@ -865,44 +919,37 @@
uint32_t i;
uint32_t base = 0x600;
- CDBG("%s:%d: ================== TPG ===============\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "================ TPG ============");
for (i = 0; i < 16; i++) {
val = cam_io_r_mb(
soc_info->reg_map[0].mem_base +
base + i * 4);
- CDBG("%s:%d reg 0x%x = 0x%x\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
(base + i*4), val);
}
- CDBG("%s:%d: ================== IPP ===============\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "================ IPP =============");
base = 0x200;
for (i = 0; i < 10; i++) {
val = cam_io_r_mb(
soc_info->reg_map[0].mem_base +
base + i * 4);
- CDBG("%s:%d reg 0x%x = 0x%x\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
(base + i*4), val);
}
- CDBG("%s:%d: ================== RX ===============\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "================ RX =============");
base = 0x100;
for (i = 0; i < 5; i++) {
val = cam_io_r_mb(
soc_info->reg_map[0].mem_base +
base + i * 4);
- CDBG("%s:%d reg 0x%x = 0x%x\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
(base + i*4), val);
}
}
- CDBG("%s:%d: =============== TPG control ===============\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "============ TPG control ============");
val = (4 << 20);
val |= (0x80 << 8);
val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
@@ -912,8 +959,7 @@
csid_tpg_ctrl_addr);
val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
- CDBG("%s:%d reg 0x%x = 0x%x\n", __func__, __LINE__,
- 0x600, val);
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x", 0x600, val);
}
return 0;
@@ -934,8 +980,8 @@
/* disable the TPG */
if (!csid_hw->tpg_start_cnt) {
- CDBG("%s:%d CSID:%d stop CSID TPG\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "CSID:%d stop CSID TPG",
+ csid_hw->hw_intf->hw_idx);
/*stop the TPG */
cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
@@ -956,8 +1002,8 @@
csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
- CDBG("%s:%d CSID:%d TPG config\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "CSID:%d TPG config",
+ csid_hw->hw_intf->hw_idx);
/* configure one DT, infinite frames */
val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
@@ -1012,13 +1058,13 @@
csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
- CDBG("%s:%d CSID:%d count:%d config csi2 rx\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+ CAM_DBG(CAM_ISP, "CSID:%d count:%d config csi2 rx",
+ csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
/* overflow check before increment */
if (csid_hw->csi2_cfg_cnt == UINT_MAX) {
- pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+ csid_hw->hw_intf->hw_idx);
return -EINVAL;
}
@@ -1033,7 +1079,7 @@
val = (csid_hw->csi2_rx_cfg.lane_num - 1) |
(csid_hw->csi2_rx_cfg.lane_cfg << 4) |
(csid_hw->csi2_rx_cfg.lane_type << 24);
- val |= csid_hw->csi2_rx_cfg.phy_sel & 0x3;
+ val |= (csid_hw->csi2_rx_cfg.phy_sel & 0x3) << 20;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
@@ -1081,15 +1127,15 @@
struct cam_hw_soc_info *soc_info;
if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
- pr_err("%s:%d CSID:%d Invalid res id :%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res id :%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
- CDBG("%s:%d CSID:%d cnt : %d Disable csi2 rx\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+ CAM_DBG(CAM_ISP, "CSID:%d cnt : %d Disable csi2 rx",
+ csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
if (csid_hw->csi2_cfg_cnt)
csid_hw->csi2_cfg_cnt--;
@@ -1114,22 +1160,21 @@
struct cam_ife_csid_path_cfg *path_data;
struct cam_ife_csid_reg_offset *csid_reg;
struct cam_hw_soc_info *soc_info;
- uint32_t path_format = 0, plain_format = 0, val = 0;
+ uint32_t path_format = 0, val = 0;
path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
if (!csid_reg->ipp_reg) {
- pr_err("%s:%d CSID:%d IPP:%d is not supported on HW\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d IPP:%d is not supported on HW",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
return -EINVAL;
}
- CDBG("%s:%d: Enabled IPP Path.......\n", __func__, __LINE__);
- rc = cam_ife_csid_get_format(res->res_id,
- path_data->decode_fmt, &path_format, &plain_format);
+ CAM_DBG(CAM_ISP, "Enabled IPP Path.......");
+ rc = cam_ife_csid_get_format(path_data->decode_fmt, &path_format);
if (rc)
return rc;
@@ -1150,6 +1195,10 @@
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+ /* select the post irq sub sample strobe for time stamp capture */
+ cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_cfg1_addr);
+
if (path_data->crop_enable) {
val = ((path_data->width +
path_data->start_pixel) & 0xFFFF <<
@@ -1228,15 +1277,16 @@
soc_info = &csid_hw->hw_info->soc_info;
if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
- pr_err("%s:%d:CSID:%d Res type %d res_id:%d in wrong state %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP,
+ "CSID:%d Res type %d res_id:%d in wrong state %d",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id, res->res_state);
rc = -EINVAL;
}
if (!csid_reg->ipp_reg) {
- pr_err("%s:%d:CSID:%d IPP %d is not supported on HW\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d IPP %d is not supported on HW",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
rc = -EINVAL;
}
@@ -1266,20 +1316,21 @@
soc_info = &csid_hw->hw_info->soc_info;
if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
- pr_err("%s:%d:CSID:%d res type:%d res_id:%d Invalid state%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP,
+ "CSID:%d res type:%d res_id:%d Invalid state%d",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id, res->res_state);
return -EINVAL;
}
if (!csid_reg->ipp_reg) {
- pr_err("%s:%d:CSID:%d IPP %d not supported on HW\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d IPP %d not supported on HW",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
return -EINVAL;
}
- CDBG("%s:%d: enable IPP path.......\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "enable IPP path.......");
/*Resume at frame boundary */
if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
@@ -1296,8 +1347,7 @@
/* for slave mode, not need to resume for slave device */
/* Enable the required ipp interrupts */
- val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
- CSID_PATH_INFO_INPUT_SOF|CSID_PATH_INFO_INPUT_EOF;
+ val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
@@ -1322,40 +1372,40 @@
soc_info = &csid_hw->hw_info->soc_info;
if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
- CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
- CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, res->res_state);
return rc;
}
if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
- CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid state%d",
+ csid_hw->hw_intf->hw_idx, res->res_id,
res->res_state);
return -EINVAL;
}
if (!csid_reg->ipp_reg) {
- pr_err("%s:%d:CSID:%d IPP%d is not supported on HW\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_ERR(CAM_ISP, "CSID:%d IPP%d is not supported on HW",
+ csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
- pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+ CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d",
+ csid_hw->hw_intf->hw_idx, stop_cmd);
return -EINVAL;
}
- CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
csid_hw->hw_intf->hw_idx, res->res_id);
if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
@@ -1407,13 +1457,13 @@
id = res->res_id;
if (!csid_reg->rdi_reg[id]) {
- pr_err("%s:%d CSID:%d RDI:%d is not supported on HW\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx, id);
+ CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW",
+ csid_hw->hw_intf->hw_idx, id);
return -EINVAL;
}
- rc = cam_ife_csid_get_format(res->res_id,
- path_data->decode_fmt, &path_format, &plain_fmt);
+ rc = cam_ife_csid_get_rdi_format(path_data->decode_fmt,
+ path_data->output_fmt, &path_format, &plain_fmt);
if (rc)
return rc;
@@ -1435,6 +1485,10 @@
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+ /* select the post irq sub sample strobe for time stamp capture */
+ cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr);
+
if (path_data->crop_enable) {
val = ((path_data->width +
path_data->start_pixel) & 0xFFFF <<
@@ -1506,8 +1560,8 @@
if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
!csid_reg->rdi_reg[id]) {
- pr_err("%s:%d:CSID:%d Invalid res id%d state:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res id%d state:%d",
+ csid_hw->hw_intf->hw_idx, res->res_id,
res->res_state);
return -EINVAL;
}
@@ -1538,8 +1592,9 @@
if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
!csid_reg->rdi_reg[id]) {
- pr_err("%s:%d:CSID:%d invalid res type:%d res_id:%d state%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP,
+ "CSID:%d invalid res type:%d res_id:%d state%d",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id, res->res_state);
return -EINVAL;
}
@@ -1550,8 +1605,7 @@
csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
/* Enable the required RDI interrupts */
- val = (CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
- CSID_PATH_INFO_INPUT_SOF | CSID_PATH_INFO_INPUT_EOF);
+ val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
@@ -1577,35 +1631,35 @@
if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
!csid_reg->rdi_reg[res->res_id]) {
- CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
- CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, res->res_state);
return rc;
}
if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
- CDBG("%s:%d:CSID:%d Res:%d Invalid res_state%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid res_state%d",
+ csid_hw->hw_intf->hw_idx, res->res_id,
res->res_state);
return -EINVAL;
}
if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
- pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+ CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d",
+ csid_hw->hw_intf->hw_idx, stop_cmd);
return -EINVAL;
}
- CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
csid_hw->hw_intf->hw_idx, res->res_id);
init_completion(&csid_hw->csid_rdin_complete[id]);
@@ -1646,15 +1700,15 @@
if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
- CDBG("%s:%d:CSID:%d Invalid res_type:%d res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
res->res_id);
return -EINVAL;
}
if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
- pr_err("%s:%d:CSID:%d Invalid dev state :%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
+ csid_hw->hw_intf->hw_idx,
csid_hw->hw_info->hw_state);
return -EINVAL;
}
@@ -1698,22 +1752,22 @@
soc_info = &csid_hw->hw_info->soc_info;
if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
- CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
- CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, res->res_state);
return rc;
}
if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
- CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid state%d",
+ csid_hw->hw_intf->hw_idx, res->res_id,
res->res_state);
return -EINVAL;
}
@@ -1726,8 +1780,8 @@
rc = wait_for_completion_timeout(complete,
msecs_to_jiffies(IFE_CSID_TIMEOUT));
if (rc <= 0) {
- pr_err("%s:%d:CSID%d stop at frame boundary failid:%drc:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID%d stop at frame boundary failid:%drc:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, rc);
if (rc == 0)
/* continue even have timeout */
@@ -1766,7 +1820,7 @@
struct cam_ife_csid_reg_offset *csid_reg;
if (!hw_priv || !get_hw_cap_args) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
@@ -1781,8 +1835,9 @@
hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
- CDBG("%s:%d:CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d",
+ csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
hw_caps->no_pix, hw_caps->major_version, hw_caps->minor_version,
hw_caps->version_incr);
@@ -1799,7 +1854,7 @@
if (!hw_priv || !reset_args || (arg_size !=
sizeof(struct cam_csid_reset_cfg_args))) {
- pr_err("%s:%d:CSID:Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID:Invalid args");
return -EINVAL;
}
@@ -1815,8 +1870,8 @@
rc = cam_ife_csid_path_reset(csid_hw, reset);
break;
default:
- pr_err("%s:%d:CSID:Invalid reset type :%d\n", __func__,
- __LINE__, reset->reset_type);
+ CAM_ERR(CAM_ISP, "CSID:Invalid reset type :%d",
+ reset->reset_type);
rc = -EINVAL;
break;
}
@@ -1834,7 +1889,7 @@
if (!hw_priv || !reserve_args || (arg_size !=
sizeof(struct cam_csid_hw_reserve_resource_args))) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
@@ -1851,8 +1906,8 @@
rc = cam_ife_csid_path_reserve(csid_hw, reserv);
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res type :%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, reserv->res_type);
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type :%d",
+ csid_hw->hw_intf->hw_idx, reserv->res_type);
rc = -EINVAL;
break;
}
@@ -1871,7 +1926,7 @@
if (!hw_priv || !release_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
@@ -1884,31 +1939,33 @@
res->res_id >= CAM_IFE_CSID_CID_MAX) ||
(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
- pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
res->res_id);
rc = -EINVAL;
goto end;
}
if (res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- CDBG("%s:%d:CSID:%d res type:%d Res %d in released state\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d res type:%d Res %d in released state",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id);
goto end;
}
if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
- CDBG("%s:%d:CSID:%d res type:%d Res id:%d invalid state:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d res type:%d Res id:%d invalid state:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id, res->res_state);
rc = -EINVAL;
goto end;
}
- CDBG("%s:%d:CSID:%d res type :%d Resource id:%d\n", __func__, __LINE__,
- csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+ CAM_DBG(CAM_ISP, "CSID:%d res type :%d Resource id:%d",
+ csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
switch (res->res_type) {
case CAM_ISP_RESOURCE_CID:
@@ -1926,8 +1983,8 @@
memset(&csid_hw->csi2_rx_cfg, 0,
sizeof(struct cam_ife_csid_csi2_rx_cfg));
- CDBG("%s:%d:CSID:%d res id :%d cnt:%d reserv cnt:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP, "CSID:%d res id :%d cnt:%d reserv cnt:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);
break;
@@ -1935,8 +1992,8 @@
res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
res->res_id);
rc = -EINVAL;
break;
@@ -1958,7 +2015,7 @@
if (!hw_priv || !init_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
@@ -1972,8 +2029,8 @@
res->res_id >= CAM_IFE_CSID_CID_MAX) ||
(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
- pr_err("%s:%d:CSID:%d Invalid res tpe:%d res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res tpe:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
res->res_id);
rc = -EINVAL;
goto end;
@@ -1982,14 +2039,15 @@
if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
- pr_err("%s:%d:CSID:%d res type:%d res_id:%dInvalid state %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP,
+ "CSID:%d res type:%d res_id:%dInvalid state %d",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id, res->res_state);
rc = -EINVAL;
goto end;
}
- CDBG("%s:%d CSID:%d res type :%d res_id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d res type :%d res_id:%d",
csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
@@ -2010,8 +2068,8 @@
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res type state %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type state %d",
+ csid_hw->hw_intf->hw_idx,
res->res_type);
break;
}
@@ -2033,7 +2091,7 @@
if (!hw_priv || !deinit_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("%s:%d:CSID:Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID:Invalid arguments");
return -EINVAL;
}
@@ -2043,8 +2101,8 @@
mutex_lock(&csid_hw->hw_info->hw_mutex);
if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
- CDBG("%s:%d:CSID:%d Res:%d already in De-init state\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in De-init state",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
goto end;
}
@@ -2061,8 +2119,8 @@
break;
default:
- pr_err("%s:%d:CSID:%d Invalid Res type %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid Res type %d",
+ csid_hw->hw_intf->hw_idx,
res->res_type);
goto end;
}
@@ -2086,7 +2144,7 @@
if (!hw_priv || !start_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
@@ -2095,19 +2153,18 @@
res = (struct cam_isp_resource_node *)start_args;
csid_reg = csid_hw->csid_info->csid_reg;
- mutex_lock(&csid_hw->hw_info->hw_mutex);
if ((res->res_type == CAM_ISP_RESOURCE_CID &&
res->res_id >= CAM_IFE_CSID_CID_MAX) ||
(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
- CDBG("%s:%d:CSID:%d Invalid res tpe:%d res id:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res tpe:%d res id:%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
res->res_id);
rc = -EINVAL;
goto end;
}
- CDBG("%s:%d CSID:%d res_type :%d res_id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d res_type :%d res_id:%d",
csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
switch (res->res_type) {
@@ -2122,13 +2179,12 @@
rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res type%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+ csid_hw->hw_intf->hw_idx,
res->res_type);
break;
}
end:
- mutex_unlock(&csid_hw->hw_info->hw_mutex);
return rc;
}
@@ -2144,14 +2200,13 @@
if (!hw_priv || !stop_args ||
(arg_size != sizeof(struct cam_csid_hw_stop_args))) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
csid_stop = (struct cam_csid_hw_stop_args *) stop_args;
csid_hw_info = (struct cam_hw_info *)hw_priv;
csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
- mutex_lock(&csid_hw->hw_info->hw_mutex);
/* Stop the resource first */
for (i = 0; i < csid_stop->num_res; i++) {
res = csid_stop->node_res[i];
@@ -2170,8 +2225,8 @@
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res type%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+ csid_hw->hw_intf->hw_idx,
res->res_type);
break;
}
@@ -2183,9 +2238,10 @@
if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
+ else
+ res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
}
- mutex_unlock(&csid_hw->hw_info->hw_mutex);
return rc;
}
@@ -2193,7 +2249,7 @@
static int cam_ife_csid_read(void *hw_priv,
void *read_args, uint32_t arg_size)
{
- pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: un supported");
return -EINVAL;
}
@@ -2201,7 +2257,7 @@
static int cam_ife_csid_write(void *hw_priv,
void *write_args, uint32_t arg_size)
{
- pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: un supported");
return -EINVAL;
}
@@ -2213,25 +2269,23 @@
struct cam_hw_info *csid_hw_info;
if (!hw_priv || !cmd_args) {
- pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
return -EINVAL;
}
csid_hw_info = (struct cam_hw_info *)hw_priv;
csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
- mutex_lock(&csid_hw->hw_info->hw_mutex);
switch (cmd_type) {
case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
break;
default:
- pr_err("%s:%d:CSID:%d un supported cmd:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, cmd_type);
+ CAM_ERR(CAM_ISP, "CSID:%d un supported cmd:%d",
+ csid_hw->hw_intf->hw_idx, cmd_type);
rc = -EINVAL;
break;
}
- mutex_unlock(&csid_hw->hw_info->hw_mutex);
return rc;
@@ -2247,11 +2301,10 @@
csid_hw = (struct cam_ife_csid_hw *)data;
- CDBG("%s:%d:CSID %d IRQ Handling\n", __func__, __LINE__,
- csid_hw->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "CSID %d IRQ Handling", csid_hw->hw_intf->hw_idx);
if (!data) {
- pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
return IRQ_HANDLED;
}
@@ -2290,55 +2343,52 @@
cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_irq_cmd_addr);
- CDBG("%s:%d: irq_status_rx = 0x%x\n", __func__, __LINE__,
- irq_status_rx);
- CDBG("%s:%d: irq_status_ipp = 0x%x\n", __func__, __LINE__,
- irq_status_ipp);
+ CAM_DBG(CAM_ISP, "irq_status_rx = 0x%x", irq_status_rx);
+ CAM_DBG(CAM_ISP, "irq_status_ipp = 0x%x", irq_status_ipp);
if (irq_status_top) {
- CDBG("%s:%d: CSID global reset complete......Exit\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "CSID global reset complete......Exit");
complete(&csid_hw->csid_top_complete);
return IRQ_HANDLED;
}
if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
- CDBG("%s:%d: csi rx reset complete\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "csi rx reset complete");
complete(&csid_hw->csid_csi2_complete);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
- pr_err_ratelimited("%s:%d:CSID:%d lane 0 over flow\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d lane 0 over flow",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
- pr_err_ratelimited("%s:%d:CSID:%d lane 1 over flow\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d lane 1 over flow",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
- pr_err_ratelimited("%s:%d:CSID:%d lane 2 over flow\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d lane 2 over flow",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
- pr_err_ratelimited("%s:%d:CSID:%d lane 3 over flow\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d lane 3 over flow",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
- pr_err_ratelimited("%s:%d:CSID:%d TG OVER FLOW\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d TG OVER FLOW",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
- pr_err_ratelimited("%s:%d:CSID:%d CPHY_EOT_RECEPTION\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d CPHY_EOT_RECEPTION",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
- pr_err_ratelimited("%s:%d:CSID:%d CPHY_SOT_RECEPTION\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d CPHY_SOT_RECEPTION",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
- pr_err_ratelimited("%s:%d:CSID:%d CPHY_PH_CRC\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d CPHY_PH_CRC",
+ csid_hw->hw_intf->hw_idx);
}
/*read the IPP errors */
@@ -2346,25 +2396,23 @@
/* IPP reset done bit */
if (irq_status_ipp &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
- CDBG("%s%d: CSID IPP reset complete\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "CSID IPP reset complete");
complete(&csid_hw->csid_ipp_complete);
}
if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOF)
- CDBG("%s: CSID IPP SOF received\n", __func__);
+ CAM_DBG(CAM_ISP, "CSID IPP SOF received");
if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOL)
- CDBG("%s: CSID IPP SOL received\n", __func__);
+ CAM_DBG(CAM_ISP, "CSID IPP SOL received");
if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOL)
- CDBG("%s: CSID IPP EOL received\n", __func__);
+ CAM_DBG(CAM_ISP, "CSID IPP EOL received");
if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
- CDBG("%s: CSID IPP EOF received\n", __func__);
+ CAM_DBG(CAM_ISP, "CSID IPP EOF received");
if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
complete(&csid_hw->csid_ipp_complete);
if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
- pr_err("%s:%d:CSID:%d IPP fifo over flow\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "CSID:%d IPP fifo over flow",
csid_hw->hw_intf->hw_idx);
/*Stop IPP path immediately */
cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
@@ -2376,17 +2424,20 @@
for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
if (irq_status_rdi[i] &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
- CDBG("%s:%d: CSID rdi%d reset complete\n",
- __func__, __LINE__, i);
+ CAM_DBG(CAM_ISP, "CSID rdi%d reset complete", i);
complete(&csid_hw->csid_rdin_complete[i]);
}
+ if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF)
+ CAM_DBG(CAM_ISP, "CSID RDI SOF received");
+ if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
+ CAM_DBG(CAM_ISP, "CSID RDI EOF received");
+
if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
complete(&csid_hw->csid_rdin_complete[i]);
if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
- pr_err("%s:%d:CSID:%d RDI fifo over flow\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "CSID:%d RDI fifo over flow",
csid_hw->hw_intf->hw_idx);
/*Stop RDI path immediately */
cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
@@ -2395,7 +2446,7 @@
}
}
- CDBG("%s:%d:IRQ Handling exit\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "IRQ Handling exit");
return IRQ_HANDLED;
}
@@ -2410,8 +2461,7 @@
struct cam_ife_csid_hw *ife_csid_hw = NULL;
if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
- pr_err("%s:%d: Invalid csid index:%d\n", __func__, __LINE__,
- csid_idx);
+ CAM_ERR(CAM_ISP, "Invalid csid index:%d", csid_idx);
return rc;
}
@@ -2421,7 +2471,7 @@
ife_csid_hw->hw_intf = csid_hw_intf;
ife_csid_hw->hw_info = csid_hw_info;
- CDBG("%s:%d: type %d index %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "type %d index %d",
ife_csid_hw->hw_intf->hw_type, csid_idx);
@@ -2440,8 +2490,7 @@
rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
cam_ife_csid_irq, ife_csid_hw);
if (rc < 0) {
- pr_err("%s:%d:CSID:%d Failed to init_soc\n", __func__, __LINE__,
- csid_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d Failed to init_soc", csid_idx);
goto err;
}
@@ -2533,7 +2582,7 @@
uint32_t i;
if (!ife_csid_hw) {
- pr_err("%s:%d: Invalid param\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid param");
return rc;
}
@@ -2547,8 +2596,7 @@
for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
kfree(ife_csid_hw->cid_res[i].res_priv);
+ cam_ife_csid_deinit_soc_resources(&ife_csid_hw->hw_info->soc_info);
return 0;
}
-
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index d36c576..ef585c3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -65,6 +65,7 @@
#define CSID_PATH_ERROR_PIX_COUNT BIT(13)
#define CSID_PATH_ERROR_LINE_COUNT BIT(14)
+/* enum cam_csid_path_halt_mode select the path halt mode control */
enum cam_csid_path_halt_mode {
CSID_HALT_MODE_INTERNAL,
CSID_HALT_MODE_GLOBAL,
@@ -72,6 +73,16 @@
CSID_HALT_MODE_SLAVE,
};
+/**
+ *enum cam_csid_path_timestamp_stb_sel - select the sof/eof strobes used to
+ * capture the timestamp
+ */
+enum cam_csid_path_timestamp_stb_sel {
+ CSID_TIMESTAMP_STB_PRE_HALT,
+ CSID_TIMESTAMP_STB_POST_HALT,
+ CSID_TIMESTAMP_STB_POST_IRQ,
+ CSID_TIMESTAMP_STB_MAX,
+};
struct cam_ife_csid_ipp_reg_offset {
/*Image pixel path register offsets*/
@@ -343,6 +354,7 @@
* @dt : Data type number
* @cid cid number, it is same as DT_ID number in HW
* @decode_fmt: input decode format
+ * @output_fmt: output resource format, needed for RDI resource
* @crop_enable: crop is enable or disabled, if enabled
* then remaining parameters are valid.
* @start_pixel: start pixel
@@ -362,6 +374,7 @@
uint32_t dt;
uint32_t cid;
uint32_t decode_fmt;
+ uint32_t output_fmt;
bool crop_enable;
uint32_t start_pixel;
uint32_t width;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
index 003d83f..5a57046 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
@@ -16,9 +16,7 @@
#include "cam_ife_csid_core.h"
#include "cam_ife_csid_dev.h"
#include "cam_ife_csid_hw_intf.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static struct cam_hw_intf *cam_ife_csid_hw_list[CAM_IFE_CSID_HW_RES_MAX] = {
0, 0, 0, 0};
@@ -34,7 +32,7 @@
uint32_t csid_dev_idx;
int rc = 0;
- CDBG("%s:%d probe called\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "probe called");
csid_hw_intf = kzalloc(sizeof(*csid_hw_intf), GFP_KERNEL);
if (!csid_hw_intf) {
@@ -60,8 +58,7 @@
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
- pr_err("%s:%d No matching table for the IFE CSID HW!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "No matching table for the IFE CSID HW!");
rc = -EINVAL;
goto free_dev;
}
@@ -72,6 +69,7 @@
csid_hw_info->core_info = csid_dev;
csid_hw_info->soc_info.pdev = pdev;
+ csid_hw_info->soc_info.index = csid_dev_idx;
csid_hw_data = (struct cam_ife_csid_hw_info *)match_dev->data;
/* need to setup the pdev before call the ife hw probe init */
@@ -82,7 +80,7 @@
goto free_dev;
platform_set_drvdata(pdev, csid_dev);
- CDBG("%s:%d CSID:%d probe successful\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d probe successful",
csid_hw_intf->hw_idx);
@@ -113,7 +111,7 @@
csid_hw_intf = csid_dev->hw_intf;
csid_hw_info = csid_dev->hw_info;
- CDBG("%s:%d CSID:%d remove\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d remove",
csid_dev->hw_intf->hw_idx);
cam_ife_csid_hw_deinit(csid_dev);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
index 4ed4da5..36c6df0 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
@@ -15,7 +15,6 @@
#include "cam_ife_csid_core.h"
#include "cam_ife_csid_dev.h"
-
#define CAM_CSID_LITE_DRV_NAME "csid_lite_170"
#define CAM_CSID_LITE_VERSION_V170 0x10070000
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index c718bba..72050aa 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -9,11 +9,10 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-
+#include <linux/slab.h>
#include "cam_ife_csid_soc.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
static int cam_ife_csid_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
@@ -50,6 +49,14 @@
irq_handler_t csid_irq_handler, void *irq_data)
{
int rc = 0;
+ struct cam_cpas_register_params cpas_register_param;
+ struct cam_csid_soc_private *soc_private;
+
+ soc_private = kzalloc(sizeof(struct cam_csid_soc_private), GFP_KERNEL);
+ if (!soc_private)
+ return -ENOMEM;
+
+ soc_info->soc_private = soc_private;
rc = cam_ife_csid_get_dt_properties(soc_info);
if (rc < 0)
@@ -59,36 +66,117 @@
rc = cam_ife_csid_request_platform_resource(soc_info, csid_irq_handler,
irq_data);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP,
+ "Error Request platform resources failed rc=%d", rc);
+ goto free_soc_private;
+ }
+
+ memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+ strlcpy(cpas_register_param.identifier, "csid",
+ CAM_HW_IDENTIFIER_LENGTH);
+ cpas_register_param.cell_index = soc_info->index;
+ cpas_register_param.dev = &soc_info->pdev->dev;
+ rc = cam_cpas_register_client(&cpas_register_param);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
+ goto release_soc;
+ } else {
+ soc_private->cpas_handle = cpas_register_param.client_handle;
+ }
+
+ return rc;
+
+release_soc:
+ cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+ kfree(soc_private);
+
+ return rc;
+}
+
+int cam_ife_csid_deinit_soc_resources(
+ struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+
+ soc_private = soc_info->soc_private;
+ if (!soc_private) {
+ CAM_ERR(CAM_ISP, "Error soc_private NULL");
+ return -ENODEV;
+ }
+
+ rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+ rc = cam_soc_util_release_platform_resource(soc_info);
if (rc < 0)
return rc;
- CDBG("%s: mem_base is 0x%llx\n", __func__,
- (uint64_t) soc_info->reg_map[0].mem_base);
-
return rc;
}
int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+
+ soc_private = soc_info->soc_private;
+
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = 640000000;
+ axi_vote.uncompressed_bw = 640000000;
+
+ CAM_DBG(CAM_ISP, "csid vote compressed_bw:%lld uncompressed_bw:%lld",
+ axi_vote.compressed_bw, axi_vote.uncompressed_bw);
+
+ rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error CPAS start failed");
+ rc = -EFAULT;
+ goto end;
+ }
rc = cam_soc_util_enable_platform_resource(soc_info, true,
CAM_TURBO_VOTE, true);
if (rc) {
- pr_err("%s: enable platform failed\n", __func__);
- return rc;
+ CAM_ERR(CAM_ISP, "enable platform failed");
+ goto stop_cpas;
}
return rc;
+
+stop_cpas:
+ cam_cpas_stop(soc_private->cpas_handle);
+end:
+ return rc;
}
int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error Invalid params");
+ return -EINVAL;
+ }
+ soc_private = soc_info->soc_private;
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc)
- pr_err("%s: Disable platform failed\n", __func__);
+ CAM_ERR(CAM_ISP, "Disable platform failed");
+
+ rc = cam_cpas_stop(soc_private->cpas_handle);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error CPAS stop failed rc=%d", rc);
+ return rc;
+ }
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
index 218e05a..1a30722 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
@@ -15,6 +15,19 @@
#include "cam_isp_hw.h"
+/*
+ * struct cam_csid_soc_private:
+ *
+ * @Brief: Private SOC data specific to CSID HW Driver
+ *
+ * @cpas_handle: Handle returned on registering with CPAS driver.
+ * This handle is used for all further interface
+ * with CPAS.
+ */
+struct cam_csid_soc_private {
+ uint32_t cpas_handle;
+};
+
/**
* struct csid_device_soc_info - CSID SOC info object
*
@@ -38,6 +51,17 @@
int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
irq_handler_t csid_irq_handler, void *irq_data);
+
+/**
+ * cam_ife_csid_deinit_soc_resources()
+ *
+ * @brief: csid de initialization function for the soc info
+ *
+ * @soc_info: soc info structure pointer
+ *
+ */
+int cam_ife_csid_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
/**
* cam_ife_csid_enable_soc_resources()
*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
index ecc6f0e..52b712a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -69,6 +69,7 @@
* if type is path then res id need to be filled
* @res_id : res id to be reserved
* @in_port : input port resource info
+ * @out_port: output port resource info, used for RDI path only
* @sync_mode : Sync mode
* Sync mode could be master, slave or none
* @master_idx: master device index to be configured in the slave path
@@ -83,6 +84,7 @@
enum cam_isp_resource_type res_type;
uint32_t res_id;
struct cam_isp_in_port_info *in_port;
+ struct cam_isp_out_port_info *out_port;
enum cam_isp_hw_sync_mode sync_mode;
uint32_t master_idx;
uint32_t cid;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index 15db6a6..418280a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -108,6 +108,7 @@
* (Default is Master in case of Single VFE)
* @dual_slave_core: If Master and Slave exists, HW Index of Slave
* @cdm_ops: CDM operations
+ * @ctx: Context data
*/
struct cam_vfe_hw_vfe_out_acquire_args {
struct cam_isp_resource_node *rsrc_node;
@@ -118,6 +119,7 @@
uint32_t is_master;
uint32_t dual_slave_core;
struct cam_cdm_utils_ops *cdm_ops;
+ void *ctx;
};
/*
@@ -192,22 +194,21 @@
*
* @list: list_head node for the payload
* @core_index: Index of VFE HW that generated this IRQ event
- * @core_info: Private data of handler in bottom half context
* @evt_id: IRQ event
* @irq_reg_val: IRQ and Error register values, read when IRQ was
* handled
* @error_type: Identify different errors
* @ts: Timestamp
+ * @ctx: Context data received during acquire
*/
struct cam_vfe_bus_irq_evt_payload {
- struct list_head list;
- uint32_t core_index;
- void *core_info;
- uint32_t evt_id;
- uint32_t irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX];
- uint32_t error_type;
- struct cam_vfe_bus_ver2_priv *bus_priv;
- struct cam_isp_timestamp ts;
+ struct list_head list;
+ uint32_t core_index;
+ uint32_t evt_id;
+ uint32_t irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX];
+ uint32_t error_type;
+ struct cam_isp_timestamp ts;
+ void *ctx;
};
/*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index f6aab7f..e330c84 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -24,9 +22,7 @@
#include "cam_vfe_bus.h"
#include "cam_vfe_top.h"
#include "cam_ife_hw_mgr.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static const char drv_name[] = "vfe";
@@ -51,11 +47,6 @@
0x00000000,
};
-static uint32_t bus_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
- 0x00000200,
- 0x00000000,
-};
-
static int cam_vfe_get_evt_payload(struct cam_vfe_hw_core_info *core_info,
struct cam_vfe_top_irq_evt_payload **evt_payload)
{
@@ -63,7 +54,7 @@
if (list_empty(&core_info->free_payload_list)) {
*evt_payload = NULL;
spin_unlock(&core_info->spin_lock);
- pr_err_ratelimited("No free payload, core info 0x%x\n",
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload, core info 0x%x\n",
core_info->cpas_handle);
return -ENODEV;
}
@@ -83,11 +74,11 @@
unsigned long flags;
if (!core_info) {
- pr_err("Invalid param core_info NULL");
+ CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
return -EINVAL;
}
if (*evt_payload == NULL) {
- pr_err("No payload to put\n");
+ CAM_ERR(CAM_ISP, "No payload to put");
return -EINVAL;
}
@@ -105,9 +96,9 @@
struct cam_vfe_hw_core_info *core_info = NULL;
int rc = 0;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
if (!hw_priv) {
- pr_err("%s: Invalid arguments\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -118,7 +109,7 @@
core_info->vfe_top->top_priv,
get_hw_cap_args, arg_size);
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -130,11 +121,11 @@
handler_priv = th_payload->handler_priv;
- CDBG("Enter\n");
- CDBG("IRQ status_0 = 0x%x\n", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "Enter");
+ CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
if (th_payload->evt_status_arr[0] & (1<<31)) {
- CDBG("Calling Complete for RESET CMD\n");
+ CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
complete(handler_priv->reset_complete);
/*
@@ -148,7 +139,7 @@
rc = 0;
}
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -159,9 +150,9 @@
struct cam_vfe_hw_core_info *core_info = NULL;
int rc = 0;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
if (!hw_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -169,7 +160,7 @@
vfe_hw->open_count++;
if (vfe_hw->open_count > 1) {
mutex_unlock(&vfe_hw->hw_mutex);
- CDBG("VFE has already been initialized cnt %d\n",
+ CAM_DBG(CAM_ISP, "VFE has already been initialized cnt %d",
vfe_hw->open_count);
return 0;
}
@@ -181,22 +172,32 @@
/* Turn ON Regulators, Clocks and other SOC resources */
rc = cam_vfe_enable_soc_resources(soc_info);
if (rc) {
- pr_err("Enable SOC failed\n");
+ CAM_ERR(CAM_ISP, "Enable SOC failed");
rc = -EFAULT;
goto decrement_open_cnt;
}
- CDBG("Enable soc done\n");
+ CAM_DBG(CAM_ISP, "Enable soc done");
+
+ rc = core_info->vfe_bus->hw_ops.init(core_info->vfe_bus->bus_priv,
+ NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Bus HW init Failed rc=%d", rc);
+ goto disable_soc;
+ }
/* Do HW Reset */
rc = cam_vfe_reset(hw_priv, NULL, 0);
if (rc) {
- pr_err("Reset Failed\n");
- goto disable_soc;
+ CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc);
+ goto deinit_bus;
}
return 0;
+deinit_bus:
+ core_info->vfe_bus->hw_ops.deinit(core_info->vfe_bus->bus_priv,
+ NULL, 0);
disable_soc:
cam_vfe_disable_soc_resources(soc_info);
decrement_open_cnt:
@@ -212,22 +213,22 @@
struct cam_hw_soc_info *soc_info = NULL;
int rc = 0;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
if (!hw_priv) {
- pr_err("%s: Invalid arguments\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
mutex_lock(&vfe_hw->hw_mutex);
if (!vfe_hw->open_count) {
mutex_unlock(&vfe_hw->hw_mutex);
- pr_err("Error! Unbalanced deinit\n");
+ CAM_ERR(CAM_ISP, "Error! Unbalanced deinit");
return -EFAULT;
}
vfe_hw->open_count--;
if (vfe_hw->open_count) {
mutex_unlock(&vfe_hw->hw_mutex);
- CDBG("open_cnt non-zero =%d\n", vfe_hw->open_count);
+ CAM_DBG(CAM_ISP, "open_cnt non-zero =%d", vfe_hw->open_count);
return 0;
}
mutex_unlock(&vfe_hw->hw_mutex);
@@ -235,14 +236,14 @@
soc_info = &vfe_hw->soc_info;
/* Turn OFF Regulators, Clocks and other SOC resources */
- CDBG("Disable SOC resource\n");
+ CAM_DBG(CAM_ISP, "Disable SOC resource");
rc = cam_vfe_disable_soc_resources(soc_info);
if (rc)
- pr_err("Disable SOC failed\n");
+ CAM_ERR(CAM_ISP, "Disable SOC failed");
vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -253,10 +254,10 @@
struct cam_vfe_hw_core_info *core_info = NULL;
int rc;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
if (!hw_priv) {
- pr_err("Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
return -EINVAL;
}
@@ -274,28 +275,28 @@
top_reset_irq_reg_mask, &core_info->irq_payload,
cam_vfe_reset_irq_top_half, NULL, NULL, NULL);
if (core_info->irq_handle < 0) {
- pr_err("subscribe irq controller failed\n");
+ CAM_ERR(CAM_ISP, "subscribe irq controller failed");
return -EFAULT;
}
reinit_completion(&vfe_hw->hw_complete);
- CDBG("calling RESET\n");
+ CAM_DBG(CAM_ISP, "calling RESET");
core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv, NULL, 0);
- CDBG("waiting for vfe reset complete\n");
+ CAM_DBG(CAM_ISP, "waiting for vfe reset complete");
/* Wait for Completion or Timeout of 500ms */
rc = wait_for_completion_timeout(&vfe_hw->hw_complete, 500);
if (!rc)
- pr_err("Error! Reset Timeout\n");
+ CAM_ERR(CAM_ISP, "Error! Reset Timeout");
- CDBG("reset complete done (%d)\n", rc);
+ CAM_DBG(CAM_ISP, "reset complete done (%d)", rc);
rc = cam_irq_controller_unsubscribe_irq(
core_info->vfe_irq_controller, core_info->irq_handle);
if (rc)
- pr_err("Error! Unsubscribe failed\n");
+ CAM_ERR(CAM_ISP, "Error! Unsubscribe failed");
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -319,12 +320,13 @@
handler_priv = th_payload->handler_priv;
- CDBG("IRQ status_0 = %x\n", th_payload->evt_status_arr[0]);
- CDBG("IRQ status_1 = %x\n", th_payload->evt_status_arr[1]);
+ CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
if (rc) {
- pr_err_ratelimited("No tasklet_cmd is free in queue\n");
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "No tasklet_cmd is free in queue\n");
return rc;
}
@@ -341,14 +343,14 @@
evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
irq_reg_offset[i]);
}
- CDBG("Violation status = %x\n", evt_payload->irq_reg_val[2]);
+ CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
/*
* need to handle overflow condition here, otherwise irq storm
* will block everything.
*/
if (evt_payload->irq_reg_val[1]) {
- pr_err("Mask all the interrupts\n");
+ CAM_ERR(CAM_ISP, "Mask all the interrupts");
cam_io_w(0, handler_priv->mem_base + 0x60);
cam_io_w(0, handler_priv->mem_base + 0x5C);
@@ -357,7 +359,7 @@
th_payload->evt_payload_priv = evt_payload;
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -371,7 +373,7 @@
if (!hw_priv || !reserve_args || (arg_size !=
sizeof(struct cam_vfe_acquire_args))) {
- pr_err("Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
return -EINVAL;
}
core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
@@ -382,12 +384,13 @@
rc = core_info->vfe_top->hw_ops.reserve(
core_info->vfe_top->top_priv,
acquire,
- sizeof(acquire));
+ sizeof(*acquire));
else if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_OUT)
- rc = core_info->vfe_bus->acquire_resource(
- core_info->vfe_bus->bus_priv, acquire);
+ rc = core_info->vfe_bus->hw_ops.reserve(
+ core_info->vfe_bus->bus_priv, acquire,
+ sizeof(*acquire));
else
- pr_err("Invalid res type:%d\n", acquire->rsrc_type);
+ CAM_ERR(CAM_ISP, "Invalid res type:%d", acquire->rsrc_type);
mutex_unlock(&vfe_hw->hw_mutex);
@@ -404,7 +407,7 @@
if (!hw_priv || !release_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
return -EINVAL;
}
@@ -415,12 +418,13 @@
if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN)
rc = core_info->vfe_top->hw_ops.release(
core_info->vfe_top->top_priv, isp_res,
- sizeof(struct cam_isp_resource_node));
+ sizeof(*isp_res));
else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT)
- rc = core_info->vfe_bus->release_resource(
- core_info->vfe_bus->bus_priv, isp_res);
+ rc = core_info->vfe_bus->hw_ops.release(
+ core_info->vfe_bus->bus_priv, isp_res,
+ sizeof(*isp_res));
else
- pr_err("Invalid res type:%d\n", isp_res->res_type);
+ CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
mutex_unlock(&vfe_hw->hw_mutex);
@@ -437,7 +441,7 @@
if (!hw_priv || !start_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
return -EINVAL;
}
@@ -466,20 +470,12 @@
core_info->vfe_top->top_priv, isp_res,
sizeof(struct cam_isp_resource_node));
else
- pr_err("Error! subscribe irq controller failed\n");
+ CAM_ERR(CAM_ISP,
+ "Error! subscribe irq controller failed");
} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
- isp_res->irq_handle = cam_irq_controller_subscribe_irq(
- core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_2,
- bus_irq_reg_mask, &core_info->irq_payload,
- core_info->vfe_bus->top_half_handler,
- cam_ife_mgr_do_tasklet_buf_done,
- isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
- if (isp_res->irq_handle > 0)
- rc = core_info->vfe_bus->start_resource(isp_res);
- else
- pr_err("Error! subscribe irq controller failed\n");
+ rc = core_info->vfe_bus->hw_ops.start(isp_res, NULL, 0);
} else {
- pr_err("Invalid res type:%d\n", isp_res->res_type);
+ CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
}
mutex_unlock(&vfe_hw->hw_mutex);
@@ -496,7 +492,7 @@
if (!hw_priv || !stop_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
return -EINVAL;
}
@@ -513,9 +509,9 @@
} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
cam_irq_controller_unsubscribe_irq(
core_info->vfe_irq_controller, isp_res->irq_handle);
- rc = core_info->vfe_bus->stop_resource(isp_res);
+ rc = core_info->vfe_bus->hw_ops.stop(isp_res, NULL, 0);
} else {
- pr_err("Invalid res type:%d\n", isp_res->res_type);
+ CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
}
mutex_unlock(&vfe_hw->hw_mutex);
@@ -543,7 +539,7 @@
int rc = 0;
if (!hw_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -560,13 +556,13 @@
break;
case CAM_VFE_HW_CMD_GET_BUF_UPDATE:
- rc = core_info->vfe_bus->process_cmd(
+ rc = core_info->vfe_bus->hw_ops.process_cmd(
core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
arg_size);
break;
default:
- pr_err("Invalid cmd type:%d\n", cmd_type);
+ CAM_ERR(CAM_ISP, "Invalid cmd type:%d", cmd_type);
rc = -EINVAL;
break;
}
@@ -596,13 +592,13 @@
int rc = -EINVAL;
int i;
- CDBG("Enter");
+ CAM_DBG(CAM_ISP, "Enter");
rc = cam_irq_controller_init(drv_name,
CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX),
vfe_hw_info->irq_reg_info, &core_info->vfe_irq_controller);
if (rc) {
- pr_err("Error! cam_irq_controller_init failed\n");
+ CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
return rc;
}
@@ -610,16 +606,16 @@
soc_info, hw_intf, vfe_hw_info->top_hw_info,
&core_info->vfe_top);
if (rc) {
- pr_err("Error! cam_vfe_top_init failed\n");
- return rc;
+ CAM_ERR(CAM_ISP, "Error! cam_vfe_top_init failed");
+ goto deinit_controller;
}
- rc = cam_vfe_bus_init(vfe_hw_info->bus_version,
- soc_info->reg_map[0].mem_base, hw_intf,
- vfe_hw_info->bus_hw_info, NULL, &core_info->vfe_bus);
+ rc = cam_vfe_bus_init(vfe_hw_info->bus_version, soc_info, hw_intf,
+ vfe_hw_info->bus_hw_info, core_info->vfe_irq_controller,
+ &core_info->vfe_bus);
if (rc) {
- pr_err("Error! cam_vfe_bus_init failed\n");
- return rc;
+ CAM_ERR(CAM_ISP, "Error! cam_vfe_bus_init failed");
+ goto deinit_top;
}
INIT_LIST_HEAD(&core_info->free_payload_list);
@@ -632,4 +628,47 @@
spin_lock_init(&core_info->spin_lock);
return rc;
+
+deinit_top:
+ cam_vfe_top_deinit(vfe_hw_info->top_version,
+ &core_info->vfe_top);
+
+deinit_controller:
+ cam_irq_controller_deinit(&core_info->vfe_irq_controller);
+
+ return rc;
}
+
+int cam_vfe_core_deinit(struct cam_vfe_hw_core_info *core_info,
+ struct cam_vfe_hw_info *vfe_hw_info)
+{
+ int rc = -EINVAL;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&core_info->spin_lock, flags);
+
+ INIT_LIST_HEAD(&core_info->free_payload_list);
+ for (i = 0; i < CAM_VFE_EVT_MAX; i++)
+ INIT_LIST_HEAD(&core_info->evt_payload[i].list);
+
+ rc = cam_vfe_bus_deinit(vfe_hw_info->bus_version,
+ &core_info->vfe_bus);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Error cam_vfe_bus_deinit failed rc=%d", rc);
+
+ rc = cam_vfe_top_deinit(vfe_hw_info->top_version,
+ &core_info->vfe_top);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Error cam_vfe_top_deinit failed rc=%d", rc);
+
+ rc = cam_irq_controller_deinit(&core_info->vfe_irq_controller);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error cam_irq_controller_deinit failed rc=%d", rc);
+
+ spin_unlock_irqrestore(&core_info->spin_lock, flags);
+
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
index 94b4cf0..ee29e1cf 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
@@ -89,4 +89,7 @@
struct cam_hw_intf *hw_intf,
struct cam_vfe_hw_info *vfe_hw_info);
+int cam_vfe_core_deinit(struct cam_vfe_hw_core_info *core_info,
+ struct cam_vfe_hw_info *vfe_hw_info);
+
#endif /* _CAM_VFE_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
index 40279ae..3e2307c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
@@ -10,7 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
@@ -18,9 +17,7 @@
#include "cam_vfe_dev.h"
#include "cam_vfe_core.h"
#include "cam_vfe_soc.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static struct cam_hw_intf *cam_vfe_hw_list[CAM_VFE_HW_NUM_MAX] = {0, 0, 0, 0};
@@ -62,14 +59,15 @@
vfe_hw_intf->hw_ops.process_cmd = cam_vfe_process_cmd;
vfe_hw_intf->hw_type = CAM_ISP_HW_TYPE_VFE;
- CDBG("type %d index %d\n", vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "type %d index %d",
+ vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
platform_set_drvdata(pdev, vfe_hw_intf);
vfe_hw->core_info = kzalloc(sizeof(struct cam_vfe_hw_core_info),
GFP_KERNEL);
if (!vfe_hw->core_info) {
- CDBG("Failed to alloc for core\n");
+ CAM_DBG(CAM_ISP, "Failed to alloc for core");
rc = -ENOMEM;
goto free_vfe_hw;
}
@@ -78,7 +76,7 @@
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
- pr_err("Of_match Failed\n");
+ CAM_ERR(CAM_ISP, "Of_match Failed");
rc = -EINVAL;
goto free_core_info;
}
@@ -88,14 +86,14 @@
rc = cam_vfe_init_soc_resources(&vfe_hw->soc_info, cam_vfe_irq,
vfe_hw);
if (rc < 0) {
- pr_err("Failed to init soc\n");
+ CAM_ERR(CAM_ISP, "Failed to init soc rc=%d", rc);
goto free_core_info;
}
rc = cam_vfe_core_init(core_info, &vfe_hw->soc_info,
vfe_hw_intf, hw_info);
if (rc < 0) {
- pr_err("Failed to init core\n");
+ CAM_ERR(CAM_ISP, "Failed to init core rc=%d", rc);
goto deinit_soc;
}
@@ -110,11 +108,13 @@
cam_vfe_init_hw(vfe_hw, NULL, 0);
cam_vfe_deinit_hw(vfe_hw, NULL, 0);
- CDBG("VFE%d probe successful\n", vfe_hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "VFE%d probe successful", vfe_hw_intf->hw_idx);
return rc;
deinit_soc:
+ if (cam_vfe_deinit_soc_resources(&vfe_hw->soc_info))
+ CAM_ERR(CAM_ISP, "Failed to deinit soc");
free_core_info:
kfree(vfe_hw->core_info);
free_vfe_hw:
@@ -125,6 +125,61 @@
return rc;
}
+int cam_vfe_remove(struct platform_device *pdev)
+{
+ struct cam_hw_info *vfe_hw = NULL;
+ struct cam_hw_intf *vfe_hw_intf = NULL;
+ struct cam_vfe_hw_core_info *core_info = NULL;
+ int rc = 0;
+
+ vfe_hw_intf = platform_get_drvdata(pdev);
+ if (!vfe_hw_intf) {
+ CAM_ERR(CAM_ISP, "Error! No data in pdev");
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "type %d index %d",
+ vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+
+ if (vfe_hw_intf->hw_idx < CAM_VFE_HW_NUM_MAX)
+ cam_vfe_hw_list[vfe_hw_intf->hw_idx] = NULL;
+
+ vfe_hw = vfe_hw_intf->hw_priv;
+ if (!vfe_hw) {
+ CAM_ERR(CAM_ISP, "Error! HW data is NULL");
+ rc = -ENODEV;
+ goto free_vfe_hw_intf;
+ }
+
+ core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+ if (!core_info) {
+ CAM_ERR(CAM_ISP, "Error! core data NULL");
+ rc = -EINVAL;
+ goto deinit_soc;
+ }
+
+ rc = cam_vfe_core_deinit(core_info, core_info->vfe_hw_info);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP, "Failed to deinit core rc=%d", rc);
+
+ kfree(vfe_hw->core_info);
+
+deinit_soc:
+ rc = cam_vfe_deinit_soc_resources(&vfe_hw->soc_info);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP, "Failed to deinit soc rc=%d", rc);
+
+ mutex_destroy(&vfe_hw->hw_mutex);
+ kfree(vfe_hw);
+
+ CAM_DBG(CAM_ISP, "VFE%d remove successful", vfe_hw_intf->hw_idx);
+
+free_vfe_hw_intf:
+ kfree(vfe_hw_intf);
+
+ return rc;
+}
+
int cam_vfe_hw_init(struct cam_hw_intf **vfe_hw, uint32_t hw_idx)
{
int rc = 0;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
index ca54d81..9e73528 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
@@ -27,4 +27,16 @@
*/
int cam_vfe_probe(struct platform_device *pdev);
+/*
+ * cam_vfe_remove()
+ *
+ * @brief: Driver remove function
+ *
+ * @pdev: Platform Device pointer
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_remove(struct platform_device *pdev);
+
#endif /* _CAM_VFE_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index 9f8f8c5..3b2ead2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -10,14 +10,10 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include "cam_cpas_api.h"
#include "cam_vfe_soc.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
@@ -25,7 +21,7 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc) {
- pr_err("Error! get DT properties failed\n");
+ CAM_ERR(CAM_ISP, "Error! get DT properties failed rc=%d", rc);
return rc;
}
@@ -40,6 +36,21 @@
rc = cam_soc_util_request_platform_resource(soc_info, vfe_irq_handler,
irq_data);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error! Request platform resource failed rc=%d", rc);
+
+ return rc;
+}
+
+static int cam_vfe_release_platform_resource(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_release_platform_resource(soc_info);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error! Release platform resource failed rc=%d", rc);
return rc;
}
@@ -54,21 +65,22 @@
soc_private = kzalloc(sizeof(struct cam_vfe_soc_private),
GFP_KERNEL);
if (!soc_private) {
- CDBG("Error! soc_private Alloc Failed\n");
+ CAM_DBG(CAM_ISP, "Error! soc_private Alloc Failed");
return -ENOMEM;
}
soc_info->soc_private = soc_private;
rc = cam_vfe_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("Error! Get DT properties failed\n");
+ CAM_ERR(CAM_ISP, "Error! Get DT properties failed rc=%d", rc);
goto free_soc_private;
}
rc = cam_vfe_request_platform_resource(soc_info, vfe_irq_handler,
irq_data);
if (rc < 0) {
- pr_err("Error! Request platform resources failed\n");
+ CAM_ERR(CAM_ISP,
+ "Error! Request platform resources failed rc=%d", rc);
goto free_soc_private;
}
@@ -79,7 +91,7 @@
cpas_register_param.dev = &soc_info->pdev->dev;
rc = cam_cpas_register_client(&cpas_register_param);
if (rc) {
- pr_err("CPAS registration failed\n");
+ CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
goto release_soc;
} else {
soc_private->cpas_handle = cpas_register_param.client_handle;
@@ -95,6 +107,36 @@
return rc;
}
+int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+ struct cam_vfe_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error! soc_info NULL");
+ return -ENODEV;
+ }
+
+ soc_private = soc_info->soc_private;
+ if (!soc_private) {
+ CAM_ERR(CAM_ISP, "Error! soc_private NULL");
+ return -ENODEV;
+ }
+
+ rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+ rc = cam_vfe_release_platform_resource(soc_info);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error! Release platform resources failed rc=%d", rc);
+
+ kfree(soc_private);
+
+ return rc;
+}
+
int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
@@ -103,7 +145,7 @@
struct cam_axi_vote axi_vote;
if (!soc_info) {
- pr_err("Error! Invalid params\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid params");
rc = -EINVAL;
goto end;
}
@@ -117,7 +159,7 @@
rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
if (rc) {
- pr_err("Error! CPAS start failed.\n");
+ CAM_ERR(CAM_ISP, "Error! CPAS start failed rc=%d", rc);
rc = -EFAULT;
goto end;
}
@@ -125,7 +167,7 @@
rc = cam_soc_util_enable_platform_resource(soc_info, true,
CAM_TURBO_VOTE, true);
if (rc) {
- pr_err("Error! enable platform failed\n");
+ CAM_ERR(CAM_ISP, "Error! enable platform failed rc=%d", rc);
goto stop_cpas;
}
@@ -144,7 +186,7 @@
struct cam_vfe_soc_private *soc_private;
if (!soc_info) {
- pr_err("Error! Invalid params\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid params");
rc = -EINVAL;
return rc;
}
@@ -152,13 +194,13 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc) {
- pr_err("%s: disable platform failed\n", __func__);
+ CAM_ERR(CAM_ISP, "Disable platform failed rc=%d", rc);
return rc;
}
rc = cam_cpas_stop(soc_private->cpas_handle);
if (rc) {
- pr_err("Error! CPAS stop failed.\n");
+ CAM_ERR(CAM_ISP, "Error! CPAS stop failed rc=%d", rc);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
index 27fb192..094c977 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
@@ -45,6 +45,18 @@
irq_handler_t vfe_irq_handler, void *irq_data);
/*
+ * cam_vfe_deinit_soc_resources()
+ *
+ * @Brief: Deinitialize SOC resources including private data
+ *
+ * @soc_info: Device soc information
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
* cam_vfe_enable_soc_resources()
*
* @brief: Enable regulator, irq resources, start CPAS
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
index 2245ab1..0af32ad 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
@@ -16,9 +16,6 @@
#include "cam_vfe_core.h"
#include "cam_vfe_dev.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
static const struct of_device_id cam_vfe170_dt_match[] = {
{
.compatible = "qcom,vfe170",
@@ -30,6 +27,7 @@
static struct platform_driver cam_vfe170_driver = {
.probe = cam_vfe_probe,
+ .remove = cam_vfe_remove,
.driver = {
.name = "cam_vfe170",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index b550071..275c7b5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -117,6 +117,28 @@
.reg_update_cmd = 0x000004AC,
};
+static struct cam_vfe_rdi_ver2_reg vfe170_rdi_reg = {
+ .reg_update_cmd = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_0_data = {
+ .reg_update_cmd_data = 0x2,
+ .sof_irq_mask = 0x8000000,
+ .reg_update_irq_mask = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_1_data = {
+ .reg_update_cmd_data = 0x4,
+ .sof_irq_mask = 0x10000000,
+ .reg_update_irq_mask = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_2_data = {
+ .reg_update_cmd_data = 0x8,
+ .sof_irq_mask = 0x20000000,
+ .reg_update_irq_mask = 0x80,
+};
+
static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = {
.common_reg = &vfe170_top_common_reg,
.camif_hw_info = {
@@ -124,6 +146,16 @@
.camif_reg = &vfe170_camif_reg,
.reg_data = &vfe_170_camif_reg_data,
},
+ .rdi_hw_info = {
+ .common_reg = &vfe170_top_common_reg,
+ .rdi_reg = &vfe170_rdi_reg,
+ .reg_data = {
+ &vfe_170_rdi_0_data,
+ &vfe_170_rdi_1_data,
+ &vfe_170_rdi_2_data,
+ NULL,
+ },
+ },
.mux_type = {
CAM_VFE_CAMIF_VER_2_0,
CAM_VFE_RDI_VER_1_0,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
index cea1137..4a328ee 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
@@ -1,7 +1,10 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
index 50952f8..c6c3272 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
@@ -10,30 +10,47 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include "cam_vfe_bus.h"
#include "cam_vfe_bus_ver1.h"
#include "cam_vfe_bus_ver2.h"
+#include "cam_debug_util.h"
int cam_vfe_bus_init(uint32_t bus_version,
- void __iomem *mem_base,
+ struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
void *bus_hw_info,
void *vfe_irq_controller,
- struct cam_vfe_bus **vfe_bus)
+ struct cam_vfe_bus **vfe_bus)
{
int rc = -ENODEV;
switch (bus_version) {
case CAM_VFE_BUS_VER_2_0:
- rc = cam_vfe_bus_ver2_init(mem_base, hw_intf, bus_hw_info,
+ rc = cam_vfe_bus_ver2_init(soc_info, hw_intf, bus_hw_info,
vfe_irq_controller, vfe_bus);
break;
default:
- pr_err("Unsupported Bus Version %x\n", bus_version);
+ CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
break;
}
return rc;
}
+
+int cam_vfe_bus_deinit(uint32_t bus_version,
+ struct cam_vfe_bus **vfe_bus)
+{
+ int rc = -ENODEV;
+
+ switch (bus_version) {
+ case CAM_VFE_BUS_VER_2_0:
+ rc = cam_vfe_bus_ver2_deinit(vfe_bus);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
+ break;
+ }
+
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index c4fae99..f37ec38 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -10,44 +10,42 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include "cam_io_util.h"
#include "cam_cdm_util.h"
#include "cam_hw_intf.h"
+#include "cam_ife_hw_mgr.h"
#include "cam_vfe_hw_intf.h"
#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
#include "cam_vfe_bus.h"
#include "cam_vfe_bus_ver2.h"
#include "cam_vfe_core.h"
+#include "cam_debug_util.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+static const char drv_name[] = "vfe_bus";
-#define FRAME_BASED_EN 0
+#define CAM_VFE_BUS_IRQ_REG0 0
+#define CAM_VFE_BUS_IRQ_REG1 1
+#define CAM_VFE_BUS_IRQ_REG2 2
+#define CAM_VFE_BUS_IRQ_MAX 3
+
+#define CAM_VFE_BUS_VER2_PAYLOAD_MAX 256
+
+#define CAM_VFE_RDI_BUS_DEFAULT_WIDTH 0xFF01
+#define CAM_VFE_RDI_BUS_DEFAULT_STRIDE 0xFF01
#define MAX_BUF_UPDATE_REG_NUM \
(sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client)/4)
#define MAX_REG_VAL_PAIR_SIZE \
- (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
+ (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val) \
- do { \
- buf_array[index++] = offset; \
- buf_array[index++] = val; \
- } while (0)
-
-static uint32_t irq_reg_offset[CAM_IFE_BUS_IRQ_REGISTERS_MAX] = {
- 0x0000205C,
- 0x00002060,
- 0x00002064,
- 0x0000206C,
- 0x00002070,
- 0x00002074,
- 0x00002078,
-};
+ do { \
+ buf_array[index++] = offset; \
+ buf_array[index++] = val; \
+ } while (0)
enum cam_vfe_bus_packer_format {
PACKER_FMT_PLAIN_128 = 0x0,
@@ -70,23 +68,29 @@
};
struct cam_vfe_bus_ver2_common_data {
+ uint32_t core_index;
void __iomem *mem_base;
struct cam_hw_intf *hw_intf;
void *bus_irq_controller;
void *vfe_irq_controller;
struct cam_vfe_bus_ver2_reg_offset_common *common_reg;
uint32_t io_buf_update[
- MAX_REG_VAL_PAIR_SIZE];
+ MAX_REG_VAL_PAIR_SIZE];
+
+ struct cam_vfe_bus_irq_evt_payload evt_payload[
+ CAM_VFE_BUS_VER2_PAYLOAD_MAX];
+ struct list_head free_payload_list;
};
struct cam_vfe_bus_ver2_wm_resource_data {
uint32_t index;
struct cam_vfe_bus_ver2_common_data *common_data;
struct cam_vfe_bus_ver2_reg_offset_bus_client *hw_regs;
+ void *ctx;
uint32_t irq_enabled;
-
uint32_t init_cfg_done;
+
uint32_t offset;
uint32_t width;
uint32_t height;
@@ -127,6 +131,8 @@
uint32_t dual_slave_core;
uint32_t intra_client_mask;
uint32_t composite_mask;
+
+ void *ctx;
};
struct cam_vfe_bus_ver2_vfe_out_data {
@@ -147,7 +153,6 @@
struct cam_cdm_utils_ops *cdm_util_ops;
};
-
struct cam_vfe_bus_ver2_priv {
struct cam_vfe_bus_ver2_common_data common_data;
@@ -159,12 +164,59 @@
struct list_head free_dual_comp_grp;
struct list_head used_comp_grp;
- struct cam_vfe_bus_irq_evt_payload evt_payload[128];
- struct list_head free_payload_list;
+ uint32_t irq_handle;
};
+static int cam_vfe_bus_get_evt_payload(
+ struct cam_vfe_bus_ver2_common_data *common_data,
+ struct cam_vfe_bus_irq_evt_payload **evt_payload)
+{
+ if (list_empty(&common_data->free_payload_list)) {
+ *evt_payload = NULL;
+ CAM_ERR(CAM_ISP, "No free payload");
+ return -ENODEV;
+ }
+
+ *evt_payload = list_first_entry(&common_data->free_payload_list,
+ struct cam_vfe_bus_irq_evt_payload, list);
+ list_del_init(&(*evt_payload)->list);
+ return 0;
+}
+
static int cam_vfe_bus_put_evt_payload(void *core_info,
- struct cam_vfe_bus_irq_evt_payload **evt_payload);
+ struct cam_vfe_bus_irq_evt_payload **evt_payload)
+{
+ struct cam_vfe_bus_ver2_common_data *common_data = NULL;
+ uint32_t *ife_irq_regs = NULL;
+ uint32_t status_reg0, status_reg1, status_reg2;
+
+ if (!core_info) {
+ CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+ return -EINVAL;
+ }
+ if (*evt_payload == NULL) {
+ CAM_ERR(CAM_ISP, "No payload to put");
+ return -EINVAL;
+ }
+
+ ife_irq_regs = (*evt_payload)->irq_reg_val;
+ status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
+ status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
+ status_reg2 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2];
+
+ if (status_reg0 || status_reg1 || status_reg2) {
+ CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x status2 0x%x",
+ status_reg0, status_reg1, status_reg2);
+ return 0;
+ }
+
+ common_data = core_info;
+ list_add_tail(&(*evt_payload)->list,
+ &common_data->free_payload_list);
+ *evt_payload = NULL;
+
+ return 0;
+}
static int cam_vfe_bus_ver2_get_intra_client_mask(
enum cam_vfe_bus_ver2_vfe_core_id dual_slave_core,
@@ -176,7 +228,8 @@
*intra_client_mask = 0;
if (dual_slave_core == current_core) {
- pr_err("Invalid params. Same core as Master and Slave\n");
+ CAM_ERR(CAM_ISP,
+ "Invalid params. Same core as Master and Slave");
return -EINVAL;
}
@@ -190,7 +243,7 @@
*intra_client_mask = 0x2;
break;
default:
- pr_err("Invalid value for slave core %u\n",
+ CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
dual_slave_core);
rc = -EINVAL;
break;
@@ -205,7 +258,7 @@
*intra_client_mask = 0x2;
break;
default:
- pr_err("Invalid value for slave core %u\n",
+ CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
dual_slave_core);
rc = -EINVAL;
break;
@@ -220,14 +273,15 @@
*intra_client_mask = 0x2;
break;
default:
- pr_err("Invalid value for slave core %u\n",
+ CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
dual_slave_core);
rc = -EINVAL;
break;
}
break;
default:
- pr_err("Invalid value for master core %u\n", current_core);
+ CAM_ERR(CAM_ISP,
+ "Invalid value for master core %u", current_core);
rc = -EINVAL;
break;
}
@@ -295,6 +349,19 @@
case CAM_FORMAT_MIPI_RAW_14:
case CAM_FORMAT_MIPI_RAW_16:
case CAM_FORMAT_MIPI_RAW_20:
+ case CAM_FORMAT_DPCM_10_6_10:
+ case CAM_FORMAT_DPCM_10_8_10:
+ case CAM_FORMAT_DPCM_12_6_12:
+ case CAM_FORMAT_DPCM_12_8_12:
+ case CAM_FORMAT_DPCM_14_8_14:
+ case CAM_FORMAT_DPCM_14_10_14:
+ case CAM_FORMAT_PLAIN8:
+ case CAM_FORMAT_PLAIN16_8:
+ case CAM_FORMAT_PLAIN16_10:
+ case CAM_FORMAT_PLAIN16_12:
+ case CAM_FORMAT_PLAIN16_14:
+ case CAM_FORMAT_PLAIN16_16:
+ case CAM_FORMAT_PLAIN32_20:
case CAM_FORMAT_PLAIN128:
return 1;
default:
@@ -371,7 +438,6 @@
case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
switch (format) {
case CAM_FORMAT_PLAIN64:
- case CAM_FORMAT_PLAIN128:
return 1;
default:
break;
@@ -390,7 +456,8 @@
break;
}
- pr_err("Unsupported format %u for resource_type %u", format, res_type);
+ CAM_ERR(CAM_ISP, "Unsupported format %u for resource_type %u",
+ format, res_type);
return -EINVAL;
}
@@ -583,7 +650,31 @@
switch (out_fmt) {
case CAM_FORMAT_NV21:
case CAM_FORMAT_NV12:
- return PACKER_FMT_PLAIN_8;
+ return PACKER_FMT_PLAIN_8_LSB_MSB_10;
+ case CAM_FORMAT_PLAIN64:
+ return PACKER_FMT_PLAIN_64;
+ case CAM_FORMAT_MIPI_RAW_6:
+ case CAM_FORMAT_MIPI_RAW_8:
+ case CAM_FORMAT_MIPI_RAW_10:
+ case CAM_FORMAT_MIPI_RAW_12:
+ case CAM_FORMAT_MIPI_RAW_14:
+ case CAM_FORMAT_MIPI_RAW_16:
+ case CAM_FORMAT_MIPI_RAW_20:
+ case CAM_FORMAT_QTI_RAW_8:
+ case CAM_FORMAT_QTI_RAW_10:
+ case CAM_FORMAT_QTI_RAW_12:
+ case CAM_FORMAT_QTI_RAW_14:
+ case CAM_FORMAT_PLAIN128:
+ case CAM_FORMAT_PLAIN8:
+ case CAM_FORMAT_PLAIN16_8:
+ case CAM_FORMAT_PLAIN16_10:
+ case CAM_FORMAT_PLAIN16_12:
+ case CAM_FORMAT_PLAIN16_14:
+ case CAM_FORMAT_PLAIN16_16:
+ case CAM_FORMAT_PLAIN32_20:
+ case CAM_FORMAT_PD8:
+ case CAM_FORMAT_PD10:
+ return PACKER_FMT_PLAIN_128;
default:
return PACKER_FMT_MAX;
}
@@ -592,6 +683,8 @@
static int cam_vfe_bus_acquire_wm(
struct cam_vfe_bus_ver2_priv *ver2_bus_priv,
struct cam_isp_out_port_info *out_port_info,
+ void *tasklet,
+ void *ctx,
enum cam_vfe_bus_ver2_vfe_out_type vfe_out_res_id,
enum cam_vfe_bus_plane_type plane,
enum cam_isp_hw_split_id split_id,
@@ -609,16 +702,18 @@
/* No need to allocate for BUS VER2. VFE OUT to WM is fixed. */
wm_idx = cam_vfe_bus_get_wm_idx(vfe_out_res_id, plane);
if (wm_idx < 0 || wm_idx >= CAM_VFE_BUS_VER2_MAX_CLIENTS) {
- pr_err("Unsupported VFE out %d plane %d\n",
+ CAM_ERR(CAM_ISP, "Unsupported VFE out %d plane %d",
vfe_out_res_id, plane);
return -EINVAL;
}
wm_res_local = &ver2_bus_priv->bus_client[wm_idx];
+ wm_res_local->tasklet_info = tasklet;
wm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
rsrc_data = wm_res_local->res_priv;
rsrc_data->irq_enabled = subscribe_irq;
+ rsrc_data->ctx = ctx;
rsrc_data->format = out_port_info->format;
rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format);
@@ -626,8 +721,9 @@
rsrc_data->height = out_port_info->height;
if (rsrc_data->index < 3) {
- rsrc_data->width = rsrc_data->width * 5/4 * rsrc_data->height;
- rsrc_data->height = 1;
+ rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH;
+ rsrc_data->height = 0;
+ rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
rsrc_data->pack_fmt = 0x0;
rsrc_data->en_cfg = 0x3;
} else if (rsrc_data->index < 5 ||
@@ -661,21 +757,18 @@
}
break;
default:
- pr_err("Invalid plane type %d\n", plane);
+ CAM_ERR(CAM_ISP, "Invalid plane type %d", plane);
return -EINVAL;
}
- rsrc_data->pack_fmt = 0xE;
rsrc_data->en_cfg = 0x1;
} else if (rsrc_data->index >= 11) {
rsrc_data->width = 0;
rsrc_data->height = 0;
- rsrc_data->pack_fmt = 0x0;
rsrc_data->stride = 1;
rsrc_data->en_cfg = 0x3;
} else {
rsrc_data->width = rsrc_data->width * 4;
rsrc_data->height = rsrc_data->height / 2;
- rsrc_data->pack_fmt = 0x0;
rsrc_data->en_cfg = 0x1;
}
@@ -718,6 +811,8 @@
rsrc_data->ubwc_meta_offset = 0;
rsrc_data->init_cfg_done = 0;
rsrc_data->en_cfg = 0;
+
+ wm_res->tasklet_info = NULL;
wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
return 0;
@@ -730,10 +825,10 @@
wm_res->res_priv;
struct cam_vfe_bus_ver2_common_data *common_data =
rsrc_data->common_data;
+ uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_addr);
cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_cfg);
- cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->frame_inc);
cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
cam_io_w_mb(rsrc_data->width,
@@ -743,24 +838,48 @@
cam_io_w(rsrc_data->pack_fmt,
common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
- cam_io_w(0xFFFFFFFF, common_data->mem_base +
- rsrc_data->hw_regs->irq_subsample_pattern);
- cam_io_w(0x0, common_data->mem_base +
- rsrc_data->hw_regs->irq_subsample_period);
+ /* Configure stride for RDIs */
+ if (rsrc_data->index < 3)
+ cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
+ rsrc_data->hw_regs->stride));
- cam_io_w(0xFFFFFFFF,
- common_data->mem_base + rsrc_data->hw_regs->framedrop_pattern);
- cam_io_w(0x0,
- common_data->mem_base + rsrc_data->hw_regs->framedrop_period);
+ /* Subscribe IRQ */
+ if (rsrc_data->irq_enabled) {
+ CAM_DBG(CAM_ISP, "Subscribe WM%d IRQ", rsrc_data->index);
+ bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG1] =
+ (1 << rsrc_data->index);
+ wm_res->irq_handle = cam_irq_controller_subscribe_irq(
+ common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+ bus_irq_reg_mask, wm_res,
+ wm_res->top_half_handler,
+ cam_ife_mgr_do_tasklet_buf_done,
+ wm_res->tasklet_info, cam_tasklet_enqueue_cmd);
+ if (wm_res->irq_handle < 0) {
+ CAM_ERR(CAM_ISP, "Subscribe IRQ failed for WM %d",
+ rsrc_data->index);
+ return -EFAULT;
+ }
+ }
- CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
+ /* enable ubwc if needed*/
+ if (rsrc_data->en_ubwc) {
+ cam_io_w_mb(0x1, common_data->mem_base +
+ rsrc_data->hw_regs->ubwc_regs->mode_cfg);
+ }
+
+ /* Enable WM */
+ cam_io_w_mb(rsrc_data->en_cfg, common_data->mem_base +
+ rsrc_data->hw_regs->cfg);
+
+ CAM_DBG(CAM_ISP, "WM res %d width = %d, height = %d", rsrc_data->index,
rsrc_data->width, rsrc_data->height);
- CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
+ CAM_DBG(CAM_ISP, "WM res %d pk_fmt = %d", rsrc_data->index,
rsrc_data->pack_fmt & PACKER_FMT_MAX);
- CDBG("WM res %d stride = %d, burst len = %d\n",
+ CAM_DBG(CAM_ISP, "WM res %d stride = %d, burst len = %d",
rsrc_data->index, rsrc_data->stride, 0xf);
- CDBG("enable WM res %d offset 0x%x val 0x%x\n", rsrc_data->index,
- (uint32_t) rsrc_data->hw_regs->cfg, rsrc_data->en_cfg);
+ CAM_DBG(CAM_ISP, "enable WM res %d offset 0x%x val 0x%x",
+ rsrc_data->index, (uint32_t) rsrc_data->hw_regs->cfg,
+ rsrc_data->en_cfg);
wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
@@ -779,14 +898,12 @@
cam_io_w_mb(0x0,
common_data->mem_base + rsrc_data->hw_regs->cfg);
- CDBG("irq_enabled %d", rsrc_data->irq_enabled);
+ CAM_DBG(CAM_ISP, "irq_enabled %d", rsrc_data->irq_enabled);
/* Unsubscribe IRQ */
- if (rsrc_data->irq_enabled) {
- /*
- * Currently all WM IRQ are unsubscribed in one place. Need to
- * make it dynamic.
- */
- }
+ if (rsrc_data->irq_enabled)
+ rc = cam_irq_controller_unsubscribe_irq(
+ common_data->bus_irq_controller,
+ wm_res->irq_handle);
/* Halt & Reset WM */
cam_io_w_mb(BIT(rsrc_data->index),
@@ -800,7 +917,43 @@
static int cam_vfe_bus_handle_wm_done_top_half(uint32_t evt_id,
struct cam_irq_th_payload *th_payload)
{
- return -EPERM;
+ int32_t rc;
+ int i;
+ struct cam_isp_resource_node *wm_res = NULL;
+ struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data = NULL;
+ struct cam_vfe_bus_irq_evt_payload *evt_payload;
+
+ wm_res = th_payload->handler_priv;
+ if (!wm_res) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! No resource\n");
+ return -ENODEV;
+ }
+
+ rsrc_data = wm_res->res_priv;
+
+ CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+ rc = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "No tasklet_cmd is free in queue\n");
+ return rc;
+ }
+
+ cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+ evt_payload->ctx = rsrc_data->ctx;
+ evt_payload->core_index = rsrc_data->common_data->core_index;
+ evt_payload->evt_id = evt_id;
+
+ for (i = 0; i < th_payload->num_registers; i++)
+ evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+ th_payload->evt_payload_priv = evt_payload;
+
+ CAM_DBG(CAM_ISP, "Exit");
+ return rc;
}
static int cam_vfe_bus_handle_wm_done_bottom_half(void *wm_node,
@@ -825,9 +978,10 @@
~BIT(rsrc_data->index);
rc = CAM_VFE_IRQ_STATUS_SUCCESS;
}
+ CAM_DBG(CAM_ISP, "status_reg %x rc %d", status_reg, rc);
if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
- cam_vfe_bus_put_evt_payload(evt_payload->core_info,
+ cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
&evt_payload);
return rc;
@@ -838,15 +992,13 @@
struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
struct cam_isp_resource_node *wm_res)
{
- int rc = 0;
struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data;
rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_wm_resource_data),
GFP_KERNEL);
if (!rsrc_data) {
- CDBG("Failed to alloc for wm res priv\n");
- rc = -ENOMEM;
- return rc;
+ CAM_DBG(CAM_ISP, "Failed to alloc for WM res priv");
+ return -ENOMEM;
}
wm_res->res_priv = rsrc_data;
@@ -863,7 +1015,32 @@
wm_res->bottom_half_handler = cam_vfe_bus_handle_wm_done_bottom_half;
wm_res->hw_intf = ver2_bus_priv->common_data.hw_intf;
- return rc;
+ return 0;
+}
+
+static int cam_vfe_bus_deinit_wm_resource(
+ struct cam_isp_resource_node *wm_res)
+{
+ struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data;
+
+ wm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+ INIT_LIST_HEAD(&wm_res->list);
+
+ wm_res->start = NULL;
+ wm_res->stop = NULL;
+ wm_res->top_half_handler = NULL;
+ wm_res->bottom_half_handler = NULL;
+ wm_res->hw_intf = NULL;
+
+ rsrc_data = wm_res->res_priv;
+ wm_res->res_priv = NULL;
+ if (!rsrc_data) {
+ CAM_ERR(CAM_ISP, "Error! WM res priv is NULL");
+ return -ENOMEM;
+ }
+ kfree(rsrc_data);
+
+ return 0;
}
static void cam_vfe_bus_add_wm_to_comp_grp(
@@ -901,6 +1078,8 @@
static int cam_vfe_bus_acquire_comp_grp(
struct cam_vfe_bus_ver2_priv *ver2_bus_priv,
struct cam_isp_out_port_info *out_port_info,
+ void *tasklet,
+ void *ctx,
uint32_t unique_id,
uint32_t is_dual,
uint32_t is_master,
@@ -919,7 +1098,7 @@
/* First find a free group */
if (is_dual) {
if (list_empty(&ver2_bus_priv->free_dual_comp_grp)) {
- pr_err("No Free Composite Group\n");
+ CAM_ERR(CAM_ISP, "No Free Composite Group");
return -ENODEV;
}
comp_grp_local = list_first_entry(
@@ -932,7 +1111,7 @@
&rsrc_data->intra_client_mask);
} else {
if (list_empty(&ver2_bus_priv->free_comp_grp)) {
- pr_err("No Free Composite Group\n");
+ CAM_ERR(CAM_ISP, "No Free Composite Group");
return -ENODEV;
}
comp_grp_local = list_first_entry(
@@ -942,6 +1121,7 @@
}
list_del(&comp_grp_local->list);
+ comp_grp_local->tasklet_info = tasklet;
comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
rsrc_data->is_master = is_master;
@@ -957,13 +1137,14 @@
/* Do not support runtime change in composite mask */
if (comp_grp_local->res_state ==
CAM_ISP_RESOURCE_STATE_STREAMING) {
- pr_err("Invalid State %d Comp Grp %u\n",
+ CAM_ERR(CAM_ISP, "Invalid State %d Comp Grp %u",
comp_grp_local->res_state,
rsrc_data->comp_grp_type);
return -EBUSY;
}
}
+ rsrc_data->ctx = ctx;
*comp_grp = comp_grp_local;
return rc;
@@ -978,7 +1159,7 @@
int match_found = 0;
if (!in_comp_grp) {
- pr_err("Invalid Params Comp Grp %pK\n", in_rsrc_data);
+ CAM_ERR(CAM_ISP, "Invalid Params Comp Grp %pK", in_rsrc_data);
return -EINVAL;
}
@@ -997,7 +1178,7 @@
}
if (!match_found) {
- pr_err("Could not find matching Comp Grp type %u\n",
+ CAM_ERR(CAM_ISP, "Could not find matching Comp Grp type %u",
in_rsrc_data->comp_grp_type);
return -ENODEV;
}
@@ -1017,6 +1198,7 @@
in_rsrc_data->composite_mask = 0;
in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
+ comp_grp->tasklet_info = NULL;
comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
return 0;
@@ -1029,17 +1211,14 @@
comp_grp->res_priv;
struct cam_vfe_bus_ver2_common_data *common_data =
rsrc_data->common_data;
-
- /*
- * Individual Comp_Grp Subscribe IRQ can be done here once
- * dynamic IRQ enable support is added.
- */
+ uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
rsrc_data->hw_regs->comp_mask);
- CDBG("composite_mask is 0x%x\n", rsrc_data->composite_mask);
- CDBG("composite_mask addr 0x%x\n", rsrc_data->hw_regs->comp_mask);
+ CAM_DBG(CAM_ISP, "composite_mask is 0x%x", rsrc_data->composite_mask);
+ CAM_DBG(CAM_ISP, "composite_mask addr 0x%x",
+ rsrc_data->hw_regs->comp_mask);
if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 &&
@@ -1055,9 +1234,30 @@
cam_io_w_mb(intra_client_en, common_data->mem_base +
common_data->common_reg->dual_master_comp_cfg);
+
+ bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG2] = (1 << dual_comp_grp);
+ } else {
+ /* IRQ bits for COMP GRP start at 5. So add 5 to the shift */
+ bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG0] =
+ (1 << (rsrc_data->comp_grp_type + 5));
+ }
+
+ /* Subscribe IRQ */
+ CAM_DBG(CAM_ISP, "Subscribe COMP_GRP%d IRQ", rsrc_data->comp_grp_type);
+ comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
+ common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+ bus_irq_reg_mask, comp_grp,
+ comp_grp->top_half_handler,
+ cam_ife_mgr_do_tasklet_buf_done,
+ comp_grp->tasklet_info, cam_tasklet_enqueue_cmd);
+ if (comp_grp->irq_handle < 0) {
+ CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
+ rsrc_data->comp_grp_type);
+ return -EFAULT;
}
comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
return rc;
}
@@ -1070,6 +1270,9 @@
rsrc_data->common_data;
/* Unsubscribe IRQ */
+ rc = cam_irq_controller_unsubscribe_irq(
+ common_data->bus_irq_controller,
+ comp_grp->irq_handle);
cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
rsrc_data->hw_regs->comp_mask);
@@ -1097,7 +1300,43 @@
static int cam_vfe_bus_handle_comp_done_top_half(uint32_t evt_id,
struct cam_irq_th_payload *th_payload)
{
- return -EPERM;
+ int32_t rc;
+ int i;
+ struct cam_isp_resource_node *comp_grp = NULL;
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL;
+ struct cam_vfe_bus_irq_evt_payload *evt_payload;
+
+ comp_grp = th_payload->handler_priv;
+ if (!comp_grp) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! No resource\n");
+ return -ENODEV;
+ }
+
+ rsrc_data = comp_grp->res_priv;
+
+ CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+ rc = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "No tasklet_cmd is free in queue\n");
+ return rc;
+ }
+
+ cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+ evt_payload->ctx = rsrc_data->ctx;
+ evt_payload->core_index = rsrc_data->common_data->core_index;
+ evt_payload->evt_id = evt_id;
+
+ for (i = 0; i < th_payload->num_registers; i++)
+ evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+ th_payload->evt_payload_priv = evt_payload;
+
+ CAM_DBG(CAM_ISP, "Exit");
+ return rc;
}
static int cam_vfe_bus_handle_comp_done_bottom_half(
@@ -1113,12 +1352,13 @@
uint32_t comp_err_reg;
uint32_t comp_grp_id;
+ CAM_DBG(CAM_ISP, "comp grp type %d", rsrc_data->comp_grp_type);
+
if (!evt_payload)
return rc;
cam_ife_irq_regs = evt_payload->irq_reg_val;
- CDBG("comp grp type %d\n", rsrc_data->comp_grp_type);
switch (rsrc_data->comp_grp_type) {
case CAM_VFE_BUS_VER2_COMP_GRP_0:
case CAM_VFE_BUS_VER2_COMP_GRP_1:
@@ -1155,8 +1395,8 @@
rc = CAM_VFE_IRQ_STATUS_SUCCESS;
}
- CDBG("status reg = 0x%x, bit index = %d\n",
- status_reg, (comp_grp_id + 5));
+ CAM_DBG(CAM_ISP, "status reg = 0x%x, bit index = %d rc %d",
+ status_reg, (comp_grp_id + 5), rc);
break;
case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0:
@@ -1197,11 +1437,13 @@
break;
default:
rc = CAM_VFE_IRQ_STATUS_ERR;
+ CAM_ERR(CAM_ISP, "Error! Invalid comp_grp_type %u",
+ rsrc_data->comp_grp_type);
break;
}
if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
- cam_vfe_bus_put_evt_payload(evt_payload->core_info,
+ cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
&evt_payload);
return rc;
@@ -1212,13 +1454,12 @@
struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
struct cam_isp_resource_node *comp_grp)
{
- struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data =
- comp_grp->res_priv;
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL;
rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_comp_grp_data),
GFP_KERNEL);
if (!rsrc_data) {
- CDBG("Failed to alloc for comp_grp_priv\n");
+ CAM_DBG(CAM_ISP, "Failed to alloc for comp_grp_priv");
return -ENOMEM;
}
comp_grp->res_priv = rsrc_data;
@@ -1231,7 +1472,6 @@
rsrc_data->hw_regs = &ver2_hw_info->comp_grp_reg[index];
rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
-
if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5)
list_add_tail(&comp_grp->list,
@@ -1250,7 +1490,34 @@
return 0;
}
-static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args)
+static int cam_vfe_bus_deinit_comp_grp(
+ struct cam_isp_resource_node *comp_grp)
+{
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data =
+ comp_grp->res_priv;
+
+ comp_grp->start = NULL;
+ comp_grp->stop = NULL;
+ comp_grp->top_half_handler = NULL;
+ comp_grp->bottom_half_handler = NULL;
+ comp_grp->hw_intf = NULL;
+
+ list_del_init(&comp_grp->list);
+ comp_grp->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+
+ comp_grp->res_priv = NULL;
+
+ if (!rsrc_data) {
+ CAM_ERR(CAM_ISP, "Error! comp_grp_priv is NULL");
+ return -ENODEV;
+ }
+ kfree(rsrc_data);
+
+ return 0;
+}
+
+static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args,
+ uint32_t args_size)
{
int rc = -ENODEV;
int i;
@@ -1266,14 +1533,14 @@
struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
if (!bus_priv || !acquire_args) {
- pr_err("Invalid Param");
+ CAM_ERR(CAM_ISP, "Invalid Param");
return -EINVAL;
}
out_acquire_args = &acq_args->vfe_out;
format = out_acquire_args->out_port_info->format;
- CDBG("Acquiring resource type 0x%x\n",
+ CAM_DBG(CAM_ISP, "Acquiring resource type 0x%x",
out_acquire_args->out_port_info->res_type);
vfe_out_res_id = cam_vfe_bus_get_out_res_id(
@@ -1287,7 +1554,7 @@
rsrc_node = &ver2_bus_priv->vfe_out[vfe_out_res_id];
if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- pr_err("Resource not available: Res_id %d state:%d\n",
+ CAM_ERR(CAM_ISP, "Resource not available: Res_id %d state:%d",
vfe_out_res_id, rsrc_node->res_state);
return -EBUSY;
}
@@ -1306,30 +1573,45 @@
CAM_ISP_RES_COMP_GROUP_ID_MAX)) {
rc = cam_vfe_bus_acquire_comp_grp(ver2_bus_priv,
out_acquire_args->out_port_info,
+ acq_args->tasklet,
+ out_acquire_args->ctx,
out_acquire_args->unique_id,
out_acquire_args->is_dual,
out_acquire_args->is_master,
out_acquire_args->dual_slave_core,
&rsrc_data->comp_grp);
- if (rc < 0)
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "VFE%d Comp_Grp acquire fail for Out %d rc=%d",
+ rsrc_data->common_data->core_index,
+ vfe_out_res_id, rc);
return rc;
+ }
subscribe_irq = 0;
- } else
+ } else {
subscribe_irq = 1;
+ }
/* Reserve WM */
for (i = 0; i < num_wm; i++) {
rc = cam_vfe_bus_acquire_wm(ver2_bus_priv,
out_acquire_args->out_port_info,
+ acq_args->tasklet,
+ out_acquire_args->ctx,
vfe_out_res_id,
i,
out_acquire_args->split_id,
subscribe_irq,
&rsrc_data->wm_res[i],
&client_done_mask);
- if (rc < 0)
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "VFE%d WM acquire failed for Out %d rc=%d",
+ rsrc_data->common_data->core_index,
+ vfe_out_res_id, rc);
goto release_wm;
+ }
if (rsrc_data->comp_grp)
cam_vfe_bus_add_wm_to_comp_grp(rsrc_data->comp_grp,
@@ -1339,7 +1621,7 @@
rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
out_acquire_args->rsrc_node = rsrc_node;
- CDBG("Acquire successful\n");
+ CAM_DBG(CAM_ISP, "Acquire successful");
return rc;
release_wm:
@@ -1352,14 +1634,24 @@
return rc;
}
-static int cam_vfe_bus_release_vfe_out(void *bus_priv,
- struct cam_isp_resource_node *vfe_out)
+static int cam_vfe_bus_release_vfe_out(void *bus_priv, void *release_args,
+ uint32_t args_size)
{
uint32_t i;
- struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+ struct cam_isp_resource_node *vfe_out = NULL;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+
+ if (!bus_priv || !release_args) {
+ CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK",
+ bus_priv, release_args);
+ return -EINVAL;
+ }
+
+ vfe_out = release_args;
+ rsrc_data = vfe_out->res_priv;
if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
- pr_err("Error! Invalid resource state:%d\n",
+ CAM_ERR(CAM_ISP, "Error! Invalid resource state:%d",
vfe_out->res_state);
}
@@ -1381,43 +1673,35 @@
return 0;
}
-static int cam_vfe_bus_start_vfe_out(struct cam_isp_resource_node *vfe_out)
+static int cam_vfe_bus_start_vfe_out(
+ struct cam_isp_resource_node *vfe_out)
{
int rc = 0, i;
- struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
- struct cam_vfe_bus_ver2_common_data *common_data =
- rsrc_data->common_data;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+ struct cam_vfe_bus_ver2_common_data *common_data = NULL;
- CDBG("Start resource index %d\n", rsrc_data->out_type);
+ if (!vfe_out) {
+ CAM_ERR(CAM_ISP, "Invalid input");
+ return -EINVAL;
+ }
+
+ rsrc_data = vfe_out->res_priv;
+ common_data = rsrc_data->common_data;
+
+ CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type);
if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
- pr_err("Error! Invalid resource state:%d\n",
+ CAM_ERR(CAM_ISP, "Error! Invalid resource state:%d",
vfe_out->res_state);
return -EACCES;
}
- /* Enable IRQ Mask */
- cam_io_w_mb(0x00001FE0, common_data->mem_base + 0x2044);
- cam_io_w_mb(0x000FFFE7, common_data->mem_base + 0x2048);
- cam_io_w_mb(0x000000FF, common_data->mem_base + 0x204c);
-
for (i = 0; i < rsrc_data->num_wm; i++)
rc = cam_vfe_bus_start_wm(rsrc_data->wm_res[i]);
if (rsrc_data->comp_grp)
rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp);
- /* VFE_MODULE_BUS_CGC_OVERRIDE */
- cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x0000003C);
- /* VFE_MODULE_COLOR_CGC_OVERRIDE */
- cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x00000034);
- /* VFE_MODULE_ZOOM_CGC_OVERRIDE */
- cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x38);
- /* VFE_MODULE_LENS_CGC_OVERRIDE */
- cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x0000002C);
- /* VFE_MODULE_STATS_CGC_OVERRIDE */
- cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x00000030);
-
/* BUS_WR_INPUT_IF_ADDR_SYNC_CFG */
cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000207C);
/* BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
@@ -1440,13 +1724,22 @@
/* BUS_WR_TEST_BUS_CTRL */
cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000211C);
+ vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
return rc;
}
-static int cam_vfe_bus_stop_vfe_out(struct cam_isp_resource_node *vfe_out)
+static int cam_vfe_bus_stop_vfe_out(
+ struct cam_isp_resource_node *vfe_out)
{
int rc = 0, i;
- struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+
+ if (!vfe_out) {
+ CAM_ERR(CAM_ISP, "Invalid input");
+ return -EINVAL;
+ }
+
+ rsrc_data = vfe_out->res_priv;
if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
@@ -1459,7 +1752,6 @@
for (i = 0; i < rsrc_data->num_wm; i++)
rc = cam_vfe_bus_stop_wm(rsrc_data->wm_res[i]);
- vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
return rc;
@@ -1506,7 +1798,7 @@
rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_vfe_out_data),
GFP_KERNEL);
if (!rsrc_data) {
- CDBG("Error! Failed to alloc for vfe out priv\n");
+ CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe out priv");
rc = -ENOMEM;
return rc;
}
@@ -1533,122 +1825,39 @@
return 0;
}
-static int cam_vfe_bus_get_evt_payload(
- struct cam_vfe_bus_ver2_priv *bus_priv,
- struct cam_vfe_bus_irq_evt_payload **evt_payload)
+static int cam_vfe_bus_deinit_vfe_out_resource(
+ struct cam_isp_resource_node *vfe_out)
{
- if (list_empty(&bus_priv->free_payload_list)) {
- *evt_payload = NULL;
- pr_err("No free payload\n");
- return -ENODEV;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+
+ vfe_out->start = NULL;
+ vfe_out->stop = NULL;
+ vfe_out->top_half_handler = NULL;
+ vfe_out->bottom_half_handler = NULL;
+ vfe_out->hw_intf = NULL;
+
+ vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+ INIT_LIST_HEAD(&vfe_out->list);
+ vfe_out->res_priv = NULL;
+
+ if (!rsrc_data) {
+ CAM_ERR(CAM_ISP, "Error! vfe out priv is NULL");
+ return -ENOMEM;
}
+ kfree(rsrc_data);
- *evt_payload = list_first_entry(&bus_priv->free_payload_list,
- struct cam_vfe_bus_irq_evt_payload, list);
- list_del_init(&(*evt_payload)->list);
- return 0;
-}
-
-static int cam_vfe_bus_put_evt_payload(void *core_info,
- struct cam_vfe_bus_irq_evt_payload **evt_payload)
-{
- struct cam_vfe_bus_ver2_priv *bus_priv = NULL;
- uint32_t *cam_ife_irq_regs = (*evt_payload)->irq_reg_val;
- uint32_t status_reg0, status_reg1;
-
- status_reg0 = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
- status_reg1 = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
-
- if (status_reg0 || status_reg1) {
- CDBG("status0 0x%x status1 0x%x\n", status_reg0, status_reg1);
- return 0;
- }
-
- if (!core_info) {
- pr_err("Invalid param core_info NULL");
- return -EINVAL;
- }
- if (*evt_payload == NULL) {
- pr_err("No payload to put\n");
- return -EINVAL;
- }
- bus_priv = (*evt_payload)->bus_priv;
- list_add_tail(&(*evt_payload)->list, &bus_priv->free_payload_list);
- *evt_payload = NULL;
return 0;
}
static int cam_vfe_bus_ver2_handle_irq(uint32_t evt_id,
struct cam_irq_th_payload *th_payload)
{
- int32_t rc;
- int i;
- struct cam_vfe_irq_handler_priv *handler_priv;
- struct cam_vfe_hw_core_info *core_info;
- struct cam_vfe_bus_irq_evt_payload *evt_payload;
- struct cam_vfe_bus *bus_info;
struct cam_vfe_bus_ver2_priv *bus_priv;
- struct cam_irq_controller_reg_info *reg_info;
- uint32_t irq_mask;
- int found = 0;
- handler_priv = th_payload->handler_priv;
- core_info = handler_priv->core_info;
- bus_info = core_info->vfe_bus;
- bus_priv = bus_info->bus_priv;
- reg_info = &bus_priv->common_data.common_reg->irq_reg_info;
-
- /*
- * add reset ack handling here once supported.
- * Just clear all the bus irq status registers and ignore the reset.
- */
-
- CDBG("Enter\n");
- rc = cam_vfe_bus_get_evt_payload(bus_priv, &evt_payload);
- if (rc) {
- pr_err("No tasklet_cmd is free in queue\n");
- return rc;
- }
-
- cam_isp_hw_get_timestamp(&evt_payload->ts);
-
- evt_payload->core_index = handler_priv->core_index;
- evt_payload->core_info = handler_priv->core_info;
- evt_payload->bus_priv = bus_priv;
- CDBG("core_idx %d, core_info %llx\n", handler_priv->core_index,
- (uint64_t)handler_priv->core_info);
-
- for (i = 0; i < CAM_IFE_BUS_IRQ_REGISTERS_MAX; i++) {
- irq_mask = cam_io_r(handler_priv->mem_base +
- irq_reg_offset[i] - (0xC * 2));
- evt_payload->irq_reg_val[i] = irq_mask &
- cam_io_r(handler_priv->mem_base + irq_reg_offset[i]);
- if (evt_payload->irq_reg_val[i])
- found = 1;
- CDBG("irq_status%d = 0x%x\n", i, evt_payload->irq_reg_val[i]);
- }
- for (i = 0; i <= CAM_IFE_IRQ_BUS_REG_STATUS2; i++) {
- cam_io_w(evt_payload->irq_reg_val[i], handler_priv->mem_base +
- reg_info->irq_reg_set[i].clear_reg_offset);
- CDBG("Clear irq_status%d = 0x%x offset 0x%x\n", i,
- evt_payload->irq_reg_val[i],
- reg_info->irq_reg_set[i].clear_reg_offset);
- }
- cam_io_w(reg_info->global_clear_bitmask, handler_priv->mem_base +
- reg_info->global_clear_offset);
- CDBG("Global clear bitmask = 0x%x offset 0x%x\n",
- reg_info->global_clear_bitmask,
- reg_info->global_clear_offset);
-
- if (found)
- th_payload->evt_payload_priv = evt_payload;
- else {
- cam_vfe_bus_put_evt_payload(evt_payload->core_info,
- &evt_payload);
- rc = -ENOMSG;
- }
-
- return rc;
+ bus_priv = th_payload->handler_priv;
+ CAM_DBG(CAM_ISP, "Enter");
+ return cam_irq_controller_handle_irq(evt_id,
+ bus_priv->common_data.bus_irq_controller);
}
static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
@@ -1675,12 +1884,13 @@
update_buf->cdm.res->res_priv;
if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
- pr_err("Failed! Invalid data\n");
+ CAM_ERR(CAM_ISP, "Failed! Invalid data");
return -EINVAL;
}
if (update_buf->num_buf != vfe_out_data->num_wm) {
- pr_err("Failed! Invalid number buffers:%d required:%d\n",
+ CAM_ERR(CAM_ISP,
+ "Failed! Invalid number buffers:%d required:%d",
update_buf->num_buf, vfe_out_data->num_wm);
return -EINVAL;
}
@@ -1690,7 +1900,8 @@
for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
- pr_err("reg_val_pair %d exceeds the array limit %lu\n",
+ CAM_ERR(CAM_ISP,
+ "reg_val_pair %d exceeds the array limit %lu",
j, MAX_REG_VAL_PAIR_SIZE);
return -ENOMEM;
}
@@ -1698,14 +1909,14 @@
wm_data = vfe_out_data->wm_res[i]->res_priv;
/* For initial configuration program all bus registers */
- if (wm_data->stride != io_cfg->planes[i].plane_stride ||
- !wm_data->init_cfg_done) {
+ if ((wm_data->stride != io_cfg->planes[i].plane_stride ||
+ !wm_data->init_cfg_done) && (wm_data->index >= 3)) {
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->stride,
io_cfg->planes[i].plane_stride);
wm_data->stride = io_cfg->planes[i].plane_stride;
}
- CDBG("image stride 0x%x\n", wm_data->stride);
+ CAM_DBG(CAM_ISP, "image stride 0x%x", wm_data->stride);
if (wm_data->framedrop_pattern != io_cfg->framedrop_pattern ||
!wm_data->init_cfg_done) {
@@ -1714,7 +1925,8 @@
io_cfg->framedrop_pattern);
wm_data->framedrop_pattern = io_cfg->framedrop_pattern;
}
- CDBG("framedrop pattern 0x%x\n", wm_data->framedrop_pattern);
+ CAM_DBG(CAM_ISP, "framedrop pattern 0x%x",
+ wm_data->framedrop_pattern);
if (wm_data->framedrop_period != io_cfg->framedrop_period ||
!wm_data->init_cfg_done) {
@@ -1723,7 +1935,8 @@
io_cfg->framedrop_period);
wm_data->framedrop_period = io_cfg->framedrop_period;
}
- CDBG("framedrop period 0x%x\n", wm_data->framedrop_period);
+ CAM_DBG(CAM_ISP, "framedrop period 0x%x",
+ wm_data->framedrop_period);
if (wm_data->irq_subsample_period != io_cfg->subsample_period
|| !wm_data->init_cfg_done) {
@@ -1733,7 +1946,7 @@
wm_data->irq_subsample_period =
io_cfg->subsample_period;
}
- CDBG("irq subsample period 0x%x\n",
+ CAM_DBG(CAM_ISP, "irq subsample period 0x%x",
wm_data->irq_subsample_period);
if (wm_data->irq_subsample_pattern != io_cfg->subsample_pattern
@@ -1744,13 +1957,13 @@
wm_data->irq_subsample_pattern =
io_cfg->subsample_pattern;
}
- CDBG("irq subsample pattern 0x%x\n",
+ CAM_DBG(CAM_ISP, "irq subsample pattern 0x%x",
wm_data->irq_subsample_pattern);
if (wm_data->en_ubwc) {
if (!wm_data->hw_regs->ubwc_regs) {
- pr_err("%s: No UBWC register to configure.\n",
- __func__);
+ CAM_ERR(CAM_ISP,
+ "No UBWC register to configure.");
return -EINVAL;
}
if (wm_data->packer_cfg !=
@@ -1762,7 +1975,8 @@
wm_data->packer_cfg =
io_cfg->planes[i].packer_config;
}
- CDBG("packer cfg 0x%x\n", wm_data->packer_cfg);
+ CAM_DBG(CAM_ISP, "packer cfg 0x%x",
+ wm_data->packer_cfg);
if (wm_data->tile_cfg != io_cfg->planes[i].tile_config
|| !wm_data->init_cfg_done) {
@@ -1772,7 +1986,7 @@
wm_data->tile_cfg =
io_cfg->planes[i].tile_config;
}
- CDBG("tile cfg 0x%x\n", wm_data->tile_cfg);
+ CAM_DBG(CAM_ISP, "tile cfg 0x%x", wm_data->tile_cfg);
if (wm_data->h_init != io_cfg->planes[i].h_init ||
!wm_data->init_cfg_done) {
@@ -1781,7 +1995,7 @@
io_cfg->planes[i].h_init);
wm_data->h_init = io_cfg->planes[i].h_init;
}
- CDBG("h_init 0x%x\n", wm_data->h_init);
+ CAM_DBG(CAM_ISP, "h_init 0x%x", wm_data->h_init);
if (wm_data->v_init != io_cfg->planes[i].v_init ||
!wm_data->init_cfg_done) {
@@ -1790,7 +2004,7 @@
io_cfg->planes[i].v_init);
wm_data->v_init = io_cfg->planes[i].v_init;
}
- CDBG("v_init 0x%x\n", wm_data->v_init);
+ CAM_DBG(CAM_ISP, "v_init 0x%x", wm_data->v_init);
if (wm_data->ubwc_meta_stride !=
io_cfg->planes[i].meta_stride ||
@@ -1802,7 +2016,8 @@
wm_data->ubwc_meta_stride =
io_cfg->planes[i].meta_stride;
}
- CDBG("meta stride 0x%x\n", wm_data->ubwc_meta_stride);
+ CAM_DBG(CAM_ISP, "meta stride 0x%x",
+ wm_data->ubwc_meta_stride);
if (wm_data->ubwc_mode_cfg !=
io_cfg->planes[i].mode_config ||
@@ -1813,7 +2028,8 @@
wm_data->ubwc_mode_cfg =
io_cfg->planes[i].mode_config;
}
- CDBG("ubwc mode cfg 0x%x\n", wm_data->ubwc_mode_cfg);
+ CAM_DBG(CAM_ISP, "ubwc mode cfg 0x%x",
+ wm_data->ubwc_mode_cfg);
if (wm_data->ubwc_meta_offset !=
io_cfg->planes[i].meta_offset ||
@@ -1825,14 +2041,14 @@
wm_data->ubwc_meta_offset =
io_cfg->planes[i].meta_offset;
}
- CDBG("ubwc meta offset 0x%x\n",
+ CAM_DBG(CAM_ISP, "ubwc meta offset 0x%x",
wm_data->ubwc_meta_offset);
/* UBWC meta address */
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->ubwc_regs->meta_addr,
update_buf->image_buf[i]);
- CDBG("ubwc meta addr 0x%llx\n",
+ CAM_DBG(CAM_ISP, "ubwc meta addr 0x%llx",
update_buf->image_buf[i]);
}
@@ -1847,7 +2063,7 @@
wm_data->hw_regs->image_addr,
update_buf->image_buf[i]);
- CDBG("image address 0x%x\n", reg_val_pair[j-1]);
+ CAM_DBG(CAM_ISP, "image address 0x%x", reg_val_pair[j-1]);
frame_inc = io_cfg->planes[i].plane_stride *
io_cfg->planes[i].slice_height;
@@ -1868,7 +2084,8 @@
/* cdm util returns dwords, need to convert to bytes */
if ((size * 4) > update_buf->cdm.size) {
- pr_err("Failed! Buf size:%d insufficient, expected size:%d\n",
+ CAM_ERR(CAM_ISP,
+ "Failed! Buf size:%d insufficient, expected size:%d",
update_buf->cdm.size, size);
return -ENOMEM;
}
@@ -1882,13 +2099,76 @@
return 0;
}
+static int cam_vfe_bus_start_hw(void *hw_priv,
+ void *start_hw_args, uint32_t arg_size)
+{
+ return cam_vfe_bus_start_vfe_out(hw_priv);
+}
+
+static int cam_vfe_bus_stop_hw(void *hw_priv,
+ void *stop_hw_args, uint32_t arg_size)
+{
+ return cam_vfe_bus_stop_vfe_out(hw_priv);
+}
+
+static int cam_vfe_bus_init_hw(void *hw_priv,
+ void *init_hw_args, uint32_t arg_size)
+{
+ struct cam_vfe_bus_ver2_priv *bus_priv = hw_priv;
+ uint32_t top_irq_reg_mask[2] = {0};
+
+ if (!bus_priv) {
+ CAM_ERR(CAM_ISP, "Error! Invalid args");
+ return -EINVAL;
+ }
+
+ top_irq_reg_mask[0] = (1 << 9);
+
+ bus_priv->irq_handle = cam_irq_controller_subscribe_irq(
+ bus_priv->common_data.vfe_irq_controller,
+ CAM_IRQ_PRIORITY_2,
+ top_irq_reg_mask,
+ bus_priv,
+ cam_vfe_bus_ver2_handle_irq,
+ NULL,
+ NULL,
+ NULL);
+
+ if (bus_priv->irq_handle <= 0) {
+ CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int cam_vfe_bus_deinit_hw(void *hw_priv,
+ void *deinit_hw_args, uint32_t arg_size)
+{
+ struct cam_vfe_bus_ver2_priv *bus_priv = hw_priv;
+ int rc;
+
+ if (!bus_priv || (bus_priv->irq_handle <= 0)) {
+ CAM_ERR(CAM_ISP, "Error! Invalid args");
+ return -EINVAL;
+ }
+
+ rc = cam_irq_controller_unsubscribe_irq(
+ bus_priv->common_data.vfe_irq_controller,
+ bus_priv->irq_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Failed to unsubscribe irq rc=%d", rc);
+
+ return rc;
+}
+
static int cam_vfe_bus_process_cmd(void *priv,
uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
{
int rc = -EINVAL;
if (!priv || !cmd_args) {
- pr_err_ratelimited("Error! Invalid input arguments\n");
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! Invalid input arguments\n");
return -EINVAL;
}
@@ -1897,7 +2177,7 @@
rc = cam_vfe_bus_update_buf(priv, cmd_args, arg_size);
break;
default:
- pr_err_ratelimited("Error! Invalid camif process command:%d\n",
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Inval camif process command:%d\n",
cmd_type);
break;
}
@@ -1906,7 +2186,7 @@
}
int cam_vfe_bus_ver2_init(
- void __iomem *mem_base,
+ struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
void *bus_hw_info,
void *vfe_irq_controller,
@@ -1917,29 +2197,48 @@
struct cam_vfe_bus *vfe_bus_local;
struct cam_vfe_bus_ver2_hw_info *ver2_hw_info = bus_hw_info;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
+
+ if (!soc_info || !hw_intf || !bus_hw_info || !vfe_irq_controller) {
+ CAM_ERR(CAM_ISP,
+ "Inval_prms soc_info:%pK hw_intf:%pK hw_info%pK",
+ soc_info, hw_intf, bus_hw_info);
+ CAM_ERR(CAM_ISP, "controller: %pK", vfe_irq_controller);
+ rc = -EINVAL;
+ goto end;
+ }
vfe_bus_local = kzalloc(sizeof(struct cam_vfe_bus), GFP_KERNEL);
if (!vfe_bus_local) {
- CDBG("Failed to alloc for vfe_bus\n");
+ CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus");
rc = -ENOMEM;
- goto err_alloc_bus;
+ goto end;
}
bus_priv = kzalloc(sizeof(struct cam_vfe_bus_ver2_priv),
GFP_KERNEL);
if (!bus_priv) {
- CDBG("Failed to alloc for vfe_bus_priv\n");
+ CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus_priv");
rc = -ENOMEM;
- goto err_alloc_priv;
+ goto free_bus_local;
}
vfe_bus_local->bus_priv = bus_priv;
- bus_priv->common_data.mem_base = mem_base;
+ bus_priv->common_data.core_index = soc_info->index;
+ bus_priv->common_data.mem_base =
+ CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX);
bus_priv->common_data.hw_intf = hw_intf;
bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
bus_priv->common_data.common_reg = &ver2_hw_info->common_reg;
+ rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
+ &ver2_hw_info->common_reg.irq_reg_info,
+ &bus_priv->common_data.bus_irq_controller);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
+ goto free_bus_priv;
+ }
+
INIT_LIST_HEAD(&bus_priv->free_comp_grp);
INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp);
INIT_LIST_HEAD(&bus_priv->used_comp_grp);
@@ -1948,8 +2247,8 @@
rc = cam_vfe_bus_init_wm_resource(i, bus_priv, bus_hw_info,
&bus_priv->bus_client[i]);
if (rc < 0) {
- pr_err("Error! Init WM failed\n");
- goto err_init_wm;
+ CAM_ERR(CAM_ISP, "Error! Init WM failed rc=%d", rc);
+ goto deinit_wm;
}
}
@@ -1957,8 +2256,8 @@
rc = cam_vfe_bus_init_comp_grp(i, bus_priv, bus_hw_info,
&bus_priv->comp_grp[i]);
if (rc < 0) {
- pr_err("Error! Init Comp Grp failed\n");
- goto err_init_comp_grp;
+ CAM_ERR(CAM_ISP, "Init Comp Grp failed rc=%d", rc);
+ goto deinit_comp_grp;
}
}
@@ -1966,36 +2265,123 @@
rc = cam_vfe_bus_init_vfe_out_resource(i, bus_priv, bus_hw_info,
&bus_priv->vfe_out[i]);
if (rc < 0) {
- pr_err("Error! Init VFE Out failed\n");
- goto err_init_vfe_out;
+ CAM_ERR(CAM_ISP, "Init VFE Out failed rc=%d", rc);
+ goto deinit_vfe_out;
}
}
- INIT_LIST_HEAD(&bus_priv->free_payload_list);
- for (i = 0; i < 128; i++) {
- INIT_LIST_HEAD(&bus_priv->evt_payload[i].list);
- list_add_tail(&bus_priv->evt_payload[i].list,
- &bus_priv->free_payload_list);
+ INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+ for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++) {
+ INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+ list_add_tail(&bus_priv->common_data.evt_payload[i].list,
+ &bus_priv->common_data.free_payload_list);
}
- vfe_bus_local->acquire_resource = cam_vfe_bus_acquire_vfe_out;
- vfe_bus_local->release_resource = cam_vfe_bus_release_vfe_out;
- vfe_bus_local->start_resource = cam_vfe_bus_start_vfe_out;
- vfe_bus_local->stop_resource = cam_vfe_bus_stop_vfe_out;
- vfe_bus_local->top_half_handler = cam_vfe_bus_ver2_handle_irq;
+ vfe_bus_local->hw_ops.reserve = cam_vfe_bus_acquire_vfe_out;
+ vfe_bus_local->hw_ops.release = cam_vfe_bus_release_vfe_out;
+ vfe_bus_local->hw_ops.start = cam_vfe_bus_start_hw;
+ vfe_bus_local->hw_ops.stop = cam_vfe_bus_stop_hw;
+ vfe_bus_local->hw_ops.init = cam_vfe_bus_init_hw;
+ vfe_bus_local->hw_ops.deinit = cam_vfe_bus_deinit_hw;
+ vfe_bus_local->top_half_handler = cam_vfe_bus_ver2_handle_irq;
vfe_bus_local->bottom_half_handler = NULL;
- vfe_bus_local->process_cmd = cam_vfe_bus_process_cmd;
+ vfe_bus_local->hw_ops.process_cmd = cam_vfe_bus_process_cmd;
*vfe_bus = vfe_bus_local;
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
-err_init_vfe_out:
-err_init_comp_grp:
-err_init_wm:
+deinit_vfe_out:
+ if (i < 0)
+ i = CAM_VFE_BUS_VER2_VFE_OUT_MAX;
+ for (--i; i >= 0; i--)
+ cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
+
+deinit_comp_grp:
+ if (i < 0)
+ i = CAM_VFE_BUS_VER2_COMP_GRP_MAX;
+ for (--i; i >= 0; i--)
+ cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
+
+deinit_wm:
+ if (i < 0)
+ i = CAM_VFE_BUS_VER2_MAX_CLIENTS;
+ for (--i; i >= 0; i--)
+ cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
+
+free_bus_priv:
kfree(vfe_bus_local->bus_priv);
-err_alloc_priv:
+
+free_bus_local:
kfree(vfe_bus_local);
-err_alloc_bus:
+
+end:
return rc;
}
+
+int cam_vfe_bus_ver2_deinit(
+ struct cam_vfe_bus **vfe_bus)
+{
+ int i, rc = 0;
+ struct cam_vfe_bus_ver2_priv *bus_priv = NULL;
+ struct cam_vfe_bus *vfe_bus_local;
+
+ if (!vfe_bus || !*vfe_bus) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input");
+ return -EINVAL;
+ }
+ vfe_bus_local = *vfe_bus;
+
+ bus_priv = vfe_bus_local->bus_priv;
+ if (!bus_priv) {
+ CAM_ERR(CAM_ISP, "Error! bus_priv is NULL");
+ rc = -ENODEV;
+ goto free_bus_local;
+ }
+
+ INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+ for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++)
+ INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+
+ for (i = 0; i < CAM_VFE_BUS_VER2_MAX_CLIENTS; i++) {
+ rc = cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error! Deinit WM failed rc=%d", rc);
+ }
+
+ for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
+ rc = cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error! Deinit Comp Grp failed rc=%d", rc);
+ }
+
+ for (i = 0; i < CAM_VFE_BUS_VER2_VFE_OUT_MAX; i++) {
+ rc = cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error! Deinit VFE Out failed rc=%d", rc);
+ }
+
+ INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+ INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp);
+ INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+ rc = cam_irq_controller_deinit(
+ &bus_priv->common_data.bus_irq_controller);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error! Deinit IRQ Controller failed rc=%d", rc);
+
+ kfree(vfe_bus_local->bus_priv);
+
+free_bus_local:
+ kfree(vfe_bus_local);
+
+ *vfe_bus = NULL;
+
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
index e451174..ba98077 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
@@ -171,18 +171,34 @@
*
* @Brief: Initialize Bus layer
*
- * @mem_base: Mapped base address of register space
+ * @soc_info: Soc Information for the associated HW
* @hw_intf: HW Interface of HW to which this resource belongs
* @bus_hw_info: BUS HW info that contains details of BUS registers
* @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top
* level IRQs
* @vfe_bus: Pointer to vfe_bus structure which will be filled
* and returned on successful initialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
*/
-int cam_vfe_bus_ver2_init(void __iomem *mem_base,
+int cam_vfe_bus_ver2_init(
+ struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
void *bus_hw_info,
void *vfe_irq_controller,
struct cam_vfe_bus **vfe_bus);
+/*
+ * cam_vfe_bus_ver2_deinit()
+ *
+ * @Brief: Deinitialize Bus layer
+ *
+ * @vfe_bus: Pointer to vfe_bus structure to deinitialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_bus_ver2_deinit(struct cam_vfe_bus **vfe_bus);
+
#endif /* _CAM_VFE_BUS_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
index d202c13..c089911 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
@@ -14,6 +14,7 @@
#define _CAM_VFE_BUS_H_
#include <uapi/media/cam_isp.h>
+#include "cam_hw_intf.h"
#include "cam_isp_hw.h"
#define CAM_VFE_BUS_VER_1_0 0x1000
@@ -31,25 +32,14 @@
* @Brief: Bus interface structure
*
* @bus_priv: Private data of BUS
- * @acquire_resource: Function pointer for acquiring BUS output resource
- * @release_resource: Function pointer for releasing BUS resource
- * @start_resource: Function for starting BUS Output resource
- * @stop_resource: Function for stopping BUS Output resource
- * @process_cmd: Function to process commands specific to BUS
- * resources
+ * @hw_ops: Hardware interface functions
* @top_half_handler: Top Half handler function
* @bottom_half_handler: Bottom Half handler function
*/
struct cam_vfe_bus {
void *bus_priv;
- int (*acquire_resource)(void *bus_priv, void *acquire_args);
- int (*release_resource)(void *bus_priv,
- struct cam_isp_resource_node *vfe_out);
- int (*start_resource)(struct cam_isp_resource_node *vfe_out);
- int (*stop_resource)(struct cam_isp_resource_node *vfe_out);
- int (*process_cmd)(void *priv, uint32_t cmd_type, void *cmd_args,
- uint32_t arg_size);
+ struct cam_hw_ops hw_ops;
CAM_IRQ_HANDLER_TOP_HALF top_half_handler;
CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler;
};
@@ -60,19 +50,36 @@
* @Brief: Initialize Bus layer
*
* @bus_version: Version of BUS to initialize
- * @mem_base: Mapped base address of register space
+ * @soc_info: Soc Information for the associated HW
* @hw_intf: HW Interface of HW to which this resource belongs
* @bus_hw_info: BUS HW info that contains details of BUS registers
* @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top
* level IRQs
* @vfe_bus: Pointer to vfe_bus structure which will be filled
* and returned on successful initialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
*/
int cam_vfe_bus_init(uint32_t bus_version,
- void __iomem *mem_base,
+ struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
void *bus_hw_info,
void *vfe_irq_controller,
struct cam_vfe_bus **vfe_bus);
+/*
+ * cam_vfe_bus_deinit()
+ *
+ * @Brief: Deinitialize Bus layer
+ *
+ * @bus_version: Version of BUS to deinitialize
+ * @vfe_bus: Pointer to vfe_bus structure to deinitialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_bus_deinit(uint32_t bus_version,
+ struct cam_vfe_bus **vfe_bus);
+
#endif /* _CAM_VFE_BUS_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 6dd67df..e70ecc5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -10,9 +10,7 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
-#include <linux/slab.h>
+#include <linux/slab.h>
#include <uapi/media/cam_isp.h>
#include "cam_io_util.h"
#include "cam_isp_hw_mgr_intf.h"
@@ -20,9 +18,7 @@
#include "cam_vfe_top.h"
#include "cam_vfe_top_ver2.h"
#include "cam_vfe_camif_ver2.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
struct cam_vfe_mux_camif_data {
void __iomem *mem_base;
@@ -55,7 +51,7 @@
rc = 0;
break;
default:
- pr_err("Error! Invalid pix pattern:%d\n", pattern);
+ CAM_ERR(CAM_ISP, "Error! Invalid pix pattern:%d", pattern);
rc = -EINVAL;
break;
}
@@ -96,12 +92,12 @@
uint32_t val = 0;
if (!camif_res) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
- pr_err("Error! Invalid camif res res_state:%d\n",
+ CAM_ERR(CAM_ISP, "Error! Invalid camif res res_state:%d",
camif_res->res_state);
return -EINVAL;
}
@@ -129,7 +125,7 @@
/* Reg Update */
cam_io_w_mb(0x1, rsrc_data->mem_base + 0x4AC);
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return 0;
}
@@ -142,7 +138,7 @@
int rc = 0;
if (!camif_res) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
@@ -188,26 +184,26 @@
payload = evt_payload_priv;
irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
- CDBG("event ID:%d\n", payload->evt_id);
- CDBG("irq_status_0 = %x\n", irq_status0);
+ CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+ CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
switch (payload->evt_id) {
case CAM_ISP_HW_EVENT_SOF:
if (irq_status0 & camif_priv->reg_data->sof_irq_mask) {
- CDBG("Received SOF\n");
+ CAM_DBG(CAM_ISP, "Received SOF");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
break;
case CAM_ISP_HW_EVENT_EPOCH:
if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) {
- CDBG("Received EPOCH\n");
+ CAM_DBG(CAM_ISP, "Received EPOCH");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
cam_vfe_put_evt_payload(payload->core_info, &payload);
break;
case CAM_ISP_HW_EVENT_REG_UPDATE:
if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
- CDBG("Received REG_UPDATE_ACK\n");
+ CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
break;
@@ -215,7 +211,7 @@
break;
}
- CDBG("returing status = %d\n", ret);
+	CAM_DBG(CAM_ISP, "returning status = %d", ret);
return ret;
}
@@ -229,9 +225,9 @@
struct cam_vfe_camif_ver2_hw_info *camif_info = camif_hw_info;
camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_data),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!camif_priv) {
- CDBG("Error! Failed to alloc for camif_priv\n");
+ CAM_DBG(CAM_ISP, "Error! Failed to alloc for camif_priv");
return -ENOMEM;
}
@@ -251,3 +247,24 @@
return 0;
}
+int cam_vfe_camif_ver2_deinit(
+ struct cam_isp_resource_node *camif_node)
+{
+ struct cam_vfe_mux_camif_data *camif_priv = camif_node->res_priv;
+
+ camif_node->start = NULL;
+ camif_node->stop = NULL;
+ camif_node->top_half_handler = NULL;
+ camif_node->bottom_half_handler = NULL;
+
+ camif_node->res_priv = NULL;
+
+ if (!camif_priv) {
+ CAM_ERR(CAM_ISP, "Error! camif_priv is NULL");
+ return -ENODEV;
+ }
+
+ kfree(camif_priv);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
index cc6aab0..553abf2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -75,4 +75,7 @@
void *camif_hw_info,
struct cam_isp_resource_node *camif_node);
+int cam_vfe_camif_ver2_deinit(
+ struct cam_isp_resource_node *camif_node);
+
#endif /* _CAM_VFE_CAMIF_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index 5f77a7c..df7b0f9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -10,21 +10,19 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include "cam_vfe_rdi.h"
#include "cam_isp_hw_mgr_intf.h"
#include "cam_vfe_hw_intf.h"
#include "cam_io_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
struct cam_vfe_mux_rdi_data {
void __iomem *mem_base;
struct cam_hw_intf *hw_intf;
struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_rdi_ver2_reg *rdi_reg;
+ struct cam_vfe_rdi_reg_data *reg_data;
enum cam_isp_hw_sync_mode sync_mode;
};
@@ -51,12 +49,12 @@
int rc = 0;
if (!rdi_res) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
if (rdi_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
- pr_err("Error! Invalid rdi res res_state:%d\n",
+ CAM_ERR(CAM_ISP, "Error! Invalid rdi res res_state:%d",
rdi_res->res_state);
return -EINVAL;
}
@@ -67,7 +65,7 @@
/* Reg Update */
cam_io_w_mb(0x2, rsrc_data->mem_base + 0x4AC);
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -80,7 +78,7 @@
int rc = 0;
if (!rdi_res) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
@@ -103,13 +101,14 @@
int rc = -EINVAL;
if (!priv || !cmd_args) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
switch (cmd_type) {
default:
- pr_err("Error! unsupported RDI process command:%d\n", cmd_type);
+ CAM_ERR(CAM_ISP,
+ "unsupported RDI process command:%d", cmd_type);
break;
}
@@ -139,23 +138,28 @@
payload = evt_payload_priv;
irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
- CDBG("event ID:%d\n", payload->evt_id);
- CDBG("irq_status_0 = %x\n", irq_status0);
+ CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+ CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
switch (payload->evt_id) {
case CAM_ISP_HW_EVENT_SOF:
- if (irq_status0 & 0x8000000)
+ if (irq_status0 & rdi_priv->reg_data->sof_irq_mask) {
+ CAM_DBG(CAM_ISP, "Received SOF");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+ }
break;
case CAM_ISP_HW_EVENT_REG_UPDATE:
- if (irq_status0 & 0x20)
+ if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
+ CAM_DBG(CAM_ISP, "Received REG UPDATE");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+ }
+ cam_vfe_put_evt_payload(payload->core_info, &payload);
break;
default:
break;
}
- CDBG("returing status = %d\n", ret);
+	CAM_DBG(CAM_ISP, "returning status = %d", ret);
return ret;
}
@@ -166,11 +170,12 @@
struct cam_isp_resource_node *rdi_node)
{
struct cam_vfe_mux_rdi_data *rdi_priv = NULL;
+ struct cam_vfe_rdi_ver2_hw_info *rdi_info = rdi_hw_info;
rdi_priv = kzalloc(sizeof(struct cam_vfe_mux_rdi_data),
GFP_KERNEL);
if (!rdi_priv) {
- CDBG("Error! Failed to alloc for rdi_priv\n");
+ CAM_DBG(CAM_ISP, "Error! Failed to alloc for rdi_priv");
return -ENOMEM;
}
@@ -178,6 +183,31 @@
rdi_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
rdi_priv->hw_intf = hw_intf;
+ rdi_priv->common_reg = rdi_info->common_reg;
+ rdi_priv->rdi_reg = rdi_info->rdi_reg;
+
+ switch (rdi_node->res_id) {
+ case CAM_ISP_HW_VFE_IN_RDI0:
+ rdi_priv->reg_data = rdi_info->reg_data[0];
+ break;
+ case CAM_ISP_HW_VFE_IN_RDI1:
+ rdi_priv->reg_data = rdi_info->reg_data[1];
+ break;
+ case CAM_ISP_HW_VFE_IN_RDI2:
+ rdi_priv->reg_data = rdi_info->reg_data[2];
+ break;
+ case CAM_ISP_HW_VFE_IN_RDI3:
+ if (rdi_info->reg_data[3]) {
+ rdi_priv->reg_data = rdi_info->reg_data[3];
+ } else {
+ CAM_ERR(CAM_ISP, "Error! RDI3 is not supported");
+ goto err_init;
+ }
+ break;
+ default:
+ CAM_DBG(CAM_ISP, "invalid Resource id:%d", rdi_node->res_id);
+ goto err_init;
+ }
rdi_node->start = cam_vfe_rdi_resource_start;
rdi_node->stop = cam_vfe_rdi_resource_stop;
@@ -185,5 +215,28 @@
rdi_node->bottom_half_handler = cam_vfe_rdi_handle_irq_bottom_half;
return 0;
+err_init:
+ kfree(rdi_priv);
+ return -EINVAL;
}
+int cam_vfe_rdi_ver2_deinit(
+ struct cam_isp_resource_node *rdi_node)
+{
+ struct cam_vfe_mux_rdi_data *rdi_priv = rdi_node->res_priv;
+
+ rdi_node->start = NULL;
+ rdi_node->stop = NULL;
+ rdi_node->top_half_handler = NULL;
+ rdi_node->bottom_half_handler = NULL;
+
+ rdi_node->res_priv = NULL;
+
+ if (!rdi_priv) {
+ CAM_ERR(CAM_ISP, "Error! rdi_priv NULL");
+ return -ENODEV;
+ }
+ kfree(rdi_priv);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
index 967cec3..04e4f02 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
@@ -16,18 +16,22 @@
#include "cam_isp_hw.h"
#include "cam_vfe_top.h"
+#define CAM_VFE_RDI_VER2_MAX 4
+
struct cam_vfe_rdi_ver2_reg {
uint32_t reg_update_cmd;
};
struct cam_vfe_rdi_reg_data {
+ uint32_t reg_update_cmd_data;
+ uint32_t sof_irq_mask;
uint32_t reg_update_irq_mask;
};
struct cam_vfe_rdi_ver2_hw_info {
- struct cam_vfe_top_ver2_reg_offset_common *common_reg;
- struct cam_vfe_rdi_ver2_reg *rdi_reg;
- struct cam_vfe_rdi_reg_data *reg_data;
+ struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_rdi_ver2_reg *rdi_reg;
+ struct cam_vfe_rdi_reg_data *reg_data[CAM_VFE_RDI_VER2_MAX];
};
int cam_vfe_rdi_ver2_acquire_resource(
@@ -43,4 +47,7 @@
void *rdi_hw_info,
struct cam_isp_resource_node *rdi_node);
+int cam_vfe_rdi_ver2_deinit(
+ struct cam_isp_resource_node *rdi_node);
+
#endif /* _CAM_VFE_RDI_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
index e2bceb8..8eb1835 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
@@ -10,10 +10,9 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include "cam_vfe_top.h"
#include "cam_vfe_top_ver2.h"
+#include "cam_debug_util.h"
int cam_vfe_top_init(uint32_t top_version,
struct cam_hw_soc_info *soc_info,
@@ -29,7 +28,24 @@
vfe_top);
break;
default:
- pr_err("Error! Unsupported Version %x\n", top_version);
+ CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
+ break;
+ }
+
+ return rc;
+}
+
+int cam_vfe_top_deinit(uint32_t top_version,
+ struct cam_vfe_top **vfe_top)
+{
+ int rc = -EINVAL;
+
+ switch (top_version) {
+ case CAM_VFE_TOP_VER_2_0:
+ rc = cam_vfe_top_ver2_deinit(vfe_top);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
break;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index 3ef4f49..1a3eeae 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -10,17 +10,13 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include "cam_io_util.h"
#include "cam_cdm_util.h"
#include "cam_vfe_hw_intf.h"
#include "cam_vfe_top.h"
#include "cam_vfe_top_ver2.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
struct cam_vfe_top_ver2_common_data {
struct cam_hw_soc_info *soc_info;
@@ -43,13 +39,13 @@
struct cam_cdm_utils_ops *cdm_util_ops = NULL;
if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
- pr_err("Error! Invalid cmd size\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid cmd size");
return -EINVAL;
}
if (!cdm_args || !cdm_args->res || !top_priv ||
!top_priv->common_data.soc_info) {
- pr_err("Error! Invalid args\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid args");
return -EINVAL;
}
@@ -57,22 +53,22 @@
(struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
if (!cdm_util_ops) {
- pr_err("Invalid CDM ops\n");
+ CAM_ERR(CAM_ISP, "Invalid CDM ops");
return -EINVAL;
}
size = cdm_util_ops->cdm_required_size_changebase();
/* since cdm returns dwords, we need to convert it into bytes */
if ((size * 4) > cdm_args->size) {
- pr_err("buf size:%d is not sufficient, expected: %d\n",
+ CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
cdm_args->size, size);
return -EINVAL;
}
mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(
top_priv->common_data.soc_info, VFE_CORE_BASE_IDX);
- CDBG("core %d mem_base 0x%x\n", top_priv->common_data.soc_info->index,
- mem_base);
+ CAM_DBG(CAM_ISP, "core %d mem_base 0x%x",
+ top_priv->common_data.soc_info->index, mem_base);
cdm_util_ops->cdm_write_changebase(cdm_args->cmd_buf_addr, mem_base);
cdm_args->used_bytes = (size * 4);
@@ -90,26 +86,26 @@
struct cam_cdm_utils_ops *cdm_util_ops = NULL;
if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
- pr_err("Error! Invalid cmd size\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid cmd size");
return -EINVAL;
}
if (!cdm_args || !cdm_args->res) {
- pr_err("Error! Invalid args\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid args");
return -EINVAL;
}
cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
if (!cdm_util_ops) {
- pr_err("Error! Invalid CDM ops\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid CDM ops");
return -EINVAL;
}
size = cdm_util_ops->cdm_required_size_reg_random(1);
/* since cdm returns dwords, we need to convert it into bytes */
if ((size * 4) > cdm_args->size) {
- pr_err("Error! buf size:%d is not sufficient, expected: %d\n",
+ CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
cdm_args->size, size);
return -EINVAL;
}
@@ -153,7 +149,7 @@
struct cam_vfe_top_ver2_reg_offset_common *reg_common = NULL;
if (!top_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -169,7 +165,7 @@
CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) +
reg_common->global_reset_cmd);
- CDBG("Reset HW exit\n");
+ CAM_DBG(CAM_ISP, "Reset HW exit");
return 0;
}
@@ -183,7 +179,7 @@
int rc = -EINVAL;
if (!device_priv || !reserve_args) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
@@ -228,16 +224,16 @@
struct cam_isp_resource_node *mux_res;
if (!device_priv || !release_args) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
mux_res = (struct cam_isp_resource_node *)release_args;
- CDBG("%s: Resource in state %d\n", __func__, mux_res->res_state);
+ CAM_DBG(CAM_ISP, "Resource in state %d", mux_res->res_state);
if (mux_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) {
- pr_err("Error! Resource in Invalid res_state :%d\n",
+ CAM_ERR(CAM_ISP, "Error! Resource in Invalid res_state :%d",
mux_res->res_state);
return -EINVAL;
}
@@ -254,7 +250,7 @@
int rc = 0;
if (!device_priv || !start_args) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
@@ -268,7 +264,7 @@
mux_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
rc = 0;
} else {
- pr_err("Invalid res id:%d\n", mux_res->res_id);
+ CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id);
rc = -EINVAL;
}
@@ -283,7 +279,7 @@
int rc = 0;
if (!device_priv || !stop_args) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
@@ -295,7 +291,7 @@
mux_res->res_id <= CAM_ISP_HW_VFE_IN_RDI3)) {
rc = mux_res->stop(mux_res);
} else {
- pr_err("Invalid res id:%d\n", mux_res->res_id);
+ CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id);
rc = -EINVAL;
}
@@ -322,7 +318,7 @@
struct cam_vfe_top_ver2_priv *top_priv;
if (!device_priv || !cmd_args) {
- pr_err("Error! Invalid arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid arguments");
return -EINVAL;
}
top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
@@ -337,7 +333,7 @@
break;
default:
rc = -EINVAL;
- pr_err("Error! Invalid cmd:%d\n", cmd_type);
+ CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type);
break;
}
@@ -357,17 +353,17 @@
vfe_top = kzalloc(sizeof(struct cam_vfe_top), GFP_KERNEL);
if (!vfe_top) {
- CDBG("Error! Failed to alloc for vfe_top\n");
+ CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top");
rc = -ENOMEM;
- goto err_alloc_top;
+ goto end;
}
top_priv = kzalloc(sizeof(struct cam_vfe_top_ver2_priv),
GFP_KERNEL);
if (!top_priv) {
- CDBG("Error! Failed to alloc for vfe_top_priv\n");
+ CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top_priv");
rc = -ENOMEM;
- goto err_alloc_priv;
+ goto free_vfe_top;
}
vfe_top->top_priv = top_priv;
@@ -384,16 +380,17 @@
&ver2_hw_info->camif_hw_info,
&top_priv->mux_rsrc[i]);
if (rc)
- goto err_mux_init;
+ goto deinit_resources;
} else {
/* set the RDI resource id */
top_priv->mux_rsrc[i].res_id =
- CAM_ISP_HW_VFE_IN_RDI0 + j;
+ CAM_ISP_HW_VFE_IN_RDI0 + j++;
+
rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info,
- NULL, &top_priv->mux_rsrc[i]);
+ &ver2_hw_info->rdi_hw_info,
+ &top_priv->mux_rsrc[i]);
if (rc)
goto deinit_resources;
- j++;
}
}
@@ -416,10 +413,71 @@
return rc;
deinit_resources:
-err_mux_init:
+ for (--i; i >= 0; i--) {
+ if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
+ if (cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]))
+ CAM_ERR(CAM_ISP, "Camif Deinit failed");
+ } else {
+ if (cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]))
+ CAM_ERR(CAM_ISP, "RDI Deinit failed");
+ }
+ top_priv->mux_rsrc[i].res_state =
+ CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+ }
+
kfree(vfe_top->top_priv);
-err_alloc_priv:
+free_vfe_top:
kfree(vfe_top);
-err_alloc_top:
+end:
return rc;
}
+
+int cam_vfe_top_ver2_deinit(struct cam_vfe_top **vfe_top_ptr)
+{
+ int i, rc = 0;
+ struct cam_vfe_top_ver2_priv *top_priv = NULL;
+ struct cam_vfe_top *vfe_top;
+
+ if (!vfe_top_ptr) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input");
+ return -EINVAL;
+ }
+
+ vfe_top = *vfe_top_ptr;
+ if (!vfe_top) {
+ CAM_ERR(CAM_ISP, "Error! vfe_top NULL");
+ return -ENODEV;
+ }
+
+ top_priv = vfe_top->top_priv;
+ if (!top_priv) {
+ CAM_ERR(CAM_ISP, "Error! vfe_top_priv NULL");
+ rc = -ENODEV;
+ goto free_vfe_top;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ top_priv->mux_rsrc[i].res_state =
+ CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+ if (top_priv->mux_rsrc[i].res_id ==
+ CAM_ISP_HW_VFE_IN_CAMIF) {
+ rc = cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d",
+ rc);
+ } else {
+ rc = cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]);
+ if (rc)
+ CAM_ERR(CAM_ISP, "RDI deinit failed rc=%d", rc);
+ }
+ }
+
+ kfree(vfe_top->top_priv);
+
+free_vfe_top:
+ kfree(vfe_top);
+ *vfe_top_ptr = NULL;
+
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
index 1038721..bafd7f2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
@@ -52,12 +52,15 @@
struct cam_vfe_top_ver2_hw_info {
struct cam_vfe_top_ver2_reg_offset_common *common_reg;
struct cam_vfe_camif_ver2_hw_info camif_hw_info;
+ struct cam_vfe_rdi_ver2_hw_info rdi_hw_info;
uint32_t mux_type[CAM_VFE_TOP_VER2_MUX_MAX];
};
int cam_vfe_top_ver2_init(struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
void *top_hw_info,
- struct cam_vfe_top **vfe_top);
+ struct cam_vfe_top **vfe_top);
+
+int cam_vfe_top_ver2_deinit(struct cam_vfe_top **vfe_top);
#endif /* _CAM_VFE_TOP_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
index 44c046d..dbb211f 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
@@ -50,4 +50,7 @@
void *top_hw_info,
struct cam_vfe_top **vfe_top);
+int cam_vfe_top_deinit(uint32_t top_version,
+ struct cam_vfe_top **vfe_top);
+
#endif /* _CAM_VFE_TOP_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
index e6da6ca..f514139 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
@@ -1,4 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o \
cam_req_mgr_util.o \
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index edfc245..c150244 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -10,14 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-MEM-MGR %s:%d " fmt, __func__, __LINE__
-
-#ifdef CONFIG_MEM_MGR_DBG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mutex.h>
@@ -27,6 +19,7 @@
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
+#include "cam_debug_util.h"
static struct cam_mem_table tbl;
@@ -36,12 +29,12 @@
{
*vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
if (IS_ERR_OR_NULL((void *)*vaddr)) {
- pr_err("kernel map fail");
+ CAM_ERR(CAM_CRM, "kernel map fail");
return -ENOSPC;
}
if (ion_handle_get_size(tbl.client, hdl, len)) {
- pr_err("kernel get len failed");
+ CAM_ERR(CAM_CRM, "kernel get len failed");
ion_unmap_kernel(tbl.client, hdl);
return -ENOSPC;
}
@@ -69,7 +62,7 @@
tbl.client = msm_ion_client_create("camera_global_pool");
if (IS_ERR_OR_NULL(tbl.client)) {
- pr_err("fail to create client\n");
+ CAM_ERR(CAM_CRM, "fail to create client");
rc = -EINVAL;
}
@@ -92,7 +85,7 @@
rc = cam_mem_util_client_create();
if (rc < 0) {
- pr_err("fail to create ion client\n");
+ CAM_ERR(CAM_CRM, "fail to create ion client");
goto client_fail;
}
@@ -127,10 +120,12 @@
mutex_lock(&tbl.m_lock);
for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
if (!tbl.bufq[i].active) {
- CDBG("Buffer inactive at idx=%d, continuing\n", i);
+ CAM_DBG(CAM_CRM,
+ "Buffer inactive at idx=%d, continuing", i);
continue;
} else {
- pr_err("Active buffer at idx=%d, possible leak\n", i);
+ CAM_ERR(CAM_CRM,
+ "Active buffer at idx=%d, possible leak", i);
}
mutex_lock(&tbl.bufq[i].q_lock);
@@ -221,7 +216,7 @@
iova_ptr,
len_ptr);
if (rc < 0)
- pr_err("fail to get buf hdl :%d", buf_handle);
+ CAM_ERR(CAM_CRM, "fail to get buf hdl :%d", buf_handle);
handle_mismatch:
mutex_unlock(&tbl.bufq[idx].q_lock);
@@ -255,7 +250,7 @@
ion_hdl = tbl.bufq[idx].i_hdl;
if (!ion_hdl) {
- pr_err("Invalid ION handle\n");
+ CAM_ERR(CAM_CRM, "Invalid ION handle");
rc = -EINVAL;
goto exit_func;
}
@@ -310,7 +305,7 @@
rc = ion_handle_get_flags(tbl.client, tbl.bufq[idx].i_hdl,
&ion_flag);
if (rc) {
- pr_err("cache get flags failed %d\n", rc);
+ CAM_ERR(CAM_CRM, "cache get flags failed %d", rc);
goto fail;
}
@@ -326,7 +321,8 @@
ion_cache_ops = ION_IOC_CLEAN_INV_CACHES;
break;
default:
- pr_err("invalid cache ops :%d", cmd->mem_cache_ops);
+ CAM_ERR(CAM_CRM,
+ "invalid cache ops :%d", cmd->mem_cache_ops);
rc = -EINVAL;
goto fail;
}
@@ -337,7 +333,7 @@
tbl.bufq[idx].len,
ion_cache_ops);
if (rc)
- pr_err("cache operation failed %d\n", rc);
+ CAM_ERR(CAM_CRM, "cache operation failed %d", rc);
}
fail:
mutex_unlock(&tbl.bufq[idx].q_lock);
@@ -360,7 +356,7 @@
*fd = ion_share_dma_buf_fd(tbl.client, *hdl);
if (*fd < 0) {
- pr_err("dma buf get fd fail");
+ CAM_ERR(CAM_CRM, "dma buf get fd fail");
rc = -EINVAL;
goto get_fd_fail;
}
@@ -404,19 +400,19 @@
static int cam_mem_util_check_flags(struct cam_mem_mgr_alloc_cmd *cmd)
{
if (!cmd->flags) {
- pr_err("Invalid flags\n");
+ CAM_ERR(CAM_CRM, "Invalid flags");
return -EINVAL;
}
if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
- pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+ CAM_ERR(CAM_CRM, "Num of mmu hdl exceeded maximum(%d)",
CAM_MEM_MMU_MAX_HANDLE);
return -EINVAL;
}
if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
- pr_err("Kernel mapping in secure mode not allowed");
+ CAM_ERR(CAM_CRM, "Kernel mapping in secure mode not allowed");
return -EINVAL;
}
@@ -426,24 +422,25 @@
static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
{
if (!cmd->flags) {
- pr_err("Invalid flags\n");
+ CAM_ERR(CAM_CRM, "Invalid flags");
return -EINVAL;
}
if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
- pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+ CAM_ERR(CAM_CRM, "Num of mmu hdl exceeded maximum(%d)",
CAM_MEM_MMU_MAX_HANDLE);
return -EINVAL;
}
if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
- pr_err("Kernel mapping in secure mode not allowed");
+ CAM_ERR(CAM_CRM, "Kernel mapping in secure mode not allowed");
return -EINVAL;
}
if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
- pr_err("Shared memory buffers are not allowed to be mapped\n");
+ CAM_ERR(CAM_CRM,
+ "Shared memory buffers are not allowed to be mapped");
return -EINVAL;
}
@@ -463,7 +460,7 @@
int dir = cam_mem_util_get_dma_dir(flags);
if (dir < 0) {
- pr_err("fail to map DMA direction\n");
+ CAM_ERR(CAM_CRM, "fail to map DMA direction");
return dir;
}
@@ -476,7 +473,8 @@
len);
if (rc < 0) {
- pr_err("Failed to securely map to smmu");
+ CAM_ERR(CAM_CRM,
+ "Failed to securely map to smmu");
goto multi_map_fail;
}
}
@@ -490,7 +488,7 @@
region);
if (rc < 0) {
- pr_err("Failed to map to smmu");
+ CAM_ERR(CAM_CRM, "Failed to map to smmu");
goto multi_map_fail;
}
}
@@ -520,14 +518,14 @@
size_t len;
if (!cmd) {
- pr_err(" Invalid argument\n");
+		CAM_ERR(CAM_CRM, "Invalid argument");
return -EINVAL;
}
len = cmd->len;
rc = cam_mem_util_check_flags(cmd);
if (rc) {
- pr_err("Invalid flags: flags = %X\n", cmd->flags);
+ CAM_ERR(CAM_CRM, "Invalid flags: flags = %X", cmd->flags);
return rc;
}
@@ -535,7 +533,7 @@
&ion_hdl,
&ion_fd);
if (rc) {
- pr_err("Ion allocation failed\n");
+ CAM_ERR(CAM_CRM, "Ion allocation failed");
return rc;
}
@@ -591,7 +589,7 @@
cmd->out.fd = tbl.bufq[idx].fd;
cmd->out.vaddr = 0;
- CDBG("buf handle: %x, fd: %d, len: %zu\n",
+ CAM_DBG(CAM_CRM, "buf handle: %x, fd: %d, len: %zu",
cmd->out.buf_handle, cmd->out.fd,
tbl.bufq[idx].len);
@@ -613,7 +611,7 @@
size_t len = 0;
if (!cmd || (cmd->fd < 0)) {
- pr_err("Invalid argument\n");
+ CAM_ERR(CAM_CRM, "Invalid argument");
return -EINVAL;
}
@@ -622,13 +620,13 @@
rc = cam_mem_util_check_map_flags(cmd);
if (rc) {
- pr_err("Invalid flags: flags = %X\n", cmd->flags);
+ CAM_ERR(CAM_CRM, "Invalid flags: flags = %X", cmd->flags);
return rc;
}
ion_hdl = ion_import_dma_buf_fd(tbl.client, cmd->fd);
if (IS_ERR_OR_NULL((void *)(ion_hdl))) {
- pr_err("Failed to import ion fd\n");
+ CAM_ERR(CAM_CRM, "Failed to import ion fd");
return -EINVAL;
}
@@ -690,7 +688,7 @@
int rc = -EINVAL;
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
- pr_err("Incorrect index\n");
+ CAM_ERR(CAM_CRM, "Incorrect index");
return rc;
}
@@ -725,28 +723,29 @@
enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
- pr_err("Incorrect index\n");
+ CAM_ERR(CAM_CRM, "Incorrect index");
return -EINVAL;
}
- CDBG("Flags = %X\n", tbl.bufq[idx].flags);
+ CAM_DBG(CAM_CRM, "Flags = %X", tbl.bufq[idx].flags);
if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
ion_unmap_kernel(tbl.client, tbl.bufq[idx].i_hdl);
- if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE ||
- tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
-
+ /* SHARED flag gets precedence, all other flags after it */
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+ region = CAM_SMMU_REGION_SHARED;
+ } else {
if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
region = CAM_SMMU_REGION_IO;
-
- if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
- region = CAM_SMMU_REGION_SHARED;
-
- rc = cam_mem_util_unmap_hw_va(idx, region);
}
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+ tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+ rc = cam_mem_util_unmap_hw_va(idx, region);
+
+
mutex_lock(&tbl.bufq[idx].q_lock);
tbl.bufq[idx].flags = 0;
tbl.bufq[idx].buf_handle = -1;
@@ -754,7 +753,8 @@
memset(tbl.bufq[idx].hdls, 0,
sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
- CDBG("Ion handle at idx = %d freeing = %pK, fd = %d, imported %d\n",
+ CAM_DBG(CAM_CRM,
+ "Ion handle at idx = %d freeing = %pK, fd = %d, imported %d",
idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd,
tbl.bufq[idx].is_imported);
@@ -779,27 +779,28 @@
int rc;
if (!cmd) {
- pr_err("Invalid argument\n");
+ CAM_ERR(CAM_CRM, "Invalid argument");
return -EINVAL;
}
idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
- pr_err("Incorrect index extracted from mem handle\n");
+ CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
return -EINVAL;
}
if (!tbl.bufq[idx].active) {
- pr_err("Released buffer state should be active\n");
+ CAM_ERR(CAM_CRM, "Released buffer state should be active");
return -EINVAL;
}
if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
- pr_err("Released buf handle not matching within table\n");
+ CAM_ERR(CAM_CRM,
+ "Released buf handle not matching within table");
return -EINVAL;
}
- CDBG("Releasing hdl = %u\n", cmd->buf_handle);
+ CAM_DBG(CAM_CRM, "Releasing hdl = %u", cmd->buf_handle);
rc = cam_mem_util_unmap(idx);
return rc;
@@ -820,16 +821,17 @@
uint32_t mem_handle;
int32_t smmu_hdl = 0;
int32_t num_hdl = 0;
- enum cam_smmu_region_id region;
+ enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
if (!inp || !out) {
- pr_err("Invalid params\n");
+ CAM_ERR(CAM_CRM, "Invalid params");
return -EINVAL;
}
- if (inp->region != CAM_MEM_MGR_REGION_SHARED &&
- inp->region != CAM_MEM_MGR_REGION_NON_SECURE_IO) {
- pr_err("Invalid flags for request mem\n");
+ if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+ inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
+ inp->flags & CAM_MEM_FLAG_CACHE)) {
+ CAM_ERR(CAM_CRM, "Invalid flags for request mem");
return -EINVAL;
}
@@ -848,29 +850,31 @@
&ion_fd);
if (rc) {
- pr_err("ION alloc failed for shared buffer\n");
+ CAM_ERR(CAM_CRM, "ION alloc failed for shared buffer");
goto ion_fail;
} else {
- CDBG("Got ION fd = %d, hdl = %pK\n", ion_fd, hdl);
+ CAM_DBG(CAM_CRM, "Got ION fd = %d, hdl = %pK", ion_fd, hdl);
}
rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
if (rc) {
- pr_err("Failed to get kernel vaddr\n");
+ CAM_ERR(CAM_CRM, "Failed to get kernel vaddr");
goto map_fail;
}
if (!inp->smmu_hdl) {
- pr_err("Invalid SMMU handle\n");
+ CAM_ERR(CAM_CRM, "Invalid SMMU handle");
rc = -EINVAL;
goto smmu_fail;
}
- if (inp->region == CAM_MEM_MGR_REGION_SHARED)
+ /* SHARED flag gets precedence, all other flags after it */
+ if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
region = CAM_SMMU_REGION_SHARED;
-
- if (inp->region == CAM_MEM_MGR_REGION_NON_SECURE_IO)
- region = CAM_SMMU_REGION_IO;
+ } else {
+ if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
+ region = CAM_SMMU_REGION_IO;
+ }
rc = cam_smmu_map_iova(inp->smmu_hdl,
ion_fd,
@@ -880,7 +884,7 @@
region);
if (rc < 0) {
- pr_err("SMMU mapping failed\n");
+ CAM_ERR(CAM_CRM, "SMMU mapping failed");
goto smmu_fail;
}
@@ -915,13 +919,13 @@
out->smmu_hdl = smmu_hdl;
out->mem_handle = mem_handle;
out->len = inp->size;
- out->region = inp->region;
+ out->region = region;
return rc;
slot_fail:
cam_smmu_unmap_iova(inp->smmu_hdl,
ion_fd,
- inp->region);
+ region);
smmu_fail:
ion_unmap_kernel(tbl.client, hdl);
map_fail:
@@ -937,27 +941,28 @@
int rc;
if (!inp) {
- pr_err("Invalid argument\n");
+ CAM_ERR(CAM_CRM, "Invalid argument");
return -EINVAL;
}
idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
- pr_err("Incorrect index extracted from mem handle\n");
+ CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
return -EINVAL;
}
if (!tbl.bufq[idx].active) {
- pr_err("Released buffer state should be active\n");
+ CAM_ERR(CAM_CRM, "Released buffer state should be active");
return -EINVAL;
}
if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
- pr_err("Released buf handle not matching within table\n");
+ CAM_ERR(CAM_CRM,
+ "Released buf handle not matching within table");
return -EINVAL;
}
- CDBG("Releasing hdl = %X\n", inp->mem_handle);
+ CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
rc = cam_mem_util_unmap(idx);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
index 32a754e..0858b8a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -14,13 +14,7 @@
#define _CAM_MEM_MGR_API_H_
#include <media/cam_req_mgr.h>
-
-/* Region IDs for memory manager */
-#define CAM_MEM_MGR_REGION_FIRMWARE 0
-#define CAM_MEM_MGR_REGION_SHARED 1
-#define CAM_MEM_MGR_REGION_NON_SECURE_IO 2
-#define CAM_MEM_MGR_REGION_SECURE_IO 3
-#define CAM_MEM_MGR_REGION_SCRATCH 4
+#include "cam_smmu_api.h"
/**
* struct cam_mem_mgr_request_desc
@@ -36,7 +30,6 @@
uint64_t align;
int32_t smmu_hdl;
uint32_t flags;
- uint32_t region;
};
/**
@@ -55,7 +48,7 @@
int32_t smmu_hdl;
uint32_t mem_handle;
uint64_t len;
- uint32_t region;
+ enum cam_smmu_region_id region;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index ed251eb..3fd42f7 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -19,6 +19,8 @@
#include "cam_req_mgr_core.h"
#include "cam_req_mgr_workq.h"
#include "cam_req_mgr_debug.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
static struct cam_req_mgr_core_device *g_crm_core_dev;
@@ -58,24 +60,25 @@
struct cam_req_mgr_req_tbl *req_tbl = req->l_tbl;
if (!in_q || !req_tbl) {
- CRM_WARN("NULL pointer %pK %pK", in_q, req_tbl);
+ CAM_WARN(CAM_CRM, "NULL pointer %pK %pK", in_q, req_tbl);
return -EINVAL;
}
- CRM_DBG("in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
+ CAM_DBG(CAM_CRM, "in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
mutex_lock(&req->lock);
for (i = 0; i < in_q->num_slots; i++) {
- CRM_DBG("IN_Q %d: idx %d, red_id %lld", i,
+ CAM_DBG(CAM_CRM, "IN_Q %d: idx %d, red_id %lld", i,
in_q->slot[i].idx, CRM_GET_REQ_ID(in_q, i));
}
while (req_tbl != NULL) {
for (i = 0; i < req_tbl->num_slots; i++) {
- CRM_DBG("idx= %d, map= %x, state= %d",
+ CAM_DBG(CAM_CRM, "idx= %d, map= %x, state= %d",
req_tbl->slot[i].idx,
req_tbl->slot[i].req_ready_map,
req_tbl->slot[i].state);
}
- CRM_DBG("TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
+ CAM_DBG(CAM_CRM,
+ "TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
req_tbl->id, req_tbl->pd, req_tbl->dev_count,
req_tbl->dev_mask, req_tbl->skip_traverse,
req_tbl->num_slots);
@@ -165,7 +168,7 @@
struct cam_req_mgr_apply *apply_data;
if (!traverse_data->tbl || !traverse_data->apply_data) {
- CRM_ERR("NULL pointer %pK %pK",
+ CAM_ERR(CAM_CRM, "NULL pointer %pK %pK",
traverse_data->tbl, traverse_data->apply_data);
traverse_data->result = 0;
return -EINVAL;
@@ -173,7 +176,7 @@
tbl = traverse_data->tbl;
apply_data = traverse_data->apply_data;
- CRM_DBG("Enter pd %d idx %d state %d skip %d status %d",
+ CAM_DBG(CAM_CRM, "Enter pd %d idx %d state %d skip %d status %d",
tbl->pd, curr_idx, tbl->slot[curr_idx].state,
tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status);
@@ -228,7 +231,7 @@
in_q->slot[idx].req_id = -1;
in_q->slot[idx].skip_idx = 1;
in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
- CRM_DBG("SET IDX SKIP on slot= %d", idx);
+ CAM_DBG(CAM_CRM, "SET IDX SKIP on slot= %d", idx);
}
/**
@@ -246,7 +249,7 @@
return;
do {
tbl->id = req->num_tbl++;
- CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+ CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
tbl->id, tbl->pd, tbl->skip_traverse,
tbl->pd_delta);
tbl = tbl->next;
@@ -275,7 +278,7 @@
max_pd = tbl->pd;
do {
tbl->skip_traverse = max_pd - tbl->pd;
- CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+ CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
tbl->id, tbl->pd, tbl->skip_traverse,
tbl->pd_delta);
tbl = tbl->next;
@@ -298,7 +301,7 @@
struct cam_req_mgr_req_queue *in_q = link->req.in_q;
slot = &in_q->slot[idx];
- CRM_DBG("RESET: idx: %d: slot->status %d", idx, slot->status);
+ CAM_DBG(CAM_CRM, "RESET: idx: %d: slot->status %d", idx, slot->status);
/* Check if CSL has already pushed new request*/
if (slot->status == CRM_SLOT_STATUS_REQ_ADDED)
@@ -312,7 +315,7 @@
/* Reset all pd table slot */
while (tbl != NULL) {
- CRM_DBG("pd: %d: idx %d state %d",
+ CAM_DBG(CAM_CRM, "pd: %d: idx %d state %d",
tbl->pd, idx, tbl->slot[idx].state);
tbl->slot[idx].req_ready_map = 0;
tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
@@ -338,13 +341,14 @@
__cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
slot = &in_q->slot[idx];
- CRM_DBG("idx: %d: slot->status %d", idx, slot->status);
+ CAM_DBG(CAM_CRM, "idx: %d: slot->status %d", idx, slot->status);
/* Check if there is new req from CSL, if not complete req */
if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
__cam_req_mgr_in_q_skip_idx(in_q, idx);
if (in_q->wr_idx != idx)
- CRM_WARN("CHECK here wr %d, rd %d", in_q->wr_idx, idx);
+ CAM_WARN(CAM_CRM,
+ "CHECK here wr %d, rd %d", in_q->wr_idx, idx);
__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
}
}
@@ -376,13 +380,13 @@
if (dev) {
pd = dev->dev_info.p_delay;
if (pd >= CAM_PIPELINE_DELAY_MAX) {
- CRM_WARN("pd %d greater than max",
+ CAM_WARN(CAM_CRM, "pd %d greater than max",
pd);
continue;
}
if (link->req.apply_data[pd].skip_idx ||
link->req.apply_data[pd].req_id < 0) {
- CRM_DBG("skip %d req_id %lld",
+ CAM_DBG(CAM_CRM, "skip %d req_id %lld",
link->req.apply_data[pd].skip_idx,
link->req.apply_data[pd].req_id);
continue;
@@ -393,7 +397,10 @@
idx = link->req.apply_data[pd].idx;
apply_req.report_if_bubble =
in_q->slot[idx].recover;
- CRM_DBG("SEND: pd %d req_id %lld",
+
+ trace_cam_req_mgr_apply_request(link, &apply_req, dev);
+
+ CAM_DBG(CAM_CRM, "SEND: pd %d req_id %lld",
pd, apply_req.request_id);
if (dev->ops && dev->ops->apply_req) {
rc = dev->ops->apply_req(&apply_req);
@@ -403,7 +410,7 @@
}
}
if (rc < 0) {
- CRM_ERR("APPLY FAILED pd %d req_id %lld",
+ CAM_ERR(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
dev->dev_info.p_delay, apply_req.request_id);
/* Apply req failed notify already applied devs */
for (; i >= 0; i--) {
@@ -456,11 +463,12 @@
*/
rc = __cam_req_mgr_traverse(&traverse_data);
- CRM_DBG("SOF: idx %d result %x pd_mask %x rc %d",
+ CAM_DBG(CAM_CRM, "SOF: idx %d result %x pd_mask %x rc %d",
idx, traverse_data.result, link->pd_mask, rc);
if (!rc && traverse_data.result == link->pd_mask) {
- CRM_DBG("APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
+ CAM_DBG(CAM_CRM,
+ "APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
link->link_hdl, idx,
apply_data[2].req_id, apply_data[1].req_id,
apply_data[0].req_id);
@@ -497,12 +505,12 @@
* - if in applied_state, somthign wrong.
* - if in no_req state, no new req
*/
- CRM_DBG("idx %d req_status %d",
+ CAM_DBG(CAM_CRM, "idx %d req_status %d",
in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
slot = &in_q->slot[in_q->rd_idx];
if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
- CRM_DBG("No Pending req");
+ CAM_DBG(CAM_CRM, "No Pending req");
return 0;
}
@@ -516,7 +524,8 @@
slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
if (link->state == CAM_CRM_LINK_STATE_ERR) {
- CRM_WARN("Err recovery done idx %d status %d",
+ CAM_WARN(CAM_CRM,
+ "Err recovery done idx %d status %d",
in_q->rd_idx,
in_q->slot[in_q->rd_idx].status);
mutex_lock(&link->lock);
@@ -552,7 +561,8 @@
* don't expect to enter here.
* @TODO: gracefully handle if recovery fails.
*/
- CRM_ERR("FATAL recovery cant finish idx %d status %d",
+ CAM_ERR(CAM_CRM,
+ "FATAL recovery cant finish idx %d status %d",
in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
rc = -EPERM;
}
@@ -600,7 +610,7 @@
tbl->next = new_tbl;
tbl->pd_delta = tbl->pd - new_tbl->pd;
}
- CRM_DBG("added pd %d tbl to link delta %d", new_tbl->pd,
+ CAM_DBG(CAM_CRM, "added pd %d tbl to link delta %d", new_tbl->pd,
new_tbl->pd_delta);
}
@@ -619,7 +629,7 @@
kzalloc(sizeof(struct cam_req_mgr_req_tbl), GFP_KERNEL);
if (tbl != NULL) {
tbl->num_slots = MAX_REQ_SLOTS;
- CRM_DBG("pd= %d slots= %d", delay, tbl->num_slots);
+ CAM_DBG(CAM_CRM, "pd= %d slots= %d", delay, tbl->num_slots);
}
return tbl;
@@ -636,7 +646,7 @@
{
struct cam_req_mgr_req_tbl *tbl = *l_tbl, *temp;
- CRM_DBG("*l_tbl %pK", tbl);
+ CAM_DBG(CAM_CRM, "*l_tbl %pK", tbl);
while (tbl != NULL) {
temp = tbl->next;
kfree(tbl);
@@ -665,7 +675,7 @@
for (i = 0; i < in_q->num_slots; i++) {
slot = &in_q->slot[idx];
if (slot->req_id == req_id) {
- CRM_DBG("req %lld found at %d %d status %d",
+ CAM_DBG(CAM_CRM, "req %lld found at %d %d status %d",
req_id, idx, slot->idx,
slot->status);
break;
@@ -693,7 +703,7 @@
struct cam_req_mgr_req_queue *in_q = req->in_q;
if (!in_q) {
- CRM_ERR("NULL in_q");
+ CAM_ERR(CAM_CRM, "NULL in_q");
return -EINVAL;
}
@@ -728,7 +738,7 @@
struct cam_req_mgr_req_queue *in_q = req->in_q;
if (!in_q) {
- CRM_ERR("NULL in_q");
+ CAM_ERR(CAM_CRM, "NULL in_q");
return -EINVAL;
}
@@ -757,11 +767,11 @@
struct cam_req_mgr_core_link *link = NULL;
if (!timer) {
- CRM_ERR("NULL timer");
+ CAM_ERR(CAM_CRM, "NULL timer");
return;
}
link = (struct cam_req_mgr_core_link *)timer->parent;
- CRM_ERR("SOF freeze for link %x", link->link_hdl);
+ CAM_ERR(CAM_CRM, "SOF freeze for link %x", link->link_hdl);
}
/**
@@ -858,12 +868,12 @@
struct cam_req_mgr_req_queue *in_q;
if (!session || !g_crm_core_dev) {
- CRM_ERR("NULL session/core_dev ptr");
+ CAM_ERR(CAM_CRM, "NULL session/core_dev ptr");
return NULL;
}
if (session->num_links >= MAX_LINKS_PER_SESSION) {
- CRM_ERR("Reached max links %d per session limit %d",
+ CAM_ERR(CAM_CRM, "Reached max links %d per session limit %d",
session->num_links, MAX_LINKS_PER_SESSION);
return NULL;
}
@@ -871,7 +881,7 @@
link = (struct cam_req_mgr_core_link *)
kzalloc(sizeof(struct cam_req_mgr_core_link), GFP_KERNEL);
if (!link) {
- CRM_ERR("failed to create link, no mem");
+ CAM_ERR(CAM_CRM, "failed to create link, no mem");
return NULL;
}
in_q = &session->in_q;
@@ -892,7 +902,7 @@
mutex_lock(&session->lock);
session->links[session->num_links] = link;
session->num_links++;
- CRM_DBG("Active session links (%d)",
+ CAM_DBG(CAM_CRM, "Active session links (%d)",
session->num_links);
mutex_unlock(&session->lock);
@@ -914,14 +924,14 @@
int32_t i = 0;
if (!session || !*link) {
- CRM_ERR("NULL session/link ptr %pK %pK",
+ CAM_ERR(CAM_CRM, "NULL session/link ptr %pK %pK",
session, *link);
return;
}
mutex_lock(&session->lock);
if (!session->num_links)
- CRM_WARN("No active link or invalid state %d",
+ CAM_WARN(CAM_CRM, "No active link or invalid state %d",
session->num_links);
else {
for (i = 0; i < session->num_links; i++) {
@@ -929,7 +939,7 @@
session->links[i] = NULL;
}
session->num_links--;
- CRM_DBG("Active session links (%d)",
+ CAM_DBG(CAM_CRM, "Active session links (%d)",
session->num_links);
}
kfree(*link);
@@ -958,7 +968,7 @@
struct cam_req_mgr_req_queue *in_q = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
@@ -975,7 +985,7 @@
* cam_req_mgr_process_flush_req()
*
* @brief: This runs in workque thread context. Call core funcs to check
- * which requests need to be removedcancelled.
+ * which requests need to be removed/cancelled.
* @priv : link information.
* @data : contains information about frame_id, link etc.
*
@@ -993,20 +1003,22 @@
struct crm_task_payload *task_data = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
link = (struct cam_req_mgr_core_link *)priv;
task_data = (struct crm_task_payload *)data;
flush_info = (struct cam_req_mgr_flush_info *)&task_data->u;
- CRM_DBG("link_hdl %x req_id %lld type %d",
+ CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld type %d",
flush_info->link_hdl,
flush_info->req_id,
flush_info->flush_type);
in_q = link->req.in_q;
+ trace_cam_flush_req(link, flush_info);
+
mutex_lock(&link->req.lock);
if (flush_info->flush_type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
for (i = 0; i < in_q->num_slots; i++) {
@@ -1021,15 +1033,16 @@
CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id);
if (idx < 0) {
- CRM_ERR("req_id %lld not found in input queue",
+ CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
flush_info->req_id);
} else {
- CRM_DBG("req_id %lld found at idx %d",
+ CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
flush_info->req_id, idx);
slot = &in_q->slot[idx];
if (slot->status == CRM_SLOT_STATUS_REQ_PENDING ||
slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
- CRM_WARN("req_id %lld can not be cancelled",
+ CAM_WARN(CAM_CRM,
+ "req_id %lld can not be cancelled",
flush_info->req_id);
mutex_unlock(&link->req.lock);
return -EINVAL;
@@ -1075,14 +1088,14 @@
struct crm_task_payload *task_data = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
link = (struct cam_req_mgr_core_link *)priv;
task_data = (struct crm_task_payload *)data;
sched_req = (struct cam_req_mgr_sched_request *)&task_data->u;
- CRM_DBG("link_hdl %x req_id %lld",
+ CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld",
sched_req->link_hdl,
sched_req->req_id);
@@ -1093,9 +1106,9 @@
if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
- CRM_WARN("in_q overwrite %d", slot->status);
+ CAM_WARN(CAM_CRM, "in_q overwrite %d", slot->status);
- CRM_DBG("sched_req %lld at slot %d",
+ CAM_DBG(CAM_CRM, "sched_req %lld at slot %d",
sched_req->req_id, in_q->wr_idx);
slot->status = CRM_SLOT_STATUS_REQ_ADDED;
@@ -1130,7 +1143,7 @@
struct crm_task_payload *task_data = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
@@ -1147,7 +1160,7 @@
}
}
if (!tbl) {
- CRM_ERR("dev_hdl not found %x, %x %x",
+ CAM_ERR(CAM_CRM, "dev_hdl not found %x, %x %x",
add_req->dev_hdl,
link->l_dev[0].dev_hdl,
link->l_dev[1].dev_hdl);
@@ -1165,7 +1178,7 @@
mutex_lock(&link->req.lock);
idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
if (idx < 0) {
- CRM_ERR("req %lld not found in in_q", add_req->req_id);
+ CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
rc = -EBADSLT;
mutex_unlock(&link->req.lock);
goto end;
@@ -1173,19 +1186,21 @@
slot = &tbl->slot[idx];
if (slot->state != CRM_REQ_STATE_PENDING &&
slot->state != CRM_REQ_STATE_EMPTY) {
- CRM_WARN("Unexpected state %d for slot %d map %x",
+ CAM_WARN(CAM_CRM, "Unexpected state %d for slot %d map %x",
slot->state, idx, slot->req_ready_map);
}
slot->state = CRM_REQ_STATE_PENDING;
slot->req_ready_map |= (1 << device->dev_bit);
- CRM_DBG("idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
+ CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
slot->req_ready_map);
+ trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device);
+
if (slot->req_ready_map == tbl->dev_mask) {
- CRM_DBG("idx %d req_id %lld pd %d SLOT READY",
+ CAM_DBG(CAM_CRM, "idx %d req_id %lld pd %d SLOT READY",
idx, add_req->req_id, tbl->pd);
slot->state = CRM_REQ_STATE_READY;
}
@@ -1216,14 +1231,14 @@
struct crm_task_payload *task_data = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
link = (struct cam_req_mgr_core_link *)priv;
task_data = (struct crm_task_payload *)data;
err_info = (struct cam_req_mgr_error_notify *)&task_data->u;
- CRM_DBG("link_hdl %x req_id %lld error %d",
+ CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld error %d",
err_info->link_hdl,
err_info->req_id,
err_info->error);
@@ -1234,20 +1249,22 @@
if (err_info->error == CRM_KMD_ERR_BUBBLE) {
idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
if (idx < 0) {
- CRM_ERR("req_id %lld not found in input queue",
+ CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
err_info->req_id);
} else {
- CRM_DBG("req_id %lld found at idx %d",
+ CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
err_info->req_id, idx);
slot = &in_q->slot[idx];
if (!slot->recover) {
- CRM_WARN("err recovery disabled req_id %lld",
+ CAM_WARN(CAM_CRM,
+ "err recovery disabled req_id %lld",
err_info->req_id);
mutex_unlock(&link->req.lock);
return 0;
} else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING
&& slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
- CRM_WARN("req_id %lld can not be recovered %d",
+ CAM_WARN(CAM_CRM,
+ "req_id %lld can not be recovered %d",
err_info->req_id, slot->status);
mutex_unlock(&link->req.lock);
return -EINVAL;
@@ -1302,7 +1319,7 @@
struct crm_task_payload *task_data = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
@@ -1310,7 +1327,7 @@
task_data = (struct crm_task_payload *)data;
sof_data = (struct cam_req_mgr_sof_notify *)&task_data->u;
- CRM_DBG("link_hdl %x frame_id %lld",
+ CAM_DBG(CAM_CRM, "link_hdl %x frame_id %lld",
sof_data->link_hdl,
sof_data->frame_id);
@@ -1321,11 +1338,11 @@
* Check if current read index is in applied state, if yes make it free
* and increment read index to next slot.
*/
- CRM_DBG("link_hdl %x curent idx %d req_status %d",
+ CAM_DBG(CAM_CRM, "link_hdl %x curent idx %d req_status %d",
link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
if (link->state == CAM_CRM_LINK_STATE_ERR)
- CRM_WARN("Error recovery idx %d status %d",
+ CAM_WARN(CAM_CRM, "Error recovery idx %d status %d",
in_q->rd_idx,
in_q->slot[in_q->rd_idx].status);
@@ -1364,17 +1381,18 @@
struct crm_task_payload *task_data;
if (!add_req) {
- CRM_ERR("sof_data is NULL");
+ CAM_ERR(CAM_CRM, "sof_data is NULL");
rc = -EINVAL;
goto end;
}
- CRM_DBG("E: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
+ CAM_DBG(CAM_CRM, "E: dev %x dev req %lld",
+ add_req->dev_hdl, add_req->req_id);
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(add_req->link_hdl);
if (!link) {
- CRM_DBG("link ptr NULL %x", add_req->link_hdl);
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl);
rc = -EINVAL;
goto end;
}
@@ -1382,14 +1400,14 @@
/* Validate if req id is present in input queue */
idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
if (idx < 0) {
- CRM_ERR("req %lld not found in in_q", add_req->req_id);
+ CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
rc = -ENOENT;
goto end;
}
task = cam_req_mgr_workq_get_task(link->workq);
if (!task) {
- CRM_ERR("no empty task dev %x req %lld",
+ CAM_ERR(CAM_CRM, "no empty task dev %x req %lld",
add_req->dev_hdl, add_req->req_id);
rc = -EBUSY;
goto end;
@@ -1403,7 +1421,8 @@
dev_req->dev_hdl = add_req->dev_hdl;
task->process_cb = &cam_req_mgr_process_add_req;
rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
- CRM_DBG("X: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
+ CAM_DBG(CAM_CRM, "X: dev %x dev req %lld",
+ add_req->dev_hdl, add_req->req_id);
end:
return rc;
@@ -1428,7 +1447,7 @@
struct crm_task_payload *task_data;
if (!err_info) {
- CRM_ERR("err_info is NULL");
+ CAM_ERR(CAM_CRM, "err_info is NULL");
rc = -EINVAL;
goto end;
}
@@ -1436,7 +1455,7 @@
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(err_info->link_hdl);
if (!link) {
- CRM_DBG("link ptr NULL %x", err_info->link_hdl);
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", err_info->link_hdl);
rc = -EINVAL;
goto end;
}
@@ -1444,7 +1463,7 @@
crm_timer_reset(link->watchdog);
task = cam_req_mgr_workq_get_task(link->workq);
if (!task) {
- CRM_ERR("no empty task req_id %lld", err_info->req_id);
+ CAM_ERR(CAM_CRM, "no empty task req_id %lld", err_info->req_id);
rc = -EBUSY;
goto end;
}
@@ -1482,7 +1501,7 @@
struct crm_task_payload *task_data;
if (!sof_data) {
- CRM_ERR("sof_data is NULL");
+ CAM_ERR(CAM_CRM, "sof_data is NULL");
rc = -EINVAL;
goto end;
}
@@ -1490,7 +1509,7 @@
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(sof_data->link_hdl);
if (!link) {
- CRM_DBG("link ptr NULL %x", sof_data->link_hdl);
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", sof_data->link_hdl);
rc = -EINVAL;
goto end;
}
@@ -1498,7 +1517,8 @@
crm_timer_reset(link->watchdog);
task = cam_req_mgr_workq_get_task(link->workq);
if (!task) {
- CRM_ERR("no empty task frame %lld", sof_data->frame_id);
+ CAM_ERR(CAM_CRM, "no empty task frame %lld",
+ sof_data->frame_id);
rc = -EBUSY;
goto end;
}
@@ -1545,7 +1565,7 @@
return -EPERM;
mutex_init(&link->req.lock);
- CRM_DBG("LOCK_DBG in_q lock %pK", &link->req.lock);
+ CAM_DBG(CAM_CRM, "LOCK_DBG in_q lock %pK", &link->req.lock);
link->req.num_tbl = 0;
rc = __cam_req_mgr_setup_in_q(&link->req);
@@ -1562,7 +1582,7 @@
if (!dev->ops ||
!dev->ops->get_dev_info ||
!dev->ops->link_setup) {
- CRM_ERR("FATAL: device ops NULL");
+ CAM_ERR(CAM_CRM, "FATAL: device ops NULL");
rc = -ENXIO;
goto error;
}
@@ -1570,7 +1590,10 @@
dev->parent = (void *)link;
dev->dev_info.dev_hdl = dev->dev_hdl;
rc = dev->ops->get_dev_info(&dev->dev_info);
- CRM_DBG("%x: connected: %s, id %d, delay %d",
+
+ trace_cam_req_mgr_connect_device(link, &dev->dev_info);
+
+ CAM_DBG(CAM_CRM, "%x: connected: %s, id %d, delay %d",
link_info->session_hdl, dev->dev_info.name,
dev->dev_info.dev_id, dev->dev_info.p_delay);
if (rc < 0 ||
@@ -1578,10 +1601,10 @@
CAM_PIPELINE_DELAY_MAX ||
dev->dev_info.p_delay <
CAM_PIPELINE_DELAY_0) {
- CRM_ERR("get device info failed");
+ CAM_ERR(CAM_CRM, "get device info failed");
goto error;
} else {
- CRM_DBG("%x: connected: %s, delay %d",
+ CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d",
link_info->session_hdl,
dev->dev_info.name,
dev->dev_info.p_delay);
@@ -1610,7 +1633,7 @@
pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl,
dev->dev_info.p_delay);
if (!pd_tbl) {
- CRM_ERR("pd %d tbl not found",
+ CAM_ERR(CAM_CRM, "pd %d tbl not found",
dev->dev_info.p_delay);
rc = -ENXIO;
goto error;
@@ -1619,7 +1642,7 @@
pd_tbl = __cam_req_mgr_create_pd_tbl(
dev->dev_info.p_delay);
if (pd_tbl == NULL) {
- CRM_ERR("create new pd tbl failed");
+ CAM_ERR(CAM_CRM, "create new pd tbl failed");
rc = -ENXIO;
goto error;
}
@@ -1666,7 +1689,7 @@
struct cam_req_mgr_core_session *cam_session = NULL;
if (!ses_info) {
- CRM_DBG("NULL session info pointer");
+ CAM_DBG(CAM_CRM, "NULL session info pointer");
return -EINVAL;
}
mutex_lock(&g_crm_core_dev->crm_lock);
@@ -1679,7 +1702,8 @@
session_hdl = cam_create_session_hdl((void *)cam_session);
if (session_hdl < 0) {
- CRM_ERR("unable to create session_hdl = %x", session_hdl);
+ CAM_ERR(CAM_CRM, "unable to create session_hdl = %x",
+ session_hdl);
rc = session_hdl;
kfree(cam_session);
goto end;
@@ -1687,7 +1711,7 @@
ses_info->session_hdl = session_hdl;
mutex_init(&cam_session->lock);
- CRM_DBG("LOCK_DBG session lock %pK", &cam_session->lock);
+ CAM_DBG(CAM_CRM, "LOCK_DBG session lock %pK", &cam_session->lock);
mutex_lock(&cam_session->lock);
cam_session->session_hdl = session_hdl;
@@ -1706,7 +1730,7 @@
struct cam_req_mgr_core_session *cam_session = NULL;
if (!ses_info) {
- CRM_DBG("NULL session info pointer");
+ CAM_DBG(CAM_CRM, "NULL session info pointer");
return -EINVAL;
}
@@ -1714,14 +1738,14 @@
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(ses_info->session_hdl);
if (!cam_session) {
- CRM_ERR("failed to get session priv");
+ CAM_ERR(CAM_CRM, "failed to get session priv");
rc = -ENOENT;
goto end;
}
mutex_lock(&cam_session->lock);
if (cam_session->num_links) {
- CRM_ERR("destroy session %x num_active_links %d",
+ CAM_ERR(CAM_CRM, "destroy session %x num_active_links %d",
ses_info->session_hdl,
cam_session->num_links);
/* @TODO : Go through active links and destroy ? */
@@ -1733,7 +1757,7 @@
rc = cam_destroy_session_hdl(ses_info->session_hdl);
if (rc < 0)
- CRM_ERR("unable to destroy session_hdl = %x rc %d",
+ CAM_ERR(CAM_CRM, "unable to destroy session_hdl = %x rc %d",
ses_info->session_hdl, rc);
end:
@@ -1750,11 +1774,12 @@
struct cam_req_mgr_core_link *link;
if (!link_info) {
- CRM_DBG("NULL pointer");
+ CAM_DBG(CAM_CRM, "NULL pointer");
return -EINVAL;
}
if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
- CRM_ERR("Invalid num devices %d", link_info->num_devices);
+ CAM_ERR(CAM_CRM, "Invalid num devices %d",
+ link_info->num_devices);
return -EINVAL;
}
@@ -1762,7 +1787,7 @@
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(link_info->session_hdl);
if (!cam_session) {
- CRM_DBG("NULL pointer");
+ CAM_DBG(CAM_CRM, "NULL pointer");
return -EINVAL;
}
@@ -1771,11 +1796,11 @@
/* Allocate link struct and map it with session's request queue */
link = __cam_req_mgr_reserve_link(cam_session);
if (!link) {
- CRM_ERR("failed to reserve new link");
+ CAM_ERR(CAM_CRM, "failed to reserve new link");
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
- CRM_DBG("link reserved %pK %x", link, link->link_hdl);
+ CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl);
memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
root_dev.session_hdl = link_info->session_hdl;
@@ -1785,7 +1810,8 @@
/* Create unique dev handle for link */
link->link_hdl = cam_create_device_hdl(&root_dev);
if (link->link_hdl < 0) {
- CRM_ERR("Insufficient memory to create new device handle");
+ CAM_ERR(CAM_CRM,
+ "Insufficient memory to create new device handle");
mutex_unlock(&link->lock);
rc = link->link_hdl;
goto link_hdl_fail;
@@ -1797,7 +1823,8 @@
rc = __cam_req_mgr_create_subdevs(&link->l_dev,
link_info->num_devices);
if (rc < 0) {
- CRM_ERR("Insufficient memory to create new crm subdevs");
+ CAM_ERR(CAM_CRM,
+ "Insufficient memory to create new crm subdevs");
goto create_subdev_failed;
}
@@ -1816,7 +1843,7 @@
rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
&link->workq, CRM_WORKQ_USAGE_NON_IRQ);
if (rc < 0) {
- CRM_ERR("FATAL: unable to create worker");
+ CAM_ERR(CAM_CRM, "FATAL: unable to create worker");
__cam_req_mgr_destroy_link_info(link);
goto setup_failed;
}
@@ -1862,18 +1889,18 @@
struct cam_req_mgr_core_link *link;
if (!unlink_info) {
- CRM_ERR("NULL pointer");
+ CAM_ERR(CAM_CRM, "NULL pointer");
return -EINVAL;
}
mutex_lock(&g_crm_core_dev->crm_lock);
- CRM_DBG("link_hdl %x", unlink_info->link_hdl);
+ CAM_DBG(CAM_CRM, "link_hdl %x", unlink_info->link_hdl);
/* session hdl's priv data is cam session struct */
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(unlink_info->session_hdl);
if (!cam_session) {
- CRM_ERR("NULL pointer");
+ CAM_ERR(CAM_CRM, "NULL pointer");
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
@@ -1881,7 +1908,7 @@
/* link hdl's priv data is core_link struct */
link = cam_get_device_priv(unlink_info->link_hdl);
if (!link) {
- CRM_ERR("NULL pointer");
+ CAM_ERR(CAM_CRM, "NULL pointer");
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
@@ -1905,7 +1932,7 @@
/* Destroy the link handle */
rc = cam_destroy_device_hdl(unlink_info->link_hdl);
if (rc < 0) {
- CRM_ERR("error while destroying dev handle %d %x",
+ CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x",
rc, link->link_hdl);
}
@@ -1926,7 +1953,7 @@
struct crm_task_payload task_data;
if (!sched_req) {
- CRM_ERR("csl_req is NULL");
+ CAM_ERR(CAM_CRM, "csl_req is NULL");
rc = -EINVAL;
goto end;
}
@@ -1934,15 +1961,16 @@
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(sched_req->link_hdl);
if (!link) {
- CRM_DBG("link ptr NULL %x", sched_req->link_hdl);
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", sched_req->link_hdl);
return -EINVAL;
}
session = (struct cam_req_mgr_core_session *)link->parent;
if (!session) {
- CRM_WARN("session ptr NULL %x", sched_req->link_hdl);
+ CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl);
return -EINVAL;
}
- CRM_DBG("link %x req %lld", sched_req->link_hdl, sched_req->req_id);
+ CAM_DBG(CAM_CRM, "link %x req %lld",
+ sched_req->link_hdl, sched_req->req_id);
task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
sched = (struct cam_req_mgr_sched_request *)&task_data.u;
@@ -1957,7 +1985,8 @@
rc = cam_req_mgr_process_sched_req(link, &task_data);
- CRM_DBG("DONE dev %x req %lld", sched_req->link_hdl, sched_req->req_id);
+ CAM_DBG(CAM_CRM, "DONE dev %x req %lld",
+ sched_req->link_hdl, sched_req->req_id);
end:
return rc;
}
@@ -1966,7 +1995,7 @@
struct cam_req_mgr_sync_mode *sync_links)
{
if (!sync_links) {
- CRM_ERR("NULL pointer");
+ CAM_ERR(CAM_CRM, "NULL pointer");
return -EINVAL;
}
@@ -1985,12 +2014,13 @@
struct cam_req_mgr_core_session *session = NULL;
if (!flush_info) {
- CRM_ERR("flush req is NULL");
+ CAM_ERR(CAM_CRM, "flush req is NULL");
rc = -EFAULT;
goto end;
}
if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
- CRM_ERR("incorrect flush type %x", flush_info->flush_type);
+ CAM_ERR(CAM_CRM, "incorrect flush type %x",
+ flush_info->flush_type);
rc = -EINVAL;
goto end;
}
@@ -1999,12 +2029,12 @@
session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(flush_info->session_hdl);
if (!session) {
- CRM_ERR("Invalid session %x", flush_info->session_hdl);
+ CAM_ERR(CAM_CRM, "Invalid session %x", flush_info->session_hdl);
rc = -EINVAL;
goto end;
}
if (session->num_links <= 0) {
- CRM_WARN("No active links in session %x",
+ CAM_WARN(CAM_CRM, "No active links in session %x",
flush_info->session_hdl);
goto end;
}
@@ -2012,7 +2042,7 @@
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(flush_info->link_hdl);
if (!link) {
- CRM_DBG("link ptr NULL %x", flush_info->link_hdl);
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", flush_info->link_hdl);
rc = -EINVAL;
goto end;
}
@@ -2044,10 +2074,10 @@
int cam_req_mgr_core_device_init(void)
{
- CRM_DBG("Enter g_crm_core_dev %pK", g_crm_core_dev);
+ CAM_DBG(CAM_CRM, "Enter g_crm_core_dev %pK", g_crm_core_dev);
if (g_crm_core_dev) {
- CRM_WARN("core device is already initialized");
+ CAM_WARN(CAM_CRM, "core device is already initialized");
return 0;
}
g_crm_core_dev = (struct cam_req_mgr_core_device *)
@@ -2055,7 +2085,7 @@
if (!g_crm_core_dev)
return -ENOMEM;
- CRM_DBG("g_crm_core_dev %pK", g_crm_core_dev);
+ CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
INIT_LIST_HEAD(&g_crm_core_dev->session_head);
mutex_init(&g_crm_core_dev->crm_lock);
cam_req_mgr_debug_register(g_crm_core_dev);
@@ -2066,11 +2096,11 @@
int cam_req_mgr_core_device_deinit(void)
{
if (!g_crm_core_dev) {
- CRM_ERR("NULL pointer");
+ CAM_ERR(CAM_CRM, "NULL pointer");
return -EINVAL;
}
- CRM_DBG("g_crm_core_dev %pK", g_crm_core_dev);
+ CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
mutex_destroy(&g_crm_core_dev->crm_lock);
kfree(g_crm_core_dev);
g_crm_core_dev = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
index 2a831e8..f61c41e 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
@@ -25,30 +25,5 @@
#define CRM_GET_REQ_ID(in_q, idx) in_q->slot[idx].req_id
-#if (CRM_TRACE_ENABLE == 1)
- #define CRM_DBG(fmt, args...) do { \
- trace_printk("%d: [crm_dbg] "fmt"\n", __LINE__, ##args); \
- pr_debug("%s:%d "fmt"\n", __func__, __LINE__, ##args); \
- } while (0)
-
- #define CRM_WARN(fmt, args...) do { \
- trace_printk("%d: [crm_warn] "fmt"\n", __LINE__, ##args); \
- pr_warn("%s:%d "fmt"\n", __func__, __LINE__, ##args); \
- } while (0)
-
- #define CRM_ERR(fmt, args...) do { \
- trace_printk("%d: [crm_err] "fmt"\n", __LINE__, ##args); \
- pr_err("%s:%d "fmt"\n", __func__, __LINE__, ##args);\
- } while (0)
-#else
- #define CRM_DBG(fmt, args...) pr_debug("%s:%d "fmt"\n", \
- __func__, __LINE__, ##args)
-
- #define CRM_WARN(fmt, args...) pr_warn("%s:%d "fmt"\n", \
- __func__, __LINE__, ##args)
-
- #define CRM_ERR(fmt, args...) pr_err("%s:%d "fmt"\n", \
- __func__, __LINE__, ##args)
-#endif
#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index c495088..7a2bc09 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-REQ-MGR %s:%d " fmt, __func__, __LINE__
-
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -26,6 +24,7 @@
#include "cam_req_mgr_core.h"
#include "cam_subdev.h"
#include "cam_mem_mgr.h"
+#include "cam_debug_util.h"
#define CAM_REQ_MGR_EVENT_MAX 30
@@ -108,7 +107,7 @@
rc = v4l2_fh_open(filep);
if (rc) {
- pr_err("v4l2_fh_open failed: %d\n", rc);
+ CAM_ERR(CAM_CRM, "v4l2_fh_open failed: %d", rc);
goto end;
}
@@ -120,7 +119,7 @@
rc = cam_mem_mgr_init();
if (rc) {
g_dev.open_cnt--;
- pr_err("mem mgr init failed\n");
+ CAM_ERR(CAM_CRM, "mem mgr init failed");
goto mem_mgr_init_fail;
}
@@ -507,19 +506,20 @@
int rc;
if (g_dev.state != true) {
- pr_err("camera root device not ready yet");
+ CAM_ERR(CAM_CRM, "camera root device not ready yet");
return -ENODEV;
}
if (!csd || !csd->name) {
- pr_err("invalid arguments");
+ CAM_ERR(CAM_CRM, "invalid arguments");
return -EINVAL;
}
mutex_lock(&g_dev.dev_lock);
if ((g_dev.subdev_nodes_created) &&
(csd->sd_flags & V4L2_SUBDEV_FL_HAS_DEVNODE)) {
- pr_err("dynamic node is not allowed, name: %s, type : %d",
+ CAM_ERR(CAM_CRM,
+ "dynamic node is not allowed, name: %s, type :%d",
csd->name, csd->ent_function);
rc = -EINVAL;
goto reg_fail;
@@ -538,7 +538,7 @@
rc = v4l2_device_register_subdev(g_dev.v4l2_dev, sd);
if (rc) {
- pr_err("register subdev failed");
+ CAM_ERR(CAM_CRM, "register subdev failed");
goto reg_fail;
}
g_dev.count++;
@@ -552,7 +552,7 @@
int cam_unregister_subdev(struct cam_subdev *csd)
{
if (g_dev.state != true) {
- pr_err("camera root device not ready yet");
+ CAM_ERR(CAM_CRM, "camera root device not ready yet");
return -ENODEV;
}
@@ -603,19 +603,19 @@
rc = cam_req_mgr_util_init();
if (rc) {
- pr_err("cam req mgr util init is failed\n");
+ CAM_ERR(CAM_CRM, "cam req mgr util init is failed");
goto req_mgr_util_fail;
}
rc = cam_mem_mgr_init();
if (rc) {
- pr_err("mem mgr init failed\n");
+ CAM_ERR(CAM_CRM, "mem mgr init failed");
goto mem_mgr_init_fail;
}
rc = cam_req_mgr_core_device_init();
if (rc) {
- pr_err("core device setup failed\n");
+ CAM_ERR(CAM_CRM, "core device setup failed");
goto req_mgr_core_fail;
}
@@ -663,7 +663,7 @@
return -EINVAL;
if (g_dev.state != true) {
- pr_err("camera root device not ready yet");
+ CAM_ERR(CAM_CRM, "camera root device not ready yet");
return -ENODEV;
}
@@ -675,7 +675,7 @@
rc = v4l2_device_register_subdev_nodes(g_dev.v4l2_dev);
if (rc) {
- pr_err("failed to register the sub devices");
+ CAM_ERR(CAM_CRM, "failed to register the sub devices");
goto create_fail;
}
@@ -683,7 +683,7 @@
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
continue;
sd->entity.name = video_device_node_name(sd->devnode);
- pr_debug("created node :%s\n", sd->entity.name);
+ CAM_DBG(CAM_CRM, "created node :%s", sd->entity.name);
}
g_dev.subdev_nodes_created = true;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
index 91860f6..8faf35a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
@@ -196,9 +196,9 @@
* @error : what error device hit while processing this req
*/
struct cam_req_mgr_error_notify {
- int32_t link_hdl;
- int32_t dev_hdl;
- int64_t req_id;
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint64_t req_id;
enum cam_req_mgr_device_error error;
};
@@ -210,9 +210,9 @@
*
*/
struct cam_req_mgr_add_request {
- int32_t link_hdl;
- int32_t dev_hdl;
- int64_t req_id;
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint64_t req_id;
};
@@ -260,7 +260,7 @@
struct cam_req_mgr_apply_request {
int32_t link_hdl;
int32_t dev_hdl;
- int64_t request_id;
+ uint64_t request_id;
int32_t report_if_bubble;
};
@@ -276,7 +276,7 @@
int32_t link_hdl;
int32_t dev_hdl;
uint32_t type;
- int64_t req_id;
+ uint64_t req_id;
};
/**
@@ -286,9 +286,9 @@
*
*/
struct cam_req_mgr_link_evt_data {
- int32_t link_hdl;
- int32_t dev_hdl;
- int64_t req_id;
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint64_t req_id;
enum cam_req_mgr_link_evt_type evt_type;
union {
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
index 9da445d..2aa2ab1 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
@@ -11,12 +11,13 @@
*/
#include "cam_req_mgr_timer.h"
+#include "cam_debug_util.h"
void crm_timer_reset(struct cam_req_mgr_timer *crm_timer)
{
if (!crm_timer)
return;
- CRM_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
+ CAM_DBG(CAM_CRM, "Starting timer to fire in %d ms. (jiffies=%lu)",
crm_timer->expires, jiffies);
mod_timer(&crm_timer->sys_timer,
(jiffies + msecs_to_jiffies(crm_timer->expires)));
@@ -27,17 +28,17 @@
struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
if (!timer) {
- CRM_ERR("NULL timer");
+ CAM_ERR(CAM_CRM, "NULL timer");
return;
}
- CRM_DBG("timer %pK parent %pK", timer, timer->parent);
+ CAM_DBG(CAM_CRM, "timer %pK parent %pK", timer, timer->parent);
crm_timer_reset(timer);
}
void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
int32_t expires)
{
- CRM_DBG("new time %d", expires);
+ CAM_DBG(CAM_CRM, "new time %d", expires);
if (crm_timer) {
crm_timer->expires = expires;
crm_timer_reset(crm_timer);
@@ -50,7 +51,7 @@
int ret = 0;
struct cam_req_mgr_timer *crm_timer = NULL;
- CRM_DBG("init timer %d %pK", expires, *timer);
+ CAM_DBG(CAM_CRM, "init timer %d %pK", expires, *timer);
if (*timer == NULL) {
crm_timer = (struct cam_req_mgr_timer *)
kzalloc(sizeof(struct cam_req_mgr_timer), GFP_KERNEL);
@@ -71,7 +72,7 @@
crm_timer_reset(crm_timer);
*timer = crm_timer;
} else {
- CRM_WARN("Timer already exists!!");
+ CAM_WARN(CAM_CRM, "Timer already exists!!");
ret = -EINVAL;
}
end:
@@ -79,7 +80,7 @@
}
void crm_timer_exit(struct cam_req_mgr_timer **crm_timer)
{
- CRM_DBG("destroy timer %pK", *crm_timer);
+ CAM_DBG(CAM_CRM, "destroy timer %pK", *crm_timer);
if (*crm_timer) {
del_timer(&(*crm_timer)->sys_timer);
kfree(*crm_timer);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index 38048d5..a9134fb 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -20,12 +20,7 @@
#include <linux/random.h>
#include <media/cam_req_mgr.h>
#include "cam_req_mgr_util.h"
-
-#ifdef CONFIG_CAM_REQ_MGR_UTIL_DEBUG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
+#include "cam_debug_util.h"
static struct cam_req_mgr_util_hdl_tbl *hdl_tbl;
static DEFINE_SPINLOCK(hdl_tbl_lock);
@@ -38,7 +33,7 @@
if (hdl_tbl) {
rc = -EINVAL;
- pr_err("Hdl_tbl is already present\n");
+ CAM_ERR(CAM_CRM, "Hdl_tbl is already present");
goto hdl_tbl_check_failed;
}
@@ -79,7 +74,7 @@
{
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
@@ -99,14 +94,14 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
for (i = 0; i < CAM_REQ_MGR_MAX_HANDLES; i++) {
if (hdl_tbl->hdl[i].state == HDL_ACTIVE) {
- pr_err("Dev handle = %x session_handle = %x\n",
+ CAM_ERR(CAM_CRM, "Dev handle = %x session_handle = %x",
hdl_tbl->hdl[i].hdl_value,
hdl_tbl->hdl[i].session_hdl);
hdl_tbl->hdl[i].state = HDL_FREE;
@@ -141,14 +136,14 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
idx = cam_get_free_handle_index();
if (idx < 0) {
- pr_err("Unable to create session handle\n");
+ CAM_ERR(CAM_CRM, "Unable to create session handle");
spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
@@ -174,14 +169,14 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
idx = cam_get_free_handle_index();
if (idx < 0) {
- pr_err("Unable to create device handle\n");
+ CAM_ERR(CAM_CRM, "Unable to create device handle");
spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
@@ -196,7 +191,7 @@
hdl_tbl->hdl[idx].ops = hdl_data->ops;
spin_unlock_bh(&hdl_tbl_lock);
- pr_debug("%s: handle = %x\n", __func__, handle);
+ pr_debug("%s: handle = %x", __func__, handle);
return handle;
}
@@ -208,29 +203,29 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
goto device_priv_fail;
}
idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
- pr_err("Invalid idx\n");
+ CAM_ERR(CAM_CRM, "Invalid idx");
goto device_priv_fail;
}
if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
- pr_err("Invalid state\n");
+ CAM_ERR(CAM_CRM, "Invalid state");
goto device_priv_fail;
}
type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
- pr_err("Invalid type\n");
+ CAM_ERR(CAM_CRM, "Invalid type");
goto device_priv_fail;
}
if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
- pr_err("Invalid hdl\n");
+ CAM_ERR(CAM_CRM, "Invalid hdl");
goto device_priv_fail;
}
@@ -252,29 +247,29 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
goto device_ops_fail;
}
idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
- pr_err("Invalid idx\n");
+ CAM_ERR(CAM_CRM, "Invalid idx");
goto device_ops_fail;
}
if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
- pr_err("Invalid state\n");
+ CAM_ERR(CAM_CRM, "Invalid state");
goto device_ops_fail;
}
type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
- pr_err("Invalid type\n");
+ CAM_ERR(CAM_CRM, "Invalid type");
goto device_ops_fail;
}
if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
- pr_err("Invalid hdl\n");
+ CAM_ERR(CAM_CRM, "Invalid hdl");
goto device_ops_fail;
}
@@ -295,29 +290,29 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
goto destroy_hdl_fail;
}
idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
- pr_err("Invalid idx\n");
+ CAM_ERR(CAM_CRM, "Invalid idx");
goto destroy_hdl_fail;
}
if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
- pr_err("Invalid state\n");
+ CAM_ERR(CAM_CRM, "Invalid state");
goto destroy_hdl_fail;
}
type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
if (type != dev_hdl_type) {
- pr_err("Invalid type %d, %d\n", type, dev_hdl_type);
+ CAM_ERR(CAM_CRM, "Invalid type %d, %d", type, dev_hdl_type);
goto destroy_hdl_fail;
}
if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
- pr_err("Invalid hdl\n");
+ CAM_ERR(CAM_CRM, "Invalid hdl");
goto destroy_hdl_fail;
}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 38dcb42..c48a391 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -11,6 +11,7 @@
*/
#include "cam_req_mgr_workq.h"
+#include "cam_debug_util.h"
#define WORKQ_ACQUIRE_LOCK(workq, flags) {\
if ((workq)->in_irq) \
@@ -84,7 +85,7 @@
if (task->process_cb)
task->process_cb(task->priv, task->payload);
else
- CRM_WARN("FATAL:no task handler registered for workq");
+ CAM_WARN(CAM_CRM, "FATAL:no task handler registered for workq");
cam_req_mgr_workq_put_task(task);
return 0;
@@ -100,7 +101,7 @@
struct crm_workq_task *task, *task_save;
int32_t i = CRM_TASK_PRIORITY_0;
if (!w) {
- CRM_ERR("NULL task pointer can not schedule");
+ CAM_ERR(CAM_CRM, "NULL task pointer can not schedule");
return;
}
workq = (struct cam_req_mgr_core_workq *)
@@ -113,7 +114,7 @@
atomic_sub(1, &workq->task.pending_cnt);
cam_req_mgr_process_task(task);
}
- CRM_DBG("processed task %pK free_cnt %d",
+ CAM_DBG(CAM_CRM, "processed task %pK free_cnt %d",
task, atomic_read(&workq->task.free_cnt));
}
i++;
@@ -125,7 +126,7 @@
int32_t i = CRM_TASK_PRIORITY_0;
struct crm_workq_task *task, *task_save;
- CRM_DBG("pending_cnt %d",
+ CAM_DBG(CAM_CRM, "pending_cnt %d",
atomic_read(&workq->task.pending_cnt));
while (i < CRM_TASK_PRIORITY_MAX) {
@@ -133,7 +134,7 @@
list_for_each_entry_safe(task, task_save,
&workq->task.process_head[i], entry) {
cam_req_mgr_workq_put_task(task);
- CRM_WARN("flush task %pK, %d, cnt %d",
+ CAM_WARN(CAM_CRM, "flush task %pK, %d, cnt %d",
task, i, atomic_read(
&workq->task.free_cnt));
}
@@ -150,13 +151,13 @@
unsigned long flags = 0;
if (!task) {
- CRM_WARN("NULL task pointer can not schedule");
+ CAM_WARN(CAM_CRM, "NULL task pointer can not schedule");
rc = -EINVAL;
goto end;
}
workq = (struct cam_req_mgr_core_workq *)task->parent;
if (!workq) {
- CRM_DBG("NULL workq pointer suspect mem corruption");
+ CAM_DBG(CAM_CRM, "NULL workq pointer suspect mem corruption");
rc = -EINVAL;
goto end;
}
@@ -167,7 +168,7 @@
if (task->cancel == 1) {
cam_req_mgr_workq_put_task(task);
- CRM_WARN("task aborted and queued back to pool");
+ CAM_WARN(CAM_CRM, "task aborted and queued back to pool");
rc = 0;
goto end;
}
@@ -182,7 +183,7 @@
WORKQ_RELEASE_LOCK(workq, flags);
atomic_add(1, &workq->task.pending_cnt);
- CRM_DBG("enq task %pK pending_cnt %d",
+ CAM_DBG(CAM_CRM, "enq task %pK pending_cnt %d",
task, atomic_read(&workq->task.pending_cnt));
queue_work(workq->job, &workq->work);
@@ -207,7 +208,7 @@
return -ENOMEM;
strlcat(buf, name, sizeof(buf));
- CRM_DBG("create workque crm_workq-%s", name);
+ CAM_DBG(CAM_CRM, "create workque crm_workq-%s", name);
crm_workq->job = alloc_workqueue(buf,
WQ_HIGHPRI | WQ_UNBOUND, 0, NULL);
if (!crm_workq->job) {
@@ -218,7 +219,7 @@
/* Workq attributes initialization */
INIT_WORK(&crm_workq->work, cam_req_mgr_process_workq);
spin_lock_init(&crm_workq->lock_bh);
- CRM_DBG("LOCK_DBG workq %s lock %pK",
+ CAM_DBG(CAM_CRM, "LOCK_DBG workq %s lock %pK",
name, &crm_workq->lock_bh);
/* Task attributes initialization */
@@ -234,7 +235,7 @@
crm_workq->task.num_task,
GFP_KERNEL);
if (!crm_workq->task.pool) {
- CRM_WARN("Insufficient memory %lu",
+ CAM_WARN(CAM_CRM, "Insufficient memory %lu",
sizeof(struct crm_workq_task) *
crm_workq->task.num_task);
kfree(crm_workq);
@@ -250,7 +251,7 @@
cam_req_mgr_workq_put_task(task);
}
*workq = crm_workq;
- CRM_DBG("free tasks %d",
+ CAM_DBG(CAM_CRM, "free tasks %d",
atomic_read(&crm_workq->task.free_cnt));
}
@@ -259,7 +260,7 @@
void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
{
- CRM_DBG("destroy workque %pK", crm_workq);
+ CAM_DBG(CAM_CRM, "destroy workque %pK", crm_workq);
if (*crm_workq) {
crm_workq_clear_q(*crm_workq);
if ((*crm_workq)->job) {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
index e515a40..b66480c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
@@ -4,3 +4,5 @@
obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
index 8670d80..4e8ea8b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
@@ -1,8 +1,10 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator_dev.o cam_actuator_core.o cam_actuator_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 0a96f18..1dcc54f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -13,7 +13,8 @@
#include <linux/module.h>
#include <cam_sensor_cmn_header.h>
#include "cam_actuator_core.h"
-#include <cam_sensor_util.h>
+#include "cam_sensor_util.h"
+#include "cam_trace.h"
int32_t cam_actuator_slaveInfo_pkt_parser(struct cam_actuator_ctrl_t *a_ctrl,
uint32_t *cmd_buf)
@@ -22,7 +23,7 @@
struct cam_cmd_i2c_info *i2c_info;
if (!a_ctrl || !cmd_buf) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args");
return -EINVAL;
}
@@ -31,8 +32,8 @@
i2c_info->i2c_freq_mode;
a_ctrl->io_master_info.cci_client->sid =
i2c_info->slave_addr >> 1;
- CDBG("%s:%d Slave addr: 0x%x Freq Mode: %d\n", __func__,
- __LINE__, i2c_info->slave_addr, i2c_info->i2c_freq_mode);
+ CAM_DBG(CAM_ACTUATOR, "Slave addr: 0x%x Freq Mode: %d",
+ i2c_info->slave_addr, i2c_info->i2c_freq_mode);
return rc;
}
@@ -45,13 +46,12 @@
uint32_t i, size;
if (a_ctrl == NULL || i2c_set == NULL) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args");
return -EINVAL;
}
if (i2c_set->is_settings_valid != 1) {
- pr_err("%s: %d :Error: Invalid settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, " Invalid settings");
return -EINVAL;
}
@@ -61,8 +61,8 @@
rc = camera_io_dev_write(&(a_ctrl->io_master_info),
&(i2c_list->i2c_settings));
if (rc < 0) {
- pr_err("%s: %d :Error: Failed in Applying i2c write settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed in Applying i2c wrt settings");
return rc;
}
} else if (i2c_list->op_code == CAM_SENSOR_I2C_POLL) {
@@ -81,8 +81,8 @@
i2c_list->i2c_settings.
reg_setting[i].delay);
if (rc < 0) {
- pr_err("%s: %d :Error: Failed in Applying i2c poll settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR,
+ "i2c poll apply setting Fail");
return rc;
}
}
@@ -98,21 +98,21 @@
struct cam_actuator_ctrl_t *a_ctrl = NULL;
if (!apply) {
- pr_err("%s:%d :Error: Invalid Input Args\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Input Args");
return -EINVAL;
}
a_ctrl = (struct cam_actuator_ctrl_t *)
cam_get_device_priv(apply->dev_hdl);
if (!a_ctrl) {
- pr_err("%s: %d :Error: Device data is NULL\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
return -EINVAL;
}
request_id = apply->request_id % MAX_PER_FRAME_ARRAY;
- CDBG("%s:%d Request Id: %lld\n",
- __func__, __LINE__, apply->request_id);
+
+ trace_cam_apply_req("Actuator", apply);
+
+ CAM_DBG(CAM_ACTUATOR, "Request Id: %lld", apply->request_id);
if ((apply->request_id ==
a_ctrl->i2c_data.per_frame[request_id].request_id) &&
@@ -121,8 +121,9 @@
rc = cam_actuator_apply_settings(a_ctrl,
&a_ctrl->i2c_data.per_frame[request_id]);
if (rc < 0) {
- pr_err("%s:%d Failed in applying the request: %lld\n",
- __func__, __LINE__, apply->request_id);
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed in applying the request: %lld",
+ apply->request_id);
return rc;
}
}
@@ -135,12 +136,13 @@
a_ctrl->i2c_data.per_frame[del_req_id].request_id = 0;
rc = delete_request(&a_ctrl->i2c_data.per_frame[del_req_id]);
if (rc < 0) {
- pr_err("%s: %d :Error: Fail deleting the req: %d err: %d\n",
- __func__, __LINE__, del_req_id, rc);
+ CAM_ERR(CAM_ACTUATOR,
+ "Fail deleting the req: %d err: %d",
+ del_req_id, rc);
return rc;
}
} else {
- CDBG("%s:%d No Valid Req to clean Up\n", __func__, __LINE__);
+ CAM_DBG(CAM_ACTUATOR, "No Valid Req to clean Up");
}
return rc;
@@ -152,15 +154,14 @@
struct cam_actuator_ctrl_t *a_ctrl = NULL;
if (!link) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args");
return -EINVAL;
}
a_ctrl = (struct cam_actuator_ctrl_t *)
cam_get_device_priv(link->dev_hdl);
if (!a_ctrl) {
- pr_err("%s:%d :Error: Device data is NULL\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
return -EINVAL;
}
if (link->link_enable) {
@@ -177,7 +178,7 @@
int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info)
{
if (!info) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args");
return -EINVAL;
}
@@ -204,8 +205,7 @@
struct cam_req_mgr_add_request add_req;
if (!a_ctrl || !arg) {
- pr_err("%s:%d :Error: Invalid Args\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args");
return -EINVAL;
}
@@ -216,21 +216,21 @@
rc = cam_mem_get_cpu_buf(config.packet_handle,
(uint64_t *)&generic_ptr, &len_of_buff);
if (rc < 0) {
- pr_err("%s:%d :Error: error in converting command Handle %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Error in converting command Handle %d",
+ rc);
return rc;
}
if (config.offset > len_of_buff) {
- pr_err("%s: %d offset is out of bounds: offset: %lld len: %zu\n",
- __func__, __LINE__, config.offset, len_of_buff);
+ CAM_ERR(CAM_ACTUATOR,
+ "offset is out of bounds: offset: %lld len: %zu",
+ config.offset, len_of_buff);
return -EINVAL;
}
csl_packet = (struct cam_packet *)(generic_ptr +
config.offset);
- CDBG("%s:%d Pkt opcode: %d\n",
- __func__, __LINE__, csl_packet->header.op_code);
+ CAM_DBG(CAM_ACTUATOR, "Pkt opcode: %d", csl_packet->header.op_code);
if ((csl_packet->header.op_code & 0xFFFFFF) ==
CAM_ACTUATOR_PACKET_OPCODE_INIT) {
@@ -242,24 +242,22 @@
cmd_desc = (struct cam_cmd_buf_desc *)(offset);
if (csl_packet->num_cmd_buf != 2) {
- pr_err("%s:: %d :Error: cmd Buffers in Init : %d\n",
- __func__, __LINE__, csl_packet->num_cmd_buf);
+ CAM_ERR(CAM_ACTUATOR, "cmd Buffers in Init : %d",
+ csl_packet->num_cmd_buf);
return -EINVAL;
}
rc = cam_mem_get_cpu_buf(cmd_desc[0].mem_handle,
(uint64_t *)&generic_ptr, &len_of_buff);
if (rc < 0) {
- pr_err("%s:%d Failed to get cpu buf\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed to get cpu buf");
return rc;
}
cmd_buf = (uint32_t *)generic_ptr;
cmd_buf += cmd_desc->offset / sizeof(uint32_t);
rc = cam_actuator_slaveInfo_pkt_parser(a_ctrl, cmd_buf);
if (rc < 0) {
- pr_err("%s:%d Failed in parsing the pkt\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed in parsing the pkt");
return rc;
}
cmd_buf += (sizeof(struct cam_cmd_i2c_info)/sizeof(uint32_t));
@@ -268,8 +266,8 @@
rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
&cmd_desc[1], 1);
if (rc < 0) {
- pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
+ rc);
return rc;
}
} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
@@ -289,8 +287,8 @@
rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
cmd_desc, 1);
if (rc < 0) {
- pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
+ rc);
return rc;
}
} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
@@ -309,8 +307,8 @@
rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
cmd_desc, 1);
if (rc < 0) {
- pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
+ rc);
return rc;
}
}
@@ -323,8 +321,8 @@
if (a_ctrl->bridge_intf.crm_cb &&
a_ctrl->bridge_intf.crm_cb->add_req)
a_ctrl->bridge_intf.crm_cb->add_req(&add_req);
- CDBG("%s: %d Req Id: %lld added to Bridge\n",
- __func__, __LINE__, add_req.req_id);
+ CAM_DBG(CAM_ACTUATOR, "Req Id: %lld added to Bridge",
+ add_req.req_id);
}
return rc;
@@ -344,8 +342,7 @@
return 0;
if (cnt >= CAM_SOC_MAX_REGULATOR) {
- pr_err("%s:%d Regulators more than supported %d\n",
- __func__, __LINE__, cnt);
+ CAM_ERR(CAM_ACTUATOR, "Regulators more than supported %d", cnt);
return -EINVAL;
}
@@ -368,8 +365,7 @@
rc = cam_actuator_vreg_control(a_ctrl, 1);
if (rc < 0) {
- pr_err("%s:%d Actuator Reg Failed %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Actuator Reg Failed %d", rc);
return rc;
}
@@ -383,8 +379,7 @@
rc = cam_soc_util_enable_platform_resource(&a_ctrl->soc_info,
false, 0, false);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in req gpio: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Failed in req gpio: %d", rc);
return rc;
}
@@ -408,7 +403,7 @@
rc = cam_actuator_vreg_control(a_ctrl, 0);
if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed %d", rc);
return rc;
}
@@ -426,8 +421,8 @@
rc |= cam_soc_util_disable_platform_resource(&a_ctrl->soc_info,
0, 0);
if (rc < 0)
- pr_err("%s:%d Failed to disable platform resources: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed to disable platform resources: %d", rc);
}
return rc;
@@ -440,13 +435,11 @@
struct cam_control *cmd = (struct cam_control *)arg;
if (!a_ctrl || !cmd) {
- pr_err("%s: %d :Error: Invalid Args\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, " Invalid Args");
return -EINVAL;
}
- pr_debug("%s:%d Opcode to Actuator: %d\n",
- __func__, __LINE__, cmd->op_code);
+ CAM_DBG(CAM_ACTUATOR, "Opcode to Actuator: %d", cmd->op_code);
mutex_lock(&(a_ctrl->actuator_mutex));
switch (cmd->op_code) {
@@ -455,8 +448,7 @@
struct cam_create_dev_hdl bridge_params;
if (a_ctrl->bridge_intf.device_hdl != -1) {
- pr_err("%s:%d Device is already acquired\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Device is already acquired");
rc = -EINVAL;
goto release_mutex;
}
@@ -464,8 +456,7 @@
(void __user *) cmd->handle,
sizeof(actuator_acq_dev));
if (rc < 0) {
- pr_err("%s:%d :Error: Failed Copying from user\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed Copying from user");
goto release_mutex;
}
@@ -481,12 +472,11 @@
a_ctrl->bridge_intf.session_hdl =
actuator_acq_dev.session_handle;
- CDBG("%s:%d Device Handle: %d\n",
- __func__, __LINE__, actuator_acq_dev.device_handle);
+ CAM_DBG(CAM_ACTUATOR, "Device Handle: %d",
+ actuator_acq_dev.device_handle);
if (copy_to_user((void __user *) cmd->handle, &actuator_acq_dev,
sizeof(struct cam_sensor_acquire_dev))) {
- pr_err("%s:%d :Error: Failed Copy to User\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
rc = -EFAULT;
goto release_mutex;
}
@@ -495,8 +485,7 @@
break;
case CAM_RELEASE_DEV: {
if (a_ctrl->bridge_intf.device_hdl == -1) {
- pr_err("%s:%d :Error: link hdl: %d device hdl: %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ACTUATOR, "link hdl: %d device hdl: %d",
a_ctrl->bridge_intf.device_hdl,
a_ctrl->bridge_intf.link_hdl);
rc = -EINVAL;
@@ -504,8 +493,7 @@
}
rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
if (rc < 0)
- pr_err("%s:%d :Error: destroying the device hdl\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "destroying the device hdl");
a_ctrl->bridge_intf.device_hdl = -1;
a_ctrl->bridge_intf.link_hdl = -1;
a_ctrl->bridge_intf.session_hdl = -1;
@@ -517,8 +505,7 @@
actuator_cap.slot_info = a_ctrl->id;
if (copy_to_user((void __user *) cmd->handle, &actuator_cap,
sizeof(struct cam_actuator_query_cap))) {
- pr_err("%s:%d :Error: Failed Copy to User\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
rc = -EFAULT;
goto release_mutex;
}
@@ -527,28 +514,25 @@
case CAM_START_DEV: {
rc = cam_actuator_power_up(a_ctrl);
if (rc < 0) {
- pr_err("%s: %d :Error: Actuator Power up failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Actuator Power up failed");
goto release_mutex;
}
rc = camera_io_init(&a_ctrl->io_master_info);
if (rc < 0) {
- pr_err("%s:%d :Error: cci_init failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "cci_init failed");
cam_actuator_power_down(a_ctrl);
}
rc = cam_actuator_apply_settings(a_ctrl,
&a_ctrl->i2c_data.init_settings);
if (rc < 0)
- pr_err("%s: %d :Error: Cannot apply Init settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings");
/* Delete the request even if the apply is failed */
rc = delete_request(&a_ctrl->i2c_data.init_settings);
if (rc < 0) {
- pr_err("%s:%d Fail in deleting the Init settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR,
+ "Fail in deleting the Init settings");
rc = -EINVAL;
goto release_mutex;
}
@@ -557,12 +541,10 @@
case CAM_STOP_DEV: {
rc = camera_io_release(&a_ctrl->io_master_info);
if (rc < 0)
- pr_err("%s:%d :Error: Failed in releasing CCI\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed in releasing CCI");
rc = cam_actuator_power_down(a_ctrl);
if (rc < 0) {
- pr_err("%s:%d :Error: Actuator Power down failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
goto release_mutex;
}
}
@@ -572,8 +554,7 @@
ACT_APPLY_SETTINGS_LATER;
rc = cam_actuator_i2c_pkt_parse(a_ctrl, arg);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in actuator Parsing\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed in actuator Parsing");
}
if (a_ctrl->act_apply_state ==
@@ -581,14 +562,15 @@
rc = cam_actuator_apply_settings(a_ctrl,
&a_ctrl->i2c_data.init_settings);
if (rc < 0)
- pr_err("%s:%d :Error: Cannot apply Update settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR,
+ "Cannot apply Update settings");
/* Delete the request even if the apply is failed */
rc = delete_request(&a_ctrl->i2c_data.init_settings);
if (rc < 0) {
- pr_err("%s: %d :Error: Failed in Deleting the Init Pkt: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed in Deleting the Init Pkt: %d",
+ rc);
goto release_mutex;
}
}
@@ -597,8 +579,7 @@
case CAM_SD_SHUTDOWN:
break;
default:
- pr_err("%s:%d Invalid Opcode %d\n",
- __func__, __LINE__, cmd->op_code);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Opcode %d", cmd->op_code);
}
release_mutex:
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index 48e3c2e..7eba9d1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -14,6 +14,7 @@
#include "cam_req_mgr_dev.h"
#include "cam_actuator_soc.h"
#include "cam_actuator_core.h"
+#include "cam_trace.h"
static long cam_actuator_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
@@ -27,8 +28,7 @@
rc = cam_actuator_driver_cmd(a_ctrl, arg);
break;
default:
- pr_err("%s:%d Invalid ioctl cmd\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid ioctl cmd");
rc = -EINVAL;
break;
}
@@ -42,14 +42,14 @@
struct cam_actuator_ctrl_t *a_ctrl;
if (client == NULL || id == NULL) {
- pr_err("%s:%d: :Error: Invalid Args client: %pK id: %pK\n",
- __func__, __LINE__, client, id);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args client: %pK id: %pK",
+ client, id);
return -EINVAL;
}
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- pr_err("%s %s :Error: i2c_check_functionality failed\n",
- __func__, client->name);
+ CAM_ERR(CAM_ACTUATOR, "%s :: i2c_check_functionality failed",
+ client->name);
rc = -EFAULT;
return rc;
}
@@ -80,7 +80,7 @@
rc = cam_actuator_parse_dt(a_ctrl, &client->dev);
if (rc < 0) {
- pr_err("failed: cam_sensor_parse_dt rc %d", rc);
+ CAM_ERR(CAM_ACTUATOR, "failed: cam_sensor_parse_dt rc %d", rc);
goto free_mem;
}
@@ -99,7 +99,7 @@
a_ctrl = platform_get_drvdata(pdev);
if (!a_ctrl) {
- pr_err("%s: Actuator device is NULL\n", __func__);
+ CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
return 0;
}
@@ -119,7 +119,7 @@
/* Handle I2C Devices */
if (!a_ctrl) {
- pr_err("%s: Actuator device is NULL\n", __func__);
+ CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
return -EINVAL;
}
/*Free Allocated Mem */
@@ -138,7 +138,8 @@
if (copy_from_user(&cmd_data, (void __user *)arg,
sizeof(cmd_data))) {
- pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed to copy from user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
return -EFAULT;
}
@@ -148,21 +149,21 @@
cmd = VIDIOC_CAM_CONTROL;
rc = cam_actuator_subdev_ioctl(sd, cmd, &cmd_data);
if (rc < 0) {
- pr_err("%s:%d Failed in actuator suddev handling",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed in actuator subdev handling");
return rc;
}
break;
default:
- pr_err("%s:%d Invalid compat ioctl: %d\n",
- __func__, __LINE__, cmd);
+ CAM_ERR(CAM_ACTUATOR, "Invalid compat ioctl: %d", cmd);
rc = -EINVAL;
}
if (!rc) {
if (copy_to_user((void __user *)arg, &cmd_data,
sizeof(cmd_data))) {
- pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed to copy to user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
rc = -EFAULT;
}
@@ -227,8 +228,7 @@
rc = cam_actuator_parse_dt(a_ctrl, &(pdev->dev));
if (rc < 0) {
- pr_err("%s:%d :Error: Paring actuator dt failed rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Parsing actuator dt failed rc %d", rc);
goto free_ctrl;
}
@@ -251,16 +251,15 @@
rc = cam_register_subdev(&(a_ctrl->v4l2_dev_str));
if (rc < 0) {
- pr_err("%s:%d :ERROR: Fail with cam_register_subdev\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Fail with cam_register_subdev");
goto free_mem;
}
rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
NULL, NULL);
if (rc < 0) {
- pr_err("%s:%d :Error: Requesting Platform Resources failed rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR,
+ "Requesting Platform Resources failed rc %d", rc);
goto free_ctrl;
}
@@ -315,14 +314,13 @@
rc = platform_driver_register(&cam_actuator_platform_driver);
if (rc < 0) {
- pr_err("%s platform_driver_register failed rc = %d",
- __func__, rc);
+ CAM_ERR(CAM_ACTUATOR,
+ "platform_driver_register failed rc = %d", rc);
return rc;
}
rc = i2c_add_driver(&cam_actuator_driver_i2c);
if (rc)
- pr_err("%s:%d :Error: i2c_add_driver failed rc = %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "i2c_add_driver failed rc = %d", rc);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index 19fe4af..fdf881f3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -33,6 +33,7 @@
#include <cam_subdev.h>
#include "cam_sensor_util.h"
#include "cam_soc_util.h"
+#include "cam_debug_util.h"
#define NUM_MASTERS 2
#define NUM_QUEUES 2
@@ -40,13 +41,6 @@
#define TRUE 1
#define FALSE 0
-#undef CDBG
-#ifdef CAM_SENSOR_DEBUG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
#define ACTUATOR_DRIVER_I2C "i2c_actuator"
#define CAMX_ACTUATOR_DEV_NAME "cam-actuator-driver"
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
index 584e4d2..ddc89a8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -28,8 +28,7 @@
struct platform_device *pdev = NULL;
if (!soc_info->pdev) {
- pr_err("%s:%d :Error:soc_info is not initialized\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "soc_info is not initialized");
return -EINVAL;
}
@@ -41,27 +40,27 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("%s:%d :Error: parsing common soc dt(rc %d)\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "parsing common soc dt(rc %d)", rc);
return rc;
}
rc = of_property_read_u32(of_node, "cci-master",
&(a_ctrl->cci_i2c_master));
- CDBG("cci-master %d, rc %d\n", a_ctrl->cci_i2c_master, rc);
+ CAM_DBG(CAM_ACTUATOR, "cci-master %d, rc %d",
+ a_ctrl->cci_i2c_master, rc);
if (rc < 0 || a_ctrl->cci_i2c_master >= MASTER_MAX) {
- pr_err("%s:%d :Error: Wrong info from dt CCI master as : %d\n",
- __func__, __LINE__, a_ctrl->cci_i2c_master);
+ CAM_ERR(CAM_ACTUATOR, "Wrong info from dt CCI master as : %d",
+ a_ctrl->cci_i2c_master);
return rc;
}
if (!soc_info->gpio_data) {
- pr_info("%s:%d No GPIO found\n", __func__, __LINE__);
+ CAM_INFO(CAM_ACTUATOR, "No GPIO found");
rc = 0;
return rc;
}
if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
- pr_info("%s:%d No GPIO found\n", __func__, __LINE__);
+ CAM_INFO(CAM_ACTUATOR, "No GPIO found");
return -EINVAL;
}
@@ -69,8 +68,7 @@
&a_ctrl->gpio_num_info);
if ((rc < 0) || (!a_ctrl->gpio_num_info)) {
- pr_err("%s:%d No/Error Actuator GPIOs\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "No/Error Actuator GPIOs");
return -EINVAL;
}
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
index 57dfed5..ba81259 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
@@ -3,5 +3,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci_dev.o cam_cci_core.o cam_cci_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index 83e0c19..c69eeaa 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -33,7 +33,7 @@
num_bytes = 4;
break;
default:
- pr_err("%s: %d failed: %d\n", __func__, __LINE__, type);
+ CAM_ERR(CAM_CCI, "failed: %d", type);
num_bytes = 0;
break;
}
@@ -52,9 +52,9 @@
rc = wait_for_completion_timeout(
&cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
if (rc < 0) {
- pr_err("%s:%d wait failed\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "wait failed");
} else if (rc == 0) {
- pr_err("%s:%d wait timeout\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "wait timeout");
/* Set reset pending flag to TRUE */
cci_dev->cci_master_info[master].reset_pending = TRUE;
@@ -72,8 +72,7 @@
&cci_dev->cci_master_info[master].reset_complete,
CCI_TIMEOUT);
if (rc <= 0)
- pr_err("%s:%d wait failed %d\n", __func__, __LINE__,
- rc);
+ CAM_ERR(CAM_CCI, "wait failed %d", rc);
}
}
@@ -91,37 +90,36 @@
read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
- CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
- __func__, __LINE__, read_val, len,
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d",
+ read_val, len,
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
if ((read_val + len + 1) > cci_dev->
cci_i2c_queue_info[master][queue].max_queue_size) {
uint32_t reg_val = 0;
uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
- CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD");
cam_io_w_mb(report_val,
base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
read_val++;
- CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d\n",
- __func__, __LINE__, read_val, queue);
+ CAM_DBG(CAM_CCI,
+ "CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d",
+ read_val, queue);
cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
reg_val = 1 << ((master * 2) + queue);
- CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "CCI_QUEUE_START_ADDR");
atomic_set(&cci_dev->cci_master_info[master].
done_pending[queue], 1);
cam_io_w_mb(reg_val, base +
CCI_QUEUE_START_ADDR);
- CDBG("%s line %d wait_for_completion_timeout\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "wait_for_completion_timeout");
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
rc = wait_for_completion_timeout(&cci_dev->
cci_master_info[master].report_q[queue], CCI_TIMEOUT);
if (rc <= 0) {
- pr_err("%s: wait_for_completion_timeout %d\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "wait_for_completion_timeout rc: %d", rc);
if (rc == 0)
rc = -ETIMEDOUT;
cam_cci_flush_queue(cci_dev, master);
@@ -129,7 +127,7 @@
}
rc = cci_dev->cci_master_info[master].status;
if (rc < 0)
- pr_err("%s failed rc %d\n", __func__, rc);
+ CAM_ERR(CAM_CCI, "Failed rc %d", rc);
}
return rc;
@@ -147,17 +145,17 @@
void __iomem *base = soc_info->reg_map[0].mem_base;
if (!cci_dev) {
- pr_err("%s: failed %d", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed: invalid cci_dev");
return -EINVAL;
}
rc = cam_cci_validate_queue(cci_dev, 1, master, queue);
if (rc < 0) {
- pr_err("%s: failed %d", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Failed %d", rc);
return rc;
}
- CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
- __func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x",
+ CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset, val);
cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
@@ -186,45 +184,43 @@
uint32_t reg_offset = 0;
/* CCI Top Registers */
- CCI_DBG(" **** %s : %d CCI TOP Registers ****\n", __func__, __LINE__);
+ CCI_DBG(" **** CCI TOP Registers ****");
for (i = 0; i < DEBUG_TOP_REG_COUNT; i++) {
reg_offset = DEBUG_TOP_REG_START + i * 4;
read_val = cam_io_r_mb(cci_dev->base + reg_offset);
- CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
- __func__, __LINE__, reg_offset, read_val);
+ CCI_DBG("offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
}
/* CCI Master registers */
- CCI_DBG(" **** %s : %d CCI MASTER%d Registers ****\n",
- __func__, __LINE__, master);
+ CCI_DBG(" ****CCI MASTER %d Registers ****",
+ master);
for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
if (i == 6)
continue;
reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
read_val = cam_io_r_mb(cci_dev->base + reg_offset);
- CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
- __func__, __LINE__, reg_offset, read_val);
+ CCI_DBG("offset = 0x%X value = 0x%X", reg_offset, read_val);
}
/* CCI Master Queue registers */
- CCI_DBG(" **** %s : %d CCI MASTER%d QUEUE%d Registers ****\n",
- __func__, __LINE__, master, queue);
+ CCI_DBG(" **** CCI MASTER%d QUEUE%d Registers ****",
+ master, queue);
for (i = 0; i < DEBUG_MASTER_QUEUE_REG_COUNT; i++) {
reg_offset = DEBUG_MASTER_QUEUE_REG_START + master*0x200 +
queue*0x100 + i * 4;
read_val = cam_io_r_mb(cci_dev->base + reg_offset);
- CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
- __func__, __LINE__, reg_offset, read_val);
+ CCI_DBG("offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
}
/* CCI Interrupt registers */
- CCI_DBG(" **** %s : %d CCI Interrupt Registers ****\n",
- __func__, __LINE__);
+ CCI_DBG(" ****CCI Interrupt Registers ****");
for (i = 0; i < DEBUG_INTR_REG_COUNT; i++) {
reg_offset = DEBUG_INTR_REG_START + i * 4;
read_val = cam_io_r_mb(cci_dev->base + reg_offset);
- CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
- __func__, __LINE__, reg_offset, read_val);
+ CCI_DBG("offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
}
}
#endif
@@ -236,21 +232,19 @@
int32_t rc = 0;
if (!cci_dev) {
- pr_err("%s: failed %d", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed");
return -EINVAL;
}
rc = wait_for_completion_timeout(&cci_dev->
cci_master_info[master].report_q[queue], CCI_TIMEOUT);
- CDBG("%s line %d wait DONE_for_completion_timeout\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "wait DONE_for_completion_timeout");
if (rc <= 0) {
#ifdef DUMP_CCI_REGISTERS
cam_cci_dump_registers(cci_dev, master, queue);
#endif
- pr_err("%s: %d wait for queue: %d\n",
- __func__, __LINE__, queue);
+ CAM_ERR(CAM_CCI, "wait for queue: %d", queue);
if (rc == 0)
rc = -ETIMEDOUT;
cam_cci_flush_queue(cci_dev, master);
@@ -258,7 +252,7 @@
}
rc = cci_dev->cci_master_info[master].status;
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
@@ -278,15 +272,13 @@
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
- CDBG("%s:%d CCI_I2C_REPORT_CMD curr_w_cnt: %d\n",
- __func__, __LINE__, read_val);
+ CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD curr_w_cnt: %d", read_val);
cam_io_w_mb(report_val,
base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
read_val++;
- CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
- __func__, __LINE__, read_val);
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d", read_val);
cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
}
@@ -319,12 +311,12 @@
if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
if (rc < 0) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed rc: %d", rc);
return rc;
}
rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
} else {
@@ -332,17 +324,17 @@
done_pending[queue], 1);
rc = cam_cci_wait(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
if (rc < 0) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "Failed rc %d", rc);
return rc;
}
}
@@ -362,8 +354,7 @@
read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
- CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d\n",
- __func__, __LINE__, read_val,
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d", read_val,
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
return (cci_dev->
cci_i2c_queue_info[master][queue].max_queue_size) -
@@ -398,13 +389,13 @@
done_pending[queue], 1);
rc = cam_cci_wait(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
} else {
rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
}
@@ -425,7 +416,7 @@
uint32_t size = cmd_size;
if (!cci_dev || !c_ctrl) {
- pr_err("%s: failed %d", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed");
return -EINVAL;
}
@@ -458,8 +449,8 @@
}
if (len > cci_dev->payload_size) {
- pr_err("%s: %d Len error: %d",
- __func__, __LINE__, len);
+ CAM_ERR(CAM_CCI, "Len error: %d", len);
return -EINVAL;
}
@@ -476,8 +467,8 @@
if (clk) {
cycles_per_us = ((clk/1000)*256)/1000;
} else {
- pr_err("%s:%d, failed: Can use default: %d",
- __func__, __LINE__, CYCLES_PER_MICRO_SEC_DEFAULT);
+ CAM_ERR(CAM_CCI, "failed: Can use default: %d",
+ CYCLES_PER_MICRO_SEC_DEFAULT);
cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
}
@@ -498,8 +489,8 @@
if (i2c_freq_mode >= I2C_MAX_MODES ||
i2c_freq_mode < I2C_STANDARD_MODE) {
- pr_err("%s:%d Invalid frequency mode: %d\n",
- __func__, __LINE__, (int32_t)i2c_freq_mode);
+ CAM_ERR(CAM_CCI, "Invalid frequency mode: %d",
+ (int32_t)i2c_freq_mode);
cci_dev->clk_level_index = -1;
return;
}
@@ -544,8 +535,7 @@
void __iomem *base = soc_info->reg_map[0].mem_base;
if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
- pr_err("%s:%d invalid i2c_freq_mode = %d",
- __func__, __LINE__, i2c_freq_mode);
+ CAM_ERR(CAM_CCI, "invalid i2c_freq_mode = %d", i2c_freq_mode);
return -EINVAL;
}
@@ -608,28 +598,27 @@
void __iomem *base = soc_info->reg_map[0].mem_base;
if (i2c_cmd == NULL) {
- pr_err("%s:%d Failed line\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_CCI, "Failed: i2c cmd is NULL");
return -EINVAL;
}
if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) {
- pr_err("%s:%d failed: invalid cmd_size %d\n",
- __func__, __LINE__, cmd_size);
+ CAM_ERR(CAM_CCI, "failed: invalid cmd_size %d",
+ cmd_size);
return -EINVAL;
}
- CDBG("%s addr type %d data type %d cmd_size %d\n", __func__,
+ CAM_DBG(CAM_CCI, "addr type %d data type %d cmd_size %d",
i2c_msg->addr_type, i2c_msg->data_type, cmd_size);
if (i2c_msg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
- pr_err("%s:%d failed: invalid addr_type 0x%X\n",
- __func__, __LINE__, i2c_msg->addr_type);
+ CAM_ERR(CAM_CCI, "failed: invalid addr_type 0x%X",
+ i2c_msg->addr_type);
return -EINVAL;
}
if (i2c_msg->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
- pr_err("%s:%d failed: invalid data_type 0x%X\n",
- __func__, __LINE__, i2c_msg->data_type);
+ CAM_ERR(CAM_CCI, "failed: invalid data_type 0x%X",
+ i2c_msg->data_type);
return -EINVAL;
}
reg_offset = master * 0x200 + queue * 0x100;
@@ -643,8 +632,8 @@
c_ctrl->cci_info->retries << 16 |
c_ctrl->cci_info->id_map << 18;
- CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
- __func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x",
+ CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset, val);
cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
@@ -671,7 +660,7 @@
rc = cam_cci_lock_queue(cci_dev, master, queue, 1);
if (rc < 0) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed rc: %d", rc);
return rc;
}
@@ -681,22 +670,21 @@
len = cam_cci_calc_cmd_len(cci_dev, c_ctrl, cmd_size,
i2c_cmd, &pack);
if (len <= 0) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed: invalid cmd len %d", len);
return -EINVAL;
}
read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
- CDBG("%s line %d CUR_WORD_CNT_ADDR %d len %d max %d\n",
- __func__, __LINE__, read_val, len, max_queue_size);
+ CAM_DBG(CAM_CCI, "CUR_WORD_CNT_ADDR %d len %d max %d",
+ read_val, len, max_queue_size);
/* + 1 - space alocation for Report CMD */
if ((read_val + len + 1) > queue_size) {
if ((read_val + len + 1) > max_queue_size) {
rc = cam_cci_process_full_q(cci_dev,
master, queue);
if (rc < 0) {
- pr_err("%s failed line %d\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed rc: %d", rc);
return rc;
}
continue;
@@ -704,7 +692,7 @@
cam_cci_process_half_q(cci_dev, master, queue);
}
- CDBG("%s cmd_size %d addr 0x%x data 0x%x\n", __func__,
+ CAM_DBG(CAM_CCI, "cmd_size %d addr 0x%x data 0x%x",
cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
delay = i2c_cmd->delay;
i = 0;
@@ -772,8 +760,9 @@
cmd = 0;
for (j = 0; (j < 4 && k < i); j++)
cmd |= (data[k++] << (j * 8));
- CDBG("%s LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d\n",
- __func__, cmd, queue, len, read_val);
+ CAM_DBG(CAM_CCI,
+ "LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d",
+ cmd, queue, len, read_val);
cam_io_w_mb(cmd, base +
CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
master * 0x200 + queue * 0x100);
@@ -789,8 +778,8 @@
0x100);
cmd <<= 4;
cmd |= CCI_I2C_WAIT_CMD;
- CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
- __func__, cmd);
+ CAM_DBG(CAM_CCI,
+ "CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x", cmd);
cam_io_w_mb(cmd, base +
CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
master * 0x200 + queue * 0x100);
@@ -802,7 +791,7 @@
rc = cam_cci_transfer_end(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
@@ -830,7 +819,7 @@
if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
|| c_ctrl->cci_info->cci_i2c_master < 0) {
- pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Invalid I2C master addr");
return -EINVAL;
}
@@ -847,8 +836,7 @@
/* Set the I2C Frequency */
rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
if (rc < 0) {
- pr_err("%s:%d cam_cci_set_clk_param failed rc = %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
goto rel_mutex;
}
@@ -861,25 +849,22 @@
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
master, queue);
if (rc < 0) {
- pr_err("%s:%d Initial validataion failed rc %d\n", __func__,
- __LINE__, rc);
+ CAM_ERR(CAM_CCI, "Initial validataion failed rc %d", rc);
goto rel_mutex;
}
if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
- pr_err("%s:%d More than max retries\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_CCI, "More than max retries");
goto rel_mutex;
}
if (read_cfg->data == NULL) {
- pr_err("%s:%d Data ptr is NULL\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_CCI, "Data ptr is NULL");
goto rel_mutex;
}
- CDBG("%s master %d, queue %d\n", __func__, master, queue);
- CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+ CAM_DBG(CAM_CCI, "master %d, queue %d", master, queue);
+ CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
c_ctrl->cci_info->id_map);
val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
@@ -887,19 +872,20 @@
c_ctrl->cci_info->id_map << 18;
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
- CDBG("%s failed line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex;
}
val = CCI_I2C_LOCK_CMD;
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
- CDBG("%s failed line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex;
}
if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
+ read_cfg->addr_type);
rc = -EINVAL;
goto rel_mutex;
}
@@ -912,34 +898,33 @@
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
- CDBG("%s failed line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex;
}
val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
- CDBG("%s failed line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex;
}
val = CCI_I2C_UNLOCK_CMD;
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
- CDBG("%s failed line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex;
}
val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+ master * 0x200 + queue * 0x100);
- CDBG("%s cur word cnt 0x%x\n", __func__, val);
+ CAM_DBG(CAM_CCI, "cur word cnt 0x%x", val);
cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+ master * 0x200 + queue * 0x100);
val = 1 << ((master * 2) + queue);
cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
- CDBG("%s:%d E wait_for_completion_timeout\n", __func__,
- __LINE__);
+ CAM_DBG(CAM_CCI, "wait_for_completion_timeout");
rc = wait_for_completion_timeout(&cci_dev->
cci_master_info[master].reset_complete, CCI_TIMEOUT);
@@ -949,8 +934,7 @@
#endif
if (rc == 0)
rc = -ETIMEDOUT;
- pr_err("%s: %d wait_for_completion_timeout rc = %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "wait_for_completion_timeout rc = %d", rc);
cam_cci_flush_queue(cci_dev, master);
goto rel_mutex;
} else {
@@ -961,29 +945,28 @@
CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
exp_words = ((read_cfg->num_byte / 4) + 1);
if (read_words != exp_words) {
- pr_err("%s:%d read_words = %d, exp words = %d\n", __func__,
- __LINE__, read_words, exp_words);
+ CAM_ERR(CAM_CCI, "read_words = %d, exp words = %d",
+ read_words, exp_words);
memset(read_cfg->data, 0, read_cfg->num_byte);
rc = -EINVAL;
goto rel_mutex;
}
index = 0;
- CDBG("%s index %d num_type %d\n", __func__, index,
- read_cfg->num_byte);
+ CAM_DBG(CAM_CCI, "index %d num_type %d", index, read_cfg->num_byte);
first_byte = 0;
do {
val = cam_io_r_mb(base +
CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
- CDBG("%s read val 0x%x\n", __func__, val);
+ CAM_DBG(CAM_CCI, "read val 0x%x", val);
for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
- CDBG("%s i %d index %d\n", __func__, i, index);
+ CAM_DBG(CAM_CCI, "i:%d index:%d", i, index);
if (!first_byte) {
- CDBG("%s sid 0x%x\n", __func__, val & 0xFF);
+ CAM_DBG(CAM_CCI, "sid 0x%x", val & 0xFF);
first_byte++;
} else {
read_cfg->data[index] =
(val >> (i * 8)) & 0xFF;
- CDBG("%s data[%d] 0x%x\n", __func__, index,
+ CAM_DBG(CAM_CCI, "data[%d] 0x%x", index,
read_cfg->data[index]);
index++;
}
@@ -1006,20 +989,19 @@
cci_dev = v4l2_get_subdevdata(sd);
if (cci_dev->cci_state != CCI_STATE_ENABLED) {
- pr_err("%s invalid cci state %d\n",
- __func__, cci_dev->cci_state);
+ CAM_ERR(CAM_CCI, "invalid cci state %d", cci_dev->cci_state);
return -EINVAL;
}
master = c_ctrl->cci_info->cci_i2c_master;
- CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+ CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
c_ctrl->cci_info->id_map);
/* Set the I2C Frequency */
rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
if (rc < 0) {
- pr_err("%s:%d cam_cci_set_clk_param failed rc = %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
return rc;
}
/*
@@ -1031,18 +1013,17 @@
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size-1,
master, queue);
if (rc < 0) {
- pr_err("%s:%d Initial validataion failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "Initial validation failed rc %d",
+ rc);
return rc;
}
if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
- pr_err("%s:%d More than max retries\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_CCI, "More than max retries");
return rc;
}
rc = cam_cci_data_queue(cci_dev, c_ctrl, queue, sync_en);
if (rc < 0) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed rc: %d", rc);
return rc;
}
@@ -1069,7 +1050,7 @@
&write_async->c_ctrl, write_async->queue, write_async->sync_en);
mutex_unlock(&cci_master_info->mutex_q[write_async->queue]);
if (rc < 0)
- pr_err("%s: %d failed\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed rc: %d", rc);
kfree(write_async->c_ctrl.cfg.cci_i2c_write_cfg.reg_setting);
kfree(write_async);
@@ -1110,7 +1091,7 @@
kzalloc(sizeof(struct cam_sensor_i2c_reg_array)*
cci_i2c_write_cfg->size, GFP_KERNEL);
if (!cci_i2c_write_cfg_w->reg_setting) {
- pr_err("%s: %d Couldn't allocate memory\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Couldn't allocate memory");
kfree(write_async);
return -ENOMEM;
}
@@ -1140,35 +1121,33 @@
uint16_t read_bytes = 0;
if (!sd || !c_ctrl) {
- pr_err("%s:%d sd %pK c_ctrl %pK\n", __func__,
- __LINE__, sd, c_ctrl);
+ CAM_ERR(CAM_CCI, "sd %pK c_ctrl %pK", sd, c_ctrl);
return -EINVAL;
}
if (!c_ctrl->cci_info) {
- pr_err("%s:%d cci_info NULL\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "cci_info NULL");
return -EINVAL;
}
cci_dev = v4l2_get_subdevdata(sd);
if (!cci_dev) {
- pr_err("%s:%d cci_dev NULL\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "cci_dev NULL");
return -EINVAL;
}
if (cci_dev->cci_state != CCI_STATE_ENABLED) {
- pr_err("%s invalid cci state %d\n",
- __func__, cci_dev->cci_state);
+ CAM_ERR(CAM_CCI, "invalid cci state %d", cci_dev->cci_state);
return -EINVAL;
}
if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
|| c_ctrl->cci_info->cci_i2c_master < 0) {
- pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Invalid I2C master addr");
return -EINVAL;
}
master = c_ctrl->cci_info->cci_i2c_master;
read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) {
- pr_err("%s:%d read num bytes 0\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "read num bytes 0");
rc = -EINVAL;
goto ERROR;
}
@@ -1181,7 +1160,7 @@
read_cfg->num_byte = read_bytes;
rc = cam_cci_read(sd, c_ctrl);
if (rc < 0) {
- pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
goto ERROR;
}
if (read_bytes > CCI_READ_MAX) {
@@ -1205,8 +1184,8 @@
cci_dev = v4l2_get_subdevdata(sd);
if (!cci_dev || !c_ctrl) {
- pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
- __LINE__, cci_dev, c_ctrl);
+ CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+ cci_dev, c_ctrl);
rc = -EINVAL;
return rc;
}
@@ -1225,8 +1204,7 @@
rc = cam_cci_soc_release(cci_dev);
if (rc < 0) {
- pr_err("%s:%d Failed in releasing the cci: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "Failed in releasing the cci: %d", rc);
cam_cpas_stop(cci_dev->cpas_handle);
return rc;
}
@@ -1246,8 +1224,8 @@
cci_dev = v4l2_get_subdevdata(sd);
if (!cci_dev || !c_ctrl) {
- pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
- __LINE__, cci_dev, c_ctrl);
+ CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+ cci_dev, c_ctrl);
rc = -EINVAL;
return rc;
}
@@ -1256,7 +1234,7 @@
if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
|| c_ctrl->cci_info->cci_i2c_master < 0) {
- pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Invalid I2C master addr");
return -EINVAL;
}
@@ -1304,8 +1282,7 @@
{
int32_t rc = 0;
- CDBG("%s line %d cmd %d\n", __func__, __LINE__,
- cci_ctrl->cmd);
+ CAM_DBG(CAM_CCI, "cmd %d", cci_ctrl->cmd);
switch (cci_ctrl->cmd) {
case MSM_CCI_INIT:
rc = cam_cci_init(sd, cci_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index 63655a4..dad02bf 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -37,8 +37,7 @@
case VIDIOC_CAM_CONTROL:
break;
default:
- pr_err("%s:%d Invalid ioctl cmd: %d\n",
- __func__, __LINE__, cmd);
+ CAM_ERR(CAM_CCI, "Invalid ioctl cmd: %d", cmd);
rc = -ENOIOCTLCMD;
}
@@ -134,13 +133,13 @@
base + CCI_RESET_CMD_ADDR);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
- pr_err("%s:%d MASTER_0 error 0x%x\n", __func__, __LINE__, irq);
+ CAM_ERR(CAM_CCI, "MASTER_0 error 0x%x", irq);
cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
base + CCI_HALT_REQ_ADDR);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
- pr_err("%s:%d MASTER_1 error 0x%x\n", __func__, __LINE__, irq);
+ CAM_ERR(CAM_CCI, "MASTER_1 error 0x%x", irq);
cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
base + CCI_HALT_REQ_ADDR);
@@ -192,8 +191,7 @@
rc = cam_cci_parse_dt_info(pdev, new_cci_dev);
if (rc < 0) {
- pr_err("%s: %d Resource get Failed: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "Resource get Failed: %d", rc);
goto cci_no_resource;
}
@@ -214,8 +212,7 @@
rc = cam_register_subdev(&(new_cci_dev->v4l2_dev_str));
if (rc < 0) {
- pr_err("%s:%d :Error: Fail with cam_register_subdev\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Fail with cam_register_subdev");
goto cci_no_resource;
}
@@ -230,10 +227,10 @@
strlcpy(cpas_parms.identifier, "cci", CAM_HW_IDENTIFIER_LENGTH);
rc = cam_cpas_register_client(&cpas_parms);
if (rc) {
- pr_err("%s:%d CPAS registration failed\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "CPAS registration failed");
goto cci_no_resource;
}
- CDBG("CPAS registration successful handle=%d\n",
+ CAM_DBG(CAM_CCI, "CPAS registration successful handle=%d",
cpas_parms.client_handle);
new_cci_dev->cpas_handle = cpas_parms.client_handle;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index 6268a1b..cb01c6c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -37,6 +37,7 @@
#include <cam_cpas_api.h>
#include "cam_cci_hwreg.h"
#include "cam_soc_util.h"
+#include "cam_debug_util.h"
#define V4L2_IDENT_CCI 50005
#define CCI_I2C_QUEUE_0_SIZE 128
@@ -80,16 +81,6 @@
#define PRIORITY_QUEUE (QUEUE_0)
#define SYNC_QUEUE (QUEUE_1)
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
-#undef CCI_DBG
-#ifdef MSM_CCI_DEBUG
-#define CCI_DBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CCI_DBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
enum cci_i2c_sync {
MSM_SYNC_DISABLE,
MSM_SYNC_ENABLE,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index d976788..83cb49e3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -27,8 +27,8 @@
cci_dev = v4l2_get_subdevdata(sd);
if (!cci_dev || !c_ctrl) {
- pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
- __LINE__, cci_dev, c_ctrl);
+ CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+ cci_dev, c_ctrl);
rc = -EINVAL;
return rc;
}
@@ -37,19 +37,18 @@
base = soc_info->reg_map[0].mem_base;
if (!soc_info || !base) {
- pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
- __LINE__, soc_info, base);
+ CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+ soc_info, base);
rc = -EINVAL;
return rc;
}
- CDBG("%s:%d Base address %pK\n", __func__, __LINE__, base);
+ CAM_DBG(CAM_CCI, "Base address %pK", base);
if (cci_dev->ref_count++) {
- CDBG("%s:%d ref_count %d\n", __func__, __LINE__,
- cci_dev->ref_count);
+ CAM_DBG(CAM_CCI, "ref_count %d", cci_dev->ref_count);
master = c_ctrl->cci_info->cci_i2c_master;
- CDBG("%s:%d master %d\n", __func__, __LINE__, master);
+ CAM_DBG(CAM_CCI, "master %d", master);
if (master < MASTER_MAX && master >= 0) {
mutex_lock(&cci_dev->cci_master_info[master].mutex);
flush_workqueue(cci_dev->write_wq[master]);
@@ -74,8 +73,7 @@
reset_complete,
CCI_TIMEOUT);
if (rc <= 0)
- pr_err("%s:%d wait failed %d\n", __func__,
- __LINE__, rc);
+ CAM_ERR(CAM_CCI, "wait failed %d", rc);
mutex_unlock(&cci_dev->cci_master_info[master].mutex);
}
return 0;
@@ -89,8 +87,7 @@
rc = cam_cpas_start(cci_dev->cpas_handle,
&ahb_vote, &axi_vote);
if (rc != 0) {
- pr_err("%s:%d CPAS start failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "CPAS start failed");
}
cam_cci_get_clk_rates(cci_dev, c_ctrl);
@@ -104,15 +101,13 @@
rc = cam_soc_util_enable_platform_resource(soc_info, true,
CAM_TURBO_VOTE, true);
if (rc < 0) {
- CDBG("%s:%d request platform resources failed\n", __func__,
- __LINE__);
+ CAM_DBG(CAM_CCI, "request platform resources failed");
goto platform_enable_failed;
}
cci_dev->hw_version = cam_io_r_mb(base +
CCI_HW_VERSION_ADDR);
- CDBG("%s:%d: hw_version = 0x%x\n", __func__, __LINE__,
- cci_dev->hw_version);
+ CAM_DBG(CAM_CCI, "hw_version = 0x%x", cci_dev->hw_version);
cci_dev->payload_size =
MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11;
@@ -129,12 +124,11 @@
max_queue_size =
CCI_I2C_QUEUE_1_SIZE;
- CDBG("%s:%d : CCI Master[%d] :: Q0 : %d Q1 : %d\n",
- __func__, __LINE__, i,
+ CAM_DBG(CAM_CCI, "CCI Master[%d] :: Q0 : %d Q1 : %d", i
+ , cci_dev->cci_i2c_queue_info[i][j].
+ max_queue_size,
cci_dev->cci_i2c_queue_info[i][j].
- max_queue_size,
- cci_dev->cci_i2c_queue_info[i][j].
- max_queue_size);
+ max_queue_size);
}
}
@@ -146,8 +140,7 @@
&cci_dev->cci_master_info[MASTER_0].reset_complete,
CCI_TIMEOUT);
if (rc <= 0) {
- pr_err("%s:%d wait_for_completion_timeout\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "wait_for_completion_timeout");
if (rc == 0)
rc = -ETIMEDOUT;
goto reset_complete_failed;
@@ -162,8 +155,7 @@
for (i = 0; i < MASTER_MAX; i++) {
if (!cci_dev->write_wq[i]) {
- pr_err("%s:%d Failed to flush write wq\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Failed to flush write wq");
rc = -ENOMEM;
goto reset_complete_failed;
} else {
@@ -251,77 +243,75 @@
"qcom,i2c_custom_mode");
rc = of_property_read_u32(src_node, "hw-thigh", &val);
- CDBG("%s:%d hw-thigh %d, rc %d\n", __func__, __LINE__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-thigh %d, rc %d", val, rc);
if (!rc) {
cci_dev->cci_clk_params[count].hw_thigh = val;
rc = of_property_read_u32(src_node, "hw-tlow",
&val);
- CDBG("%s:%d hw-tlow %d, rc %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_CCI, "hw-tlow %d, rc %d",
val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tlow = val;
rc = of_property_read_u32(src_node, "hw-tsu-sto",
&val);
- CDBG("%s:%d hw-tsu-sto %d, rc %d\n",
- __func__, __LINE__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-tsu-sto %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tsu_sto = val;
rc = of_property_read_u32(src_node, "hw-tsu-sta",
&val);
- CDBG("%s:%d hw-tsu-sta %d, rc %d\n",
- __func__, __LINE__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-tsu-sta %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tsu_sta = val;
rc = of_property_read_u32(src_node, "hw-thd-dat",
&val);
- CDBG("%s:%d hw-thd-dat %d, rc %d\n",
- __func__, __LINE__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-thd-dat %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_thd_dat = val;
rc = of_property_read_u32(src_node, "hw-thd-sta",
&val);
- CDBG("%s:%d hw-thd-sta %d, rc %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_CCI, "hw-thd-sta %d, rc %d",
val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_thd_sta = val;
rc = of_property_read_u32(src_node, "hw-tbuf",
&val);
- CDBG("%s:%d hw-tbuf %d, rc %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_CCI, "hw-tbuf %d, rc %d",
val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tbuf = val;
rc = of_property_read_u32(src_node,
"hw-scl-stretch-en", &val);
- CDBG("%s:%d hw-scl-stretch-en %d, rc %d\n",
- __func__, __LINE__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-scl-stretch-en %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
rc = of_property_read_u32(src_node, "hw-trdhld",
&val);
- CDBG("%s:%d hw-trdhld %d, rc %d\n",
- __func__, __LINE__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-trdhld %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_trdhld = val;
rc = of_property_read_u32(src_node, "hw-tsp",
&val);
- CDBG("%s:%d hw-tsp %d, rc %d\n", __func__, __LINE__,
- val, rc);
+ CAM_DBG(CAM_CCI, "hw-tsp %d, rc %d", val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tsp = val;
val = 0;
rc = of_property_read_u32(src_node, "cci-clk-src",
&val);
- CDBG("%s:%d cci-clk-src %d, rc %d\n",
- __func__, __LINE__, val, rc);
+ CAM_DBG(CAM_CCI, "cci-clk-src %d, rc %d", val, rc);
cci_dev->cci_clk_params[count].cci_clk_src = val;
} else
cam_cci_init_default_clk_params(cci_dev, count);
@@ -339,8 +329,7 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("%s:%d :Error: Parsing DT data failed:%d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "Parsing DT data failed:%d", rc);
return -EINVAL;
}
@@ -349,8 +338,7 @@
rc = cam_soc_util_request_platform_resource(soc_info,
cam_cci_irq, new_cci_dev);
if (rc < 0) {
- pr_err("%s:%d :Error: requesting platform resources failed:%d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "requesting platform resources failed:%d", rc);
return -EINVAL;
}
new_cci_dev->v4l2_dev_str.pdev = pdev;
@@ -359,17 +347,15 @@
rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (rc)
- pr_err("%s:%d failed to add child nodes, rc=%d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed to add child nodes, rc=%d", rc);
for (i = 0; i < MASTER_MAX; i++) {
new_cci_dev->write_wq[i] = create_singlethread_workqueue(
"cam_cci_wq");
if (!new_cci_dev->write_wq[i])
- pr_err("%s:%d Failed to create write wq\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Failed to create write wq");
}
- CDBG("%s line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "Exit");
return 0;
}
@@ -380,13 +366,12 @@
&cci_dev->soc_info;
if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
- pr_err("%s:%d invalid ref count %d / cci state %d\n", __func__,
- __LINE__, cci_dev->ref_count, cci_dev->cci_state);
+ CAM_ERR(CAM_CCI, "invalid ref count %d / cci state %d",
+ cci_dev->ref_count, cci_dev->cci_state);
return -EINVAL;
}
if (--cci_dev->ref_count) {
- CDBG("%s:%d ref_count Exit %d\n", __func__, __LINE__,
- cci_dev->ref_count);
+ CAM_DBG(CAM_CCI, "ref_count Exit %d", cci_dev->ref_count);
return 0;
}
for (i = 0; i < MASTER_MAX; i++)
@@ -398,8 +383,8 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc) {
- pr_err("%s:%d: platform resources disable failed, rc=%d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "platform resources disable failed, rc=%d",
+ rc);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
index 0337b32..8edbea5 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
@@ -4,5 +4,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy_soc.o cam_csiphy_dev.o cam_csiphy_core.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 71a88bf..2655202 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -37,7 +37,7 @@
base = soc_info->reg_map[0].mem_base;
for (i = 0; i < size; i++) {
- cam_io_w(
+ cam_io_w_mb(
csiphy_dev->ctrl_reg->
csiphy_reset_reg[i].reg_data,
base +
@@ -63,7 +63,7 @@
size_t len;
if (!cfg_dev || !csiphy_dev) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Invalid Args");
return -EINVAL;
}
@@ -75,16 +75,16 @@
rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle,
(uint64_t *)&generic_ptr, &len);
if (rc < 0) {
- pr_err("%s:%d :ERROR: Failed to get packet Mem address: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY, "Failed to get packet Mem address: %d", rc);
kfree(csiphy_dev->csiphy_info);
csiphy_dev->csiphy_info = NULL;
return rc;
}
if (cfg_dev->offset > len) {
- pr_err("%s: %d offset is out of bounds: offset: %lld len: %zu\n",
- __func__, __LINE__, cfg_dev->offset, len);
+ CAM_ERR(CAM_CSIPHY,
+ "offset is out of bounds: offset: %lld len: %zu",
+ cfg_dev->offset, len);
kfree(csiphy_dev->csiphy_info);
csiphy_dev->csiphy_info = NULL;
return -EINVAL;
@@ -99,8 +99,8 @@
rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
(uint64_t *)&generic_ptr, &len);
if (rc < 0) {
- pr_err("%s:%d :ERROR: Failed to get cmd buf Mem address : %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY,
+ "Failed to get cmd buf Mem address : %d", rc);
kfree(csiphy_dev->csiphy_info);
csiphy_dev->csiphy_info = NULL;
return rc;
@@ -128,7 +128,7 @@
csiphy_dev->soc_info.reg_map[0].mem_base;
for (i = 0; i < csiphy_dev->num_irq_registers; i++)
- cam_io_w(csiphy_dev->ctrl_reg->
+ cam_io_w_mb(csiphy_dev->ctrl_reg->
csiphy_irq_reg[i].reg_data,
csiphybase +
csiphy_dev->ctrl_reg->
@@ -142,7 +142,7 @@
csiphy_dev->soc_info.reg_map[0].mem_base;
for (i = 0; i < csiphy_dev->num_irq_registers; i++)
- cam_io_w(0x0,
+ cam_io_w_mb(0x0,
csiphybase +
csiphy_dev->ctrl_reg->
csiphy_irq_reg[i].reg_addr);
@@ -158,8 +158,7 @@
void __iomem *base = NULL;
if (!csiphy_dev) {
- pr_err("%s:%d Invalid Args\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Invalid Args");
return -EINVAL;
}
@@ -171,22 +170,22 @@
base +
csiphy_dev->ctrl_reg->csiphy_reg.
mipi_csiphy_interrupt_status0_addr + 0x4*i);
- cam_io_w(irq,
+ cam_io_w_mb(irq,
base +
csiphy_dev->ctrl_reg->csiphy_reg.
mipi_csiphy_interrupt_clear0_addr + 0x4*i);
- pr_err_ratelimited(
- "%s CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x\n",
- __func__, soc_info->index, i, irq);
- cam_io_w(0x0,
+ CAM_ERR_RATE_LIMIT(CAM_CSIPHY,
+ "CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x",
+ soc_info->index, i, irq);
+ cam_io_w_mb(0x0,
base +
csiphy_dev->ctrl_reg->csiphy_reg.
mipi_csiphy_interrupt_clear0_addr + 0x4*i);
}
- cam_io_w(0x1, base +
+ cam_io_w_mb(0x1, base +
csiphy_dev->ctrl_reg->
csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
- cam_io_w(0x0, base +
+ cam_io_w_mb(0x0, base +
csiphy_dev->ctrl_reg->
csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
@@ -198,13 +197,13 @@
int32_t rc = 0;
uint32_t lane_enable = 0, mask = 1, size = 0;
uint16_t lane_mask = 0, i = 0, cfg_size = 0;
- uint8_t settle_cnt, lane_cnt, lane_pos = 0;
+ uint8_t lane_cnt, lane_pos = 0;
+ uint16_t settle_cnt = 0;
void __iomem *csiphybase;
struct csiphy_reg_t (*reg_array)[MAX_SETTINGS_PER_LANE];
if (csiphy_dev->csiphy_info == NULL) {
- pr_err("%s:%d csiphy_info is NULL, No/Fail CONFIG_DEV ?\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "csiphy_info is NULL, No/Fail CONFIG_DEV?");
return -EINVAL;
}
@@ -214,7 +213,7 @@
csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base;
if (!csiphybase) {
- pr_err("%s: csiphybase NULL\n", __func__);
+ CAM_ERR(CAM_CSIPHY, "csiphybase NULL");
return -EINVAL;
}
@@ -257,13 +256,13 @@
switch (csiphy_dev->ctrl_reg->
csiphy_common_reg[i].csiphy_param_type) {
case CSIPHY_LANE_ENABLE:
- cam_io_w(lane_enable,
+ cam_io_w_mb(lane_enable,
csiphybase +
csiphy_dev->ctrl_reg->
csiphy_common_reg[i].reg_addr);
break;
case CSIPHY_DEFAULT_PARAMS:
- cam_io_w(csiphy_dev->ctrl_reg->
+ cam_io_w_mb(csiphy_dev->ctrl_reg->
csiphy_common_reg[i].reg_data,
csiphybase +
csiphy_dev->ctrl_reg->
@@ -284,27 +283,27 @@
for (i = 0; i < cfg_size; i++) {
switch (reg_array[lane_pos][i].csiphy_param_type) {
case CSIPHY_LANE_ENABLE:
- cam_io_w(lane_enable,
+ cam_io_w_mb(lane_enable,
csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
case CSIPHY_DEFAULT_PARAMS:
- cam_io_w(reg_array[lane_pos][i].reg_data,
+ cam_io_w_mb(reg_array[lane_pos][i].reg_data,
csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
case CSIPHY_SETTLE_CNT_LOWER_BYTE:
- cam_io_w(settle_cnt & 0xFF,
+ cam_io_w_mb(settle_cnt & 0xFF,
csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
case CSIPHY_SETTLE_CNT_HIGHER_BYTE:
- cam_io_w((settle_cnt >> 8) & 0xFF,
+ cam_io_w_mb((settle_cnt >> 8) & 0xFF,
csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
default:
- CDBG("%s: %d Do Nothing\n", __func__, __LINE__);
+ CAM_DBG(CAM_CSIPHY, "Do Nothing");
break;
}
usleep_range(reg_array[lane_pos][i].delay*1000,
@@ -328,13 +327,11 @@
int32_t rc = 0;
if (!csiphy_dev || !cmd) {
- pr_err("%s:%d Invalid input args\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Invalid input args");
return -EINVAL;
}
- pr_debug("%s:%d Opcode received: %d\n", __func__, __LINE__,
- cmd->op_code);
+ CAM_DBG(CAM_CSIPHY, "Opcode received: %d", cmd->op_code);
mutex_lock(&csiphy_dev->mutex);
switch (cmd->op_code) {
case CAM_ACQUIRE_DEV: {
@@ -347,16 +344,15 @@
(void __user *)cmd->handle,
sizeof(csiphy_acq_dev));
if (rc < 0) {
- pr_err("%s:%d :ERROR: Failed copying from User\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Failed copying from User");
goto release_mutex;
}
csiphy_acq_params.combo_mode = 0;
if (csiphy_dev->acquire_count == 2) {
- pr_err("%s:%d CSIPHY device do not allow more than 2 acquires\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY,
+ "CSIPHY device do not allow more than 2 acquires");
rc = -EINVAL;
goto release_mutex;
}
@@ -379,8 +375,7 @@
if (copy_to_user((void __user *)cmd->handle,
&csiphy_acq_dev,
sizeof(struct cam_sensor_acquire_dev))) {
- pr_err("%s:%d :ERROR: Failed copying from User\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Failed copying from User");
rc = -EINVAL;
goto release_mutex;
}
@@ -395,8 +390,7 @@
cam_csiphy_query_cap(csiphy_dev, &csiphy_cap);
if (copy_to_user((void __user *)cmd->handle,
&csiphy_cap, sizeof(struct cam_csiphy_query_cap))) {
- pr_err("%s:%d :ERROR: Failed copying from User\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Failed copying from User");
rc = -EINVAL;
goto release_mutex;
}
@@ -405,15 +399,13 @@
case CAM_STOP_DEV: {
rc = cam_csiphy_disable_hw(csiphy_dev);
if (rc < 0) {
- pr_err("%s:%d Failed in csiphy release\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
cam_cpas_stop(csiphy_dev->cpas_handle);
goto release_mutex;
}
rc = cam_cpas_stop(csiphy_dev->cpas_handle);
if (rc < 0) {
- pr_err("%s:%d :Error: de-voting CPAS: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY, "de-voting CPAS: %d", rc);
goto release_mutex;
}
}
@@ -422,8 +414,7 @@
struct cam_release_dev_cmd release;
if (!csiphy_dev->acquire_count) {
- pr_err("%s:%d No valid devices to release\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "No valid devices to release");
rc = -EINVAL;
goto release_mutex;
}
@@ -436,8 +427,7 @@
rc = cam_destroy_device_hdl(release.dev_handle);
if (rc < 0)
- pr_err("%s:%d :ERROR: destroying the device hdl\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "destroying the device hdl");
if (release.dev_handle ==
csiphy_dev->bridge_intf.device_hdl[0]) {
csiphy_dev->bridge_intf.device_hdl[0] = -1;
@@ -461,8 +451,7 @@
} else {
rc = cam_cmd_buf_parser(csiphy_dev, &config);
if (rc < 0) {
- pr_err("%s:%d Fail in cmd buf parser\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Fail in cmd buf parser");
goto release_mutex;
}
}
@@ -480,22 +469,19 @@
rc = cam_cpas_start(csiphy_dev->cpas_handle,
&ahb_vote, &axi_vote);
if (rc < 0) {
- pr_err("%s:%d :Error: voting CPAS: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY, "voting CPAS: %d", rc);
goto release_mutex;
}
rc = cam_csiphy_enable_hw(csiphy_dev);
if (rc != 0) {
- pr_err("%s: %d cam_csiphy_enable_hw failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "cam_csiphy_enable_hw failed");
cam_cpas_stop(csiphy_dev->cpas_handle);
goto release_mutex;
}
rc = cam_csiphy_config_dev(csiphy_dev);
if (rc < 0) {
- pr_err("%s: %d cam_csiphy_config_dev failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "cam_csiphy_config_dev failed");
cam_cpas_stop(csiphy_dev->cpas_handle);
goto release_mutex;
}
@@ -504,8 +490,7 @@
case CAM_SD_SHUTDOWN:
break;
default:
- pr_err("%s:%d :Error: Invalid Opcode: %d\n",
- __func__, __LINE__, cmd->op_code);
+ CAM_ERR(CAM_CSIPHY, "Invalid Opcode: %d", cmd->op_code);
rc = -EINVAL;
goto release_mutex;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
index 7783b2e..1c93a1a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -16,13 +16,6 @@
#include "cam_csiphy_core.h"
#include <media/cam_sensor.h>
-#undef CDBG
-#ifdef CAM_CSIPHY_DEV_DEBUG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
static long cam_csiphy_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
@@ -33,13 +26,12 @@
case VIDIOC_CAM_CONTROL:
rc = cam_csiphy_core_cfg(csiphy_dev, arg);
if (rc != 0) {
- pr_err("%s: %d :ERROR: in configuring the device\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "in configuring the device");
return rc;
}
break;
default:
- pr_err("%s:%d :ERROR: Wrong ioctl\n", __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Wrong ioctl : %d", cmd);
break;
}
@@ -55,7 +47,7 @@
if (copy_from_user(&cmd_data, (void __user *)arg,
sizeof(cmd_data))) {
- pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_CSIPHY, "Failed to copy from user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
return -EFAULT;
}
@@ -68,15 +60,15 @@
rc = cam_csiphy_subdev_ioctl(sd, cmd, &cmd_data);
break;
default:
- pr_err("%s:%d Invalid compat ioctl cmd: %d\n",
- __func__, __LINE__, cmd);
+ CAM_ERR(CAM_CSIPHY, "Invalid compat ioctl cmd: %d", cmd);
rc = -EINVAL;
}
if (!rc) {
if (copy_to_user((void __user *)arg, &cmd_data,
sizeof(cmd_data))) {
- pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_CSIPHY,
+ "Failed to copy to user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
rc = -EFAULT;
}
@@ -126,8 +118,7 @@
rc = cam_csiphy_parse_dt_info(pdev, new_csiphy_dev);
if (rc < 0) {
- pr_err("%s:%d :ERROR: dt parsing failed: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY, "DT parsing failed: %d", rc);
goto csiphy_no_resource;
}
@@ -148,8 +139,7 @@
rc = cam_register_subdev(&(new_csiphy_dev->v4l2_dev_str));
if (rc < 0) {
- pr_err("%s:%d :ERROR: In cam_register_subdev\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "cam_register_subdev Failed rc: %d", rc);
goto csiphy_no_resource;
}
@@ -176,11 +166,10 @@
strlcpy(cpas_parms.identifier, "csiphy", CAM_HW_IDENTIFIER_LENGTH);
rc = cam_cpas_register_client(&cpas_parms);
if (rc) {
- pr_err("%s:%d CPAS registration failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "CPAS registration failed rc: %d", rc);
goto csiphy_no_resource;
}
- CDBG("CPAS registration successful handle=%d\n",
+ CAM_DBG(CAM_CSIPHY, "CPAS registration successful handle=%d",
cpas_parms.client_handle);
new_csiphy_dev->cpas_handle = cpas_parms.client_handle;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index c4258bd..8ed5ba4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -34,6 +34,7 @@
#include <cam_io_util.h>
#include <cam_cpas_api.h>
#include "cam_soc_util.h"
+#include "cam_debug_util.h"
#define MAX_CSIPHY 3
#define MAX_DPHY_DATA_LN 4
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
index 6b5aba9..ea6b7c8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -22,16 +22,16 @@
soc_info = &csiphy_dev->soc_info;
if (csiphy_dev->ref_count++) {
- pr_err("%s:%d csiphy refcount = %d\n", __func__,
- __LINE__, csiphy_dev->ref_count);
+ CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d",
+ csiphy_dev->ref_count);
return rc;
}
rc = cam_soc_util_enable_platform_resource(soc_info, true,
CAM_TURBO_VOTE, ENABLE_IRQ);
if (rc < 0) {
- pr_err("%s:%d failed to enable platform resources %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY, "failed to enable platform resources %d",
+ rc);
return rc;
}
@@ -41,8 +41,7 @@
soc_info->clk_rate[0][csiphy_dev->csiphy_clk_index]);
if (rc < 0) {
- pr_err("%s:%d csiphy_clk_set_rate failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "csiphy_clk_set_rate failed rc: %d", rc);
goto csiphy_disable_platform_resource;
}
@@ -62,15 +61,14 @@
struct cam_hw_soc_info *soc_info;
if (!csiphy_dev || !csiphy_dev->ref_count) {
- pr_err("%s:%d csiphy dev NULL / ref_count ZERO\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_CSIPHY, "csiphy dev NULL / ref_count ZERO");
return 0;
}
soc_info = &csiphy_dev->soc_info;
if (--csiphy_dev->ref_count) {
- pr_err("%s:%d csiphy refcount = %d\n", __func__,
- __LINE__, csiphy_dev->ref_count);
+ CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d",
+ csiphy_dev->ref_count);
return 0;
}
@@ -95,8 +93,7 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("%s:%d :Error: parsing common soc dt(rc %d)\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY, "parsing common soc dt(rc %d)", rc);
return rc;
}
@@ -117,15 +114,15 @@
csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
csiphy_dev->clk_lane = 0;
} else {
- pr_err("%s:%d, invalid hw version : 0x%x\n", __func__, __LINE__,
- csiphy_dev->hw_version);
+ CAM_ERR(CAM_CSIPHY, "invalid hw version : 0x%x",
+ csiphy_dev->hw_version);
rc = -EINVAL;
return rc;
}
if (soc_info->num_clk > CSIPHY_NUM_CLK_MAX) {
- pr_err("%s:%d invalid clk count=%d, max is %d\n", __func__,
- __LINE__, soc_info->num_clk, CSIPHY_NUM_CLK_MAX);
+ CAM_ERR(CAM_CSIPHY, "invalid clk count=%d, max is %d",
+ soc_info->num_clk, CSIPHY_NUM_CLK_MAX);
return -EINVAL;
}
for (i = 0; i < soc_info->num_clk; i++) {
@@ -155,7 +152,7 @@
soc_info->clk_rate[0][clk_cnt];
csiphy_dev->csiphy_clk_index = clk_cnt;
}
- CDBG("%s:%d clk_rate[%d] = %d\n", __func__, __LINE__, clk_cnt,
+ CAM_DBG(CAM_CSIPHY, "clk_rate[%d] = %d", clk_cnt,
soc_info->clk_rate[0][clk_cnt]);
clk_cnt++;
}
@@ -168,7 +165,7 @@
int32_t cam_csiphy_soc_release(struct csiphy_device *csiphy_dev)
{
if (!csiphy_dev) {
- pr_err("%s:%d csiphy dev NULL\n", __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "csiphy dev NULL");
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
new file mode 100644
index 0000000..5490992
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom_dev.o cam_eeprom_core.o cam_eeprom_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
new file mode 100644
index 0000000..96697f9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -0,0 +1,784 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <media/cam_sensor.h>
+
+#include "cam_eeprom_core.h"
+#include "cam_eeprom_soc.h"
+#include "cam_debug_util.h"
+
+/**
+ * cam_eeprom_read_memory() - read map data into buffer
+ * @e_ctrl: eeprom control struct
+ * @block: block to be read
+ *
+ * This function iterates through blocks stored in block->map, reads each
+ * region and concatenate them into the pre-allocated block->mapdata
+ */
+static int cam_eeprom_read_memory(struct cam_eeprom_ctrl_t *e_ctrl,
+ struct cam_eeprom_memory_block_t *block)
+{
+ int rc = 0;
+ int j;
+ struct cam_sensor_i2c_reg_setting i2c_reg_settings;
+ struct cam_sensor_i2c_reg_array i2c_reg_array;
+ struct cam_eeprom_memory_map_t *emap = block->map;
+ struct cam_eeprom_soc_private *eb_info;
+ uint8_t *memptr = block->mapdata;
+
+ if (!e_ctrl) {
+ CAM_ERR(CAM_EEPROM, "e_ctrl is NULL");
+ return -EINVAL;
+ }
+
+ eb_info = (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+ for (j = 0; j < block->num_map; j++) {
+ CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X", emap[j].saddr);
+ if (emap[j].saddr) {
+ eb_info->i2c_info.slave_addr = emap[j].saddr;
+ rc = cam_eeprom_update_i2c_info(e_ctrl,
+ &eb_info->i2c_info);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM,
+ "failed: to update i2c info rc %d",
+ rc);
+ return rc;
+ }
+ }
+
+ if (emap[j].page.valid_size) {
+ i2c_reg_settings.addr_type = emap[j].page.addr_type;
+ i2c_reg_settings.data_type = emap[j].page.data_type;
+ i2c_reg_settings.size = 1;
+ i2c_reg_array.reg_addr = emap[j].page.addr;
+ i2c_reg_array.reg_data = emap[j].page.data;
+ i2c_reg_array.delay = emap[j].page.delay;
+ i2c_reg_settings.reg_setting = &i2c_reg_array;
+ rc = camera_io_dev_write(&e_ctrl->io_master_info,
+ &i2c_reg_settings);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "page write failed rc %d",
+ rc);
+ return rc;
+ }
+ }
+
+ if (emap[j].pageen.valid_size) {
+ i2c_reg_settings.addr_type = emap[j].pageen.addr_type;
+ i2c_reg_settings.data_type = emap[j].pageen.data_type;
+ i2c_reg_settings.size = 1;
+ i2c_reg_array.reg_addr = emap[j].pageen.addr;
+ i2c_reg_array.reg_data = emap[j].pageen.data;
+ i2c_reg_array.delay = emap[j].pageen.delay;
+ i2c_reg_settings.reg_setting = &i2c_reg_array;
+ rc = camera_io_dev_write(&e_ctrl->io_master_info,
+ &i2c_reg_settings);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "page enable failed rc %d",
+ rc);
+ return rc;
+ }
+ }
+
+ if (emap[j].poll.valid_size) {
+ rc = camera_io_dev_poll(&e_ctrl->io_master_info,
+ emap[j].poll.addr, emap[j].poll.data,
+ 0, emap[j].poll.addr_type,
+ emap[j].poll.data_type,
+ emap[j].poll.delay);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "poll failed rc %d",
+ rc);
+ return rc;
+ }
+ }
+
+ if (emap[j].mem.valid_size) {
+ rc = camera_io_dev_read_seq(&e_ctrl->io_master_info,
+ emap[j].mem.addr, memptr,
+ emap[j].mem.addr_type,
+ emap[j].mem.valid_size);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "read failed rc %d",
+ rc);
+ return rc;
+ }
+ memptr += emap[j].mem.valid_size;
+ }
+
+ if (emap[j].pageen.valid_size) {
+ i2c_reg_settings.addr_type = emap[j].pageen.addr_type;
+ i2c_reg_settings.data_type = emap[j].pageen.data_type;
+ i2c_reg_settings.size = 1;
+ i2c_reg_array.reg_addr = emap[j].pageen.addr;
+ i2c_reg_array.reg_data = 0;
+ i2c_reg_array.delay = emap[j].pageen.delay;
+ i2c_reg_settings.reg_setting = &i2c_reg_array;
+ rc = camera_io_dev_write(&e_ctrl->io_master_info,
+ &i2c_reg_settings);
+
+ if (rc) {
+ CAM_ERR(CAM_EEPROM,
+ "page disable failed rc %d",
+ rc);
+ return rc;
+ }
+ }
+ }
+ return rc;
+}
+
+/**
+ * cam_eeprom_power_up - Power up eeprom hardware
+ * @e_ctrl: ctrl structure
+ * @power_info: power up/down info for eeprom
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_power_up(struct cam_eeprom_ctrl_t *e_ctrl,
+ struct cam_sensor_power_ctrl_t *power_info)
+{
+ int32_t rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &e_ctrl->soc_info;
+
+ /* Parse and fill vreg params for power up settings */
+ rc = msm_camera_fill_vreg_params(
+ &e_ctrl->soc_info,
+ power_info->power_setting,
+ power_info->power_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM,
+ "failed to fill vreg params for power up rc:%d", rc);
+ return rc;
+ }
+
+ /* Parse and fill vreg params for power down settings*/
+ rc = msm_camera_fill_vreg_params(
+ &e_ctrl->soc_info,
+ power_info->power_down_setting,
+ power_info->power_down_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM,
+ "failed to fill vreg params power down rc:%d", rc);
+ return rc;
+ }
+
+ rc = cam_sensor_core_power_up(power_info, soc_info);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "failed in eeprom power up rc %d", rc);
+ return rc;
+ }
+
+ if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+ rc = camera_io_init(&(e_ctrl->io_master_info));
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "cci_init failed");
+ return -EINVAL;
+ }
+ }
+ return rc;
+}
+
+/**
+ * cam_eeprom_power_down - Power down eeprom hardware
+ * @e_ctrl: ctrl structure
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_power_down(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+ struct cam_sensor_power_ctrl_t *power_info;
+ struct cam_hw_soc_info *soc_info;
+ struct cam_eeprom_soc_private *soc_private;
+ int rc = 0;
+
+ if (!e_ctrl) {
+ CAM_ERR(CAM_EEPROM, "failed: e_ctrl %pK", e_ctrl);
+ return -EINVAL;
+ }
+
+ soc_private =
+ (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+ soc_info = &e_ctrl->soc_info;
+
+ if (!power_info) {
+ CAM_ERR(CAM_EEPROM, "failed: power_info %pK", power_info);
+ return -EINVAL;
+ }
+ rc = msm_camera_power_down(power_info, soc_info);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "power down the core is failed:%d", rc);
+ return rc;
+ }
+
+ if (e_ctrl->io_master_info.master_type == CCI_MASTER)
+ camera_io_release(&(e_ctrl->io_master_info));
+
+ return rc;
+}
+
+/**
+ * cam_eeprom_match_id - match eeprom id
+ * @e_ctrl: ctrl structure
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_match_id(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+ int rc;
+ struct camera_io_master *client = &e_ctrl->io_master_info;
+ uint8_t id[2];
+
+ rc = cam_spi_query_id(client, 0, &id[0], 2);
+ if (rc)
+ return rc;
+ CAM_DBG(CAM_EEPROM, "read 0x%x 0x%x, check 0x%x 0x%x",
+ id[0], id[1], client->spi_client->mfr_id0,
+ client->spi_client->device_id0);
+ if (id[0] != client->spi_client->mfr_id0
+ || id[1] != client->spi_client->device_id0)
+ return -ENODEV;
+ return 0;
+}
+
+/**
+ * cam_eeprom_parse_read_memory_map - Parse memory map
+ * @of_node: device node
+ * @e_ctrl: ctrl structure
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
+ struct cam_eeprom_ctrl_t *e_ctrl)
+{
+ int32_t rc = 0;
+ struct cam_eeprom_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
+
+ if (!e_ctrl) {
+ CAM_ERR(CAM_EEPROM, "failed: e_ctrl is NULL");
+ return -EINVAL;
+ }
+
+ soc_private =
+ (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ rc = cam_eeprom_parse_dt_memory_map(of_node, &e_ctrl->cal_data);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "failed: eeprom dt parse rc %d", rc);
+ return rc;
+ }
+ rc = cam_eeprom_power_up(e_ctrl, power_info);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "failed: eeprom power up rc %d", rc);
+ goto data_mem_free;
+ }
+ if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE) {
+ rc = cam_eeprom_match_id(e_ctrl);
+ if (rc) {
+ CAM_DBG(CAM_EEPROM, "eeprom not matching %d", rc);
+ goto power_down;
+ }
+ }
+ rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "read_eeprom_memory failed");
+ goto power_down;
+ }
+
+ rc = cam_eeprom_power_down(e_ctrl);
+ if (rc)
+ CAM_ERR(CAM_EEPROM, "failed: eeprom power down rc %d", rc);
+ return rc;
+power_down:
+ rc = cam_eeprom_power_down(e_ctrl);
+data_mem_free:
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+ return rc;
+}
+
+/**
+ * cam_eeprom_get_dev_handle - get device handle
+ * @e_ctrl: ctrl structure
+ * @arg: Camera control command argument
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_get_dev_handle(struct cam_eeprom_ctrl_t *e_ctrl,
+ void *arg)
+{
+ struct cam_sensor_acquire_dev eeprom_acq_dev;
+ struct cam_create_dev_hdl bridge_params;
+ struct cam_control *cmd = (struct cam_control *)arg;
+
+ if (e_ctrl->bridge_intf.device_hdl != -1) {
+ CAM_ERR(CAM_EEPROM, "Device is already acquired");
+ return -EFAULT;
+ }
+ if (copy_from_user(&eeprom_acq_dev, (void __user *) cmd->handle,
+ sizeof(eeprom_acq_dev))) {
+ CAM_ERR(CAM_EEPROM,
+ "EEPROM:ACQUIRE_DEV: copy from user failed");
+ return -EFAULT;
+ }
+
+ bridge_params.session_hdl = eeprom_acq_dev.session_handle;
+ bridge_params.ops = &e_ctrl->bridge_intf.ops;
+ bridge_params.v4l2_sub_dev_flag = 0;
+ bridge_params.media_entity_flag = 0;
+ bridge_params.priv = e_ctrl;
+
+ eeprom_acq_dev.device_handle =
+ cam_create_device_hdl(&bridge_params);
+ e_ctrl->bridge_intf.device_hdl = eeprom_acq_dev.device_handle;
+ e_ctrl->bridge_intf.session_hdl = eeprom_acq_dev.session_handle;
+
+ CAM_DBG(CAM_EEPROM, "Device Handle: %d", eeprom_acq_dev.device_handle);
+ if (copy_to_user((void __user *) cmd->handle, &eeprom_acq_dev,
+ sizeof(struct cam_sensor_acquire_dev))) {
+ CAM_ERR(CAM_EEPROM, "EEPROM:ACQUIRE_DEV: copy to user failed");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * cam_eeprom_update_slaveInfo - Update slave info
+ * @e_ctrl: ctrl structure
+ * @cmd_buf: command buffer
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_update_slaveInfo(struct cam_eeprom_ctrl_t *e_ctrl,
+ void *cmd_buf)
+{
+ int32_t rc = 0;
+ struct cam_eeprom_soc_private *soc_private;
+ struct cam_cmd_i2c_info *cmd_i2c_info = NULL;
+
+ soc_private =
+ (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+ cmd_i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+ soc_private->i2c_info.slave_addr = cmd_i2c_info->slave_addr;
+ soc_private->i2c_info.i2c_freq_mode = cmd_i2c_info->i2c_freq_mode;
+
+ rc = cam_eeprom_update_i2c_info(e_ctrl,
+ &soc_private->i2c_info);
+ CAM_DBG(CAM_EEPROM, "Slave addr: 0x%x Freq Mode: %d",
+ soc_private->i2c_info.slave_addr,
+ soc_private->i2c_info.i2c_freq_mode);
+
+ return rc;
+}
+
+/**
+ * cam_eeprom_parse_memory_map - Parse memory map info
+ * @data: memory block data
+ * @cmd_buf: command buffer
+ * @cmd_length: command buffer length
+ * @num_map: memory map size
+ * @cmd_length_bytes: command length processed in this function
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_parse_memory_map(
+ struct cam_eeprom_memory_block_t *data,
+ void *cmd_buf, int cmd_length, uint16_t *cmd_length_bytes,
+ int16_t num_map)
+{
+ int32_t rc = 0;
+ int32_t processed_size = 0;
+ struct cam_eeprom_memory_map_t *map = data->map;
+ struct common_header *cmm_hdr =
+ (struct common_header *)cmd_buf;
+ uint16_t cmd_length_in_bytes = 0;
+ struct cam_cmd_i2c_random_wr *i2c_random_wr = NULL;
+ struct cam_cmd_i2c_continuous_rd *i2c_cont_rd = NULL;
+ struct cam_cmd_conditional_wait *i2c_poll = NULL;
+
+ switch (cmm_hdr->cmd_type) {
+ case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
+ i2c_random_wr = (struct cam_cmd_i2c_random_wr *)cmd_buf;
+ cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_random_wr);
+
+ map[num_map].page.addr =
+ i2c_random_wr->random_wr_payload[0].reg_addr;
+ map[num_map].page.addr_type = i2c_random_wr->header.addr_type;
+ map[num_map].page.data =
+ i2c_random_wr->random_wr_payload[0].reg_data;
+ map[num_map].page.data_type = i2c_random_wr->header.data_type;
+ map[num_map].page.valid_size = 1;
+ cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+ processed_size +=
+ cmd_length_in_bytes;
+ break;
+ case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
+ i2c_cont_rd = (struct cam_cmd_i2c_continuous_rd *)cmd_buf;
+ cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_continuous_rd);
+
+ map[num_map].mem.addr = i2c_cont_rd->reg_addr;
+ map[num_map].mem.addr_type = i2c_cont_rd->header.addr_type;
+ map[num_map].mem.data_type = i2c_cont_rd->header.data_type;
+ map[num_map].mem.valid_size =
+ i2c_cont_rd->header.count;
+ cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+ processed_size +=
+ cmd_length_in_bytes;
+ data->num_data += map[num_map].mem.valid_size;
+ break;
+ case CAMERA_SENSOR_CMD_TYPE_WAIT:
+ i2c_poll = (struct cam_cmd_conditional_wait *)cmd_buf;
+ cmd_length_in_bytes = sizeof(struct cam_cmd_conditional_wait);
+
+ map[num_map].poll.addr = i2c_poll->reg_addr;
+ map[num_map].poll.addr_type = i2c_poll->addr_type;
+ map[num_map].poll.data = i2c_poll->reg_data;
+ map[num_map].poll.data_type = i2c_poll->data_type;
+ map[num_map].poll.delay = i2c_poll->timeout;
+ map[num_map].poll.valid_size = 1;
+ break;
+ default:
+ break;
+ }
+ *cmd_length_bytes = processed_size;
+ return rc;
+}
+
+/**
+ * cam_eeprom_init_pkt_parser - Parse eeprom packet
+ * @e_ctrl: ctrl structure
+ * @csl_packet: csl packet received
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_init_pkt_parser(struct cam_eeprom_ctrl_t *e_ctrl,
+ struct cam_packet *csl_packet)
+{
+ int32_t rc = 0;
+ int i = 0;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ uint32_t *offset = NULL;
+ uint32_t *cmd_buf = NULL;
+ uint64_t generic_pkt_addr;
+ size_t pkt_len = 0;
+ uint32_t total_cmd_buf_in_bytes = 0;
+ uint32_t processed_cmd_buf_in_bytes = 0;
+ struct common_header *cmm_hdr = NULL;
+ uint16_t cmd_length_in_bytes = 0;
+ struct cam_cmd_i2c_info *i2c_info = NULL;
+ int num_map = -1;
+ struct cam_eeprom_memory_map_t *map;
+ struct cam_eeprom_soc_private *soc_private =
+ (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+ struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+ e_ctrl->cal_data.map = kcalloc((MSM_EEPROM_MEMORY_MAP_MAX_SIZE *
+ MSM_EEPROM_MAX_MEM_MAP_CNT),
+ (sizeof(struct cam_eeprom_memory_map_t)), GFP_KERNEL);
+ if (!e_ctrl->cal_data.map) {
+ rc = -ENOMEM;
+ CAM_ERR(CAM_EEPROM, "failed");
+ return rc;
+ }
+ map = e_ctrl->cal_data.map;
+
+ offset = (uint32_t *)&csl_packet->payload;
+ offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+ /* Loop through multiple command buffers */
+ for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+ total_cmd_buf_in_bytes = cmd_desc[i].length;
+ processed_cmd_buf_in_bytes = 0;
+ if (!total_cmd_buf_in_bytes)
+ continue;
+ rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+ (uint64_t *)&generic_pkt_addr, &pkt_len);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "Failed to get cpu buf");
+ return rc;
+ }
+ cmd_buf = (uint32_t *)generic_pkt_addr;
+ if (!cmd_buf) {
+ CAM_ERR(CAM_EEPROM, "invalid cmd buf");
+ return -EINVAL;
+ }
+ cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+ /* Loop through multiple cmd formats in one cmd buffer */
+ while (processed_cmd_buf_in_bytes < total_cmd_buf_in_bytes) {
+ cmm_hdr = (struct common_header *)cmd_buf;
+ switch (cmm_hdr->cmd_type) {
+ case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+ i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+ num_map++;
+ map[num_map].saddr = i2c_info->slave_addr;
+ rc = cam_eeprom_update_slaveInfo(e_ctrl,
+ cmd_buf);
+ cmd_length_in_bytes =
+ sizeof(struct cam_cmd_i2c_info);
+ processed_cmd_buf_in_bytes +=
+ cmd_length_in_bytes;
+ cmd_buf += cmd_length_in_bytes/4;
+ e_ctrl->cal_data.num_map = num_map + 1;
+ break;
+ case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+ case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+ cmd_length_in_bytes =
+ sizeof(struct cam_cmd_power);
+ rc = cam_sensor_update_power_settings(cmd_buf,
+ cmd_length_in_bytes, power_info);
+ processed_cmd_buf_in_bytes +=
+ total_cmd_buf_in_bytes;
+ cmd_buf += total_cmd_buf_in_bytes/4;
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "Failed");
+ return rc;
+ }
+ break;
+ case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
+ case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
+ case CAMERA_SENSOR_CMD_TYPE_WAIT:
+ rc = cam_eeprom_parse_memory_map(
+ &e_ctrl->cal_data, cmd_buf,
+ total_cmd_buf_in_bytes,
+ &cmd_length_in_bytes, num_map);
+ processed_cmd_buf_in_bytes +=
+ cmd_length_in_bytes;
+ cmd_buf += cmd_length_in_bytes/4;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+/**
+ * cam_eeprom_get_cal_data - parse the userspace IO config and
+ * copy read data to share with userspace
+ * @e_ctrl: ctrl structure
+ * @csl_packet: csl packet received
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
+ struct cam_packet *csl_packet)
+{
+ struct cam_buf_io_cfg *io_cfg;
+ uint32_t i = 0;
+ int rc = 0;
+ uint64_t buf_addr;
+ size_t buf_size;
+ uint8_t *read_buffer;
+
+ io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+ &csl_packet->payload +
+ csl_packet->io_configs_offset);
+
+ CAM_DBG(CAM_EEPROM, "number of IO configs: %d:",
+ csl_packet->num_io_configs);
+
+ for (i = 0; i < csl_packet->num_io_configs; i++) {
+ CAM_DBG(CAM_EEPROM, "Direction: %d:", io_cfg->direction);
+ if (io_cfg->direction == CAM_BUF_OUTPUT) {
+ rc = cam_mem_get_cpu_buf(io_cfg->mem_handle[0],
+ (uint64_t *)&buf_addr, &buf_size);
+ CAM_DBG(CAM_EEPROM, "buf_addr : %pK, buf_size : %zu\n",
+ (void *)buf_addr, buf_size);
+
+ read_buffer = (uint8_t *)buf_addr;
+ if (!read_buffer) {
+ CAM_ERR(CAM_EEPROM,
+ "invalid buffer to copy data");
+ return -EINVAL;
+ }
+ read_buffer += io_cfg->offsets[0];
+
+ if (buf_size < e_ctrl->cal_data.num_data) {
+ CAM_ERR(CAM_EEPROM,
+ "failed to copy, Invalid size");
+ return -EINVAL;
+ }
+
+ CAM_ERR(CAM_EEPROM, "copy the data, len:%d",
+ e_ctrl->cal_data.num_data);
+ memcpy(read_buffer, e_ctrl->cal_data.mapdata,
+ e_ctrl->cal_data.num_data);
+
+ } else {
+ CAM_ERR(CAM_EEPROM, "Invalid direction");
+ rc = -EINVAL;
+ }
+ }
+ return rc;
+}
+
+/**
+ * cam_eeprom_pkt_parse - Parse csl packet
+ * @e_ctrl: ctrl structure
+ * @arg: Camera control command argument
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
+{
+ int32_t rc = 0;
+ struct cam_control *ioctl_ctrl = NULL;
+ struct cam_config_dev_cmd dev_config;
+ uint64_t generic_pkt_addr;
+ size_t pkt_len;
+ struct cam_packet *csl_packet = NULL;
+ struct cam_eeprom_soc_private *soc_private =
+ (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+ ioctl_ctrl = (struct cam_control *)arg;
+ if (copy_from_user(&dev_config, (void __user *) ioctl_ctrl->handle,
+ sizeof(dev_config)))
+ return -EFAULT;
+ rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
+ (uint64_t *)&generic_pkt_addr, &pkt_len);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM,
+ "error in converting command Handle Error: %d", rc);
+ return rc;
+ }
+ csl_packet = (struct cam_packet *)
+ (generic_pkt_addr + dev_config.offset);
+ switch (csl_packet->header.op_code & 0xFFFFFF) {
+ case CAM_EEPROM_PACKET_OPCODE_INIT:
+ if (e_ctrl->userspace_probe == false) {
+ rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+ CAM_ERR(CAM_EEPROM,
+ "Eeprom already probed at kernel boot");
+ rc = -EINVAL;
+ break;
+ }
+ if (e_ctrl->cal_data.num_data == 0) {
+ rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM,
+ "Failed in parsing the pkt");
+ return rc;
+ }
+
+ e_ctrl->cal_data.mapdata =
+ kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
+ if (!e_ctrl->cal_data.mapdata) {
+ rc = -ENOMEM;
+ CAM_ERR(CAM_EEPROM, "failed");
+ goto error;
+ }
+
+ rc = cam_eeprom_power_up(e_ctrl,
+ &soc_private->power_info);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+ goto memdata_free;
+ }
+
+ rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM,
+ "read_eeprom_memory failed");
+ goto power_down;
+ }
+
+ rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+ rc = cam_eeprom_power_down(e_ctrl);
+ } else {
+ CAM_DBG(CAM_EEPROM, "Already read eeprom");
+ }
+ break;
+ default:
+ break;
+ }
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+ return rc;
+power_down:
+ rc = cam_eeprom_power_down(e_ctrl);
+memdata_free:
+ kfree(e_ctrl->cal_data.mapdata);
+error:
+ kfree(e_ctrl->cal_data.map);
+ return rc;
+}
+
+/**
+ * cam_eeprom_driver_cmd - Handle eeprom cmds
+ * @e_ctrl: ctrl structure
+ * @arg: Camera control command argument
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
+{
+ int rc = 0;
+ struct cam_eeprom_query_cap_t eeprom_cap;
+ struct cam_control *cmd = (struct cam_control *)arg;
+
+ if (!e_ctrl) {
+ CAM_ERR(CAM_EEPROM, "e_ctrl is NULL");
+ return -EINVAL;
+ }
+
+ mutex_lock(&(e_ctrl->eeprom_mutex));
+ switch (cmd->op_code) {
+ case CAM_QUERY_CAP:
+ eeprom_cap.slot_info = e_ctrl->subdev_id;
+ if (e_ctrl->userspace_probe == false)
+ eeprom_cap.eeprom_kernel_probe = true;
+ else
+ eeprom_cap.eeprom_kernel_probe = false;
+
+ if (copy_to_user((void __user *) cmd->handle,
+ &eeprom_cap,
+ sizeof(struct cam_eeprom_query_cap_t))) {
+ CAM_ERR(CAM_EEPROM, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+ }
+ CAM_DBG(CAM_EEPROM, "eeprom_cap: ID: %d", eeprom_cap.slot_info);
+ break;
+ case CAM_ACQUIRE_DEV:
+ rc = cam_eeprom_get_dev_handle(e_ctrl, arg);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "Failed to acquire dev");
+ goto release_mutex;
+ }
+ break;
+ case CAM_CONFIG_DEV:
+ rc = cam_eeprom_pkt_parse(e_ctrl, arg);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "Failed in eeprom pkt Parsing");
+ goto release_mutex;
+ }
+ break;
+ default:
+ CAM_DBG(CAM_EEPROM, "invalid opcode");
+ break;
+ }
+
+release_mutex:
+ mutex_unlock(&(e_ctrl->eeprom_mutex));
+
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h
new file mode 100644
index 0000000..84736df
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_CORE_H_
+#define _CAM_EEPROM_CORE_H_
+
+#include "cam_eeprom_dev.h"
+
+int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg);
+int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
+ struct cam_eeprom_ctrl_t *e_ctrl);
+#endif
+/* _CAM_EEPROM_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
new file mode 100644
index 0000000..82dcc9c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -0,0 +1,487 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_eeprom_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_eeprom_soc.h"
+#include "cam_eeprom_core.h"
+#include "cam_debug_util.h"
+
+static long cam_eeprom_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = 0;
+ struct cam_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_CAM_CONTROL:
+ rc = cam_eeprom_driver_cmd(e_ctrl, arg);
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+
+ return rc;
+}
+
+int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
+ struct cam_eeprom_i2c_info_t *i2c_info)
+{
+ struct cam_sensor_cci_client *cci_client = NULL;
+
+ if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+ cci_client = e_ctrl->io_master_info.cci_client;
+ if (!cci_client) {
+ CAM_ERR(CAM_EEPROM, "failed: cci_client %pK",
+ cci_client);
+ return -EINVAL;
+ }
+ cci_client->cci_i2c_master = e_ctrl->cci_i2c_master;
+ cci_client->sid = (i2c_info->slave_addr) >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_eeprom_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cam_control cmd_data;
+ int32_t rc = 0;
+
+ if (copy_from_user(&cmd_data, (void __user *)arg,
+ sizeof(cmd_data))) {
+ CAM_ERR(CAM_EEPROM,
+ "Failed to copy from user_ptr=%pK size=%zu",
+ (void __user *)arg, sizeof(cmd_data));
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case VIDIOC_CAM_CONTROL:
+ rc = cam_eeprom_subdev_ioctl(sd, cmd, &cmd_data);
+ if (rc < 0) {
+ CAM_ERR(CAM_EEPROM,
+ "Failed in eeprom suddev handling rc %d",
+ rc);
+ return rc;
+ }
+ break;
+ default:
+ CAM_ERR(CAM_EEPROM, "Invalid compat ioctl: %d", cmd);
+ rc = -EINVAL;
+ }
+
+ if (!rc) {
+ if (copy_to_user((void __user *)arg, &cmd_data,
+ sizeof(cmd_data))) {
+ CAM_ERR(CAM_EEPROM,
+ "Failed to copy from user_ptr=%pK size=%zu",
+ (void __user *)arg, sizeof(cmd_data));
+ rc = -EFAULT;
+ }
+ }
+ return rc;
+}
+#endif
+
+static const struct v4l2_subdev_internal_ops cam_eeprom_internal_ops;
+
+static struct v4l2_subdev_core_ops cam_eeprom_subdev_core_ops = {
+ .ioctl = cam_eeprom_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = cam_eeprom_init_subdev_do_ioctl,
+#endif
+};
+
+static struct v4l2_subdev_ops cam_eeprom_subdev_ops = {
+ .core = &cam_eeprom_subdev_core_ops,
+};
+
+static int cam_eeprom_i2c_driver_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ struct cam_eeprom_ctrl_t *e_ctrl = NULL;
+ struct cam_eeprom_soc_private *soc_private = NULL;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ CAM_ERR(CAM_EEPROM, "i2c_check_functionality failed");
+		return -ENODEV;
+ }
+
+ e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+ if (!e_ctrl) {
+ CAM_ERR(CAM_EEPROM, "kzalloc failed");
+ rc = -ENOMEM;
+ goto probe_failure;
+ }
+ e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+ soc_private = (struct cam_eeprom_soc_private *)(id->driver_data);
+ if (!soc_private) {
+ CAM_ERR(CAM_EEPROM, "board info NULL");
+ rc = -EINVAL;
+ goto ectrl_free;
+ }
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.map = NULL;
+ e_ctrl->userspace_probe = false;
+
+ e_ctrl->eeprom_device_type = MSM_CAMERA_I2C_DEVICE;
+ e_ctrl->io_master_info.master_type = I2C_MASTER;
+ e_ctrl->io_master_info.client = client;
+
+ if (soc_private->i2c_info.slave_addr != 0)
+ e_ctrl->io_master_info.client->addr =
+ soc_private->i2c_info.slave_addr;
+
+ return rc;
+
+ectrl_free:
+ kfree(e_ctrl);
+probe_failure:
+ return rc;
+}
+
+static int cam_eeprom_i2c_driver_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct cam_eeprom_ctrl_t *e_ctrl;
+ struct cam_eeprom_soc_private *soc_private;
+
+ if (!sd) {
+ CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
+ return -EINVAL;
+ }
+
+ e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+ if (!e_ctrl) {
+ CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+ return -EINVAL;
+ }
+
+ soc_private =
+ (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+ if (!soc_private) {
+ CAM_ERR(CAM_EEPROM, "soc_info.soc_private is NULL");
+ return -EINVAL;
+ }
+
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+ if (soc_private) {
+ kfree(soc_private->power_info.gpio_num_info);
+ kfree(soc_private);
+ }
+ kfree(e_ctrl);
+
+ return 0;
+}
+
+static int cam_eeprom_spi_setup(struct spi_device *spi)
+{
+ struct cam_eeprom_ctrl_t *e_ctrl = NULL;
+ struct cam_sensor_spi_client *spi_client;
+ struct cam_eeprom_soc_private *eb_info;
+ struct cam_sensor_power_ctrl_t *power_info = NULL;
+ int rc = 0;
+
+ e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+ if (!e_ctrl)
+ return -ENOMEM;
+
+ e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+ e_ctrl->userspace_probe = false;
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.map = NULL;
+
+ spi_client = kzalloc(sizeof(*spi_client), GFP_KERNEL);
+ if (!spi_client) {
+ kfree(e_ctrl);
+ return -ENOMEM;
+ }
+
+ eb_info = kzalloc(sizeof(*eb_info), GFP_KERNEL);
+ if (!eb_info)
+		{ rc = -ENOMEM; goto spi_free; }
+ e_ctrl->soc_info.soc_private = eb_info;
+
+ e_ctrl->eeprom_device_type = MSM_CAMERA_SPI_DEVICE;
+ e_ctrl->io_master_info.spi_client = spi_client;
+ e_ctrl->io_master_info.master_type = SPI_MASTER;
+ spi_client->spi_master = spi;
+
+ power_info = &eb_info->power_info;
+ power_info->dev = &spi->dev;
+
+ /* set spi instruction info */
+ spi_client->retry_delay = 1;
+ spi_client->retries = 0;
+
+ /* Initialize mutex */
+ mutex_init(&(e_ctrl->eeprom_mutex));
+
+ rc = cam_eeprom_spi_driver_soc_init(e_ctrl);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "failed: spi soc init rc %d", rc);
+ goto board_free;
+ }
+
+ if (e_ctrl->userspace_probe == false) {
+ rc = cam_eeprom_parse_read_memory_map(spi->dev.of_node,
+ e_ctrl);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
+ goto board_free;
+ }
+ }
+
+ return rc;
+
+board_free:
+ kfree(e_ctrl->soc_info.soc_private);
+spi_free:
+ kfree(spi_client);
+ kfree(e_ctrl);
+ return rc;
+}
+
+static int cam_eeprom_spi_driver_probe(struct spi_device *spi)
+{
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ spi_setup(spi);
+
+ CAM_DBG(CAM_EEPROM, "irq[%d] cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x]",
+ spi->irq, spi->chip_select, (spi->mode & SPI_CPHA) ? 1 : 0,
+ (spi->mode & SPI_CPOL) ? 1 : 0,
+ (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+ CAM_DBG(CAM_EEPROM, "max_speed[%u]", spi->max_speed_hz);
+
+ return cam_eeprom_spi_setup(spi);
+}
+
+static int cam_eeprom_spi_driver_remove(struct spi_device *sdev)
+{
+ struct v4l2_subdev *sd = spi_get_drvdata(sdev);
+ struct cam_eeprom_ctrl_t *e_ctrl;
+ struct cam_eeprom_soc_private *soc_private;
+
+ if (!sd) {
+ CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
+ return -EINVAL;
+ }
+
+ e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+ if (!e_ctrl) {
+ CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+ return -EINVAL;
+ }
+
+ kfree(e_ctrl->io_master_info.spi_client);
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+ soc_private =
+ (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+ if (soc_private) {
+ kfree(soc_private->power_info.gpio_num_info);
+ kfree(soc_private);
+ }
+ kfree(e_ctrl);
+
+ return 0;
+}
+
+static int32_t cam_eeprom_platform_driver_probe(
+ struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct cam_eeprom_ctrl_t *e_ctrl = NULL;
+ struct cam_eeprom_soc_private *soc_private = NULL;
+
+ e_ctrl = kzalloc(sizeof(struct cam_eeprom_ctrl_t), GFP_KERNEL);
+ if (!e_ctrl)
+ return -ENOMEM;
+
+ e_ctrl->soc_info.pdev = pdev;
+ e_ctrl->eeprom_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.map = NULL;
+ e_ctrl->userspace_probe = false;
+
+ e_ctrl->io_master_info.master_type = CCI_MASTER;
+ e_ctrl->io_master_info.cci_client = kzalloc(
+ sizeof(struct cam_sensor_cci_client), GFP_KERNEL);
+ if (!e_ctrl->io_master_info.cci_client)
+		{ rc = -ENOMEM; goto free_e_ctrl; }
+
+ soc_private = kzalloc(sizeof(struct cam_eeprom_soc_private),
+ GFP_KERNEL);
+ if (!soc_private) {
+ rc = -ENOMEM;
+ goto free_cci_client;
+ }
+ e_ctrl->soc_info.soc_private = soc_private;
+
+ /* Initialize mutex */
+ mutex_init(&(e_ctrl->eeprom_mutex));
+ rc = cam_eeprom_platform_driver_soc_init(e_ctrl);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "failed: soc init rc %d", rc);
+ goto free_soc;
+ }
+ rc = cam_eeprom_update_i2c_info(e_ctrl, &soc_private->i2c_info);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "failed: to update i2c info rc %d", rc);
+ goto free_soc;
+ }
+
+ if (e_ctrl->userspace_probe == false) {
+ rc = cam_eeprom_parse_read_memory_map(pdev->dev.of_node,
+ e_ctrl);
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
+ goto free_soc;
+ }
+ }
+
+ e_ctrl->v4l2_dev_str.internal_ops = &cam_eeprom_internal_ops;
+ e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+ strlcpy(e_ctrl->device_name, CAM_EEPROM_NAME,
+ sizeof(e_ctrl->device_name));
+ e_ctrl->v4l2_dev_str.name = e_ctrl->device_name;
+ e_ctrl->v4l2_dev_str.sd_flags =
+ (V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+ e_ctrl->v4l2_dev_str.ent_function = CAM_EEPROM_DEVICE_TYPE;
+ e_ctrl->v4l2_dev_str.token = e_ctrl;
+
+ rc = cam_register_subdev(&(e_ctrl->v4l2_dev_str));
+ if (rc) {
+ CAM_ERR(CAM_EEPROM, "fail to create subdev");
+ goto free_soc;
+ }
+
+ e_ctrl->bridge_intf.device_hdl = -1;
+ e_ctrl->bridge_intf.ops.get_dev_info = NULL;
+ e_ctrl->bridge_intf.ops.link_setup = NULL;
+ e_ctrl->bridge_intf.ops.apply_req = NULL;
+
+ platform_set_drvdata(pdev, e_ctrl);
+ v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+ return rc;
+free_soc:
+ kfree(soc_private);
+free_cci_client:
+ kfree(e_ctrl->io_master_info.cci_client);
+free_e_ctrl:
+ kfree(e_ctrl);
+ return rc;
+}
+
+/**
+ * cam_eeprom_platform_driver_remove - platform remove entry for the driver
+ * @pdev: platform device being removed
+ *
+ * Mirrors the SPI remove path: releases the calibration map buffers
+ * and the soc-private GPIO table (both previously leaked here) in
+ * addition to the soc-private block, CCI client and control structure.
+ *
+ * Return: 0 on success, -EINVAL if no control data is attached.
+ */
+static int cam_eeprom_platform_driver_remove(struct platform_device *pdev)
+{
+	struct cam_eeprom_ctrl_t *e_ctrl;
+	struct cam_eeprom_soc_private *soc_private;
+
+	e_ctrl = platform_get_drvdata(pdev);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	kfree(e_ctrl->cal_data.mapdata);
+	kfree(e_ctrl->cal_data.map);
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	if (soc_private) {
+		kfree(soc_private->power_info.gpio_num_info);
+		kfree(soc_private);
+	}
+	kfree(e_ctrl->io_master_info.cci_client);
+	kfree(e_ctrl);
+	return 0;
+}
+
+/* Device-tree match table shared by the platform and SPI drivers. */
+static const struct of_device_id cam_eeprom_dt_match[] = {
+	{ .compatible = "qcom,eeprom" },
+	{ }
+};
+
+
+MODULE_DEVICE_TABLE(of, cam_eeprom_dt_match);
+
+/* EEPROM bound over CCI (platform device). */
+static struct platform_driver cam_eeprom_platform_driver = {
+	.driver = {
+		.name = "qcom,eeprom",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_eeprom_dt_match,
+	},
+	.probe = cam_eeprom_platform_driver_probe,
+	.remove = cam_eeprom_platform_driver_remove,
+};
+
+static const struct i2c_device_id cam_eeprom_i2c_id[] = {
+	{ "msm_eeprom", (kernel_ulong_t)NULL},
+	{ }
+};
+
+/* EEPROM reached over a plain I2C bus. */
+static struct i2c_driver cam_eeprom_i2c_driver = {
+	.id_table = cam_eeprom_i2c_id,
+	.probe  = cam_eeprom_i2c_driver_probe,
+	.remove = cam_eeprom_i2c_driver_remove,
+	.driver = {
+		.name = "msm_eeprom",
+	},
+};
+
+/* EEPROM reached over SPI. */
+static struct spi_driver cam_eeprom_spi_driver = {
+	.driver = {
+		.name = "qcom_eeprom",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_eeprom_dt_match,
+	},
+	.probe = cam_eeprom_spi_driver_probe,
+	.remove = cam_eeprom_spi_driver_remove,
+};
+/**
+ * cam_eeprom_driver_init - module init: register all three bus drivers
+ *
+ * Registers the platform, SPI and I2C drivers in order. On any
+ * failure the drivers already registered are unregistered before
+ * returning, so a failed modprobe leaves no stale registrations
+ * (the original code returned early and leaked them).
+ *
+ * Return: 0 on success, negative errno from the failing registration.
+ */
+static int __init cam_eeprom_driver_init(void)
+{
+	int rc = 0;
+
+	rc = platform_driver_register(&cam_eeprom_platform_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "platform_driver_register failed rc = %d",
+			rc);
+		return rc;
+	}
+
+	rc = spi_register_driver(&cam_eeprom_spi_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "spi_register_driver failed rc = %d", rc);
+		goto unregister_platform;
+	}
+
+	rc = i2c_add_driver(&cam_eeprom_i2c_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "i2c_add_driver failed rc = %d", rc);
+		goto unregister_spi;
+	}
+
+	return 0;
+
+unregister_spi:
+	spi_unregister_driver(&cam_eeprom_spi_driver);
+unregister_platform:
+	platform_driver_unregister(&cam_eeprom_platform_driver);
+	return rc;
+}
+
+/**
+ * cam_eeprom_driver_exit - module exit: unregister all three bus drivers
+ *
+ * Reverses cam_eeprom_driver_init(); each unregister is independent.
+ */
+static void __exit cam_eeprom_driver_exit(void)
+{
+	platform_driver_unregister(&cam_eeprom_platform_driver);
+	spi_unregister_driver(&cam_eeprom_spi_driver);
+	i2c_del_driver(&cam_eeprom_i2c_driver);
+}
+
+module_init(cam_eeprom_driver_init);
+module_exit(cam_eeprom_driver_exit);
+MODULE_DESCRIPTION("CAM EEPROM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
new file mode 100644
index 0000000..a98bf00
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
@@ -0,0 +1,183 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_DEV_H_
+#define _CAM_EEPROM_DEV_H_
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cam_sensor.h>
+#include <cam_sensor_i2c.h>
+#include <cam_sensor_spi.h>
+#include <cam_sensor_io.h>
+#include <cam_cci_dev.h>
+#include <cam_req_mgr_util.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_mem_mgr.h>
+#include <cam_subdev.h>
+#include "cam_soc_util.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+#define PROPERTY_MAXSIZE 32
+
+#define MSM_EEPROM_MEMORY_MAP_MAX_SIZE 80
+#define MSM_EEPROM_MAX_MEM_MAP_CNT 8
+#define MSM_EEPROM_MEM_MAP_PROPERTIES_CNT 8
+
+/**
+ * struct cam_eeprom_map_t - one operation in the eeprom register map
+ * @valid_size : number of valid bytes for this operation
+ * @addr       : register address
+ * @addr_type  : address type
+ * @data       : data value
+ * @data_type  : data type
+ * @delay      : delay after the operation
+ *
+ */
+struct cam_eeprom_map_t {
+	uint32_t valid_size;
+	uint32_t addr;
+	uint32_t addr_type;
+	uint32_t data;
+	uint32_t data_type;
+	uint32_t delay;
+};
+
+/**
+ * struct cam_eeprom_memory_map_t - eeprom memory map for one block
+ * @page   : page-select write operation
+ * @pageen : page-enable write operation
+ * @poll   : poll operation executed before the read
+ * @mem    : the actual memory read operation
+ * @saddr  : slave address for this block
+ *
+ */
+struct cam_eeprom_memory_map_t {
+	struct cam_eeprom_map_t page;
+	struct cam_eeprom_map_t pageen;
+	struct cam_eeprom_map_t poll;
+	struct cam_eeprom_map_t mem;
+	uint32_t saddr;
+};
+
+/**
+ * struct cam_eeprom_memory_block_t - eeprom mem block info
+ * @map      : eeprom memory map (array of @num_map entries)
+ * @num_map  : number of map blocks
+ * @mapdata  : buffer holding the bytes read from the eeprom
+ * @num_data : size in bytes of total mapdata
+ *
+ */
+struct cam_eeprom_memory_block_t {
+	struct cam_eeprom_memory_map_t *map;
+	uint32_t num_map;
+	uint8_t *mapdata;
+	uint32_t num_data;
+};
+
+/**
+ * struct cam_eeprom_cmm_t - camera multimodule (CMM) data description
+ * @cmm_support     : cmm support flag
+ * @cmm_compression : cmm compression flag
+ * @cmm_offset      : cmm data start offset
+ * @cmm_size        : cmm data size
+ *
+ */
+struct cam_eeprom_cmm_t {
+	uint32_t cmm_support;
+	uint32_t cmm_compression;
+	uint32_t cmm_offset;
+	uint32_t cmm_size;
+};
+
+/**
+ * struct cam_eeprom_i2c_info_t - I2C info parsed from device tree
+ * @slave_addr    : slave address
+ * @i2c_freq_mode : i2c frequency mode
+ *
+ */
+struct cam_eeprom_i2c_info_t {
+	uint16_t slave_addr;
+	uint8_t i2c_freq_mode;
+};
+
+/**
+ * struct cam_eeprom_soc_private - eeprom soc private data structure
+ * @eeprom_name : eeprom name (from the "eeprom-name" DT property)
+ * @i2c_info    : i2c info structure
+ * @power_info  : eeprom power info
+ * @cmm_data    : cmm data
+ *
+ */
+struct cam_eeprom_soc_private {
+	const char *eeprom_name;
+	struct cam_eeprom_i2c_info_t i2c_info;
+	struct cam_sensor_power_ctrl_t power_info;
+	struct cam_eeprom_cmm_t cmm_data;
+};
+
+/**
+ * struct cam_eeprom_intf_params - bridge interface params
+ * @device_hdl  : Device Handle
+ * @session_hdl : Session Handle
+ * @link_hdl    : Link Handle
+ * @ops         : KMD operations
+ * @crm_cb      : Callback API pointers
+ */
+struct cam_eeprom_intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_eeprom_ctrl_t - EEPROM device control structure
+ * @pdev            : platform device
+ * @spi             : spi device
+ * @eeprom_mutex    : eeprom mutex
+ * @soc_info        : eeprom soc related info
+ * @io_master_info  : Information about the communication master
+ * @gpio_num_info   : gpio info
+ * @cci_i2c_master  : I2C structure
+ * @v4l2_dev_str    : V4L2 device structure
+ * @bridge_intf     : bridge interface params
+ * @eeprom_device_type : underlying bus (CCI/platform, I2C or SPI)
+ * @subdev_id       : subdev id
+ * @userspace_probe : flag indicates userspace or kernel probe
+ * @cal_data        : Calibration data
+ * @device_name     : Device name
+ *
+ */
+struct cam_eeprom_ctrl_t {
+	struct platform_device *pdev;
+	struct spi_device *spi;
+	struct mutex eeprom_mutex;
+	struct cam_hw_soc_info soc_info;
+	struct camera_io_master io_master_info;
+	struct msm_camera_gpio_num_info *gpio_num_info;
+	enum cci_i2c_master_t cci_i2c_master;
+	struct cam_subdev v4l2_dev_str;
+	struct cam_eeprom_intf_params bridge_intf;
+	enum msm_camera_device_type_t eeprom_device_type;
+	uint32_t subdev_id;
+	bool userspace_probe;
+	struct cam_eeprom_memory_block_t cal_data;
+	char device_name[20];
+};
+
+int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
+ struct cam_eeprom_i2c_info_t *i2c_info);
+
+#endif /*_CAM_EEPROM_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
new file mode 100644
index 0000000..84e723f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+
+#include "cam_eeprom_soc.h"
+#include "cam_debug_util.h"
+
+/*
+ * cam_eeprom_parse_dt_memory_map() - parse memory map in device node
+ * @node: device node
+ * @data: memory block for output
+ *
+ * This function parses @node to fill @data. It allocates the map itself,
+ * parses the DT properties (page/pageen/saddr/poll/mem per block),
+ * accumulates the total data length, and allocates the required buffer.
+ * It only fills the map, but does not perform the actual eeprom read.
+ * On error the partially-built map is freed and @data is zeroed.
+ */
+int cam_eeprom_parse_dt_memory_map(struct device_node *node,
+	struct cam_eeprom_memory_block_t *data)
+{
+	int i, rc = 0;
+	char property[PROPERTY_MAXSIZE];
+	uint32_t count = MSM_EEPROM_MEM_MAP_PROPERTIES_CNT;
+	struct cam_eeprom_memory_map_t *map;
+
+	snprintf(property, PROPERTY_MAXSIZE, "num-blocks");
+	rc = of_property_read_u32(node, property, &data->num_map);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed: num-blocks not available rc %d",
+			rc);
+		return rc;
+	}
+
+	map = kzalloc((sizeof(*map) * data->num_map), GFP_KERNEL);
+	if (!map) {
+		rc = -ENOMEM;
+		return rc;
+	}
+	data->map = map;
+
+	for (i = 0; i < data->num_map; i++) {
+		/* page/poll/mem are mandatory; pageen/saddr are optional */
+		snprintf(property, PROPERTY_MAXSIZE, "page%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].page, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: page not available rc %d",
+				rc);
+			goto ERROR;
+		}
+
+		snprintf(property, PROPERTY_MAXSIZE, "pageen%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].pageen, count);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "pageen not needed");
+
+		snprintf(property, PROPERTY_MAXSIZE, "saddr%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].saddr, 1);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "saddr not needed - block %d", i);
+
+		snprintf(property, PROPERTY_MAXSIZE, "poll%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].poll, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: poll not available rc %d",
+				rc);
+			goto ERROR;
+		}
+
+		snprintf(property, PROPERTY_MAXSIZE, "mem%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].mem, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: mem not available rc %d",
+				rc);
+			goto ERROR;
+		}
+		data->num_data += map[i].mem.valid_size;
+	}
+
+	data->mapdata = kzalloc(data->num_data, GFP_KERNEL);
+	if (!data->mapdata) {
+		rc = -ENOMEM;
+		goto ERROR;
+	}
+	return rc;
+
+ERROR:
+	kfree(data->map);
+	memset(data, 0, sizeof(*data));
+	return rc;
+}
+
+/**
+ * cam_eeprom_get_dt_data - parse common eeprom device-tree data
+ * @e_ctrl: ctrl structure
+ *
+ * Picks the device node matching the bus type (SPI master node or
+ * platform device node), reads the generic soc properties, the power
+ * settings (kernel-probe case only) and the GPIO pin table.
+ *
+ * Return: 0 on success (also when no GPIO data exists at all),
+ * negative errno on parse failure.
+ */
+static int cam_eeprom_get_dt_data(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info = &e_ctrl->soc_info;
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+	struct device_node *of_node = NULL;
+
+	if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE)
+		of_node = e_ctrl->io_master_info.
+			spi_client->spi_master->dev.of_node;
+	else if (e_ctrl->eeprom_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+		of_node = soc_info->pdev->dev.of_node;
+
+	if (!of_node) {
+		CAM_ERR(CAM_EEPROM, "of_node is NULL, device type %d",
+			e_ctrl->eeprom_device_type);
+		return -EINVAL;
+	}
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "cam_soc_util_get_dt_properties rc %d",
+			rc);
+		return rc;
+	}
+
+	if (e_ctrl->userspace_probe == false) {
+		rc = cam_get_dt_power_setting_data(of_node,
+			soc_info, power_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed in getting power settings");
+			return rc;
+		}
+	}
+
+	/* Missing gpio_data entirely is tolerated ... */
+	if (!soc_info->gpio_data) {
+		CAM_INFO(CAM_EEPROM, "No GPIO found");
+		return 0;
+	}
+
+	/* ... but gpio_data with an empty common table is an error.
+	 * NOTE(review): the INFO log level here looks inconsistent with
+	 * the -EINVAL return - confirm intent.
+	 */
+	if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+		CAM_INFO(CAM_EEPROM, "No GPIO found");
+		return -EINVAL;
+	}
+
+	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+		&power_info->gpio_num_info);
+	if ((rc < 0) || (!power_info->gpio_num_info)) {
+		CAM_ERR(CAM_EEPROM, "No/Error EEPROM GPIOs");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_cmm_dts - parse camera multimodule (CMM) data from DT
+ * @eb_info: eeprom private data structure
+ * @of_node: eeprom device node
+ *
+ * Reads the optional cmm-data-* properties. Absence of cmm support,
+ * offset or size is not an error; missing values simply stay 0.
+ *
+ * Return: always 0.
+ */
+static int cam_eeprom_cmm_dts(struct cam_eeprom_soc_private *eb_info,
+	struct device_node *of_node)
+{
+	int rc = 0;
+	struct cam_eeprom_cmm_t *cmm_data = &eb_info->cmm_data;
+
+	cmm_data->cmm_support =
+		of_property_read_bool(of_node, "cmm-data-support");
+	if (!cmm_data->cmm_support) {
+		CAM_DBG(CAM_EEPROM, "No cmm support");
+		return 0;
+	}
+
+	cmm_data->cmm_compression =
+		of_property_read_bool(of_node, "cmm-data-compressed");
+
+	rc = of_property_read_u32(of_node, "cmm-data-offset",
+		&cmm_data->cmm_offset);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "No MM offset data rc %d", rc);
+
+	rc = of_property_read_u32(of_node, "cmm-data-size",
+		&cmm_data->cmm_size);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "No MM size data rc %d", rc);
+
+	CAM_DBG(CAM_EEPROM, "cmm_compr %d, cmm_offset %d, cmm_size %d",
+		cmm_data->cmm_compression, cmm_data->cmm_offset,
+		cmm_data->cmm_size);
+	return 0;
+}
+
+/**
+ * cam_eeprom_spi_driver_soc_init - DT parse for the SPI-attached eeprom
+ * @e_ctrl: ctrl structure
+ *
+ * This function is called from cam_eeprom_spi_driver_probe; it parses
+ * the eeprom dt node and decides between userspace or kernel probe
+ * (a missing "eeprom-name" property selects userspace probe).
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_eeprom_spi_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int rc = 0;
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+	rc = of_property_read_u32(e_ctrl->spi->dev.of_node, "cell-index",
+		&e_ctrl->subdev_id);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+		return rc;
+	}
+	rc = of_property_read_string(e_ctrl->spi->dev.of_node,
+		"eeprom-name", &soc_private->eeprom_name);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+		e_ctrl->userspace_probe = true;
+	}
+
+	/* NOTE(review): when eeprom-name is absent, eeprom_name may be an
+	 * unset pointer in this %s debug print - confirm it is zeroed.
+	 */
+	CAM_DBG(CAM_EEPROM, "eeprom-name %s, rc %d", soc_private->eeprom_name,
+		rc);
+	rc = cam_eeprom_cmm_dts(soc_private,
+		e_ctrl->io_master_info.spi_client->spi_master->dev.of_node);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "MM data not available rc %d", rc);
+	rc = cam_eeprom_get_dt_data(e_ctrl);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "failed: eeprom get dt data rc %d", rc);
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_platform_driver_soc_init - DT parse for the CCI-attached eeprom
+ * @e_ctrl: ctrl structure
+ *
+ * This function is called from cam_eeprom_platform_driver_probe; it
+ * parses the eeprom dt node (cell-index, cci-master, eeprom-name,
+ * slave-addr, i2c-freq-mode) and decides between userspace or kernel
+ * probe. Missing or invalid i2c-freq-mode falls back to mode 0.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_eeprom_platform_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info = &e_ctrl->soc_info;
+	struct device_node *of_node = NULL;
+	struct platform_device *pdev = NULL;
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	uint32_t temp;
+
+	if (!soc_info->pdev) {
+		CAM_ERR(CAM_EEPROM, "Error:soc_info is not initialized");
+		return -EINVAL;
+	}
+
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
+	if (!of_node) {
+		CAM_ERR(CAM_EEPROM, "dev.of_node NULL");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(of_node, "cell-index",
+		&e_ctrl->subdev_id);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed rc %d\n", rc);
+		return rc;
+	}
+
+	/* NOTE(review): reads a u32 straight into an enum member - assumes
+	 * enum cci_i2c_master_t is 32-bit; confirm on all targets.
+	 */
+	rc = of_property_read_u32(of_node, "cci-master",
+		&e_ctrl->cci_i2c_master);
+	if (rc < 0) {
+		CAM_DBG(CAM_EEPROM, "failed rc %d", rc);
+		return rc;
+	}
+
+	rc = of_property_read_string(of_node, "eeprom-name",
+		&soc_private->eeprom_name);
+	if (rc < 0) {
+		CAM_DBG(CAM_EEPROM, "kernel probe is not enabled");
+		e_ctrl->userspace_probe = true;
+	}
+
+	rc = cam_eeprom_get_dt_data(e_ctrl);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "failed: eeprom get dt data rc %d", rc);
+
+	if (e_ctrl->userspace_probe == false) {
+		rc = of_property_read_u32(of_node, "slave-addr", &temp);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "failed: no slave-addr rc %d", rc);
+
+		soc_private->i2c_info.slave_addr = temp;
+
+		rc = of_property_read_u32(of_node, "i2c-freq-mode", &temp);
+		soc_private->i2c_info.i2c_freq_mode = temp;
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM,
+				"i2c-freq-mode read fail %d", rc);
+			soc_private->i2c_info.i2c_freq_mode = 0;
+		}
+		if (soc_private->i2c_info.i2c_freq_mode >= I2C_MAX_MODES) {
+			CAM_ERR(CAM_EEPROM, "invalid i2c_freq_mode = %d",
+				soc_private->i2c_info.i2c_freq_mode);
+			soc_private->i2c_info.i2c_freq_mode = 0;
+		}
+		CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X",
+			soc_private->i2c_info.slave_addr);
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
new file mode 100644
index 0000000..02e59d7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_SOC_H_
+#define _CAM_EEPROM_SOC_H_
+
+#include "cam_eeprom_dev.h"
+
+int cam_eeprom_parse_dt_memory_map(struct device_node *of,
+ struct cam_eeprom_memory_block_t *data);
+
+int cam_eeprom_platform_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl);
+int cam_eeprom_spi_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl);
+#endif/* _CAM_EEPROM_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
new file mode 100644
index 0000000..9aab0e4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash_dev.o cam_flash_core.o cam_flash_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
new file mode 100644
index 0000000..7af7efc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -0,0 +1,741 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_flash_core.h"
+
+/**
+ * cam_flash_prepare - enable/disable the flash LED regulator
+ * @flash_ctrl: flash control structure
+ * @state: requested transition (INIT enables, RELEASE disables)
+ *
+ * Toggles the QPNP flash regulator and tracks is_regulator_enabled
+ * so repeated enables/disables are rejected.
+ *
+ * Return: 0 on success, negative errno on invalid argument/state or
+ * regulator failure.
+ */
+int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
+	enum cam_flash_state state)
+{
+	int rc = 0;
+
+	if (!(flash_ctrl->switch_trigger)) {
+		CAM_ERR(CAM_FLASH, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if ((state == CAM_FLASH_STATE_INIT) &&
+		(flash_ctrl->is_regulator_enabled == false)) {
+		rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+			ENABLE_REGULATOR, NULL);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "regulator enable failed rc = %d",
+				rc);
+			return rc;
+		}
+		flash_ctrl->is_regulator_enabled = true;
+	} else if ((state == CAM_FLASH_STATE_RELEASE) &&
+		(flash_ctrl->is_regulator_enabled == true)) {
+		rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+			DISABLE_REGULATOR, NULL);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "regulator disable failed rc = %d",
+				rc);
+			return rc;
+		}
+		flash_ctrl->is_regulator_enabled = false;
+	} else {
+		/* Fix: log the rejected *requested* state (the value the
+		 * conditions above actually test), not the current state.
+		 */
+		CAM_ERR(CAM_FLASH, "Wrong Flash State : %d", state);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_flash_ops - drive the torch or flash LED triggers
+ * @flash_ctrl: flash control structure
+ * @flash_data: per-frame current settings
+ * @op: FIRELOW (torch) or FIREHIGH (flash)
+ *
+ * Clamps each requested LED current against the soc max and fires the
+ * corresponding triggers, then asserts the switch trigger.
+ *
+ * Return: 0 on success, -EINVAL on NULL args or unknown opcode.
+ */
+static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data, enum camera_flash_opcode op)
+{
+	uint32_t curr = 0, max_current = 0;
+	struct cam_flash_private_soc *soc_private = NULL;
+	int i = 0;
+
+	if (!flash_ctrl || !flash_data) {
+		CAM_ERR(CAM_FLASH, "Fctrl or Data NULL");
+		return -EINVAL;
+	}
+
+	/* Fix: soc_private is itself a pointer member; the original code
+	 * took its address (&...soc_private) and cast that, so every
+	 * field access below read garbage. Cast the pointer value, as
+	 * the eeprom driver does.
+	 */
+	soc_private = (struct cam_flash_private_soc *)
+		flash_ctrl->soc_info.soc_private;
+
+	if (op == CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+		for (i = 0; i < flash_ctrl->torch_num_sources; i++) {
+			if (flash_ctrl->torch_trigger[i]) {
+				max_current = soc_private->torch_max_current[i];
+
+				if (flash_data->led_current_ma[i] <=
+					max_current)
+					curr = flash_data->led_current_ma[i];
+				else
+					curr = soc_private->torch_op_current[i];
+
+				CAM_DBG(CAM_FLASH,
+					"Led_Current[%d] = %d", i, curr);
+				led_trigger_event(flash_ctrl->torch_trigger[i],
+					curr);
+			}
+		}
+	} else if (op == CAMERA_SENSOR_FLASH_OP_FIREHIGH) {
+		for (i = 0; i < flash_ctrl->flash_num_sources; i++) {
+			if (flash_ctrl->flash_trigger[i]) {
+				max_current = soc_private->flash_max_current[i];
+
+				if (flash_data->led_current_ma[i] <=
+					max_current)
+					curr = flash_data->led_current_ma[i];
+				else
+					curr = soc_private->flash_op_current[i];
+
+				CAM_DBG(CAM_FLASH, "LED flash_current[%d]: %d",
+					i, curr);
+				led_trigger_event(flash_ctrl->flash_trigger[i],
+					curr);
+			}
+		}
+	} else {
+		CAM_ERR(CAM_FLASH, "Wrong Operation: %d", op);
+		return -EINVAL;
+	}
+
+	if (flash_ctrl->switch_trigger)
+		led_trigger_event(flash_ctrl->switch_trigger, LED_SWITCH_ON);
+
+	return 0;
+}
+
+/**
+ * cam_flash_off - turn off every flash, torch and switch trigger
+ * @flash_ctrl: flash control structure
+ *
+ * Return: 0 on success, -EINVAL if @flash_ctrl is NULL.
+ */
+int cam_flash_off(struct cam_flash_ctrl *flash_ctrl)
+{
+	int i = 0;
+
+	if (!flash_ctrl) {
+		CAM_ERR(CAM_FLASH, "Flash control Null");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+		if (flash_ctrl->flash_trigger[i])
+			led_trigger_event(flash_ctrl->flash_trigger[i],
+				LED_OFF);
+
+	for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+		if (flash_ctrl->torch_trigger[i])
+			led_trigger_event(flash_ctrl->torch_trigger[i],
+				LED_OFF);
+
+	if (flash_ctrl->switch_trigger)
+		led_trigger_event(flash_ctrl->switch_trigger,
+			LED_SWITCH_OFF);
+
+	return 0;
+}
+
+/**
+ * cam_flash_low - fire the torch (low-power) LEDs
+ * @flash_ctrl: flash control structure
+ * @flash_data: per-frame current settings
+ *
+ * Turns off any active high-power flash sources first, then fires
+ * the torch via cam_flash_ops(FIRELOW).
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int cam_flash_low(
+	struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data)
+{
+	int i = 0, rc = 0;
+
+	if (!flash_data) {
+		CAM_ERR(CAM_FLASH, "Flash Data Null");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+		if (flash_ctrl->flash_trigger[i])
+			led_trigger_event(flash_ctrl->flash_trigger[i],
+				LED_OFF);
+
+	rc = cam_flash_ops(flash_ctrl, flash_data,
+		CAMERA_SENSOR_FLASH_OP_FIRELOW);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Fire Torch failed: %d", rc);
+
+	return rc;
+}
+
+/**
+ * cam_flash_high - fire the flash (high-power) LEDs
+ * @flash_ctrl: flash control structure
+ * @flash_data: per-frame current settings
+ *
+ * Turns off any active torch sources first, then fires the flash
+ * via cam_flash_ops(FIREHIGH).
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int cam_flash_high(
+	struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data)
+{
+	int i = 0, rc = 0;
+
+	if (!flash_data) {
+		CAM_ERR(CAM_FLASH, "Flash Data Null");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+		if (flash_ctrl->torch_trigger[i])
+			led_trigger_event(flash_ctrl->torch_trigger[i],
+				LED_OFF);
+
+	rc = cam_flash_ops(flash_ctrl, flash_data,
+		CAMERA_SENSOR_FLASH_OP_FIREHIGH);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Fire Flash Failed: %d", rc);
+
+	return rc;
+}
+
+/**
+ * delete_req - invalidate applied flash settings for a request
+ * @fctrl: flash control structure
+ * @req_id: request id (0 selects the non-realtime slot)
+ *
+ * For req_id 0 clears the NRT widget/RER settings (or marks the init
+ * settings invalid); otherwise clears the per-frame slot that is
+ * CAM_FLASH_PIPELINE_DELAY requests behind @req_id.
+ *
+ * Return: always 0.
+ */
+static int delete_req(struct cam_flash_ctrl *fctrl, uint64_t req_id)
+{
+	int i = 0;
+	int frame_offset = 0;
+	struct cam_flash_frame_setting *flash_data = NULL;
+
+	if (req_id == 0) {
+		flash_data = &fctrl->nrt_info;
+		if ((fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
+			(fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
+			flash_data->cmn_attr.is_settings_valid = false;
+			for (i = 0; i < flash_data->cmn_attr.count; i++)
+				flash_data->led_current_ma[i] = 0;
+		} else {
+			fctrl->flash_init_setting.cmn_attr.
+				is_settings_valid = false;
+		}
+	} else {
+		/* Fix: modulus was a hard-coded 8; the per_frame ring is
+		 * indexed with % MAX_PER_FRAME_ARRAY everywhere else
+		 * (see cam_flash_apply_setting), so use the macro here
+		 * too to keep the slot arithmetic consistent.
+		 */
+		frame_offset = (req_id + MAX_PER_FRAME_ARRAY -
+			CAM_FLASH_PIPELINE_DELAY) % MAX_PER_FRAME_ARRAY;
+		flash_data = &fctrl->per_frame[frame_offset];
+		if (req_id > flash_data->cmn_attr.request_id) {
+			flash_data->cmn_attr.request_id = 0;
+			flash_data->cmn_attr.is_settings_valid = false;
+			for (i = 0; i < flash_data->cmn_attr.count; i++)
+				flash_data->led_current_ma[i] = 0;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cam_flash_apply_setting - apply queued flash settings for a request
+ * @fctrl: flash control structure
+ * @req_id: request id; 0 applies the non-realtime (widget/RER)
+ *          settings, non-zero applies the matching per_frame slot
+ *
+ * Drives the flash state machine: widget torch on/off with regulator
+ * handling, RER blink sequences, and per-frame FIREHIGH/FIRELOW/OFF
+ * opcodes. Settings for the request are invalidated afterwards via
+ * delete_req() on the non-error paths.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
+	uint64_t req_id)
+{
+	int rc = 0, i = 0;
+	int frame_offset = 0;
+	uint16_t num_iterations;
+	struct cam_flash_frame_setting *flash_data = NULL;
+
+	if (req_id == 0) {
+		if (fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) {
+			flash_data = &fctrl->nrt_info;
+			if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+				if (!(fctrl->is_regulator_enabled)) {
+					rc = cam_flash_prepare(fctrl,
+						CAM_FLASH_STATE_INIT);
+					if (rc) {
+						CAM_ERR(CAM_FLASH,
+							"Reg Enable Failed %d",
+							rc);
+						goto nrt_del_req;
+					}
+					fctrl->flash_state =
+						CAM_FLASH_STATE_INIT;
+					rc = cam_flash_low(fctrl, flash_data);
+					if (rc) {
+						CAM_ERR(CAM_FLASH,
+							"Torch ON failed : %d",
+							rc);
+						goto nrt_del_req;
+					}
+					fctrl->flash_state =
+						CAM_FLASH_STATE_LOW;
+				}
+			} else if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_OFF) {
+				if (fctrl->flash_state !=
+					CAM_FLASH_STATE_INIT) {
+					rc = cam_flash_off(fctrl);
+					if (rc)
+						CAM_ERR(CAM_FLASH,
+							"LED off failed: %d",
+							rc);
+				}
+
+				rc = cam_flash_prepare(fctrl,
+					CAM_FLASH_STATE_RELEASE);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Regulator Disable failed %d",
+						rc);
+					goto nrt_del_req;
+				}
+
+				fctrl->flash_state =
+					CAM_FLASH_STATE_RELEASE;
+				fctrl->is_regulator_enabled = false;
+			}
+		} else if (fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_RER) {
+			flash_data = &fctrl->nrt_info;
+
+			if (fctrl->flash_state != CAM_FLASH_STATE_INIT) {
+				rc = cam_flash_off(fctrl);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash off failed: %d",
+						rc);
+				} else {
+					fctrl->flash_state =
+						CAM_FLASH_STATE_INIT;
+				}
+			}
+
+			num_iterations = flash_data->num_iterations;
+			for (i = 0; i < num_iterations; i++) {
+				/* Turn On Torch */
+				if (fctrl->flash_state ==
+					CAM_FLASH_STATE_INIT) {
+					rc = cam_flash_low(fctrl, flash_data);
+					if (rc) {
+						CAM_ERR(CAM_FLASH,
+							"Fire Torch Failed");
+						goto nrt_del_req;
+					}
+					fctrl->flash_state =
+						CAM_FLASH_STATE_LOW;
+				}
+				usleep_range(
+				flash_data->led_on_delay_ms * 1000,
+				flash_data->led_on_delay_ms * 1000 + 100);
+
+				/* Turn Off Torch */
+				rc = cam_flash_off(fctrl);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash off failed: %d",
+						rc);
+					continue;
+				}
+				fctrl->flash_state = CAM_FLASH_STATE_INIT;
+				usleep_range(
+				flash_data->led_off_delay_ms * 1000,
+				flash_data->led_off_delay_ms * 1000 + 100);
+			}
+		}
+	} else {
+		frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+		flash_data = &fctrl->per_frame[frame_offset];
+
+		if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_FIREHIGH) &&
+			(flash_data->cmn_attr.is_settings_valid)) {
+			/* Turn On Flash */
+			if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
+				rc = cam_flash_high(fctrl, flash_data);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash ON failed: rc= %d",
+						rc);
+					goto apply_setting_err;
+				}
+				fctrl->flash_state = CAM_FLASH_STATE_HIGH;
+			}
+		} else if ((flash_data->opcode ==
+			CAMERA_SENSOR_FLASH_OP_FIRELOW) &&
+			(flash_data->cmn_attr.is_settings_valid)) {
+			/* Turn On Torch */
+			if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
+				rc = cam_flash_low(fctrl, flash_data);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Torch ON failed: rc= %d",
+						rc);
+					goto apply_setting_err;
+				}
+				fctrl->flash_state = CAM_FLASH_STATE_LOW;
+			}
+		} else if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) &&
+			(flash_data->cmn_attr.is_settings_valid)) {
+			/* Fix: the original used ||, which is true for every
+			 * state, so the LEDs were switched off even when
+			 * already RELEASEd/INIT. Only turn off when the
+			 * state is neither RELEASE nor INIT.
+			 */
+			if ((fctrl->flash_state != CAM_FLASH_STATE_RELEASE) &&
+				(fctrl->flash_state != CAM_FLASH_STATE_INIT)) {
+				rc = cam_flash_off(fctrl);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash off failed %d", rc);
+				} else {
+					fctrl->flash_state =
+						CAM_FLASH_STATE_INIT;
+				}
+			}
+		} else {
+			CAM_ERR(CAM_FLASH, "Wrong opcode : %d",
+				flash_data->opcode);
+			rc = -EINVAL;
+			goto apply_setting_err;
+		}
+	}
+
+nrt_del_req:
+	delete_req(fctrl, req_id);
+apply_setting_err:
+	return rc;
+}
+
+int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
+{
+ int rc = 0, i = 0;
+ uint64_t generic_ptr;
+ uint32_t *cmd_buf = NULL;
+ uint32_t *offset = NULL;
+ uint32_t frame_offset = 0;
+ size_t len_of_buffer;
+ struct cam_control *ioctl_ctrl = NULL;
+ struct cam_packet *csl_packet = NULL;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ struct common_header *cmn_hdr;
+ struct cam_config_dev_cmd config;
+ struct cam_req_mgr_add_request add_req;
+ struct cam_flash_init *cam_flash_info = NULL;
+ struct cam_flash_set_rer *flash_rer_info = NULL;
+ struct cam_flash_set_on_off *flash_operation_info = NULL;
+ struct cam_flash_query_curr *flash_query_info = NULL;
+
+ if (!fctrl || !arg) {
+ CAM_ERR(CAM_FLASH, "fctrl/arg is NULL");
+ return -EINVAL;
+ }
+ /* getting CSL Packet */
+ ioctl_ctrl = (struct cam_control *)arg;
+
+ if (copy_from_user((&config), (void __user *) ioctl_ctrl->handle,
+ sizeof(config))) {
+ CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
+ rc = -EFAULT;
+ return rc;
+ }
+
+ rc = cam_mem_get_cpu_buf(config.packet_handle,
+ (uint64_t *)&generic_ptr, &len_of_buffer);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
+ return rc;
+ }
+
+ csl_packet = (struct cam_packet *)generic_ptr;
+
+ switch (csl_packet->header.op_code & 0xFFFFFF) {
+ case CAM_FLASH_PACKET_OPCODE_INIT: {
+ /* INIT packet*/
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+ fctrl->flash_init_setting.cmn_attr.request_id = 0;
+ fctrl->flash_init_setting.cmn_attr.is_settings_valid = true;
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+ (uint64_t *)&generic_ptr, &len_of_buffer);
+ cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+ cmd_desc->offset);
+ cam_flash_info = (struct cam_flash_init *)cmd_buf;
+
+ switch (cam_flash_info->cmd_type) {
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT:
+ fctrl->flash_type = cam_flash_info->flash_type;
+ fctrl->is_regulator_enabled = false;
+ fctrl->nrt_info.cmn_attr.cmd_type =
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT;
+ break;
+ default:
+ CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
+ cam_flash_info->cmd_type);
+ return -EINVAL;
+ }
+ break;
+ }
+ case CAM_FLASH_PACKET_OPCODE_SET_OPS: {
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+ frame_offset = csl_packet->header.request_id %
+ MAX_PER_FRAME_ARRAY;
+ fctrl->per_frame[frame_offset].cmn_attr.request_id =
+ csl_packet->header.request_id;
+ fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
+ true;
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+ (uint64_t *)&generic_ptr, &len_of_buffer);
+ cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+ cmd_desc->offset);
+ cmn_hdr = (struct common_header *)cmd_buf;
+
+ switch (cmn_hdr->cmd_type) {
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: {
+ CAM_DBG(CAM_FLASH,
+ "CAMERA_FLASH_CMD_TYPE_OPS case called");
+ flash_operation_info =
+ (struct cam_flash_set_on_off *) cmd_buf;
+ fctrl->per_frame[frame_offset].opcode =
+ flash_operation_info->opcode;
+ fctrl->per_frame[frame_offset].cmn_attr.count =
+ flash_operation_info->count;
+ for (i = 0; i < flash_operation_info->count; i++)
+ fctrl->per_frame[frame_offset].led_current_ma[i]
+ = flash_operation_info->
+ led_current_ma[i];
+ break;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
+ cmn_hdr->cmd_type);
+ return -EINVAL;
+ }
+
+ break;
+ }
+ case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+ fctrl->nrt_info.cmn_attr.is_settings_valid = true;
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+ (uint64_t *)&generic_ptr, &len_of_buffer);
+ cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+ cmd_desc->offset);
+ cmn_hdr = (struct common_header *)cmd_buf;
+
+ switch (cmn_hdr->cmd_type) {
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET: {
+ CAM_DBG(CAM_FLASH, "Widget Flash Operation");
+ flash_operation_info =
+ (struct cam_flash_set_on_off *) cmd_buf;
+ fctrl->nrt_info.cmn_attr.count =
+ flash_operation_info->count;
+ fctrl->nrt_info.cmn_attr.request_id = 0;
+ fctrl->nrt_info.opcode =
+ flash_operation_info->opcode;
+ fctrl->nrt_info.cmn_attr.cmd_type =
+ CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET;
+
+ for (i = 0; i < flash_operation_info->count; i++)
+ fctrl->nrt_info.led_current_ma[i] =
+ flash_operation_info->led_current_ma[i];
+
+ mutex_lock(&fctrl->flash_wq_mutex);
+ rc = cam_flash_apply_setting(fctrl, 0);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "Apply setting failed: %d",
+ rc);
+ mutex_unlock(&fctrl->flash_wq_mutex);
+ return rc;
+ }
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR: {
+ int query_curr_ma = 0;
+
+ flash_query_info =
+ (struct cam_flash_query_curr *)cmd_buf;
+
+ rc = qpnp_flash_led_prepare(fctrl->switch_trigger,
+ QUERY_MAX_CURRENT, &query_curr_ma);
+ CAM_DBG(CAM_FLASH, "query_curr_ma = %d",
+ query_curr_ma);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Query current failed with rc=%d", rc);
+ return rc;
+ }
+ flash_query_info->query_current_ma = query_curr_ma;
+ break;
+ }
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_RER: {
+ rc = 0;
+ flash_rer_info = (struct cam_flash_set_rer *)cmd_buf;
+ fctrl->nrt_info.cmn_attr.cmd_type =
+ CAMERA_SENSOR_FLASH_CMD_TYPE_RER;
+ fctrl->nrt_info.opcode = flash_rer_info->opcode;
+ fctrl->nrt_info.cmn_attr.count = flash_rer_info->count;
+ fctrl->nrt_info.cmn_attr.request_id = 0;
+ fctrl->nrt_info.num_iterations =
+ flash_rer_info->num_iteration;
+ fctrl->nrt_info.led_on_delay_ms =
+ flash_rer_info->led_on_delay_ms;
+ fctrl->nrt_info.led_off_delay_ms =
+ flash_rer_info->led_off_delay_ms;
+
+ for (i = 0; i < flash_rer_info->count; i++)
+ fctrl->nrt_info.led_current_ma[i] =
+ flash_rer_info->led_current_ma[i];
+
+
+ mutex_lock(&fctrl->flash_wq_mutex);
+ rc = cam_flash_apply_setting(fctrl, 0);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "apply_setting failed: %d",
+ rc);
+ mutex_unlock(&fctrl->flash_wq_mutex);
+ return rc;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Wrong cmd_type : %d",
+ cmn_hdr->cmd_type);
+ return -EINVAL;
+ }
+
+ break;
+ }
+ case CAM_PKT_NOP_OPCODE: {
+ goto update_req_mgr;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
+ (csl_packet->header.op_code & 0xFFFFFF));
+ return -EINVAL;
+ }
+update_req_mgr:
+	if (((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_PKT_NOP_OPCODE) ||
+		((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_FLASH_PACKET_OPCODE_SET_OPS)) {
+ add_req.link_hdl = fctrl->bridge_intf.link_hdl;
+ add_req.req_id = csl_packet->header.request_id;
+ add_req.dev_hdl = fctrl->bridge_intf.device_hdl;
+ if (fctrl->bridge_intf.crm_cb &&
+ fctrl->bridge_intf.crm_cb->add_req)
+ fctrl->bridge_intf.crm_cb->add_req(&add_req);
+ CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
+ }
+
+ return rc;
+}
+
+int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+ info->dev_id = CAM_REQ_MGR_DEVICE_FLASH;
+ strlcpy(info->name, CAM_FLASH_NAME, sizeof(info->name));
+ info->p_delay = CAM_FLASH_PIPELINE_DELAY;
+ return 0;
+}
+
+int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
+{
+ struct cam_flash_ctrl *fctrl = NULL;
+
+ if (!link)
+ return -EINVAL;
+
+ fctrl = (struct cam_flash_ctrl *)cam_get_device_priv(link->dev_hdl);
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, " Device data is NULL");
+ return -EINVAL;
+ }
+
+ if (link->link_enable) {
+ fctrl->bridge_intf.link_hdl = link->link_hdl;
+ fctrl->bridge_intf.crm_cb = link->crm_cb;
+ } else {
+ fctrl->bridge_intf.link_hdl = -1;
+ fctrl->bridge_intf.crm_cb = NULL;
+ }
+
+ return 0;
+}
+
+static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+ int j = 0;
+ struct cam_flash_frame_setting *nrt_settings;
+
+ if (!fctrl)
+ return -EINVAL;
+
+ nrt_settings = &fctrl->nrt_info;
+
+ if (nrt_settings->cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT) {
+ fctrl->flash_init_setting.cmn_attr.is_settings_valid = false;
+ } else if ((nrt_settings->cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
+ (nrt_settings->cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
+ fctrl->nrt_info.cmn_attr.is_settings_valid = false;
+ fctrl->nrt_info.cmn_attr.count = 0;
+ fctrl->nrt_info.num_iterations = 0;
+ fctrl->nrt_info.led_on_delay_ms = 0;
+ fctrl->nrt_info.led_off_delay_ms = 0;
+ for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+ fctrl->nrt_info.led_current_ma[j] = 0;
+ }
+
+ return 0;
+}
+
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+{
+ int rc = 0;
+ int i = 0, j = 0;
+ struct cam_flash_ctrl *fctrl = NULL;
+ int frame_offset = 0;
+
+ fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Device data is NULL");
+ return -EINVAL;
+ }
+
+ if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+ /* flush all requests*/
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+ fctrl->per_frame[i].cmn_attr.request_id = 0;
+ fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
+ fctrl->per_frame[i].cmn_attr.count = 0;
+ for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+ fctrl->per_frame[i].led_current_ma[j] = 0;
+ }
+
+ rc = cam_flash_flush_nrt(fctrl);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "NonRealTime flush error");
+ } else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+ /* flush request with req_id*/
+ frame_offset = flush->req_id % MAX_PER_FRAME_ARRAY;
+ fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
+ fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
+ false;
+ fctrl->per_frame[frame_offset].cmn_attr.count = 0;
+ for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
+ fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
+ }
+ return rc;
+}
+
+int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+ int rc = 0;
+ struct cam_flash_ctrl *fctrl = NULL;
+
+ if (!apply)
+ return -EINVAL;
+
+ fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(apply->dev_hdl);
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Device data is NULL");
+ rc = -EINVAL;
+ goto free_resource;
+ }
+
+ if (!(apply->report_if_bubble)) {
+ mutex_lock(&fctrl->flash_wq_mutex);
+ rc = cam_flash_apply_setting(fctrl, apply->request_id);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "apply_setting failed with rc=%d",
+ rc);
+ mutex_unlock(&fctrl->flash_wq_mutex);
+ }
+
+free_resource:
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
new file mode 100644
index 0000000..4b0cf8d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FLASH_CORE_H_
+#define _CAM_FLASH_CORE_H_
+
+#include <linux/leds-qpnp-flash.h>
+#include <media/cam_sensor.h>
+#include "cam_flash_dev.h"
+#include "cam_sync_api.h"
+#include "cam_mem_mgr_api.h"
+
+int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg);
+int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info);
+int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link);
+int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply);
+int cam_flash_process_evt(struct cam_req_mgr_link_evt_data *event_data);
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush);
+int cam_flash_off(struct cam_flash_ctrl *fctrl);
+int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
+ enum cam_flash_state state);
+
+#endif /*_CAM_FLASH_CORE_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
new file mode 100644
index 0000000..32df2f1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include "cam_flash_dev.h"
+#include "cam_flash_soc.h"
+#include "cam_flash_core.h"
+
+static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
+ void *arg, struct cam_flash_private_soc *soc_private)
+{
+ int rc = 0;
+ int i = 0;
+ struct cam_control *cmd = (struct cam_control *)arg;
+
+ if (!fctrl || !arg) {
+ CAM_ERR(CAM_FLASH, "fctrl/arg is NULL with arg:%pK fctrl%pK",
+ fctrl, arg);
+ return -EINVAL;
+ }
+
+ mutex_lock(&(fctrl->flash_mutex));
+ switch (cmd->op_code) {
+ case CAM_ACQUIRE_DEV: {
+ struct cam_sensor_acquire_dev flash_acq_dev;
+ struct cam_create_dev_hdl bridge_params;
+
+ CAM_DBG(CAM_FLASH, "CAM_ACQUIRE_DEV");
+ if (fctrl->bridge_intf.device_hdl != -1) {
+ CAM_ERR(CAM_FLASH, "Device is already acquired");
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+
+ rc = copy_from_user(&flash_acq_dev, (void __user *)cmd->handle,
+ sizeof(flash_acq_dev));
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Failed Copying from User");
+			rc = -EFAULT; goto release_mutex;
+ }
+
+ bridge_params.session_hdl = flash_acq_dev.session_handle;
+ bridge_params.ops = &fctrl->bridge_intf.ops;
+ bridge_params.v4l2_sub_dev_flag = 0;
+ bridge_params.media_entity_flag = 0;
+ bridge_params.priv = fctrl;
+
+ flash_acq_dev.device_handle =
+ cam_create_device_hdl(&bridge_params);
+ fctrl->bridge_intf.device_hdl =
+ flash_acq_dev.device_handle;
+ fctrl->bridge_intf.session_hdl =
+ flash_acq_dev.session_handle;
+
+ rc = copy_to_user((void __user *) cmd->handle, &flash_acq_dev,
+ sizeof(struct cam_sensor_acquire_dev));
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Failed Copy to User with rc = %d",
+ rc);
+ rc = -EFAULT;
+ goto release_mutex;
+ }
+ break;
+ }
+ case CAM_RELEASE_DEV: {
+ CAM_DBG(CAM_FLASH, "CAM_RELEASE_DEV");
+ if (fctrl->bridge_intf.device_hdl == -1) {
+ CAM_ERR(CAM_FLASH,
+				"Invalid Handle: Link Hdl: %d device hdl: %d",
+				fctrl->bridge_intf.link_hdl,
+				fctrl->bridge_intf.device_hdl);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+ rc = cam_destroy_device_hdl(fctrl->bridge_intf.device_hdl);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "Failed in destroying the device Handle rc= %d",
+ rc);
+ fctrl->bridge_intf.device_hdl = -1;
+ fctrl->bridge_intf.link_hdl = -1;
+ fctrl->bridge_intf.session_hdl = -1;
+ break;
+ }
+ case CAM_QUERY_CAP: {
+ struct cam_flash_query_cap_info flash_cap;
+
+ CAM_DBG(CAM_FLASH, "CAM_QUERY_CAP");
+ flash_cap.slot_info = fctrl->soc_info.index;
+ for (i = 0; i < fctrl->flash_num_sources; i++) {
+ flash_cap.max_current_flash[i] =
+ soc_private->flash_max_current[i];
+ flash_cap.max_duration_flash[i] =
+ soc_private->flash_max_duration[i];
+ }
+
+ for (i = 0; i < fctrl->torch_num_sources; i++)
+ flash_cap.max_current_torch[i] =
+ soc_private->torch_max_current[i];
+
+ if (copy_to_user((void __user *) cmd->handle, &flash_cap,
+ sizeof(struct cam_flash_query_cap_info))) {
+ CAM_ERR(CAM_FLASH, "Failed Copy to User");
+ rc = -EFAULT;
+ goto release_mutex;
+ }
+ break;
+ }
+ case CAM_START_DEV: {
+ CAM_DBG(CAM_FLASH, "CAM_START_DEV");
+ rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_INIT);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Enable Regulator Failed rc = %d", rc);
+ goto release_mutex;
+ }
+ fctrl->flash_state = CAM_FLASH_STATE_INIT;
+ rc = cam_flash_apply_setting(fctrl, 0);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
+ goto release_mutex;
+ }
+ break;
+ }
+ case CAM_STOP_DEV: {
+ CAM_DBG(CAM_FLASH, "CAM_STOP_DEV");
+ if (fctrl->flash_state != CAM_FLASH_STATE_INIT)
+ cam_flash_off(fctrl);
+
+ rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_RELEASE);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Disable Regulator Failed ret = %d",
+ rc);
+ goto release_mutex;
+ }
+ fctrl->flash_state = CAM_FLASH_STATE_RELEASE;
+
+ break;
+ }
+ case CAM_CONFIG_DEV: {
+ CAM_DBG(CAM_FLASH, "CAM_CONFIG_DEV");
+ rc = cam_flash_parser(fctrl, arg);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Failed Flash Config: rc=%d\n", rc);
+ goto release_mutex;
+ }
+ break;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Invalid Opcode: %d", cmd->op_code);
+ rc = -EINVAL;
+ }
+
+release_mutex:
+ mutex_unlock(&(fctrl->flash_mutex));
+ return rc;
+}
+
+static const struct of_device_id cam_flash_dt_match[] = {
+ {.compatible = "qcom,camera-flash", .data = NULL},
+ {}
+};
+
+static long cam_flash_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = 0;
+ struct cam_flash_ctrl *fctrl = NULL;
+ struct cam_flash_private_soc *soc_private = NULL;
+
+ CAM_DBG(CAM_FLASH, "Enter");
+
+ fctrl = v4l2_get_subdevdata(sd);
+ soc_private = fctrl->soc_info.soc_private;
+
+ switch (cmd) {
+ case VIDIOC_CAM_CONTROL: {
+ rc = cam_flash_driver_cmd(fctrl, arg,
+ soc_private);
+ break;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Invalid ioctl cmd type");
+ rc = -EINVAL;
+ break;
+ }
+
+ CAM_DBG(CAM_FLASH, "Exit");
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_flash_subdev_do_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cam_control cmd_data;
+ int32_t rc = 0;
+
+ if (copy_from_user(&cmd_data, (void __user *)arg,
+ sizeof(cmd_data))) {
+ CAM_ERR(CAM_FLASH,
+ "Failed to copy from user_ptr=%pK size=%zu",
+ (void __user *)arg, sizeof(cmd_data));
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case VIDIOC_CAM_CONTROL: {
+ rc = cam_flash_subdev_ioctl(sd, cmd, &cmd_data);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "cam_flash_ioctl failed");
+ break;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Invalid compat ioctl cmd_type:%d",
+ cmd);
+ rc = -EINVAL;
+ }
+
+ if (!rc) {
+ if (copy_to_user((void __user *)arg, &cmd_data,
+ sizeof(cmd_data))) {
+ CAM_ERR(CAM_FLASH,
+ "Failed to copy to user_ptr=%pK size=%zu",
+ (void __user *)arg, sizeof(cmd_data));
+ rc = -EFAULT;
+ }
+ }
+
+ return rc;
+}
+#endif
+
+static int cam_flash_platform_remove(struct platform_device *pdev)
+{
+ struct cam_flash_ctrl *fctrl;
+
+ fctrl = platform_get_drvdata(pdev);
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Flash device is NULL");
+ return 0;
+ }
+
+	kfree(fctrl);
+
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops cam_flash_subdev_core_ops = {
+ .ioctl = cam_flash_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = cam_flash_subdev_do_ioctl
+#endif
+};
+
+static struct v4l2_subdev_ops cam_flash_subdev_ops = {
+ .core = &cam_flash_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cam_flash_internal_ops;
+
+static int32_t cam_flash_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct cam_flash_ctrl *flash_ctrl = NULL;
+
+ CAM_DBG(CAM_FLASH, "Enter");
+ if (!pdev->dev.of_node) {
+ CAM_ERR(CAM_FLASH, "of_node NULL");
+ return -EINVAL;
+ }
+
+ flash_ctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL);
+ if (!flash_ctrl)
+ return -ENOMEM;
+
+ flash_ctrl->pdev = pdev;
+ flash_ctrl->soc_info.pdev = pdev;
+
+ rc = cam_flash_get_dt_data(flash_ctrl, &flash_ctrl->soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "cam_flash_get_dt_data failed with %d", rc);
+ kfree(flash_ctrl);
+ return -EINVAL;
+ }
+
+ flash_ctrl->v4l2_dev_str.internal_ops =
+ &cam_flash_internal_ops;
+ flash_ctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops;
+ flash_ctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME;
+ flash_ctrl->v4l2_dev_str.sd_flags =
+ V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ flash_ctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE;
+ flash_ctrl->v4l2_dev_str.token = flash_ctrl;
+
+ rc = cam_register_subdev(&(flash_ctrl->v4l2_dev_str));
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc);
+ goto free_resource;
+ }
+ flash_ctrl->bridge_intf.device_hdl = -1;
+ flash_ctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+ flash_ctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+ flash_ctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+ flash_ctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
+
+ platform_set_drvdata(pdev, flash_ctrl);
+ v4l2_set_subdevdata(&flash_ctrl->v4l2_dev_str.sd, flash_ctrl);
+
+ mutex_init(&(flash_ctrl->flash_mutex));
+ mutex_init(&(flash_ctrl->flash_wq_mutex));
+
+ CAM_DBG(CAM_FLASH, "Probe success");
+ return rc;
+free_resource:
+ kfree(flash_ctrl);
+ return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_flash_dt_match);
+
+static struct platform_driver cam_flash_platform_driver = {
+ .probe = cam_flash_platform_probe,
+ .remove = cam_flash_platform_remove,
+ .driver = {
+ .name = "CAM-FLASH-DRIVER",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_flash_dt_match,
+ },
+};
+
+static int __init cam_flash_init_module(void)
+{
+ int32_t rc = 0;
+
+ rc = platform_driver_register(&cam_flash_platform_driver);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "platform probe for flash failed");
+
+ return rc;
+}
+
+static void __exit cam_flash_exit_module(void)
+{
+ platform_driver_unregister(&cam_flash_platform_driver);
+}
+
+module_init(cam_flash_init_module);
+module_exit(cam_flash_exit_module);
+MODULE_DESCRIPTION("CAM FLASH");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
new file mode 100644
index 0000000..1897eb6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -0,0 +1,181 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _CAM_FLASH_DEV_H_
+#define _CAM_FLASH_DEV_H_
+
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/cam_sensor.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_util.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_subdev.h"
+#include "cam_mem_mgr.h"
+#include "cam_sensor_cmn_header.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+#define CAMX_FLASH_DEV_NAME "cam-flash-dev"
+
+#define CAM_FLASH_PIPELINE_DELAY 1
+
+#define CAM_FLASH_PACKET_OPCODE_INIT 0
+#define CAM_FLASH_PACKET_OPCODE_SET_OPS 1
+#define CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS 2
+
+enum cam_flash_switch_trigger_ops {
+ LED_SWITCH_OFF = 0,
+ LED_SWITCH_ON,
+};
+
+enum cam_flash_state {
+ CAM_FLASH_STATE_INIT,
+ CAM_FLASH_STATE_LOW,
+ CAM_FLASH_STATE_HIGH,
+ CAM_FLASH_STATE_RELEASE,
+};
+
+/**
+ * struct cam_flash_intf_params
+ * @device_hdl : Device Handle
+ * @session_hdl : Session Handle
+ * @link_hdl : Link Handle
+ * @ops : KMD operations
+ * @crm_cb : Callback API pointers
+ */
+struct cam_flash_intf_params {
+ int32_t device_hdl;
+ int32_t session_hdl;
+ int32_t link_hdl;
+ struct cam_req_mgr_kmd_ops ops;
+ struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_flash_common_attr
+ * @is_settings_valid : Notify the valid settings
+ * @request_id : Request id provided by umd
+ * @count : Number of led count
+ * @cmd_type : Command buffer type
+ */
+struct cam_flash_common_attr {
+ bool is_settings_valid;
+ int32_t request_id;
+ uint16_t count;
+ uint8_t cmd_type;
+};
+
+/**
+ * struct flash_init_packet
+ * @cmn_attr : Provides common attributes
+ * @flash_type : Flash type(PMIC/I2C/GPIO)
+ */
+struct cam_flash_init_packet {
+ struct cam_flash_common_attr cmn_attr;
+ uint8_t flash_type;
+};
+
+/**
+ * struct flash_frame_setting
+ * @cmn_attr : Provides common attributes
+ * @num_iterations : Iterations used to perform RER
+ * @led_on_delay_ms : LED on time in millisec
+ * @led_off_delay_ms : LED off time in millisec
+ * @opcode : Command buffer opcode
+ * @led_current_ma[] : LED current array in milliamps
+ *
+ */
+struct cam_flash_frame_setting {
+ struct cam_flash_common_attr cmn_attr;
+ uint16_t num_iterations;
+ uint16_t led_on_delay_ms;
+ uint16_t led_off_delay_ms;
+ int8_t opcode;
+ uint32_t led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+/**
+ * struct cam_flash_private_soc
+ * @switch_trigger_name : Switch trigger name
+ * @flash_trigger_name : Flash trigger name array
+ * @flash_op_current : Flash operational current
+ * @flash_max_current : Max supported current for LED in flash mode
+ * @flash_max_duration : Max turn on duration for LED in Flash mode
+ * @torch_trigger_name : Torch trigger name array
+ * @torch_op_current : Torch operational current
+ * @torch_max_current : Max supported current for LED in torch mode
+ */
+
+struct cam_flash_private_soc {
+ const char *switch_trigger_name;
+ const char *flash_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t flash_op_current[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t flash_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t flash_max_duration[CAM_FLASH_MAX_LED_TRIGGERS];
+ const char *torch_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t torch_op_current[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t torch_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+/**
+ * struct cam_flash_ctrl
+ * @soc_info : Soc related information
+ * @pdev : Platform device
+ * @per_frame[] : Per_frame setting array
+ * @nrt_info : NonRealTime settings
+ * @of_node : Of Node ptr
+ * @v4l2_dev_str : V4L2 device structure
+ * @bridge_intf : CRM interface
+ * @flash_init_setting : Init command buffer structure
+ * @switch_trigger : Switch trigger ptr
+ * @flash_num_sources : Number of flash sources
+ * @torch_num_sources : Number of torch sources
+ * @flash_mutex : Mutex for flash operations
+ * @flash_wq_mutex : Mutex for flash apply setting
+ * @flash_state : Current flash state (LOW/OFF/ON/INIT)
+ * @flash_type : Flash types (PMIC/I2C/GPIO)
+ * @is_regulator_enabled : Regulator disable/enable notifier
+ * @flash_trigger : Flash trigger ptr
+ * @torch_trigger : Torch trigger ptr
+ */
+struct cam_flash_ctrl {
+ struct cam_hw_soc_info soc_info;
+ struct platform_device *pdev;
+ struct cam_flash_frame_setting per_frame[MAX_PER_FRAME_ARRAY];
+ struct cam_flash_frame_setting nrt_info;
+ struct device_node *of_node;
+ struct cam_subdev v4l2_dev_str;
+ struct cam_flash_intf_params bridge_intf;
+ struct cam_flash_init_packet flash_init_setting;
+ struct led_trigger *switch_trigger;
+ uint32_t flash_num_sources;
+ uint32_t torch_num_sources;
+ struct mutex flash_mutex;
+ struct mutex flash_wq_mutex;
+ enum cam_flash_state flash_state;
+ uint8_t flash_type;
+ bool is_regulator_enabled;
+ struct led_trigger *flash_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+ struct led_trigger *torch_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+#endif /*_CAM_FLASH_DEV_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
new file mode 100644
index 0000000..a9ab169
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
@@ -0,0 +1,224 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include "cam_flash_soc.h"
+
+static int32_t cam_get_source_node_info(
+ struct device_node *of_node,
+ struct cam_flash_ctrl *fctrl,
+ struct cam_flash_private_soc *soc_private)
+{
+ int32_t rc = 0;
+ uint32_t count = 0, i = 0;
+ struct device_node *flash_src_node = NULL;
+ struct device_node *torch_src_node = NULL;
+ struct device_node *switch_src_node = NULL;
+
+ switch_src_node = of_parse_phandle(of_node, "switch-source", 0);
+ if (!switch_src_node) {
+ CAM_DBG(CAM_FLASH, "switch_src_node NULL");
+ } else {
+ rc = of_property_read_string(switch_src_node,
+ "qcom,default-led-trigger",
+ &soc_private->switch_trigger_name);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "default-led-trigger read failed rc=%d", rc);
+ } else {
+ CAM_DBG(CAM_FLASH, "switch trigger %s",
+ soc_private->switch_trigger_name);
+ led_trigger_register_simple(
+ soc_private->switch_trigger_name,
+ &fctrl->switch_trigger);
+ }
+
+ of_node_put(switch_src_node);
+ }
+
+ if (of_get_property(of_node, "flash-source", &count)) {
+ count /= sizeof(uint32_t);
+
+ if (count > CAM_FLASH_MAX_LED_TRIGGERS) {
+ CAM_ERR(CAM_FLASH, "Invalid LED count: %d", count);
+ return -EINVAL;
+ }
+
+ fctrl->flash_num_sources = count;
+
+ for (i = 0; i < count; i++) {
+ flash_src_node = of_parse_phandle(of_node,
+ "flash-source", i);
+ if (!flash_src_node) {
+ CAM_WARN(CAM_FLASH, "flash_src_node NULL");
+ continue;
+ }
+
+ rc = of_property_read_string(flash_src_node,
+ "qcom,default-led-trigger",
+ &soc_private->flash_trigger_name[i]);
+ if (rc) {
+ CAM_WARN(CAM_FLASH,
+				"default-led-trigger read failed rc=%d", rc);
+ of_node_put(flash_src_node);
+ continue;
+ }
+
+ CAM_DBG(CAM_FLASH, "default trigger %s",
+ soc_private->flash_trigger_name[i]);
+
+ /* Read operational-current */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,current-ma",
+ &soc_private->flash_op_current[i]);
+ if (rc) {
+ CAM_WARN(CAM_FLASH, "op-current: read failed");
+ of_node_put(flash_src_node);
+ continue;
+ }
+
+ /* Read max-current */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,max-current",
+ &soc_private->flash_max_current[i]);
+ if (rc) {
+ CAM_WARN(CAM_FLASH,
+ "max-current: read failed");
+ of_node_put(flash_src_node);
+ continue;
+ }
+
+ /* Read max-duration */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,duration-ms",
+ &soc_private->flash_max_duration[i]);
+ if (rc)
+ CAM_WARN(CAM_FLASH,
+ "max-duration: read failed");
+
+ of_node_put(flash_src_node);
+
+ CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
+ i, soc_private->flash_max_current[i]);
+
+ led_trigger_register_simple(
+ soc_private->flash_trigger_name[i],
+ &fctrl->flash_trigger[i]);
+ }
+ }
+
+ if (of_get_property(of_node, "torch-source", &count)) {
+ count /= sizeof(uint32_t);
+ if (count > CAM_FLASH_MAX_LED_TRIGGERS) {
+ CAM_ERR(CAM_FLASH, "Invalid LED count : %d", count);
+ return -EINVAL;
+ }
+
+ fctrl->torch_num_sources = count;
+
+ CAM_DBG(CAM_FLASH, "torch_num_sources = %d",
+ fctrl->torch_num_sources);
+ for (i = 0; i < count; i++) {
+ torch_src_node = of_parse_phandle(of_node,
+ "torch-source", i);
+ if (!torch_src_node) {
+ CAM_WARN(CAM_FLASH, "torch_src_node NULL");
+ continue;
+ }
+
+ rc = of_property_read_string(torch_src_node,
+ "qcom,default-led-trigger",
+ &soc_private->torch_trigger_name[i]);
+ if (rc < 0) {
+ CAM_WARN(CAM_FLASH,
+ "default-trigger read failed");
+ of_node_put(torch_src_node);
+ continue;
+ }
+
+ /* Read operational-current */
+ rc = of_property_read_u32(torch_src_node,
+ "qcom,current-ma",
+ &soc_private->torch_op_current[i]);
+ if (rc < 0) {
+ CAM_WARN(CAM_FLASH, "current: read failed");
+ of_node_put(torch_src_node);
+ continue;
+ }
+
+ /* Read max-current */
+ rc = of_property_read_u32(torch_src_node,
+ "qcom,max-current",
+ &soc_private->torch_max_current[i]);
+ if (rc < 0) {
+ CAM_WARN(CAM_FLASH,
+ "max-current: read failed");
+ of_node_put(torch_src_node);
+ continue;
+ }
+
+ of_node_put(torch_src_node);
+
+ CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
+ i, soc_private->torch_max_current[i]);
+
+ led_trigger_register_simple(
+ soc_private->torch_trigger_name[i],
+ &fctrl->torch_trigger[i]);
+ }
+ }
+
+ return rc;
+}
+
+int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
+ struct cam_hw_soc_info *soc_info)
+{
+ int32_t rc = 0;
+ struct device_node *of_node = NULL;
+
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "NULL flash control structure");
+ return -EINVAL;
+ }
+
+ of_node = fctrl->pdev->dev.of_node;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc);
+ return rc;
+ }
+
+ soc_info->soc_private =
+ kzalloc(sizeof(struct cam_flash_private_soc), GFP_KERNEL);
+ if (!soc_info->soc_private) {
+ rc = -ENOMEM;
+ goto release_soc_res;
+ }
+
+ rc = cam_get_source_node_info(of_node, fctrl, soc_info->soc_private);
+ if (rc < 0) {
+		CAM_ERR(CAM_FLASH,
+			"cam_get_source_node_info failed rc %d", rc);
+ goto free_soc_private;
+ }
+
+ return rc;
+
+free_soc_private:
+ kfree(soc_info->soc_private);
+release_soc_res:
+ cam_soc_util_release_platform_resource(soc_info);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h
new file mode 100644
index 0000000..2e1da69
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FLASH_SOC_H_
+#define _CAM_FLASH_SOC_H_
+
+#include "cam_flash_dev.h"
+
+int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
+ struct cam_hw_soc_info *soc_info);
+
+#endif /*_CAM_FLASH_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile
index d8c75fb..5a9441f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile
@@ -1,8 +1,10 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_dev.o cam_sensor_core.o cam_sensor_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index 031c340..f6e6a9a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -13,8 +13,9 @@
#include <linux/module.h>
#include <cam_sensor_cmn_header.h>
#include "cam_sensor_core.h"
-#include <cam_sensor_util.h>
+#include "cam_sensor_util.h"
#include "cam_soc_util.h"
+#include "cam_trace.h"
static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
void *arg)
@@ -34,8 +35,7 @@
ioctl_ctrl = (struct cam_control *)arg;
if (ioctl_ctrl->handle_type != CAM_HANDLE_USER_POINTER) {
- pr_err("%s:%d :Error: Invalid Handle Type\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Handle Type");
return -EINVAL;
}
@@ -48,22 +48,21 @@
(uint64_t *)&generic_ptr,
&len_of_buff);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in getting the buffer: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Failed in getting the buffer: %d", rc);
return rc;
}
csl_packet = (struct cam_packet *)(generic_ptr +
config.offset);
if (config.offset > len_of_buff) {
- pr_err("%s: %d offset is out of bounds: off: %lld len: %zu\n",
- __func__, __LINE__, config.offset, len_of_buff);
+ CAM_ERR(CAM_SENSOR,
+ "offset is out of bounds: off: %lld len: %zu",
+ config.offset, len_of_buff);
return -EINVAL;
}
i2c_data = &(s_ctrl->i2c_data);
- CDBG("%s:%d Header OpCode: %d\n",
- __func__, __LINE__, csl_packet->header.op_code);
+ CAM_DBG(CAM_SENSOR, "Header OpCode: %d", csl_packet->header.op_code);
if ((csl_packet->header.op_code & 0xFFFFFF) ==
CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG) {
i2c_reg_settings = &i2c_data->init_settings;
@@ -75,16 +74,16 @@
&i2c_data->
per_frame[csl_packet->header.request_id %
MAX_PER_FRAME_ARRAY];
- CDBG("%s:%d Received Packet: %lld\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "Received Packet: %lld",
csl_packet->header.request_id % MAX_PER_FRAME_ARRAY);
if (i2c_reg_settings->is_settings_valid == 1) {
- pr_err("%s:%d :Error: Already some pkt in offset req : %lld\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR,
+ "Already some pkt in offset req : %lld",
csl_packet->header.request_id);
rc = delete_request(i2c_reg_settings);
if (rc < 0) {
- pr_err("%s: %d :Error: Failed in Deleting the err: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Failed in Deleting the err: %d", rc);
return rc;
}
}
@@ -96,7 +95,7 @@
CAM_PKT_NOP_OPCODE) {
goto update_req_mgr;
} else {
- pr_err("%s:%d Invalid Packet Header\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Packet Header");
return -EINVAL;
}
@@ -106,8 +105,7 @@
rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings, cmd_desc, 1);
if (rc < 0) {
- pr_err("%s:%d :Error: Fail parsing I2C Pkt: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc);
return rc;
}
@@ -117,14 +115,14 @@
CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE)) {
add_req.link_hdl = s_ctrl->bridge_intf.link_hdl;
add_req.req_id = csl_packet->header.request_id;
- CDBG("%s:%d Rxed Req Id: %lld\n",
- __func__, __LINE__, csl_packet->header.request_id);
+ CAM_DBG(CAM_SENSOR, " Rxed Req Id: %lld",
+ csl_packet->header.request_id);
add_req.dev_hdl = s_ctrl->bridge_intf.device_hdl;
if (s_ctrl->bridge_intf.crm_cb &&
s_ctrl->bridge_intf.crm_cb->add_req)
s_ctrl->bridge_intf.crm_cb->add_req(&add_req);
- CDBG("%s:%d add req to req mgr: %lld\n",
- __func__, __LINE__, add_req.req_id);
+ CAM_DBG(CAM_SENSOR, " add req to req mgr: %lld",
+ add_req.req_id);
}
return rc;
}
@@ -138,7 +136,8 @@
if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
cci_client = s_ctrl->io_master_info.cci_client;
if (!cci_client) {
- pr_err("failed: cci_client %pK", cci_client);
+ CAM_ERR(CAM_SENSOR, "failed: cci_client %pK",
+ cci_client);
return -EINVAL;
}
cci_client->cci_i2c_master = s_ctrl->cci_i2c_master;
@@ -146,8 +145,7 @@
cci_client->retries = 3;
cci_client->id_map = 0;
cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
- CDBG("%s:%d Master: %d sid: %d freq_mode: %d\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, " Master: %d sid: %d freq_mode: %d",
cci_client->cci_i2c_master, i2c_info->slave_addr,
i2c_info->i2c_freq_mode);
}
@@ -169,211 +167,14 @@
s_ctrl->sensor_probe_addr_type = probe_info->addr_type;
s_ctrl->sensor_probe_data_type = probe_info->data_type;
- CDBG("%s:%d Sensor Addr: 0x%x sensor_id: 0x%x sensor_mask: 0x%x\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR,
+ "Sensor Addr: 0x%x sensor_id: 0x%x sensor_mask: 0x%x",
s_ctrl->sensordata->slave_info.sensor_id_reg_addr,
s_ctrl->sensordata->slave_info.sensor_id,
s_ctrl->sensordata->slave_info.sensor_id_mask);
return rc;
}
-int32_t cam_sensor_update_power_settings(void *cmd_buf,
- int cmd_length, struct cam_sensor_ctrl_t *s_ctrl)
-{
- int32_t rc = 0, tot_size = 0, last_cmd_type = 0;
- int32_t i = 0, pwr_up = 0, pwr_down = 0;
- void *ptr = cmd_buf, *scr;
- struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
- struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
- struct cam_sensor_power_ctrl_t *power_info =
- &s_ctrl->sensordata->power_info;
-
- if (!pwr_cmd || !cmd_length) {
- pr_err("%s:%d Invalid Args: pwr_cmd %pK, cmd_length: %d\n",
- __func__, __LINE__, pwr_cmd, cmd_length);
- return -EINVAL;
- }
-
- power_info->power_setting_size = 0;
- power_info->power_setting =
- (struct cam_sensor_power_setting *)
- kzalloc(sizeof(struct cam_sensor_power_setting) *
- MAX_POWER_CONFIG, GFP_KERNEL);
- if (!power_info->power_setting)
- return -ENOMEM;
-
- power_info->power_down_setting =
- (struct cam_sensor_power_setting *)
- kzalloc(sizeof(struct cam_sensor_power_setting) *
- MAX_POWER_CONFIG, GFP_KERNEL);
- if (!power_info->power_down_setting) {
- rc = -ENOMEM;
- goto free_power_settings;
- }
-
- while (tot_size < cmd_length) {
- if (cmm_hdr->cmd_type ==
- CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
- struct cam_cmd_power *pwr_cmd =
- (struct cam_cmd_power *)ptr;
-
- power_info->
- power_setting_size +=
- pwr_cmd->count;
- scr = ptr + sizeof(struct cam_cmd_power);
- tot_size = tot_size + sizeof(struct cam_cmd_power);
-
- if (pwr_cmd->count == 0)
- CDBG("%s:%d Un expected Command\n",
- __func__, __LINE__);
-
- for (i = 0; i < pwr_cmd->count; i++, pwr_up++) {
- power_info->
- power_setting[pwr_up].seq_type =
- pwr_cmd->power_settings[i].
- power_seq_type;
- power_info->
- power_setting[pwr_up].config_val =
- pwr_cmd->power_settings[i].
- config_val_low;
- power_info->power_setting[pwr_up].delay = 0;
- if (i) {
- scr = scr +
- sizeof(
- struct cam_power_settings);
- tot_size = tot_size +
- sizeof(
- struct cam_power_settings);
- }
- if (tot_size > cmd_length) {
- pr_err("%s:%d :Error: Command Buffer is wrong\n",
- __func__, __LINE__);
- rc = -EINVAL;
- goto free_power_down_settings;
- }
- CDBG("Seq Type[%d]: %d Config_val: %ldn",
- pwr_up,
- power_info->
- power_setting[pwr_up].seq_type,
- power_info->
- power_setting[pwr_up].
- config_val);
- }
- last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_UP;
- ptr = (void *) scr;
- cmm_hdr = (struct common_header *)ptr;
- } else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) {
- struct cam_cmd_unconditional_wait *wait_cmd =
- (struct cam_cmd_unconditional_wait *)ptr;
- if (wait_cmd->op_code ==
- CAMERA_SENSOR_WAIT_OP_SW_UCND) {
- if (last_cmd_type ==
- CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
- if (pwr_up > 0)
- power_info->
- power_setting
- [pwr_up - 1].delay +=
- wait_cmd->delay;
- else
- pr_err("%s:%d Delay is expected only after valid power up setting\n",
- __func__, __LINE__);
- } else if (last_cmd_type ==
- CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
- if (pwr_down > 0)
- power_info->
- power_down_setting
- [pwr_down - 1].delay +=
- wait_cmd->delay;
- else
- pr_err("%s:%d Delay is expected only after valid power down setting\n",
- __func__, __LINE__);
- }
- } else
- CDBG("%s:%d Invalid op code: %d\n",
- __func__, __LINE__, wait_cmd->op_code);
- tot_size = tot_size +
- sizeof(struct cam_cmd_unconditional_wait);
- if (tot_size > cmd_length) {
- pr_err("Command Buffer is wrong\n");
- return -EINVAL;
- }
- scr = (void *) (wait_cmd);
- ptr = (void *)
- (scr +
- sizeof(struct cam_cmd_unconditional_wait));
- CDBG("%s:%d ptr: %pK sizeof: %d Next: %pK\n",
- __func__, __LINE__, scr,
- (int32_t)sizeof(
- struct cam_cmd_unconditional_wait), ptr);
-
- cmm_hdr = (struct common_header *)ptr;
- } else if (cmm_hdr->cmd_type ==
- CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
- struct cam_cmd_power *pwr_cmd =
- (struct cam_cmd_power *)ptr;
-
- scr = ptr + sizeof(struct cam_cmd_power);
- tot_size = tot_size + sizeof(struct cam_cmd_power);
- power_info->power_down_setting_size += pwr_cmd->count;
-
- if (pwr_cmd->count == 0)
- pr_err("%s:%d Invalid Command\n",
- __func__, __LINE__);
-
- for (i = 0; i < pwr_cmd->count; i++, pwr_down++) {
- power_info->
- power_down_setting[pwr_down].
- seq_type =
- pwr_cmd->power_settings[i].
- power_seq_type;
- power_info->
- power_down_setting[pwr_down].
- config_val =
- pwr_cmd->power_settings[i].
- config_val_low;
- power_info->
- power_down_setting[pwr_down].delay = 0;
- if (i) {
- scr = scr +
- sizeof(
- struct cam_power_settings);
- tot_size =
- tot_size +
- sizeof(
- struct cam_power_settings);
- }
- if (tot_size > cmd_length) {
- pr_err("Command Buffer is wrong\n");
- rc = -EINVAL;
- goto free_power_down_settings;
- }
- CDBG("%s:%d Seq Type[%d]: %d Config_val: %ldn",
- __func__, __LINE__,
- pwr_down,
- power_info->
- power_down_setting[pwr_down].
- seq_type,
- power_info->
- power_down_setting[pwr_down].
- config_val);
- }
- last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_DOWN;
- ptr = (void *) scr;
- cmm_hdr = (struct common_header *)ptr;
- } else {
- pr_err("%s:%d: :Error: Un expected Header Type: %d\n",
- __func__, __LINE__, cmm_hdr->cmd_type);
- }
- }
-
- return rc;
-free_power_down_settings:
- kfree(power_info->power_down_setting);
-free_power_settings:
- kfree(power_info->power_setting);
- return rc;
-}
-
int32_t cam_handle_cmd_buffers_for_probe(void *cmd_buf,
struct cam_sensor_ctrl_t *s_ctrl,
int32_t cmd_buf_num, int cmd_buf_length)
@@ -388,16 +189,14 @@
i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
rc = cam_sensor_update_i2c_info(i2c_info, s_ctrl);
if (rc < 0) {
- pr_err("%s:%d Failed in Updating the i2c Info\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Failed in Updating the i2c Info");
return rc;
}
probe_info = (struct cam_cmd_probe *)
(cmd_buf + sizeof(struct cam_cmd_i2c_info));
rc = cam_sensor_update_slave_info(probe_info, s_ctrl);
if (rc < 0) {
- pr_err("%s:%d :Error: Updating the slave Info\n",
- __func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Failed in updating the slave Info");
return rc;
}
cmd_buf = probe_info;
@@ -405,16 +204,16 @@
break;
case 1: {
rc = cam_sensor_update_power_settings(cmd_buf,
- cmd_buf_length, s_ctrl);
+ cmd_buf_length, &s_ctrl->sensordata->power_info);
if (rc < 0) {
- pr_err("Failed in updating power settings\n");
+ CAM_ERR(CAM_SENSOR,
+ "Failed in updating power settings");
return rc;
}
}
break;
default:
- pr_err("%s:%d Invalid command buffer\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid command buffer");
break;
}
return rc;
@@ -433,21 +232,19 @@
rc = cam_mem_get_cpu_buf(handle,
(uint64_t *)&packet, &len);
if (rc < 0) {
- pr_err("%s: %d Failed to get the command Buffer\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Failed to get the command Buffer");
return -EINVAL;
}
pkt = (struct cam_packet *)packet;
cmd_desc = (struct cam_cmd_buf_desc *)
((uint32_t *)&pkt->payload + pkt->cmd_buf_offset/4);
if (cmd_desc == NULL) {
- pr_err("%s: %d command descriptor pos is invalid\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "command descriptor pos is invalid");
return -EINVAL;
}
if (pkt->num_cmd_buf != 2) {
- pr_err("%s: %d Expected More Command Buffers : %d\n",
- __func__, __LINE__, pkt->num_cmd_buf);
+ CAM_ERR(CAM_SENSOR, "Expected More Command Buffers : %d",
+ pkt->num_cmd_buf);
return -EINVAL;
}
for (i = 0; i < pkt->num_cmd_buf; i++) {
@@ -456,8 +253,8 @@
rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
(uint64_t *)&cmd_buf1, &len);
if (rc < 0) {
- pr_err("%s: %d Failed to parse the command Buffer Header\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Failed to parse the command Buffer Header");
return -EINVAL;
}
cmd_buf = (uint32_t *)cmd_buf1;
@@ -467,8 +264,8 @@
rc = cam_handle_cmd_buffers_for_probe(ptr, s_ctrl,
i, cmd_desc[i].length);
if (rc < 0) {
- pr_err("%s: %d Failed to parse the command Buffer Header\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Failed to parse the command Buffer Header");
return -EINVAL;
}
}
@@ -524,8 +321,8 @@
slave_info = &(s_ctrl->sensordata->slave_info);
if (!slave_info) {
- pr_err("%s:%d failed: %pK\n",
- __func__, __LINE__, slave_info);
+ CAM_ERR(CAM_SENSOR, " failed: %pK",
+ slave_info);
return -EINVAL;
}
@@ -535,11 +332,11 @@
&chipid, CAMERA_SENSOR_I2C_TYPE_WORD,
CAMERA_SENSOR_I2C_TYPE_WORD);
- CDBG("%s:%d read id: 0x%x expected id 0x%x:\n",
- __func__, __LINE__, chipid, slave_info->sensor_id);
+ CAM_DBG(CAM_SENSOR, "read id: 0x%x expected id 0x%x:",
+ chipid, slave_info->sensor_id);
if (cam_sensor_id_by_mask(s_ctrl, chipid) != slave_info->sensor_id) {
- pr_err("%s: chip id %x does not match %x\n",
- __func__, chipid, slave_info->sensor_id);
+ CAM_ERR(CAM_SENSOR, "chip id %x does not match %x",
+ chipid, slave_info->sensor_id);
return -ENODEV;
}
return rc;
@@ -556,8 +353,7 @@
&s_ctrl->sensordata->power_info;
if (!s_ctrl || !arg) {
- pr_err("%s: %d s_ctrl is NULL\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "s_ctrl is NULL");
return -EINVAL;
}
@@ -565,7 +361,8 @@
switch (cmd->op_code) {
case CAM_SENSOR_PROBE_CMD: {
if (s_ctrl->is_probe_succeed == 1) {
- pr_err("Already Sensor Probed in the slot\n");
+ CAM_ERR(CAM_SENSOR,
+ "Already Sensor Probed in the slot");
break;
}
/* Allocate memory for power up setting */
@@ -591,15 +388,14 @@
CAM_HANDLE_MEM_HANDLE) {
rc = cam_handle_mem_ptr(cmd->handle, s_ctrl);
if (rc < 0) {
- pr_err("%s: %d Get Buffer Handle Failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Get Buffer Handle Failed");
kfree(pu);
kfree(pd);
goto release_mutex;
}
} else {
- pr_err("%s:%d :Error: Invalid Command Type: %d",
- __func__, __LINE__, cmd->handle_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Command Type: %d",
+ cmd->handle_type);
}
/* Parse and fill vreg params for powerup settings */
@@ -608,8 +404,9 @@
s_ctrl->sensordata->power_info.power_setting,
s_ctrl->sensordata->power_info.power_setting_size);
if (rc < 0) {
- pr_err("%s:%d :Error: Fail in filling vreg params for PUP rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Fail in filling vreg params for PUP rc %d",
+ rc);
kfree(pu);
kfree(pd);
goto release_mutex;
@@ -621,8 +418,9 @@
s_ctrl->sensordata->power_info.power_down_setting,
s_ctrl->sensordata->power_info.power_down_setting_size);
if (rc < 0) {
- pr_err("%s:%d :Error: Fail in filling vreg params for PDOWN rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Fail in filling vreg params for PDOWN rc %d",
+ rc);
kfree(pu);
kfree(pd);
goto release_mutex;
@@ -631,7 +429,7 @@
/* Power up and probe sensor */
rc = cam_sensor_power_up(s_ctrl);
if (rc < 0) {
- pr_err("power up failed");
+ CAM_ERR(CAM_SENSOR, "power up failed");
cam_sensor_power_down(s_ctrl);
kfree(pu);
kfree(pd);
@@ -648,13 +446,11 @@
goto release_mutex;
}
- CDBG("%s:%d Probe Succeeded on the slot: %d\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "Probe Succeeded on the slot: %d",
s_ctrl->soc_info.index);
rc = cam_sensor_power_down(s_ctrl);
if (rc < 0) {
- pr_err("%s:%d :Error: fail in Sensor Power Down\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "fail in Sensor Power Down");
kfree(pu);
kfree(pd);
goto release_mutex;
@@ -671,15 +467,14 @@
struct cam_create_dev_hdl bridge_params;
if (s_ctrl->bridge_intf.device_hdl != -1) {
- pr_err("%s:%d Device is already acquired\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Device is already acquired");
rc = -EINVAL;
goto release_mutex;
}
rc = copy_from_user(&sensor_acq_dev,
(void __user *) cmd->handle, sizeof(sensor_acq_dev));
if (rc < 0) {
- pr_err("Failed Copying from user\n");
+ CAM_ERR(CAM_SENSOR, "Failed Copying from user");
goto release_mutex;
}
@@ -694,11 +489,11 @@
s_ctrl->bridge_intf.device_hdl = sensor_acq_dev.device_handle;
s_ctrl->bridge_intf.session_hdl = sensor_acq_dev.session_handle;
- CDBG("%s:%d Device Handle: %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "Device Handle: %d",
sensor_acq_dev.device_handle);
if (copy_to_user((void __user *) cmd->handle, &sensor_acq_dev,
sizeof(struct cam_sensor_acquire_dev))) {
- pr_err("Failed Copy to User\n");
+ CAM_ERR(CAM_SENSOR, "Failed Copy to User");
rc = -EFAULT;
goto release_mutex;
}
@@ -706,8 +501,8 @@
break;
case CAM_RELEASE_DEV: {
if (s_ctrl->bridge_intf.device_hdl == -1) {
- pr_err("%s:%d Invalid Handles: link hdl: %d device hdl: %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR,
+ "Invalid Handles: link hdl: %d device hdl: %d",
s_ctrl->bridge_intf.device_hdl,
s_ctrl->bridge_intf.link_hdl);
rc = -EINVAL;
@@ -715,8 +510,8 @@
}
rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
if (rc < 0)
- pr_err("%s:%d Failed in destroying the device hdl\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+				"Failed in destroying the device hdl");
s_ctrl->bridge_intf.device_hdl = -1;
s_ctrl->bridge_intf.link_hdl = -1;
s_ctrl->bridge_intf.session_hdl = -1;
@@ -728,7 +523,7 @@
cam_sensor_query_cap(s_ctrl, &sensor_cap);
if (copy_to_user((void __user *) cmd->handle, &sensor_cap,
sizeof(struct cam_sensor_query_cap))) {
- pr_err("Failed Copy to User\n");
+ CAM_ERR(CAM_SENSOR, "Failed Copy to User");
rc = -EFAULT;
goto release_mutex;
}
@@ -737,19 +532,18 @@
case CAM_START_DEV: {
rc = cam_sensor_power_up(s_ctrl);
if (rc < 0) {
- pr_err("%s:%d :Error: Sensor Power up failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Sensor Power up failed");
goto release_mutex;
}
rc = cam_sensor_apply_settings(s_ctrl, 0);
if (rc < 0) {
- pr_err("cannot apply settings\n");
+ CAM_ERR(CAM_SENSOR, "cannot apply settings");
goto release_mutex;
}
rc = delete_request(&s_ctrl->i2c_data.init_settings);
if (rc < 0) {
- pr_err("%s:%d Fail in deleting the Init settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Fail in deleting the Init settings");
rc = -EINVAL;
goto release_mutex;
}
@@ -758,8 +552,7 @@
case CAM_STOP_DEV: {
rc = cam_sensor_power_down(s_ctrl);
if (rc < 0) {
- pr_err("%s:%d Sensor Power Down failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Sensor Power Down failed");
goto release_mutex;
}
}
@@ -767,8 +560,7 @@
case CAM_CONFIG_DEV: {
rc = cam_sensor_i2c_pkt_parse(s_ctrl, arg);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed CCI Config: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Failed CCI Config: %d", rc);
goto release_mutex;
}
}
@@ -776,8 +568,7 @@
case CAM_SD_SHUTDOWN:
break;
default:
- pr_err("%s:%d :Error: Invalid Opcode: %d\n",
- __func__, __LINE__, cmd->op_code);
+ CAM_ERR(CAM_SENSOR, "Invalid Opcode: %d", cmd->op_code);
rc = -EINVAL;
goto release_mutex;
}
@@ -811,7 +602,7 @@
s_ctrl = (struct cam_sensor_ctrl_t *)
cam_get_device_priv(link->dev_hdl);
if (!s_ctrl) {
- pr_err("%s: Device data is NULL\n", __func__);
+ CAM_ERR(CAM_SENSOR, "Device data is NULL");
return -EINVAL;
}
if (link->link_enable) {
@@ -848,8 +639,7 @@
&s_ctrl->soc_info;
if (!s_ctrl) {
- pr_err("%s:%d failed: %pK\n",
- __func__, __LINE__, s_ctrl);
+ CAM_ERR(CAM_SENSOR, "failed: %pK", s_ctrl);
return -EINVAL;
}
@@ -857,23 +647,20 @@
slave_info = &(s_ctrl->sensordata->slave_info);
if (!power_info || !slave_info) {
- pr_err("%s:%d failed: %pK %pK\n",
- __func__, __LINE__, power_info,
- slave_info);
+ CAM_ERR(CAM_SENSOR, "failed: %pK %pK", power_info, slave_info);
return -EINVAL;
}
rc = cam_sensor_core_power_up(power_info, soc_info);
if (rc < 0) {
- pr_err("%s:%d power up the core is failed:%d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "power up the core is failed:%d", rc);
return rc;
}
if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
rc = camera_io_init(&(s_ctrl->io_master_info));
if (rc < 0) {
- pr_err("%s cci_init failed\n", __func__);
+ CAM_ERR(CAM_SENSOR, "cci_init failed");
return -EINVAL;
}
}
@@ -890,8 +677,7 @@
int rc = 0;
if (!s_ctrl) {
- pr_err("%s:%d failed: s_ctrl %pK\n",
- __func__, __LINE__, s_ctrl);
+ CAM_ERR(CAM_SENSOR, "failed: s_ctrl %pK", s_ctrl);
return -EINVAL;
}
@@ -899,14 +685,12 @@
soc_info = &s_ctrl->soc_info;
if (!power_info) {
- pr_err("%s:%d failed: power_info %pK\n",
- __func__, __LINE__, power_info);
+ CAM_ERR(CAM_SENSOR, "failed: power_info %pK", power_info);
return -EINVAL;
}
rc = msm_camera_power_down(power_info, soc_info);
if (rc < 0) {
- pr_err("%s:%d power down the core is failed:%d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "power down the core is failed:%d", rc);
return rc;
}
@@ -934,15 +718,16 @@
&(s_ctrl->io_master_info),
&(i2c_list->i2c_settings));
if (rc < 0) {
- pr_err("Failed to write the I2C settings\n");
+ CAM_ERR(CAM_SENSOR,
+ "Failed to write the I2C settings");
return rc;
}
}
rc = delete_request(&(s_ctrl->i2c_data.init_settings));
i2c_set->is_settings_valid = 0;
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in deleting the Init request: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Failed in deleting the Init request: %d", rc);
}
}
} else {
@@ -956,8 +741,9 @@
&(s_ctrl->io_master_info),
&(i2c_list->i2c_settings));
if (rc < 0) {
- pr_err("%s:%d :Error: Fail to write the I2C settings: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Fail to write the I2C settings: %d",
+ rc);
return rc;
}
}
@@ -965,8 +751,8 @@
MAX_PER_FRAME_ARRAY -
MAX_SYSTEM_PIPELINE_DELAY) %
MAX_PER_FRAME_ARRAY;
- CDBG("%s:%d Deleting the Request: %d\n",
- __func__, __LINE__, del_req_id);
+ CAM_DBG(CAM_SENSOR, "Deleting the Request: %d",
+ del_req_id);
if (req_id >
s_ctrl->i2c_data.per_frame[del_req_id].
request_id) {
@@ -976,13 +762,13 @@
&(s_ctrl->i2c_data.
per_frame[del_req_id]));
if (rc < 0)
- pr_err("%s:%d :Error: Failed in deleting the request: %d rc: %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR,
+ "Delete request Fail:%d rc:%d",
del_req_id, rc);
}
} else {
- CDBG("%s:%d Invalid/NOP request to apply: %lld\n",
- __func__, __LINE__, req_id);
+ CAM_DBG(CAM_SENSOR,
+ "Invalid/NOP request to apply: %lld", req_id);
}
}
return rc;
@@ -999,11 +785,11 @@
s_ctrl = (struct cam_sensor_ctrl_t *)
cam_get_device_priv(apply->dev_hdl);
if (!s_ctrl) {
- pr_err("%s: Device data is NULL\n", __func__);
+ CAM_ERR(CAM_SENSOR, "Device data is NULL");
return -EINVAL;
}
- CDBG("%s:%d Req Id: %lld\n", __func__, __LINE__,
- apply->request_id);
+ CAM_DBG(CAM_SENSOR, " Req Id: %lld", apply->request_id);
+ trace_cam_apply_req("Sensor", apply);
rc = cam_sensor_apply_settings(s_ctrl, apply->request_id);
return rc;
}
@@ -1021,7 +807,7 @@
s_ctrl = (struct cam_sensor_ctrl_t *)
cam_get_device_priv(flush_req->dev_hdl);
if (!s_ctrl) {
- pr_err("%s: Device data is NULL\n", __func__);
+ CAM_ERR(CAM_SENSOR, "Device data is NULL");
return -EINVAL;
}
@@ -1035,8 +821,8 @@
if (i2c_set->is_settings_valid == 1) {
rc = delete_request(i2c_set);
if (rc < 0)
- pr_err("%s:%d :Error: delete request: %lld rc: %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR,
+ "delete request: %lld rc: %d",
i2c_set->request_id, rc);
if (flush_req->type ==
@@ -1049,7 +835,8 @@
if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
!cancel_req_id_found)
- CDBG("%s:Flush request id:%lld not found in the pending list\n",
- __func__, flush_req->req_id);
+ CAM_DBG(CAM_SENSOR,
+ "Flush request id:%lld not found in the pending list",
+ flush_req->req_id);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
index c06a1b3..1453fb3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
@@ -27,8 +27,7 @@
rc = cam_sensor_driver_cmd(s_ctrl, arg);
break;
default:
- pr_err("%s:%d Invalid ioctl cmd: %d\n",
- __func__, __LINE__, cmd);
+		CAM_ERR(CAM_SENSOR, "Invalid ioctl cmd: %d", cmd);
rc = -EINVAL;
break;
}
@@ -42,8 +41,8 @@
struct cam_sensor_ctrl_t *s_ctrl;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- pr_err("%s %s :Error: i2c_check_functionality failed\n",
- __func__, client->name);
+ CAM_ERR(CAM_SENSOR,
+ "%s :i2c_check_functionality failed", client->name);
return -EFAULT;
}
@@ -60,8 +59,7 @@
rc = cam_sensor_parse_dt(s_ctrl);
if (rc < 0) {
- pr_err("%s:%d :Error: cam_sensor_parse_dt rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "cam_sensor_parse_dt rc %d", rc);
goto free_s_ctrl;
}
@@ -77,7 +75,7 @@
s_ctrl = platform_get_drvdata(pdev);
if (!s_ctrl) {
- pr_err("%s: sensor device is NULL\n", __func__);
+ CAM_ERR(CAM_SENSOR, "sensor device is NULL");
return 0;
}
@@ -92,7 +90,7 @@
struct cam_sensor_ctrl_t *s_ctrl = i2c_get_clientdata(client);
if (!s_ctrl) {
- pr_err("%s: sensor device is NULL\n", __func__);
+ CAM_ERR(CAM_SENSOR, "sensor device is NULL");
return 0;
}
@@ -111,7 +109,7 @@
if (copy_from_user(&cmd_data, (void __user *)arg,
sizeof(cmd_data))) {
- pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_SENSOR, "Failed to copy from user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
return -EFAULT;
}
@@ -120,19 +118,18 @@
case VIDIOC_CAM_CONTROL:
rc = cam_sensor_subdev_ioctl(sd, cmd, &cmd_data);
if (rc < 0)
- pr_err("%s:%d cam_sensor_subdev_ioctl failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "cam_sensor_subdev_ioctl failed");
break;
default:
- pr_err("%s:%d Invalid compat ioctl cmd_type: %d\n",
- __func__, __LINE__, cmd);
+ CAM_ERR(CAM_SENSOR, "Invalid compat ioctl cmd_type: %d", cmd);
rc = -EINVAL;
}
if (!rc) {
if (copy_to_user((void __user *)arg, &cmd_data,
sizeof(cmd_data))) {
- pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_SENSOR,
+ "Failed to copy to user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
rc = -EFAULT;
}
@@ -188,7 +185,7 @@
rc = cam_sensor_parse_dt(s_ctrl);
if (rc < 0) {
- pr_err("failed: cam_sensor_parse_dt rc %d", rc);
+ CAM_ERR(CAM_SENSOR, "failed: cam_sensor_parse_dt rc %d", rc);
goto free_s_ctrl;
}
@@ -211,8 +208,7 @@
rc = cam_register_subdev(&(s_ctrl->v4l2_dev_str));
if (rc < 0) {
- pr_err("%s:%d :ERROR: Fail with cam_register_subdev\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Fail with cam_register_subdev");
goto free_s_ctrl;
}
@@ -278,11 +274,11 @@
rc = platform_driver_register(&cam_sensor_platform_driver);
if (rc)
- pr_err("%s platform_driver_register failed rc = %d",
- __func__, rc);
+ CAM_ERR(CAM_SENSOR, "platform_driver_register failed rc = %d",
+ rc);
rc = i2c_add_driver(&cam_sensor_driver_i2c);
if (rc)
- pr_err("%s i2c_add_driver failed rc = %d", __func__, rc);
+ CAM_ERR(CAM_SENSOR, "i2c_add_driver failed rc = %d", rc);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
index ae14c9d..f3c70c4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -31,6 +31,7 @@
#include <cam_sensor_cmn_header.h>
#include <cam_subdev.h>
#include <cam_sensor_io.h>
+#include "cam_debug_util.h"
#define NUM_MASTERS 2
#define NUM_QUEUES 2
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
index 78edec1..c2f1b4d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
@@ -34,13 +34,12 @@
src_node = of_parse_phandle(of_node, "actuator-src", 0);
if (!src_node) {
- CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "src_node NULL");
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s:%d actuator cell index %d, rc %d\n", __func__,
- __LINE__, val, rc);
+ CAM_DBG(CAM_SENSOR, "actuator cell index %d, rc %d", val, rc);
if (rc < 0) {
- pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
of_node_put(src_node);
return rc;
}
@@ -50,13 +49,12 @@
src_node = of_parse_phandle(of_node, "ois-src", 0);
if (!src_node) {
- CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "src_node NULL");
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s:%d ois cell index %d, rc %d\n", __func__, __LINE__,
- val, rc);
+ CAM_DBG(CAM_SENSOR, " ois cell index %d, rc %d", val, rc);
if (rc < 0) {
- pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
of_node_put(src_node);
return rc;
}
@@ -66,13 +64,12 @@
src_node = of_parse_phandle(of_node, "eeprom-src", 0);
if (!src_node) {
- CDBG("%s:%d eeprom src_node NULL\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "eeprom src_node NULL");
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s:%d eeprom cell index %d, rc %d\n", __func__, __LINE__,
- val, rc);
+ CAM_DBG(CAM_SENSOR, "eeprom cell index %d, rc %d", val, rc);
if (rc < 0) {
- pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
of_node_put(src_node);
return rc;
}
@@ -82,13 +79,12 @@
src_node = of_parse_phandle(of_node, "led-flash-src", 0);
if (!src_node) {
- CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, " src_node NULL");
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s:%d led flash cell index %d, rc %d\n", __func__,
- __LINE__, val, rc);
+ CAM_DBG(CAM_SENSOR, "led flash cell index %d, rc %d", val, rc);
if (rc < 0) {
- pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
of_node_put(src_node);
return rc;
}
@@ -98,8 +94,7 @@
rc = of_property_read_u32(of_node, "csiphy-sd-index", &val);
if (rc < 0)
- pr_err("%s:%d :Error: paring the dt node for csiphy rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "paring the dt node for csiphy rc %d", rc);
else
sensor_info->subdev_id[SUB_MODULE_CSIPHY] = val;
@@ -120,15 +115,14 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("%s:%d Failed to read DT properties rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Failed to read DT properties rc %d", rc);
goto FREE_SENSOR_DATA;
}
rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
&sensordata->power_info.gpio_num_info);
if (rc < 0) {
- pr_err("%s:%d Failed to read gpios %d", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Failed to read gpios %d", rc);
goto FREE_SENSOR_DATA;
}
@@ -136,8 +130,7 @@
/* Validate cell_id */
if (s_ctrl->id >= MAX_CAMERAS) {
- pr_err("%s:%d Failed invalid cell_id %d", __func__, __LINE__,
- s_ctrl->id);
+ CAM_ERR(CAM_SENSOR, "Failed invalid cell_id %d", s_ctrl->id);
rc = -EINVAL;
goto FREE_SENSOR_DATA;
}
@@ -145,16 +138,15 @@
/* Read subdev info */
rc = cam_sensor_get_sub_module_index(of_node, sensordata);
if (rc < 0) {
- pr_err("%s:%d failed to get sub module index, rc=%d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "failed to get sub module index, rc=%d",
+ rc);
goto FREE_SENSOR_DATA;
}
/* Get CCI master */
rc = of_property_read_u32(of_node, "cci-master",
&s_ctrl->cci_i2c_master);
- CDBG("%s:%d cci-master %d, rc %d", __func__, __LINE__,
- s_ctrl->cci_i2c_master, rc);
+ CAM_DBG(CAM_SENSOR, "cci-master %d, rc %d", s_ctrl->cci_i2c_master, rc);
if (rc < 0) {
/* Set default master 0 */
s_ctrl->cci_i2c_master = MASTER_0;
@@ -163,17 +155,17 @@
if (of_property_read_u32(of_node, "sensor-position-pitch",
&sensordata->pos_pitch) < 0) {
- CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Invalid sensor position");
sensordata->pos_pitch = 360;
}
if (of_property_read_u32(of_node, "sensor-position-roll",
&sensordata->pos_roll) < 0) {
- CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Invalid sensor position");
sensordata->pos_roll = 360;
}
if (of_property_read_u32(of_node, "sensor-position-yaw",
&sensordata->pos_yaw) < 0) {
- CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Invalid sensor position");
sensordata->pos_yaw = 360;
}
@@ -188,13 +180,13 @@
{
/* Validate input parameters */
if (!s_ctrl) {
- pr_err("%s:%d failed: invalid params s_ctrl %pK\n", __func__,
- __LINE__, s_ctrl);
+ CAM_ERR(CAM_SENSOR, "failed: invalid params s_ctrl %pK",
+ s_ctrl);
return -EINVAL;
}
- CDBG("%s: %d master_type: %d\n", __func__, __LINE__,
- s_ctrl->io_master_info.master_type);
+ CAM_DBG(CAM_SENSOR,
+ "master_type: %d", s_ctrl->io_master_info.master_type);
/* Initialize cci_client */
if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
s_ctrl->io_master_info.cci_client = kzalloc(sizeof(
@@ -203,8 +195,8 @@
return -ENOMEM;
} else {
- pr_err("%s:%d Invalid master / Master type Not supported\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Invalid master / Master type Not supported");
return -EINVAL;
}
@@ -219,30 +211,29 @@
/* Parse dt information and store in sensor control structure */
rc = cam_sensor_driver_get_dt_data(s_ctrl);
if (rc < 0) {
- pr_err("%s:%d Failed to get dt data rc %d", __func__, __LINE__,
- rc);
+ CAM_ERR(CAM_SENSOR, "Failed to get dt data rc %d", rc);
return rc;
}
/* Initialize mutex */
mutex_init(&(s_ctrl->cam_sensor_mutex));
- CDBG("%s: %d\n", __func__, __LINE__);
+	CAM_DBG(CAM_SENSOR, "%s: %d", __func__, __LINE__);
/* Initialize default parameters */
for (i = 0; i < soc_info->num_clk; i++) {
soc_info->clk[i] = devm_clk_get(&soc_info->pdev->dev,
soc_info->clk_name[i]);
if (!soc_info->clk[i]) {
- pr_err("%s:%d get failed for %s\n",
- __func__, __LINE__, soc_info->clk_name[i]);
+ CAM_ERR(CAM_SENSOR, "get failed for %s",
+ soc_info->clk_name[i]);
rc = -ENOENT;
return rc;
}
}
rc = msm_sensor_init_default_params(s_ctrl);
if (rc < 0) {
- pr_err("%s;%d failed: msm_sensor_init_default_params rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "failed: msm_sensor_init_default_params rc %d", rc);
goto FREE_DT_DATA;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
index 6292a9f..6a0a0e1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
@@ -4,5 +4,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o cam_sensor_spi.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
index 40a69ef..915e2f7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -14,9 +14,6 @@
#include "cam_sensor_i2c.h"
#include "cam_cci_dev.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *cci_client,
uint32_t addr, uint32_t *data,
enum camera_sensor_i2c_type addr_type,
@@ -41,9 +38,10 @@
rc = v4l2_subdev_call(cci_client->cci_subdev,
core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
if (rc < 0) {
- pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "rc = %d", rc);
return rc;
}
+
rc = cci_ctrl.status;
if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
*data = buf[0];
@@ -58,6 +56,46 @@
return rc;
}
+int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *cci_client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ unsigned char *buf = NULL;
+ int i = 0;
+ struct cam_cci_ctrl cci_ctrl;
+
+ if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+ || (num_byte > I2C_REG_DATA_MAX)) {
+ CAM_ERR(CAM_SENSOR, "addr_type %d num_byte %d", addr_type,
+ num_byte);
+ return rc;
+ }
+
+ buf = kzalloc(num_byte, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ cci_ctrl.cmd = MSM_CCI_I2C_READ;
+ cci_ctrl.cci_info = cci_client;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
+ cci_ctrl.status = -EFAULT;
+ rc = v4l2_subdev_call(cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ rc = cci_ctrl.status;
+ CAM_DBG(CAM_SENSOR, "addr = 0x%x, rc = %d", addr, rc);
+ for (i = 0; i < num_byte; i++) {
+ data[i] = buf[i];
+ CAM_DBG(CAM_SENSOR, "Byte %d: Data: 0x%x\n", i, data[i]);
+ }
+ kfree(buf);
+ return rc;
+}
+
static int32_t cam_cci_i2c_write_table_cmd(
struct camera_io_master *client,
struct cam_sensor_i2c_reg_setting *write_setting,
@@ -85,7 +123,7 @@
rc = v4l2_subdev_call(client->cci_client->cci_subdev,
core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
if (rc < 0) {
- pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc);
return rc;
}
rc = cci_ctrl.status;
@@ -135,12 +173,12 @@
int32_t rc = -EINVAL;
int32_t i = 0;
- CDBG("%s: addr: 0x%x data: 0x%x dt: %d\n",
- __func__, addr, data, data_type);
+ CAM_DBG(CAM_SENSOR, "addr: 0x%x data: 0x%x dt: %d",
+ addr, data, data_type);
if (delay_ms > MAX_POLL_DELAY_MS) {
- pr_err("%s:%d invalid delay = %d max_delay = %d\n",
- __func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+ CAM_ERR(CAM_SENSOR, "invalid delay = %d max_delay = %d",
+ delay_ms, MAX_POLL_DELAY_MS);
return -EINVAL;
}
for (i = 0; i < delay_ms; i++) {
@@ -154,11 +192,10 @@
/* If rc is 1 then read is successful but poll is failure */
if (rc == 1)
- pr_err("%s:%d poll failed rc=%d(non-fatal)\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "poll failed rc=%d(non-fatal)", rc);
if (rc < 0)
- pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "poll failed rc=%d", rc);
return rc;
}
@@ -169,13 +206,13 @@
int32_t rc = 0;
struct cam_cci_ctrl cci_ctrl;
- CDBG("%s line %d\n", __func__, __LINE__);
+	CAM_DBG(CAM_SENSOR, "%s line %d", __func__, __LINE__);
cci_ctrl.cmd = cci_cmd;
cci_ctrl.cci_info = cci_client;
rc = v4l2_subdev_call(cci_client->cci_subdev,
core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
if (rc < 0) {
- pr_err("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc);
return rc;
}
return cci_ctrl.status;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
index 06e8104..6207a8a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -26,6 +26,8 @@
#define I2C_COMPARE_MATCH 0
#define I2C_COMPARE_MISMATCH 1
+#define I2C_REG_DATA_MAX (8*1024)
+
/**
* @client: CCI client structure
* @data: I2C data
@@ -41,6 +43,20 @@
/**
* @client: CCI client structure
+ * @addr: I2c address
+ * @data: I2C data
+ * @addr_type: I2c address type
+ * @num_byte: number of bytes
+ *
+ * This API handles CCI sequential read
+ */
+int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ uint32_t num_byte);
+
+/**
+ * @client: CCI client structure
* @cci_cmd: CCI command type
*
* This API handles CCI random write
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index 3e1b331..9e38e1a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -10,9 +10,14 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) "CAM-SENSOR_IO %s:%d " fmt, __func__, __LINE__
+
#include "cam_sensor_io.h"
#include "cam_sensor_i2c.h"
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
uint32_t addr, uint16_t data, uint32_t data_mask,
enum camera_sensor_i2c_type data_type,
@@ -22,7 +27,7 @@
int16_t mask = data_mask & 0xFF;
if (!io_master_info) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Args");
return -EINVAL;
}
@@ -34,8 +39,8 @@
addr, data, data_mask, addr_type, data_type,
delay_ms);
} else {
- pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
- __LINE__, io_master_info->master_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
return -EINVAL;
}
}
@@ -46,7 +51,7 @@
enum camera_sensor_i2c_type data_type)
{
if (!io_master_info) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Args");
return -EINVAL;
}
@@ -56,18 +61,44 @@
} else if (io_master_info->master_type == I2C_MASTER) {
return cam_qup_i2c_read(io_master_info->client,
addr, data, addr_type, data_type);
+ } else if (io_master_info->master_type == SPI_MASTER) {
+ return cam_spi_read(io_master_info,
+ addr, data, addr_type);
} else {
- pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
- __LINE__, io_master_info->master_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
return -EINVAL;
}
+ return 0;
+}
+
+int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type, int32_t num_bytes)
+{
+ if (io_master_info->master_type == CCI_MASTER) {
+ return cam_camera_cci_i2c_read_seq(io_master_info->cci_client,
+ addr, data, addr_type, num_bytes);
+ } else if (io_master_info->master_type == I2C_MASTER) {
+ return cam_qup_i2c_read_seq(io_master_info->client,
+ addr, data, addr_type, num_bytes);
+ } else if (io_master_info->master_type == SPI_MASTER) {
+ return cam_spi_read(io_master_info,
+ addr, (uint32_t *)data, addr_type);
+ } else {
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
+ return -EINVAL;
+ }
+ return 0;
}
int32_t camera_io_dev_write(struct camera_io_master *io_master_info,
struct cam_sensor_i2c_reg_setting *write_setting)
{
if (!write_setting || !io_master_info) {
- pr_err("Input parameters not valid ws: %pK ioinfo: %pK",
+ CAM_ERR(CAM_SENSOR,
+ "Input parameters not valid ws: %pK ioinfo: %pK",
write_setting, io_master_info);
return -EINVAL;
}
@@ -79,8 +110,8 @@
return cam_qup_i2c_write_table(io_master_info,
write_setting);
} else {
- pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
- __LINE__, io_master_info->master_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
return -EINVAL;
}
}
@@ -88,7 +119,7 @@
int32_t camera_io_init(struct camera_io_master *io_master_info)
{
if (!io_master_info) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Args");
return -EINVAL;
}
@@ -98,8 +129,8 @@
return cam_sensor_cci_i2c_util(io_master_info->cci_client,
MSM_CCI_INIT);
} else {
- pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
- __LINE__, io_master_info->master_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
return -EINVAL;
}
}
@@ -107,7 +138,7 @@
int32_t camera_io_release(struct camera_io_master *io_master_info)
{
if (!io_master_info) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Args");
return -EINVAL;
}
@@ -115,8 +146,8 @@
return cam_sensor_cci_i2c_util(io_master_info->cci_client,
MSM_CCI_RELEASE);
} else {
- pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
- __LINE__, io_master_info->master_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
return -EINVAL;
}
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
index f721afd..9a60fd0e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -52,16 +52,15 @@
* @io_master_info: I2C/SPI master information
* @addr: I2C address
* @data: I2C data
- * @addr_type: I2C addr type
+ * @data_type: I2C data type
* @num_bytes: number of bytes
*
- * This API abstracts sequential read functionality based on master type
+ * This API abstracts read functionality based on master type
*/
int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
uint32_t addr, uint8_t *data,
enum camera_sensor_i2c_type addr_type,
- uint32_t num_bytes);
-
+ int32_t num_bytes);
/**
* @io_master_info: I2C/SPI master information
@@ -103,4 +102,6 @@
enum camera_sensor_i2c_type addr_type,
uint32_t delay_ms);
+#include "cam_sensor_i2c.h"
+#include "cam_sensor_spi.h"
#endif /* _CAM_SENSOR_IO_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
index b25b1855..b64e0d0 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
@@ -40,7 +40,7 @@
};
rc = i2c_transfer(dev_client->adapter, msgs, 2);
if (rc < 0)
- pr_err("%s:failed 0x%x\n", __func__, saddr);
+		CAM_ERR(CAM_SENSOR, "failed 0x%x", saddr);
return rc;
}
@@ -61,7 +61,7 @@
};
rc = i2c_transfer(dev_client->client->adapter, msg, 1);
if (rc < 0)
- pr_err("%s: failed 0x%x\n", __func__, saddr);
+ CAM_ERR(CAM_SENSOR, "failed 0x%x", saddr);
return rc;
}
@@ -77,8 +77,7 @@
|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
- pr_err("ERR: %s Failed with addr/data_type verfication\n",
- __func__);
+ CAM_ERR(CAM_SENSOR, "Failed with addr/data_type verfication");
return rc;
}
@@ -105,7 +104,7 @@
rc = cam_qup_i2c_rxdata(client, buf, addr_type, data_type);
if (rc < 0) {
- pr_err("%s fail\n", __func__);
+ CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
goto read_fail;
}
@@ -119,7 +118,7 @@
*data = buf[0] << 24 | buf[1] << 16 |
buf[2] << 8 | buf[3];
- CDBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+ CAM_DBG(CAM_SENSOR, "addr = 0x%x data: 0x%x", addr, *data);
read_fail:
kfree(buf);
buf = NULL;
@@ -137,14 +136,13 @@
if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
- pr_err("ERR: %s Failed with addr_type verification\n",
- __func__);
+ CAM_ERR(CAM_SENSOR, "Failed with addr_type verification");
return rc;
}
if ((num_byte == 0) || (num_byte > I2C_REG_DATA_MAX)) {
- pr_err("%s: Error num_byte:0x%x max supported:0x%x\n",
- __func__, num_byte, I2C_REG_DATA_MAX);
+ CAM_ERR(CAM_SENSOR, "num_byte:0x%x max supported:0x%x",
+ num_byte, I2C_REG_DATA_MAX);
return rc;
}
@@ -170,7 +168,7 @@
rc = cam_qup_i2c_rxdata(client, buf, addr_type, num_byte);
if (rc < 0) {
- pr_err("%s fail\n", __func__);
+ CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
goto read_seq_fail;
}
@@ -213,8 +211,8 @@
int i = 0;
if ((delay_ms > MAX_POLL_DELAY_MS) || (delay_ms == 0)) {
- pr_err("%s:%d invalid delay = %d max_delay = %d\n",
- __func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+ CAM_ERR(CAM_SENSOR, "invalid delay = %d max_delay = %d",
+ delay_ms, MAX_POLL_DELAY_MS);
return -EINVAL;
}
@@ -234,10 +232,9 @@
}
/* If rc is MISMATCH then read is successful but poll is failure */
if (rc == I2C_COMPARE_MISMATCH)
- pr_err("%s:%d poll failed rc=%d(non-fatal)\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "poll failed rc=%d(non-fatal)", rc);
if (rc < 0)
- pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "poll failed rc=%d", rc);
return rc;
}
@@ -251,20 +248,17 @@
unsigned char buf[I2C_REG_MAX_BUF_SIZE];
uint8_t len = 0;
- CDBG("%s reg addr = 0x%x data type: %d\n",
- __func__, reg_setting->reg_addr, data_type);
+ CAM_DBG(CAM_SENSOR, "reg addr = 0x%x data type: %d",
+ reg_setting->reg_addr, data_type);
if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
buf[0] = reg_setting->reg_addr;
- CDBG("%s byte %d: 0x%x\n", __func__,
- len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
len = 1;
} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
buf[0] = reg_setting->reg_addr >> 8;
buf[1] = reg_setting->reg_addr;
- CDBG("%s byte %d: 0x%x\n", __func__,
- len, buf[len]);
- CDBG("%s byte %d: 0x%x\n", __func__,
- len+1, buf[len+1]);
+ CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len+1, buf[len+1]);
len = 2;
} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
buf[0] = reg_setting->reg_addr >> 16;
@@ -278,47 +272,47 @@
buf[3] = reg_setting->reg_addr;
len = 4;
} else {
- pr_err("%s: Invalid I2C addr type\n", __func__);
+ CAM_ERR(CAM_SENSOR, "Invalid I2C addr type");
return -EINVAL;
}
- CDBG("Data: 0x%x\n", reg_setting->reg_data);
+ CAM_DBG(CAM_SENSOR, "Data: 0x%x", reg_setting->reg_data);
if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
buf[len] = reg_setting->reg_data;
- CDBG("Byte %d: 0x%x\n", len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
len += 1;
} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
buf[len] = reg_setting->reg_data >> 8;
buf[len+1] = reg_setting->reg_data;
- CDBG("Byte %d: 0x%x\n", len, buf[len]);
- CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
len += 2;
} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
buf[len] = reg_setting->reg_data >> 16;
buf[len + 1] = reg_setting->reg_data >> 8;
buf[len + 2] = reg_setting->reg_data;
- CDBG("Byte %d: 0x%x\n", len, buf[len]);
- CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
- CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+2, buf[len+2]);
len += 3;
} else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
buf[len] = reg_setting->reg_data >> 24;
buf[len + 1] = reg_setting->reg_data >> 16;
buf[len + 2] = reg_setting->reg_data >> 8;
buf[len + 3] = reg_setting->reg_data;
- CDBG("Byte %d: 0x%x\n", len, buf[len]);
- CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
- CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
- CDBG("Byte %d: 0x%x\n", len+3, buf[len+3]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+2, buf[len+2]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+3, buf[len+3]);
len += 4;
} else {
- pr_err("%s: Invalid Data Type\n", __func__);
+ CAM_ERR(CAM_SENSOR, "Invalid Data Type");
return -EINVAL;
}
rc = cam_qup_i2c_txdata(client, buf, len);
if (rc < 0)
- pr_err("%s fail\n", __func__);
+ CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
return rc;
}
@@ -341,7 +335,7 @@
reg_setting = write_setting->reg_setting;
for (i = 0; i < write_setting->size; i++) {
- CDBG("%s addr 0x%x data 0x%x\n", __func__,
+ CAM_DBG(CAM_SENSOR, "addr 0x%x data 0x%x",
reg_setting->reg_addr, reg_setting->reg_data);
rc = cam_qup_i2c_write(client, reg_setting,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
new file mode 100644
index 0000000..e0b737e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
@@ -0,0 +1,469 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_spi.h"
+#include "cam_debug_util.h"
+
+static int cam_spi_txfr(struct spi_device *spi, char *txbuf,
+ char *rxbuf, int num_byte)
+{
+ struct spi_transfer txfr;
+ struct spi_message msg;
+
+ memset(&txfr, 0, sizeof(txfr));
+ txfr.tx_buf = txbuf;
+ txfr.rx_buf = rxbuf;
+ txfr.len = num_byte;
+ spi_message_init(&msg);
+ spi_message_add_tail(&txfr, &msg);
+
+ return spi_sync(spi, &msg);
+}
+
+static int cam_spi_txfr_read(struct spi_device *spi, char *txbuf,
+ char *rxbuf, int txlen, int rxlen)
+{
+ struct spi_transfer tx;
+ struct spi_transfer rx;
+ struct spi_message m;
+
+ memset(&tx, 0, sizeof(tx));
+ memset(&rx, 0, sizeof(rx));
+ tx.tx_buf = txbuf;
+ rx.rx_buf = rxbuf;
+ tx.len = txlen;
+ rx.len = rxlen;
+ spi_message_init(&m);
+ spi_message_add_tail(&tx, &m);
+ spi_message_add_tail(&rx, &m);
+ return spi_sync(spi, &m);
+}
+
+/**
+ * cam_set_addr() - helper function to set transfer address
+ * @addr: device address
+ * @addr_len: the addr field length of an instruction
+ * @type: type (i.e. byte-length) of @addr
+ * @str: shifted address output, must be zeroed when passed in
+ *
+ * This helper function sets @str based on the addr field length of an
+ * instruction and the data length.
+ */
+static void cam_set_addr(uint32_t addr, uint8_t addr_len,
+ enum camera_sensor_i2c_type type,
+ char *str)
+{
+ int i, len;
+
+ if (!addr_len)
+ return;
+
+ if (addr_len < type)
+ CAM_DBG(CAM_EEPROM, "omitting higher bits in address");
+
+ /* only support transfer MSB first for now */
+ len = addr_len - type;
+ for (i = len; i < addr_len; i++) {
+ if (i >= 0)
+ str[i] = (addr >> (BITS_PER_BYTE * (addr_len - i - 1)))
+ & 0xFF;
+ }
+
+}
+
+/**
+ * cam_spi_tx_helper() - wrapper for SPI transaction
+ * @client: io client
+ * @inst: inst of this transaction
+ * @addr: device addr following the inst
+ * @data: output byte array (could be NULL)
+ * @num_byte: size of @data
+ * @tx, rx: optional transfer buffer. It must be at least header
+ * + @num_byte long.
+ *
+ * This is the core function for SPI transaction, except for writes. It first
+ * checks address type, then allocates required memory for tx/rx buffers.
+ * It sends out <opcode><addr>, and optionally receives @num_byte of response,
+ * if @data is not NULL. This function does not check for wait conditions,
+ * and will return immediately once bus transaction finishes.
+ *
+ * This function will allocate buffers of header + @num_byte long. For
+ * large transfers, the allocation could fail. External buffer @tx, @rx
+ * should be passed in to bypass allocation. The size of buffer should be
+ * at least header + num_byte long. Since buffer is managed externally,
+ * @data will be ignored, and read results will be in @rx.
+ * @tx, @rx also can be used for repeated transfers to improve performance.
+ */
+static int32_t cam_spi_tx_helper(struct camera_io_master *client,
+ struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+ uint32_t num_byte, char *tx, char *rx)
+{
+ int32_t rc = -EINVAL;
+ struct spi_device *spi = client->spi_client->spi_master;
+ char *ctx = NULL, *crx = NULL;
+ uint32_t len, hlen;
+ uint8_t retries = client->spi_client->retries;
+ enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+ if (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+ return rc;
+
+ hlen = cam_camera_spi_get_hlen(inst);
+ len = hlen + num_byte;
+
+ if (tx) {
+ ctx = tx;
+ } else {
+ ctx = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!ctx)
+ return -ENOMEM;
+ }
+
+ if (num_byte) {
+ if (rx) {
+ crx = rx;
+ } else {
+ crx = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!crx) {
+ if (!tx)
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ }
+ } else {
+ crx = NULL;
+ }
+
+ ctx[0] = inst->opcode;
+ cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1);
+ while ((rc = cam_spi_txfr(spi, ctx, crx, len)) && retries) {
+ retries--;
+ msleep(client->spi_client->retry_delay);
+ }
+ if (rc < 0) {
+ CAM_ERR(CAM_EEPROM, "failed: spi txfr rc %d", rc);
+ goto out;
+ }
+ if (data && num_byte && !rx)
+ memcpy(data, crx + hlen, num_byte);
+
+out:
+ if (!tx)
+ kfree(ctx);
+ if (!rx)
+ kfree(crx);
+ return rc;
+}
+
+static int32_t cam_spi_tx_read(struct camera_io_master *client,
+ struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+ uint32_t num_byte, char *tx, char *rx)
+{
+ int32_t rc = -EINVAL;
+ struct spi_device *spi = client->spi_client->spi_master;
+ char *ctx = NULL, *crx = NULL;
+ uint32_t hlen;
+ uint8_t retries = client->spi_client->retries;
+ enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+ if ((addr_type != CAMERA_SENSOR_I2C_TYPE_WORD)
+ && (addr_type != CAMERA_SENSOR_I2C_TYPE_BYTE)
+ && (addr_type != CAMERA_SENSOR_I2C_TYPE_3B))
+ return rc;
+
+ hlen = cam_camera_spi_get_hlen(inst);
+ if (tx) {
+ ctx = tx;
+ } else {
+ ctx = kzalloc(hlen, GFP_KERNEL | GFP_DMA);
+ if (!ctx)
+ return -ENOMEM;
+ }
+ if (num_byte) {
+ if (rx) {
+ crx = rx;
+ } else {
+ crx = kzalloc(num_byte, GFP_KERNEL | GFP_DMA);
+ if (!crx) {
+ if (!tx)
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ }
+ } else {
+ crx = NULL;
+ }
+
+ ctx[0] = inst->opcode;
+ if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+ cam_set_addr(addr, inst->addr_len, addr_type,
+ ctx + 1);
+ } else {
+ ctx[1] = (addr >> BITS_PER_BYTE) & 0xFF;
+ ctx[2] = (addr & 0xFF);
+ ctx[3] = 0;
+ }
+ CAM_DBG(CAM_EEPROM, "tx(%u): %02x %02x %02x %02x", hlen, ctx[0],
+ ctx[1], ctx[2], ctx[3]);
+ while ((rc = cam_spi_txfr_read(spi, ctx, crx, hlen, num_byte))
+ && retries) {
+ retries--;
+ msleep(client->spi_client->retry_delay);
+ }
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, rc);
+ goto out;
+ }
+ if (data && num_byte && !rx)
+ memcpy(data, crx, num_byte);
+out:
+ if (!tx)
+ kfree(ctx);
+ if (!rx)
+ kfree(crx);
+ return rc;
+}
+
+int cam_spi_read(struct camera_io_master *client,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type data_type)
+{
+ int rc = -EINVAL;
+ uint8_t temp[CAMERA_SENSOR_I2C_TYPE_MAX];
+
+ if ((data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+ || (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+ return rc;
+
+ rc = cam_spi_tx_read(client,
+ &client->spi_client->cmd_tbl.read, addr, &temp[0],
+ data_type, NULL, NULL);
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, rc);
+ return rc;
+ }
+
+ if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+ *data = temp[0];
+ else
+ *data = (temp[0] << BITS_PER_BYTE) | temp[1];
+
+ CAM_DBG(CAM_SENSOR, "addr 0x%x, data %u\n", addr, *data);
+ return rc;
+}
+
+int cam_spi_query_id(struct camera_io_master *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ return cam_spi_tx_helper(client,
+ &client->spi_client->cmd_tbl.query_id, addr, data, num_byte,
+ NULL, NULL);
+}
+
+static int32_t cam_spi_read_status_reg(
+ struct camera_io_master *client, uint8_t *status)
+{
+ struct cam_camera_spi_inst *rs =
+ &client->spi_client->cmd_tbl.read_status;
+
+ if (rs->addr_len != 0) {
+ pr_err("%s: not implemented yet\n", __func__);
+ return -EINVAL;
+ }
+ return cam_spi_tx_helper(client, rs, 0, status, 1, NULL, NULL);
+}
+
+static int32_t cam_spi_device_busy(struct camera_io_master *client,
+ uint8_t *busy)
+{
+ int rc;
+ uint8_t st = 0;
+
+ rc = cam_spi_read_status_reg(client, &st);
+ if (rc < 0) {
+ pr_err("%s: failed to read status reg\n", __func__);
+ return rc;
+ }
+ *busy = st & client->spi_client->busy_mask;
+ return 0;
+}
+
+static int32_t cam_spi_wait(struct camera_io_master *client,
+ struct cam_camera_spi_inst *inst)
+{
+ uint8_t busy;
+ int i, rc;
+
+ CAM_DBG(CAM_SENSOR, "op 0x%x wait start", inst->opcode);
+ for (i = 0; i < inst->delay_count; i++) {
+ rc = cam_spi_device_busy(client, &busy);
+ if (rc < 0)
+ return rc;
+ if (!busy)
+ break;
+ msleep(inst->delay_intv);
+ CAM_DBG(CAM_SENSOR, "op 0x%x wait", inst->opcode);
+ }
+	if ((inst->delay_count > 0) && (i >= inst->delay_count)) {
+ pr_err("%s: op %x timed out\n", __func__, inst->opcode);
+ return -ETIMEDOUT;
+ }
+ CAM_DBG(CAM_SENSOR, "op %x finished", inst->opcode);
+ return 0;
+}
+
+static int32_t cam_spi_write_enable(
+ struct camera_io_master *client)
+{
+ struct cam_camera_spi_inst *we =
+ &client->spi_client->cmd_tbl.write_enable;
+ int rc;
+
+ if (we->opcode == 0)
+ return 0;
+ if (we->addr_len != 0) {
+ pr_err("%s: not implemented yet\n", __func__);
+ return -EINVAL;
+ }
+ rc = cam_spi_tx_helper(client, we, 0, NULL, 0, NULL, NULL);
+ if (rc < 0)
+ pr_err("%s: write enable failed\n", __func__);
+ return rc;
+}
+
+/**
+ * cam_spi_page_program() - core function to perform write
+ * @client: need for obtaining SPI device
+ * @addr: address to program on device
+ * @data: data to write
+ * @len: size of data
+ * @tx: tx buffer, size >= header + len
+ *
+ * This function performs SPI write, and has no boundary check. Writing range
+ * should not cross page boundary, or data will be corrupted. Transaction is
+ * guaranteed to be finished when it returns. This function should never be
+ * used outside cam_spi_write_seq().
+ */
+static int32_t cam_spi_page_program(struct camera_io_master *client,
+ uint32_t addr, uint8_t *data, uint16_t len, uint8_t *tx)
+{
+ int rc;
+ struct cam_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ struct spi_device *spi = client->spi_client->spi_master;
+ uint8_t retries = client->spi_client->retries;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+ CAM_DBG(CAM_SENSOR, "addr 0x%x, size 0x%x", addr, len);
+ rc = cam_spi_write_enable(client);
+ if (rc < 0)
+ return rc;
+ memset(tx, 0, header_len);
+ tx[0] = pg->opcode;
+ cam_set_addr(addr, pg->addr_len, addr_type, tx + 1);
+ memcpy(tx + header_len, data, len);
+ CAM_DBG(CAM_SENSOR, "tx(%u): %02x %02x %02x %02x\n",
+ len, tx[0], tx[1], tx[2], tx[3]);
+ while ((rc = spi_write(spi, tx, len + header_len)) && retries) {
+ rc = cam_spi_wait(client, pg);
+ msleep(client->spi_client->retry_delay);
+ retries--;
+ }
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, rc);
+ return rc;
+ }
+ rc = cam_spi_wait(client, pg);
+ return rc;
+}
+
+int cam_spi_write(struct camera_io_master *client,
+ uint32_t addr, uint16_t data,
+ enum camera_sensor_i2c_type data_type)
+{
+ struct cam_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ uint16_t len = 0;
+ char buf[2];
+ char *tx;
+ int rc = -EINVAL;
+ enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+ if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+ || (data_type != CAMERA_SENSOR_I2C_TYPE_BYTE
+ && data_type != CAMERA_SENSOR_I2C_TYPE_WORD))
+ return rc;
+ CAM_DBG(CAM_EEPROM, "Data: 0x%x", data);
+ len = header_len + (uint8_t)data_type;
+ tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!tx)
+ goto NOMEM;
+ if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+ buf[0] = data;
+ CAM_DBG(CAM_EEPROM, "Byte %d: 0x%x", len, buf[0]);
+ } else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+ buf[0] = (data >> BITS_PER_BYTE) & 0x00FF;
+ buf[1] = (data & 0x00FF);
+ }
+ rc = cam_spi_page_program(client, addr, buf,
+ (uint16_t)data_type, tx);
+ if (rc < 0)
+ goto ERROR;
+ goto OUT;
+NOMEM:
+ pr_err("%s: memory allocation failed\n", __func__);
+ return -ENOMEM;
+ERROR:
+ pr_err("%s: error write\n", __func__);
+OUT:
+ kfree(tx);
+ return rc;
+}
+
+int cam_spi_write_table(struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting)
+{
+ int i;
+ int rc = -EFAULT;
+ struct cam_sensor_i2c_reg_array *reg_setting;
+ uint16_t client_addr_type;
+ enum camera_sensor_i2c_type addr_type;
+
+ if (!client || !write_setting)
+ return rc;
+ if (write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+ || (write_setting->data_type != CAMERA_SENSOR_I2C_TYPE_BYTE
+ && write_setting->data_type != CAMERA_SENSOR_I2C_TYPE_WORD))
+ return rc;
+ reg_setting = write_setting->reg_setting;
+	addr_type = write_setting->addr_type;
+	client_addr_type = addr_type;
+ for (i = 0; i < write_setting->size; i++) {
+ CAM_DBG(CAM_SENSOR, "addr %x data %x",
+ reg_setting->reg_addr, reg_setting->reg_data);
+ rc = cam_spi_write(client, reg_setting->reg_addr,
+ reg_setting->reg_data, write_setting->data_type);
+ if (rc < 0)
+ break;
+ reg_setting++;
+ }
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000,
+ (write_setting->delay
+ * 1000) + 1000);
+ addr_type = client_addr_type;
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
new file mode 100644
index 0000000..a497491
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_SPI_H_
+#define _CAM_SENSOR_SPI_H_
+
+#include <linux/spi/spi.h>
+#include <media/cam_sensor.h>
+#include "cam_sensor_i2c.h"
+
+#define MAX_SPI_SIZE 110
+#define SPI_DYNAMIC_ALLOC
+
+struct cam_camera_spi_inst {
+	uint8_t opcode;
+	uint8_t addr_len;
+	uint8_t dummy_len;
+	uint8_t delay_intv;
+	uint8_t delay_count;
+};
+
+struct cam_spi_write_burst_data {
+	u8 data_msb;
+	u8 data_lsb;
+};
+
+struct cam_spi_write_burst_packet {
+	u8 cmd;
+	u8 addr_msb;
+	u8 addr_lsb;
+	struct cam_spi_write_burst_data data_arr[MAX_SPI_SIZE];
+};
+
+struct cam_camera_burst_info {
+	uint32_t burst_addr;
+	uint32_t burst_start;
+	uint32_t burst_len;
+	uint32_t chunk_size;
+};
+
+struct cam_camera_spi_inst_tbl {
+	struct cam_camera_spi_inst read;
+	struct cam_camera_spi_inst read_seq;
+	struct cam_camera_spi_inst query_id;
+	struct cam_camera_spi_inst page_program;
+	struct cam_camera_spi_inst write_enable;
+	struct cam_camera_spi_inst read_status;
+	struct cam_camera_spi_inst erase;
+};
+
+struct cam_sensor_spi_client {
+	struct spi_device *spi_master;
+	struct cam_camera_spi_inst_tbl cmd_tbl;
+	uint8_t device_id0;
+	uint8_t device_id1;
+	uint8_t mfr_id0;
+	uint8_t mfr_id1;
+	uint8_t retry_delay;
+	uint8_t retries;
+	uint8_t busy_mask;
+	uint16_t page_size;
+	uint32_t erase_size;
+};
+
+/* Header length on the wire: opcode byte + address bytes + dummy bytes */
+static __always_inline
+uint16_t cam_camera_spi_get_hlen(struct cam_camera_spi_inst *inst)
+{
+	return sizeof(inst->opcode) + inst->addr_len + inst->dummy_len;
+}
+
+int cam_spi_read(struct camera_io_master *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type data_type);
+
+int cam_spi_query_id(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int cam_spi_write(struct camera_io_master *client,
+	uint32_t addr, uint16_t data,
+	enum camera_sensor_i2c_type data_type);
+
+int cam_spi_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
index 770391c..bf61fb3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
@@ -2,5 +2,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
index d12ff2b..6520042b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -31,6 +31,8 @@
#define CAM_SENSOR_NAME "cam-sensor"
#define CAM_ACTUATOR_NAME "cam-actuator"
#define CAM_CSIPHY_NAME "cam-csiphy"
+#define CAM_FLASH_NAME "cam-flash"
+#define CAM_EEPROM_NAME "cam-eeprom"
#define MAX_SYSTEM_PIPELINE_DELAY 2
@@ -47,6 +49,11 @@
CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR,
CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD,
CAMERA_SENSOR_CMD_TYPE_WAIT,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_RER,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET,
CAMERA_SENSOR_CMD_TYPE_MAX,
};
@@ -69,6 +76,14 @@
CAMERA_SENSOR_WAIT_OP_MAX,
};
+enum camera_flash_opcode {
+ CAMERA_SENSOR_FLASH_OP_INVALID,
+ CAMERA_SENSOR_FLASH_OP_OFF,
+ CAMERA_SENSOR_FLASH_OP_FIRELOW,
+ CAMERA_SENSOR_FLASH_OP_FIREHIGH,
+ CAMERA_SENSOR_FLASH_OP_MAX,
+};
+
enum camera_sensor_i2c_type {
CAMERA_SENSOR_I2C_TYPE_INVALID,
CAMERA_SENSOR_I2C_TYPE_BYTE,
@@ -146,6 +161,10 @@
CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS
};
+enum cam_eeprom_packet_opcodes {
+ CAM_EEPROM_PACKET_OPCODE_INIT
+};
+
enum msm_bus_perf_setting {
S_INIT,
S_PREVIEW,
@@ -166,6 +185,12 @@
MSM_CAMERA_SPI_DEVICE,
};
+enum cam_flash_device_type {
+ CAMERA_FLASH_DEVICE_TYPE_PMIC = 0,
+ CAMERA_FLASH_DEVICE_TYPE_I2C,
+ CAMERA_FLASH_DEVICE_TYPE_GPIO,
+};
+
enum cci_i2c_master_t {
MASTER_0,
MASTER_1,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 9f16e93..40a8c179 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include "cam_sensor_util.h"
+#include <cam_mem_mgr.h>
#define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
#define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
@@ -19,9 +20,6 @@
#define VALIDATE_VOLTAGE(min, max, config_val) ((config_val) && \
(config_val >= min) && (config_val <= max))
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
static struct i2c_settings_list*
cam_sensor_get_i2c_ptr(struct i2c_settings_array *i2c_reg_settings,
uint32_t size)
@@ -56,8 +54,7 @@
int32_t rc = 0;
if (i2c_array == NULL) {
- pr_err("%s:%d ::FATAL:: Invalid argument\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "FATAL:: Invalid argument");
return -EINVAL;
}
@@ -86,8 +83,7 @@
struct i2c_settings_list *i2c_list = NULL;
if (i2c_list == NULL) {
- pr_err("%s:%d Invalid list ptr\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid list ptr");
return -EINVAL;
}
@@ -109,8 +105,7 @@
sizeof(
struct cam_cmd_unconditional_wait);
} else {
- pr_err("%s: %d Error: Delay Rxed Before any buffer: %d\n",
- __func__, __LINE__, offset);
+ CAM_ERR(CAM_SENSOR, "Delay Rxed Before any buffer: %d", offset);
return -EINVAL;
}
@@ -131,8 +126,7 @@
i2c_list =
cam_sensor_get_i2c_ptr(i2c_reg_settings, 1);
if (!i2c_list || !i2c_list->i2c_settings.reg_setting) {
- pr_err("%s: %d Failed in allocating mem for list\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Failed in allocating mem for list");
return -ENOMEM;
}
@@ -171,8 +165,7 @@
cam_cmd_i2c_random_wr->header.count);
if (i2c_list == NULL ||
i2c_list->i2c_settings.reg_setting == NULL) {
- pr_err("%s: %d Failed in allocating i2c_list\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
return -ENOMEM;
}
@@ -234,8 +227,8 @@
* be spread across multiple cmd buffers
*/
- CDBG("%s:%d Total cmd Buf in Bytes: %d\n", __func__,
- __LINE__, cmd_desc[i].length);
+ CAM_DBG(CAM_SENSOR, "Total cmd Buf in Bytes: %d",
+ cmd_desc[i].length);
if (!cmd_desc[i].length)
continue;
@@ -244,10 +237,9 @@
(uint64_t *)&generic_ptr, &len_of_buff);
cmd_buf = (uint32_t *)generic_ptr;
if (rc < 0) {
- pr_err("%s:%d Failed in getting cmd hdl: %d Err: %d Buffer Len: %ld\n",
- __func__, __LINE__,
- cmd_desc[i].mem_handle, rc,
- len_of_buff);
+ CAM_ERR(CAM_SENSOR,
+ "cmd hdl failed:%d, Err: %d, Buffer_len: %ld",
+ cmd_desc[i].mem_handle, rc, len_of_buff);
return rc;
}
cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
@@ -267,8 +259,8 @@
i2c_reg_settings,
&cmd_length_in_bytes, &j, &list);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in random read %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Failed in random read %d", rc);
return rc;
}
@@ -288,8 +280,9 @@
i2c_reg_settings, j, &byte_cnt,
list);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in handling delay %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "delay hdl failed: %d",
+ rc);
return rc;
}
@@ -299,21 +292,22 @@
&cmd_buf, i2c_reg_settings,
&byte_cnt, &j, &list);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in random read %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Random read fail: %d",
+ rc);
return rc;
}
} else {
- pr_err("%s: %d Wrong Wait Command: %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR,
+ "Wrong Wait Command: %d",
generic_op_code);
return -EINVAL;
}
break;
}
default:
- pr_err("%s:%d Invalid Command Type:%d\n",
- __func__, __LINE__, cmm_hdr->cmd_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Command Type:%d",
+ cmm_hdr->cmd_type);
return -EINVAL;
}
}
@@ -333,20 +327,18 @@
/* Validate input parameters */
if (!soc_info || !power_setting) {
- pr_err("%s:%d failed: soc_info %pK power_setting %pK", __func__,
- __LINE__, soc_info, power_setting);
+ CAM_ERR(CAM_SENSOR, "failed: soc_info %pK power_setting %pK",
+ soc_info, power_setting);
return -EINVAL;
}
num_vreg = soc_info->num_rgltr;
if (num_vreg <= 0) {
- pr_err("%s:%d failed: num_vreg %d", __func__, __LINE__,
- num_vreg);
+ CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
return -EINVAL;
}
-
for (i = 0; i < power_setting_size; i++) {
switch (power_setting[i].seq_type) {
case SENSOR_VDIG:
@@ -354,8 +346,8 @@
if (!strcmp(soc_info->rgltr_name[j],
"cam_vdig")) {
- CDBG("%s:%d i %d j %d cam_vdig\n",
- __func__, __LINE__, i, j);
+ CAM_DBG(CAM_SENSOR,
+ "i: %d j: %d cam_vdig", i, j);
power_setting[i].seq_val = j;
if (VALIDATE_VOLTAGE(
@@ -378,8 +370,8 @@
if (!strcmp(soc_info->rgltr_name[j],
"cam_vio")) {
- CDBG("%s:%d i %d j %d cam_vio\n",
- __func__, __LINE__, i, j);
+ CAM_DBG(CAM_SENSOR,
+ "i: %d j: %d cam_vio", i, j);
power_setting[i].seq_val = j;
if (VALIDATE_VOLTAGE(
@@ -403,8 +395,8 @@
if (!strcmp(soc_info->rgltr_name[j],
"cam_vana")) {
- CDBG("%s:%d i %d j %d cam_vana\n",
- __func__, __LINE__, i, j);
+ CAM_DBG(CAM_SENSOR,
+ "i: %d j: %d cam_vana", i, j);
power_setting[i].seq_val = j;
if (VALIDATE_VOLTAGE(
@@ -428,8 +420,8 @@
if (!strcmp(soc_info->rgltr_name[j],
"cam_vaf")) {
- CDBG("%s:%d i %d j %d cam_vaf\n",
- __func__, __LINE__, i, j);
+ CAM_DBG(CAM_SENSOR,
+ "i: %d j: %d cam_vaf", i, j);
power_setting[i].seq_val = j;
if (VALIDATE_VOLTAGE(
@@ -454,8 +446,8 @@
if (!strcmp(soc_info->rgltr_name[j],
"cam_v_custom1")) {
- CDBG("%s:%d i %d j %d cam_vcustom1\n",
- __func__, __LINE__, i, j);
+ CAM_DBG(CAM_SENSOR,
+ "i:%d j:%d cam_vcustom1", i, j);
power_setting[i].seq_val = j;
if (VALIDATE_VOLTAGE(
@@ -478,8 +470,8 @@
if (!strcmp(soc_info->rgltr_name[j],
"cam_v_custom2")) {
- CDBG("%s:%d i %d j %d cam_vcustom2\n",
- __func__, __LINE__, i, j);
+ CAM_DBG(CAM_SENSOR,
+ "i:%d j:%d cam_vcustom2", i, j);
power_setting[i].seq_val = j;
if (VALIDATE_VOLTAGE(
@@ -498,8 +490,8 @@
break;
default: {
- pr_err("%s:%d invalid seq_val %d\n", __func__,
- __LINE__, power_setting[i].seq_val);
+ CAM_ERR(CAM_SENSOR, "invalid seq_val %d",
+ power_setting[i].seq_val);
break;
}
}
@@ -520,19 +512,19 @@
size = gpio_conf->cam_gpio_req_tbl_size;
if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
- pr_info("%s:%d No GPIO entry\n", __func__, __LINE__);
+ CAM_INFO(CAM_SENSOR, "No GPIO entry");
return 0;
}
if (!gpio_tbl || !size) {
- pr_err("%s:%d invalid gpio_tbl %pK / size %d\n", __func__,
- __LINE__, gpio_tbl, size);
+ CAM_ERR(CAM_SENSOR, "invalid gpio_tbl %pK / size %d",
+ gpio_tbl, size);
return -EINVAL;
}
for (i = 0; i < size; i++) {
- CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
- gpio_tbl[i].gpio, gpio_tbl[i].flags);
+ CAM_DBG(CAM_SENSOR, "i: %d, gpio %d dir %ld", i,
+ gpio_tbl[i].gpio, gpio_tbl[i].flags);
}
if (gpio_en) {
@@ -545,8 +537,7 @@
* apply new gpios, outout a error message
* for driver bringup debug
*/
- pr_err("%s:%d gpio %d:%s request fails\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR, "gpio %d:%s request fails",
gpio_tbl[i].gpio, gpio_tbl[i].label);
}
}
@@ -557,6 +548,307 @@
return rc;
}
+/*
+ * cam_sensor_update_power_settings - parse a packed power command buffer
+ * @cmd_buf:    stream of cam_cmd_power / cam_cmd_unconditional_wait blobs
+ * @cmd_length: length of @cmd_buf in bytes
+ * @power_info: output; power_setting/power_down_setting are allocated here
+ *
+ * Returns 0 on success. On failure both tables are freed and NULLed.
+ */
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+	int cmd_length, struct cam_sensor_power_ctrl_t *power_info)
+{
+	int32_t rc = 0, tot_size = 0, last_cmd_type = 0;
+	int32_t i = 0, pwr_up = 0, pwr_down = 0;
+	void *ptr = cmd_buf, *scr;
+	struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
+	struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
+
+	if (!pwr_cmd || !cmd_length) {
+		CAM_ERR(CAM_SENSOR, "Invalid Args: pwr_cmd %pK, cmd_length: %d",
+			pwr_cmd, cmd_length);
+		return -EINVAL;
+	}
+
+	power_info->power_setting_size = 0;
+	power_info->power_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_down_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	while (tot_size < cmd_length) {
+		if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
+			struct cam_cmd_power *pwr_cmd =
+				(struct cam_cmd_power *)ptr;
+
+			/* Bound the fixed-size table before filling it */
+			if (pwr_up + pwr_cmd->count > MAX_POWER_CONFIG) {
+				rc = -EINVAL;
+				goto free_power_down_settings;
+			}
+			power_info->power_setting_size += pwr_cmd->count;
+			scr = ptr + sizeof(struct cam_cmd_power);
+			tot_size = tot_size + sizeof(struct cam_cmd_power);
+
+			if (pwr_cmd->count == 0)
+				CAM_DBG(CAM_SENSOR, "Un expected Command");
+
+			for (i = 0; i < pwr_cmd->count; i++, pwr_up++) {
+				power_info->power_setting[pwr_up].seq_type =
+					pwr_cmd->power_settings[i].power_seq_type;
+				power_info->power_setting[pwr_up].config_val =
+					pwr_cmd->power_settings[i].config_val_low;
+				power_info->power_setting[pwr_up].delay = 0;
+				if (i) {
+					scr = scr +
+						sizeof(struct cam_power_settings);
+					tot_size = tot_size +
+						sizeof(struct cam_power_settings);
+				}
+				if (tot_size > cmd_length) {
+					CAM_ERR(CAM_SENSOR,
+						"Error: Cmd Buffer is wrong");
+					rc = -EINVAL;
+					goto free_power_down_settings;
+				}
+				CAM_DBG(CAM_SENSOR,
+					"Seq Type[%d]: %d Config_val: %ld", pwr_up,
+					power_info->power_setting[pwr_up].seq_type,
+					power_info->power_setting[pwr_up].config_val);
+			}
+			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_UP;
+			ptr = (void *) scr;
+			cmm_hdr = (struct common_header *)ptr;
+		} else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) {
+			struct cam_cmd_unconditional_wait *wait_cmd =
+				(struct cam_cmd_unconditional_wait *)ptr;
+			if (wait_cmd->op_code ==
+				CAMERA_SENSOR_WAIT_OP_SW_UCND) {
+				if (last_cmd_type ==
+					CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
+					if (pwr_up > 0)
+						power_info->
+							power_setting
+							[pwr_up - 1].delay +=
+							wait_cmd->delay;
+					else
+						CAM_ERR(CAM_SENSOR,
+							"Delay is expected only after valid power up setting");
+				} else if (last_cmd_type ==
+					CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
+					if (pwr_down > 0)
+						power_info->
+							power_down_setting
+							[pwr_down - 1].delay +=
+							wait_cmd->delay;
+					else
+						CAM_ERR(CAM_SENSOR,
+							"Delay is expected only after valid power up setting");
+				}
+			} else
+				CAM_DBG(CAM_SENSOR, "Invalid op code: %d",
+					wait_cmd->op_code);
+			tot_size = tot_size +
+				sizeof(struct cam_cmd_unconditional_wait);
+			if (tot_size > cmd_length) {
+				CAM_ERR(CAM_SENSOR, "Command Buffer is wrong");
+				rc = -EINVAL;
+				goto free_power_down_settings;
+			}
+			scr = (void *) (wait_cmd);
+			ptr = (void *)
+				(scr +
+				sizeof(struct cam_cmd_unconditional_wait));
+			CAM_DBG(CAM_SENSOR, "ptr: %pK sizeof: %d Next: %pK",
+				scr, (int32_t)sizeof(
+				struct cam_cmd_unconditional_wait), ptr);
+
+			cmm_hdr = (struct common_header *)ptr;
+		} else if (cmm_hdr->cmd_type ==
+			CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
+			struct cam_cmd_power *pwr_cmd =
+				(struct cam_cmd_power *)ptr;
+
+			scr = ptr + sizeof(struct cam_cmd_power);
+			tot_size = tot_size + sizeof(struct cam_cmd_power);
+			/* Bound the fixed-size table before filling it */
+			if (pwr_down + pwr_cmd->count > MAX_POWER_CONFIG) {
+				rc = -EINVAL;
+				goto free_power_down_settings;
+			}
+			power_info->power_down_setting_size += pwr_cmd->count;
+
+			if (pwr_cmd->count == 0)
+				CAM_ERR(CAM_SENSOR, "Invalid Command");
+
+			for (i = 0; i < pwr_cmd->count; i++, pwr_down++) {
+				power_info->power_down_setting[pwr_down].seq_type =
+					pwr_cmd->power_settings[i].power_seq_type;
+				power_info->power_down_setting[pwr_down].config_val =
+					pwr_cmd->power_settings[i].config_val_low;
+				power_info->power_down_setting[pwr_down].delay = 0;
+				if (i) {
+					scr = scr +
+						sizeof(struct cam_power_settings);
+					tot_size = tot_size +
+						sizeof(struct cam_power_settings);
+				}
+				if (tot_size > cmd_length) {
+					CAM_ERR(CAM_SENSOR,
+						"Command Buffer is wrong");
+					rc = -EINVAL;
+					goto free_power_down_settings;
+				}
+				CAM_DBG(CAM_SENSOR,
+					"Seq Type[%d]: %d Config_val: %ld", pwr_down,
+					power_info->power_down_setting[pwr_down].seq_type,
+					power_info->power_down_setting[pwr_down].config_val);
+			}
+			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_DOWN;
+			ptr = (void *) scr;
+			cmm_hdr = (struct common_header *)ptr;
+		} else {
+			CAM_ERR(CAM_SENSOR,
+				"Error: Un expected Header Type: %d",
+				cmm_hdr->cmd_type);
+			/* An unknown header never advances ptr; bail out
+			 * instead of spinning forever on a bad buffer.
+			 */
+			rc = -EINVAL;
+			goto free_power_down_settings;
+		}
+	}
+
+	return rc;
+free_power_down_settings:
+	kfree(power_info->power_down_setting);
+	power_info->power_down_setting = NULL;
+	power_info->power_down_setting_size = 0;
+free_power_settings:
+	kfree(power_info->power_setting);
+	power_info->power_setting = NULL;
+	power_info->power_setting_size = 0;
+	return rc;
+}
+
+int cam_get_dt_power_setting_data(struct device_node *of_node,
+	struct cam_hw_soc_info *soc_info,
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0, i, c;
+	int count = 0;
+	const char *seq_name = NULL;
+	uint32_t *array = NULL;
+	struct cam_sensor_power_setting *ps;
+
+	if (!power_info)
+		return -EINVAL;
+
+	count = of_property_count_strings(of_node, "qcom,cam-power-seq-type");
+	CAM_DBG(CAM_SENSOR, "qcom,cam-power-seq-type count %d", count);
+
+	if (count <= 0)
+		return 0;
+
+	/* Publish the size only once the property proved to be valid */
+	power_info->power_setting_size = count;
+	ps = kcalloc(count, sizeof(*ps), GFP_KERNEL);
+	if (!ps)
+		return -ENOMEM;
+	power_info->power_setting = ps;
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,cam-power-seq-type", i, &seq_name);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "failed");
+			goto ERROR1;
+		}
+		CAM_DBG(CAM_SENSOR, "seq_name[%d] = %s", i, seq_name);
+		if (!strcmp(seq_name, "cam_vio")) {
+			ps[i].seq_type = SENSOR_VIO;
+		} else if (!strcmp(seq_name, "cam_vana")) {
+			ps[i].seq_type = SENSOR_VANA;
+		} else if (!strcmp(seq_name, "cam_clk")) {
+			ps[i].seq_type = SENSOR_MCLK;
+		} else {
+			CAM_ERR(CAM_SENSOR, "unrecognized seq-type %s",
+				seq_name);
+			rc = -EILSEQ;
+			goto ERROR1;
+		}
+		CAM_DBG(CAM_SENSOR, "seq_type[%d] %d", i, ps[i].seq_type);
+	}
+
+	array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+	if (!array) {
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-cfg-val",
+		array, count);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed ");
+		goto ERROR2;
+	}
+
+	for (i = 0; i < count; i++) {
+		ps[i].config_val = array[i];
+		CAM_DBG(CAM_SENSOR, "power_setting[%d].config_val = %ld", i,
+			ps[i].config_val);
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-delay",
+		array, count);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed");
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		ps[i].delay = array[i];
+		CAM_DBG(CAM_SENSOR, "power_setting[%d].delay = %d", i,
+			ps[i].delay);
+	}
+	kfree(array);
+
+	power_info->power_down_setting =
+		kcalloc(count, sizeof(*ps), GFP_KERNEL);
+
+	if (!power_info->power_down_setting) {
+		CAM_ERR(CAM_SENSOR, "failed");
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	power_info->power_down_setting_size = count;
+
+	/* The power-down sequence is the power-up sequence reversed */
+	for (c = 0; c < count; c++)
+		power_info->power_down_setting[c] = ps[count - 1 - c];
+
+	return rc;
+
+ERROR2:
+	kfree(array);
+ERROR1:
+	kfree(ps);
+	power_info->power_setting = NULL;
+	power_info->power_setting_size = 0;
+	return rc;
+}
int cam_sensor_util_init_gpio_pin_tbl(
struct cam_hw_soc_info *soc_info,
@@ -574,22 +866,19 @@
gconf = soc_info->gpio_data;
if (!gconf) {
- pr_err("%s:%d No gpio_common_table is found\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "No gpio_common_table is found");
return -EINVAL;
}
if (!gconf->cam_gpio_common_tbl) {
- pr_err("%s:%d gpio_common_table is not initialized\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "gpio_common_table is not initialized");
return -EINVAL;
}
gpio_array_size = gconf->cam_gpio_common_tbl_size;
if (!gpio_array_size) {
- pr_err("%s:%d invalid size of gpio table\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "invalid size of gpio table");
return -EINVAL;
}
@@ -602,12 +891,10 @@
rc = of_property_read_u32(of_node, "gpio-vana", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read gpio-vana failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "read gpio-vana failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d gpio-vana invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-vana invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
@@ -615,38 +902,34 @@
gconf->cam_gpio_common_tbl[val].gpio;
gpio_num_info->valid[SENSOR_VANA] = 1;
- CDBG("%s:%d gpio-vana %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "gpio-vana %d",
gpio_num_info->gpio_num[SENSOR_VANA]);
}
rc = of_property_read_u32(of_node, "gpio-vio", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read gpio-vio failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "read gpio-vio failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d gpio-vio invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-vio invalid %d", val);
goto free_gpio_info;
}
gpio_num_info->gpio_num[SENSOR_VIO] =
gconf->cam_gpio_common_tbl[val].gpio;
gpio_num_info->valid[SENSOR_VIO] = 1;
- CDBG("%s:%d gpio-vio %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "gpio-vio %d",
gpio_num_info->gpio_num[SENSOR_VIO]);
}
rc = of_property_read_u32(of_node, "gpio-vaf", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read gpio-vaf failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "read gpio-vaf failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d gpio-vaf invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-vaf invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
@@ -654,19 +937,17 @@
gconf->cam_gpio_common_tbl[val].gpio;
gpio_num_info->valid[SENSOR_VAF] = 1;
- CDBG("%s:%d gpio-vaf %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "gpio-vaf %d",
gpio_num_info->gpio_num[SENSOR_VAF]);
}
rc = of_property_read_u32(of_node, "gpio-vdig", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read gpio-vdig failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "read gpio-vdig failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d gpio-vdig invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-vdig invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
@@ -674,19 +955,17 @@
gconf->cam_gpio_common_tbl[val].gpio;
gpio_num_info->valid[SENSOR_VDIG] = 1;
- CDBG("%s:%d gpio-vdig %d\n", __func__, __LINE__,
- gpio_num_info->gpio_num[SENSOR_VDIG]);
+ CAM_DBG(CAM_SENSOR, "gpio-vdig %d",
+ gpio_num_info->gpio_num[SENSOR_VDIG]);
}
rc = of_property_read_u32(of_node, "gpio-reset", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read gpio-reset failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "read gpio-reset failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d gpio-reset invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-reset invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
@@ -694,19 +973,18 @@
gconf->cam_gpio_common_tbl[val].gpio;
gpio_num_info->valid[SENSOR_RESET] = 1;
- CDBG("%s:%d gpio-reset %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "gpio-reset %d",
gpio_num_info->gpio_num[SENSOR_RESET]);
}
rc = of_property_read_u32(of_node, "gpio-standby", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read gpio-standby failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "read gpio-standby failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d gpio-standby invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-standby invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
@@ -714,19 +992,18 @@
gconf->cam_gpio_common_tbl[val].gpio;
gpio_num_info->valid[SENSOR_STANDBY] = 1;
- CDBG("%s:%d gpio-standby %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "gpio-standby %d",
gpio_num_info->gpio_num[SENSOR_STANDBY]);
}
rc = of_property_read_u32(of_node, "gpio-af-pwdm", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read gpio-af-pwdm failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "read gpio-af-pwdm failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d gpio-af-pwdm invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-af-pwdm invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
@@ -734,19 +1011,18 @@
gconf->cam_gpio_common_tbl[val].gpio;
gpio_num_info->valid[SENSOR_VAF_PWDM] = 1;
- CDBG("%s:%d gpio-af-pwdm %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "gpio-af-pwdm %d",
gpio_num_info->gpio_num[SENSOR_VAF_PWDM]);
}
rc = of_property_read_u32(of_node, "gpio-custom1", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read gpio-custom1 failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "read gpio-custom1 failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d gpio-custom1 invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-custom1 invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
@@ -754,19 +1030,18 @@
gconf->cam_gpio_common_tbl[val].gpio;
gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1;
- CDBG("%s:%d gpio-custom1 %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "gpio-custom1 %d",
gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]);
}
rc = of_property_read_u32(of_node, "gpio-custom2", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read gpio-custom2 failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "read gpio-custom2 failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d gpio-custom2 invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-custom2 invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
@@ -774,7 +1049,7 @@
gconf->cam_gpio_common_tbl[val].gpio;
gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1;
- CDBG("%s:%d gpio-custom2 %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "gpio-custom2 %d",
gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]);
} else {
rc = 0;
@@ -793,24 +1068,23 @@
sensor_pctrl->pinctrl = devm_pinctrl_get(dev);
if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) {
- pr_err("%s:%d Getting pinctrl handle failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Getting pinctrl handle failed");
return -EINVAL;
}
sensor_pctrl->gpio_state_active =
pinctrl_lookup_state(sensor_pctrl->pinctrl,
CAM_SENSOR_PINCTRL_STATE_DEFAULT);
if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_active)) {
- pr_err("%s:%d Failed to get the active state pinctrl handle\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Failed to get the active state pinctrl handle");
return -EINVAL;
}
sensor_pctrl->gpio_state_suspend
= pinctrl_lookup_state(sensor_pctrl->pinctrl,
CAM_SENSOR_PINCTRL_STATE_SLEEP);
if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_suspend)) {
- pr_err("%s:%d Failed to get the suspend state pinctrl handle\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Failed to get the suspend state pinctrl handle");
return -EINVAL;
}
return 0;
@@ -821,19 +1095,17 @@
int gpio_offset = -1;
if (!gpio_num_info) {
- pr_err("%s:%d Input Parameters are not proper\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Input Parameters are not proper");
return -EINVAL;
}
- CDBG("%s: %d Seq type: %d, config: %d", __func__, __LINE__,
- seq_type, val);
+ CAM_DBG(CAM_SENSOR, "Seq type: %d, config: %d", seq_type, val);
gpio_offset = seq_type;
if (gpio_num_info->valid[gpio_offset] == 1) {
- CDBG("%s: %d VALID GPIO offset: %d, seqtype: %d\n",
- __func__, __LINE__, gpio_offset, seq_type);
+ CAM_DBG(CAM_SENSOR, "VALID GPIO offset: %d, seqtype: %d",
+ gpio_offset, seq_type);
gpio_set_value_cansleep(
gpio_num_info->gpio_num
[gpio_offset], val);
@@ -850,9 +1122,9 @@
struct cam_sensor_power_setting *power_setting = NULL;
struct msm_camera_gpio_num_info *gpio_num_info = NULL;
- CDBG("%s:%d\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Enter");
if (!ctrl) {
- pr_err("%s:%d Invalid ctrl handle\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid ctrl handle");
return -EINVAL;
}
@@ -860,15 +1132,13 @@
num_vreg = soc_info->num_rgltr;
if ((num_vreg == 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
- pr_err("%s:%d Regulators are not initialized\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Regulators are not initialized");
return -EINVAL;
}
ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
if (ret < 0) {
- pr_err("%s:%d Initialization of pinctrl failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Initialization of pinctrl failed");
ctrl->cam_pinctrl_status = 0;
} else {
ctrl->cam_pinctrl_status = 1;
@@ -882,19 +1152,18 @@
ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
ctrl->pinctrl_info.gpio_state_active);
if (ret)
- pr_err("%s:%d cannot set pin to active state",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "cannot set pin to active state");
}
for (index = 0; index < ctrl->power_setting_size; index++) {
- CDBG("%s:%d index %d\n", __func__, __LINE__, index);
+ CAM_DBG(CAM_SENSOR, "index: %d", index);
power_setting = &ctrl->power_setting[index];
+ CAM_DBG(CAM_SENSOR, "seq_type %d", power_setting->seq_type);
switch (power_setting->seq_type) {
case SENSOR_MCLK:
if (power_setting->seq_val >= soc_info->num_clk) {
- pr_err("%s:%d :Error: clk index %d >= max %u\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR, "clk index %d >= max %u",
power_setting->seq_val,
soc_info->num_clk);
goto power_up_failed;
@@ -902,8 +1171,8 @@
for (j = 0; j < num_vreg; j++) {
if (!strcmp(soc_info->rgltr_name[j],
"cam_clk")) {
- CDBG("%s:%d Enable cam_clk: %d\n",
- __func__, __LINE__, j);
+ CAM_DBG(CAM_SENSOR,
+ "Enable cam_clk: %d", j);
soc_info->rgltr[j] =
regulator_get(
@@ -915,8 +1184,8 @@
rc = PTR_ERR(
soc_info->rgltr[j]);
rc = rc ? rc : -EINVAL;
- pr_err("%s:%d :vreg %s %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR,
+ "vreg %s %d",
soc_info->rgltr_name[j],
rc);
soc_info->rgltr[j] = NULL;
@@ -947,8 +1216,7 @@
}
if (rc < 0) {
- pr_err("%s:%d clk enable failed\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_SENSOR, "clk enable failed");
goto power_up_failed;
}
break;
@@ -957,20 +1225,17 @@
case SENSOR_CUSTOM_GPIO1:
case SENSOR_CUSTOM_GPIO2:
if (no_gpio) {
- pr_err("%s:%d request gpio failed\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_SENSOR, "request gpio failed");
return no_gpio;
}
if (power_setting->seq_val >= CAM_VREG_MAX ||
!gpio_num_info) {
- pr_err("%s:%d gpio index %d >= max %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR, "gpio index %d >= max %d",
power_setting->seq_val,
CAM_VREG_MAX);
goto power_up_failed;
}
- CDBG("%s:%d gpio set val %d\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "gpio set val %d",
gpio_num_info->gpio_num
[power_setting->seq_val]);
@@ -978,8 +1243,8 @@
power_setting->seq_type,
gpio_num_info, 1);
if (rc < 0) {
- pr_err("%s:%d Error in handling VREG GPIO\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Error in handling VREG GPIO");
goto power_up_failed;
}
break;
@@ -994,15 +1259,13 @@
break;
if (power_setting->seq_val >= CAM_VREG_MAX) {
- pr_err("%s:%d vreg index %d >= max %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR, "vreg index %d >= max %d",
power_setting->seq_val,
CAM_VREG_MAX);
goto power_up_failed;
}
if (power_setting->seq_val < num_vreg) {
- CDBG("%s:%d Enable Regulator\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Enable Regulator");
vreg_idx = power_setting->seq_val;
soc_info->rgltr[vreg_idx] =
@@ -1013,8 +1276,7 @@
rc = PTR_ERR(soc_info->rgltr[vreg_idx]);
rc = rc ? rc : -EINVAL;
- pr_err("%s:%d, %s get failed %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR, "%s get failed %d",
soc_info->rgltr_name[vreg_idx],
rc);
@@ -1033,22 +1295,21 @@
soc_info->rgltr[vreg_idx];
}
else
- pr_err("%s: %d usr_idx:%d dts_idx:%d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR, "usr_idx:%d dts_idx:%d",
power_setting->seq_val, num_vreg);
rc = msm_cam_sensor_handle_reg_gpio(
power_setting->seq_type,
gpio_num_info, 1);
if (rc < 0) {
- pr_err("%s:%d Error in handling VREG GPIO\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Error in handling VREG GPIO");
goto power_up_failed;
}
break;
default:
- pr_err("%s:%d error power seq type %d\n", __func__,
- __LINE__, power_setting->seq_type);
+ CAM_ERR(CAM_SENSOR, "error power seq type %d",
+ power_setting->seq_type);
break;
}
if (power_setting->delay > 20)
@@ -1060,11 +1321,11 @@
return 0;
power_up_failed:
- pr_err("%s:%d failed\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "failed");
for (index--; index >= 0; index--) {
- CDBG("%s:%d index %d\n", __func__, __LINE__, index);
+ CAM_DBG(CAM_SENSOR, "index %d", index);
power_setting = &ctrl->power_setting[index];
- CDBG("%s:%d type %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "type %d",
power_setting->seq_type);
switch (power_setting->seq_type) {
case SENSOR_RESET:
@@ -1088,8 +1349,7 @@
case SENSOR_CUSTOM_REG1:
case SENSOR_CUSTOM_REG2:
if (power_setting->seq_val < num_vreg) {
- CDBG("%s:%d Disable Regulator\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Disable Regulator");
vreg_idx = power_setting->seq_val;
rc = cam_soc_util_regulator_disable(
@@ -1105,8 +1365,7 @@
}
else
- pr_err("%s:%d:seq_val: %d > num_vreg: %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR, "seq_val:%d > num_vreg: %d",
power_setting->seq_val, num_vreg);
msm_cam_sensor_handle_reg_gpio(power_setting->seq_type,
@@ -1114,8 +1373,8 @@
break;
default:
- pr_err("%s:%d error power seq type %d\n", __func__,
- __LINE__, power_setting->seq_type);
+ CAM_ERR(CAM_SENSOR, "error power seq type %d",
+ power_setting->seq_type);
break;
}
if (power_setting->delay > 20) {
@@ -1129,8 +1388,7 @@
ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
ctrl->pinctrl_info.gpio_state_suspend);
if (ret)
- pr_err("%s:%d cannot set pin to suspend state\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
}
ctrl->cam_pinctrl_status = 0;
@@ -1186,8 +1444,7 @@
}
if (ps != NULL) {
- CDBG("%s:%d Disable Regulator\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Disable Regulator");
rc = cam_soc_util_regulator_disable(
soc_info->rgltr[j],
@@ -1214,9 +1471,9 @@
struct cam_sensor_power_setting *ps;
struct msm_camera_gpio_num_info *gpio_num_info = NULL;
- CDBG("%s:%d\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Enter");
if (!ctrl || !soc_info) {
- pr_err("%s:%d failed ctrl %pK\n", __func__, __LINE__, ctrl);
+ CAM_ERR(CAM_SENSOR, "failed ctrl %pK", ctrl);
return -EINVAL;
}
@@ -1224,16 +1481,16 @@
num_vreg = soc_info->num_rgltr;
for (index = 0; index < ctrl->power_down_setting_size; index++) {
- CDBG("%s:%d index %d\n", __func__, __LINE__, index);
+ CAM_DBG(CAM_SENSOR, "index %d", index);
pd = &ctrl->power_down_setting[index];
ps = NULL;
- CDBG("%s:%d type %d\n", __func__, __LINE__, pd->seq_type);
+ CAM_DBG(CAM_SENSOR, "type %d", pd->seq_type);
switch (pd->seq_type) {
case SENSOR_MCLK:
ret = cam_config_mclk_reg(ctrl, soc_info, index);
if (ret < 0) {
- pr_err("%s:%d :Error: in config clk reg\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "config clk reg failed rc: %d", ret);
return ret;
}
//cam_soc_util_clk_disable_default(soc_info);
@@ -1272,8 +1529,8 @@
pd->seq_val);
if (ps) {
if (pd->seq_val < num_vreg) {
- CDBG("%s:%d Disable Regulator\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR,
+ "Disable Regulator");
ret = cam_soc_util_regulator_disable(
soc_info->rgltr[ps->seq_val],
soc_info->rgltr_name[ps->seq_val],
@@ -1286,23 +1543,24 @@
soc_info->rgltr[ps->seq_val];
}
else
- pr_err("%s:%d:seq_val:%d > num_vreg: %d\n",
- __func__, __LINE__, pd->seq_val,
+ CAM_ERR(CAM_SENSOR,
+ "seq_val:%d > num_vreg: %d",
+ pd->seq_val,
num_vreg);
} else
- pr_err("%s:%d error in power up/down seq\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "error in power up/down seq");
ret = msm_cam_sensor_handle_reg_gpio(pd->seq_type,
gpio_num_info, GPIOF_OUT_INIT_LOW);
if (ret < 0)
- pr_err("%s:%d Error disabling VREG GPIO\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Error disabling VREG GPIO");
break;
default:
- pr_err("%s:%d error power seq type %d\n", __func__,
- __LINE__, pd->seq_type);
+ CAM_ERR(CAM_SENSOR, "error power seq type %d",
+ pd->seq_type);
break;
}
if (pd->delay > 20)
@@ -1316,8 +1574,7 @@
ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
ctrl->pinctrl_info.gpio_state_suspend);
if (ret)
- pr_err("%s:%d cannot set pin to suspend state",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
index 912f06b..8a26369 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
@@ -23,11 +23,12 @@
#include <cam_req_mgr_interface.h>
#include <cam_mem_mgr.h>
#include "cam_soc_util.h"
+#include "cam_debug_util.h"
#define INVALID_VREG 100
-int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
- struct camera_vreg_t *cam_vreg, int num_vreg,
+int cam_get_dt_power_setting_data(struct device_node *of_node,
+ struct cam_hw_soc_info *soc_info,
struct cam_sensor_power_ctrl_t *power_info);
int msm_camera_pinctrl_init
@@ -52,4 +53,7 @@
int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info,
struct cam_sensor_power_setting *power_setting,
uint16_t power_setting_size);
+
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+ int cmd_length, struct cam_sensor_power_ctrl_t *power_info);
#endif /* _CAM_SENSOR_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/Makefile b/drivers/media/platform/msm/camera/cam_utils/Makefile
index f22115c..4702963 100644
--- a/drivers/media/platform/msm/camera/cam_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_utils/Makefile
@@ -1,3 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o cam_packet_util.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o cam_packet_util.o cam_debug_util.o cam_trace.o cam_common_util.o
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c
new file mode 100644
index 0000000..199d3ea
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+
+int cam_common_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index)
+{
+	uint32_t i;
+
+	for (i = 0; i < num_strings; i++) {
+		if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
+			CAM_DBG(CAM_UTIL, "matched %s : %u",
+				matching_string, i);
+			*index = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
new file mode 100644
index 0000000..d6a11b7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_COMMON_UTIL_H_
+#define _CAM_COMMON_UTIL_H_
+
+#define CAM_BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+
+/**
+ * cam_common_util_get_string_index()
+ *
+ * @brief Match the string from list of strings to return
+ * matching index
+ *
+ * @strings: Pointer to list of strings
+ * @num_strings: Number of strings in 'strings'
+ * @matching_string: String to match
+ * @index: Pointer to index to return matching index
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_common_util_get_string_index(const char **strings,
+ uint32_t num_strings, char *matching_string, uint32_t *index);
+
+#endif /* _CAM_COMMON_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c
new file mode 100644
index 0000000..21f90ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c
@@ -0,0 +1,133 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include "cam_debug_util.h"
+
+static uint debug_mdl;
+module_param(debug_mdl, uint, 0644);
+
+const char *cam_get_module_name(unsigned int module_id)
+{
+	const char *name = NULL;
+
+	switch (module_id) {
+	case CAM_CDM:
+		name = "CAM-CDM";
+		break;
+	case CAM_CORE:
+		name = "CAM-CORE";
+		break;
+	case CAM_CRM:
+		name = "CAM-CRM";
+		break;
+	case CAM_CPAS:
+		name = "CAM-CPAS";
+		break;
+	case CAM_ISP:
+		name = "CAM-ISP";
+		break;
+	case CAM_SENSOR:
+		name = "CAM-SENSOR";
+		break;
+	case CAM_SMMU:
+		name = "CAM-SMMU";
+		break;
+	case CAM_SYNC:
+		name = "CAM-SYNC";
+		break;
+	case CAM_ICP:
+		name = "CAM-ICP";
+		break;
+	case CAM_JPEG:
+		name = "CAM-JPEG";
+		break;
+	case CAM_FD:
+		name = "CAM-FD";
+		break;
+	case CAM_LRME:
+		name = "CAM-LRME";
+		break;
+	case CAM_FLASH:
+		name = "CAM-FLASH";
+		break;
+	case CAM_ACTUATOR:
+		name = "CAM-ACTUATOR";
+		break;
+	case CAM_CCI:
+		name = "CAM-CCI";
+		break;
+	case CAM_CSIPHY:
+		name = "CAM-CSIPHY";
+		break;
+	case CAM_EEPROM:
+		name = "CAM-EEPROM";
+		break;
+	case CAM_UTIL:
+		name = "CAM-UTIL";
+		break;
+	case CAM_CTXT:
+		name = "CAM-CTXT";
+		break;
+	case CAM_HFI:
+		name = "CAM-HFI";
+		break;
+	default:
+		name = "CAM";
+		break;
+	}
+
+	return name;
+}
+
+void cam_debug_log(unsigned int module_id, enum cam_debug_level dbg_level,
+	const char *func, const int line, const char *fmt, ...)
+{
+	char str_buffer[STR_BUFFER_MAX_LENGTH];
+	va_list args;
+
+	va_start(args, fmt);
+
+	/* CAM_LEVEL_DBG additionally requires the module's bit in debug_mdl */
+	switch (dbg_level) {
+	case CAM_LEVEL_DBG:
+		if (debug_mdl & module_id) {
+			vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+			pr_info("CAM_DBG: %s: %s: %d: %s\n",
+				cam_get_module_name(module_id),
+				func, line, str_buffer);
+		}
+		break;
+	case CAM_LEVEL_ERR:
+		vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+		pr_err("CAM_ERR: %s: %s: %d: %s\n",
+			cam_get_module_name(module_id), func, line, str_buffer);
+		break;
+	case CAM_LEVEL_INFO:
+		vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+		pr_info("CAM_INFO: %s: %s: %d: %s\n",
+			cam_get_module_name(module_id), func, line, str_buffer);
+		break;
+	case CAM_LEVEL_WARN:
+		vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+		pr_warn("CAM_WARN: %s: %s: %d: %s\n",
+			cam_get_module_name(module_id), func, line, str_buffer);
+		break;
+	default:
+		break;
+	}
+
+	/* pair the va_start() above on every path (was skipped on default/disabled-debug) */
+	va_end(args);
+}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
index 5989f1a..7275d56 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundataion. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,45 +13,112 @@
#ifndef _CAM_DEBUG_UTIL_H_
#define _CAM_DEBUG_UTIL_H_
-#define DEFAULT 0xFFFF
-#define CAM_CDM (1 << 0)
-#define CAM_CORE (1 << 1)
-#define CAM_CPAS (1 << 2)
-#define CAM_ISP (1 << 3)
-#define CAM_CRM (1 << 4)
-#define CAM_SENSOR (1 << 5)
-#define CAM_SMMU (1 << 6)
-#define CAM_SYNC (1 << 7)
-#define CAM_ICP (1 << 8)
-#define CAM_JPEG (1 << 9)
-#define CAM_FD (1 << 10)
-#define CAM_LRME (1 << 11)
+#define CAM_CDM (1 << 0)
+#define CAM_CORE (1 << 1)
+#define CAM_CPAS (1 << 2)
+#define CAM_ISP (1 << 3)
+#define CAM_CRM (1 << 4)
+#define CAM_SENSOR (1 << 5)
+#define CAM_SMMU (1 << 6)
+#define CAM_SYNC (1 << 7)
+#define CAM_ICP (1 << 8)
+#define CAM_JPEG (1 << 9)
+#define CAM_FD (1 << 10)
+#define CAM_LRME (1 << 11)
+#define CAM_FLASH (1 << 12)
+#define CAM_ACTUATOR (1 << 13)
+#define CAM_CCI (1 << 14)
+#define CAM_CSIPHY (1 << 15)
+#define CAM_EEPROM (1 << 16)
+#define CAM_UTIL (1 << 17)
+#define CAM_HFI (1 << 18)
+#define CAM_CTXT (1 << 19)
-#define GROUP DEFAULT
-#define TRACE_ON 0
+#define STR_BUFFER_MAX_LENGTH 1024
-#define CAM_ERR(__module, fmt, args...) \
- do { if (GROUP & __module) { \
- if (TRACE_ON) \
- trace_printk(fmt, ##args); \
- else \
- pr_err(fmt, ##args); \
- } } while (0)
+enum cam_debug_level {
+ CAM_LEVEL_INFO,
+ CAM_LEVEL_WARN,
+ CAM_LEVEL_ERR,
+ CAM_LEVEL_DBG,
+};
-#define CAM_WARN(__module, fmt, args...) \
- do { if (GROUP & __module) { \
- if (TRACE_ON) \
- trace_printk(fmt, ##args); \
- else \
- pr_warn(fmt, ##args); \
- } } while (0)
+/*
+ * cam_debug_log()
+ *
+ * @brief : Get the Module name from module ID and print
+ * respective debug logs
+ *
+ * @module_id : Respective Module ID which is calling this function
+ * @dbg_level : Debug level from cam_module_debug_level enum entries
+ * @func : Function which is calling to print logs
+ * @line : Line number associated with the function which is calling
+ * to print log
+ * @fmt : Formatted string which needs to be print in the log
+ *
+ */
+void cam_debug_log(unsigned int module_id, enum cam_debug_level dbg_level,
+ const char *func, const int line, const char *fmt, ...);
-#define CAM_INFO(__module, fmt, args...) \
- do { if (GROUP & __module) { \
- if (TRACE_ON) \
- trace_printk(fmt, ##args); \
- else \
- pr_info(fmt, ##args); \
- } } while (0)
+/*
+ * cam_get_module_name()
+ *
+ * @brief : Get the module name from module ID
+ *
+ * @module_id : Module ID which is using this function
+ */
+const char *cam_get_module_name(unsigned int module_id);
+
+/*
+ * CAM_ERR
+ * @brief : This Macro will print error logs
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_ERR(__module, fmt, args...) \
+ cam_debug_log(__module, CAM_LEVEL_ERR, __func__, __LINE__, fmt, ##args)
+
+/*
+ * CAM_WARN
+ * @brief : This Macro will print warning logs
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_WARN(__module, fmt, args...) \
+ cam_debug_log(__module, CAM_LEVEL_WARN, __func__, __LINE__, fmt, ##args)
+
+/*
+ * CAM_INFO
+ * @brief : This Macro will print Information logs
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_INFO(__module, fmt, args...) \
+ cam_debug_log(__module, CAM_LEVEL_INFO, __func__, __LINE__, fmt, ##args)
+
+/*
+ * CAM_DBG
+ * @brief : This Macro will print debug logs when enabled using GROUP
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_DBG(__module, fmt, args...) \
+ cam_debug_log(__module, CAM_LEVEL_DBG, __func__, __LINE__, fmt, ##args)
+
+/*
+ * CAM_ERR_RATE_LIMIT
+ * @brief : This Macro will prevent error print logs with ratelimit
+ */
+#define CAM_ERR_RATE_LIMIT(__module, fmt, args...) \
+	pr_err_ratelimited("CAM_ERR: %s: %s: %d " fmt "\n", \
+	cam_get_module_name(__module), __func__, __LINE__, ##args)
#endif /* _CAM_DEBUG_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index 6d90c1e..442d0bd 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -12,11 +12,118 @@
#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+#include <linux/types.h>
+#include <linux/slab.h>
+
#include "cam_mem_mgr.h"
#include "cam_packet_util.h"
+#include "cam_debug_util.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+static int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
+	size_t *len)
+{
+	int rc = 0;
+	uint64_t kmd_buf_addr = 0;
+
+	rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "Unable to get the virtual address %d", rc);
+	} else {
+		if (kmd_buf_addr && *len) {
+			*buf_addr = (uint32_t *)kmd_buf_addr;
+		} else {
+			CAM_ERR(CAM_UTIL, "Invalid addr and length :%zu", *len);
+			rc = -ENOMEM;
+		}
+	}
+	return rc;
+}
+
+int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc)
+{
+ if ((cmd_desc->length > cmd_desc->size) ||
+ (cmd_desc->mem_handle <= 0)) {
+ CAM_ERR(CAM_UTIL, "invalid cmd arg %d %d %d %d",
+ cmd_desc->offset, cmd_desc->length,
+ cmd_desc->mem_handle, cmd_desc->size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cam_packet_util_validate_packet(struct cam_packet *packet)
+{
+ if (!packet)
+ return -EINVAL;
+
+ CAM_DBG(CAM_UTIL, "num cmd buf:%d num of io config:%d kmd buf index:%d",
+ packet->num_cmd_buf, packet->num_io_configs,
+ packet->kmd_cmd_buf_index);
+
+ if ((packet->kmd_cmd_buf_index >= packet->num_cmd_buf) ||
+ (!packet->header.size) ||
+ (packet->cmd_buf_offset > packet->header.size) ||
+ (packet->io_configs_offset > packet->header.size)) {
+ CAM_ERR(CAM_UTIL, "invalid packet:%d %d %d %d %d",
+ packet->kmd_cmd_buf_index,
+ packet->num_cmd_buf, packet->cmd_buf_offset,
+ packet->io_configs_offset, packet->header.size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
+	struct cam_kmd_buf_info *kmd_buf)
+{
+	int rc = 0;
+	size_t len = 0;
+	struct cam_cmd_buf_desc *cmd_desc;
+	uint32_t *cpu_addr;
+
+	if (!packet || !kmd_buf) {
+		CAM_ERR(CAM_UTIL, "Invalid arg %pK %pK", packet, kmd_buf);
+		return -EINVAL;
+	}
+
+	/* Take first command descriptor and add offset to it for kmd*/
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)
+		&packet->payload + packet->cmd_buf_offset);
+	cmd_desc += packet->kmd_cmd_buf_index;
+
+	rc = cam_packet_util_validate_cmd_desc(cmd_desc);
+	if (rc)
+		return rc;
+
+	rc = cam_packet_util_get_cmd_mem_addr(cmd_desc->mem_handle, &cpu_addr,
+		&len);
+	if (rc)
+		return rc;
+
+	if (len < cmd_desc->size) {
+		CAM_ERR(CAM_UTIL, "invalid memory len:%zu and cmd desc size:%d",
+			len, cmd_desc->size);
+		return -EINVAL;
+	}
+
+	cpu_addr += (cmd_desc->offset / 4) + (packet->kmd_cmd_buf_offset / 4);
+	CAM_DBG(CAM_UTIL, "total size %d, cmd size: %d, KMD buffer size: %d",
+		cmd_desc->size, cmd_desc->length,
+		cmd_desc->size - cmd_desc->length);
+	CAM_DBG(CAM_UTIL, "hdl 0x%x, cmd offset %d, kmd offset %d, addr 0x%pK",
+		cmd_desc->mem_handle, cmd_desc->offset,
+		packet->kmd_cmd_buf_offset, cpu_addr);
+
+	kmd_buf->cpu_addr = cpu_addr;
+	kmd_buf->handle = cmd_desc->mem_handle;
+	kmd_buf->offset = cmd_desc->offset + packet->kmd_cmd_buf_offset;
+	kmd_buf->size = cmd_desc->size - cmd_desc->length;
+	kmd_buf->used_bytes = 0;
+
+	return rc;
+}
int cam_packet_util_process_patches(struct cam_packet *packet,
int32_t iommu_hdl)
@@ -36,7 +143,7 @@
patch_desc = (struct cam_patch_desc *)
((uint32_t *) &packet->payload +
packet->patch_offset/4);
- CDBG("packet = %pK patch_desc = %pK size = %lu\n",
+ CAM_DBG(CAM_UTIL, "packet = %pK patch_desc = %pK size = %lu",
(void *)packet, (void *)patch_desc,
sizeof(struct cam_patch_desc));
@@ -44,7 +151,7 @@
rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
iommu_hdl, &iova_addr, &src_buf_size);
if (rc < 0) {
- pr_err("unable to get src buf address\n");
+ CAM_ERR(CAM_UTIL, "unable to get src buf address");
return rc;
}
src_buf_iova_addr = (uint32_t *)iova_addr;
@@ -53,12 +160,12 @@
rc = cam_mem_get_cpu_buf(patch_desc[i].dst_buf_hdl,
&cpu_addr, &dst_buf_len);
if (rc < 0) {
- pr_err("unable to get dst buf address\n");
+ CAM_ERR(CAM_UTIL, "unable to get dst buf address");
return rc;
}
dst_cpu_addr = (uint32_t *)cpu_addr;
- CDBG("i = %d patch info = %x %x %x %x\n", i,
+ CAM_DBG(CAM_UTIL, "i = %d patch info = %x %x %x %x", i,
patch_desc[i].dst_buf_hdl, patch_desc[i].dst_offset,
patch_desc[i].src_buf_hdl, patch_desc[i].src_offset);
@@ -68,7 +175,8 @@
*dst_cpu_addr = temp;
- CDBG("patch is done for dst %pK with src %pK value %llx\n",
+ CAM_DBG(CAM_UTIL,
+ "patch is done for dst %pK with src %pK value %llx",
dst_cpu_addr, src_buf_iova_addr,
*((uint64_t *)dst_cpu_addr));
}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
index 614e868..8b590a7 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
@@ -16,6 +16,62 @@
#include <uapi/media/cam_defs.h>
/**
+ * @brief KMD scratch buffer information
+ *
+ * @handle: Memory handle
+ * @cpu_addr: Cpu address
+ * @offset: Offset from the start of the buffer
+ * @size: Size of the buffer
+ * @used_bytes: Used memory in bytes
+ *
+ */
+struct cam_kmd_buf_info {
+ int handle;
+ uint32_t *cpu_addr;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t used_bytes;
+};
+
+/**
+ * cam_packet_util_validate_packet()
+ *
+ * @brief Validate the packet
+ *
+ * @packet: Packet to be validated
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_packet_util_validate_packet(struct cam_packet *packet);
+
+/**
+ * cam_packet_util_validate_cmd_desc()
+ *
+ * @brief Validate the packet
+ *
+ * @cmd_desc: Command descriptor to be validated
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc);
+
+/**
+ * cam_packet_util_get_kmd_buffer()
+ *
+ * @brief Get the kmd buffer from the packet command descriptor
+ *
+ * @packet: Packet data
+ * @kmd_buf: Extracted the KMD buffer information
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
+ struct cam_kmd_buf_info *kmd_buf_info);
+
+/**
* cam_packet_util_process_patches()
*
* @brief: Replace the handle in Packet to Address using the
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 0be2aaa..1d86bb1 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -574,7 +574,7 @@
goto free_gpio_array;
}
- for (i = 0; i <= gpio_array_size; i++)
+ for (i = 0; i < gpio_array_size; i++)
gconf->cam_gpio_common_tbl[i].gpio = gpio_array[i];
gconf->cam_gpio_common_tbl_size = gpio_array_size;
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_trace.c b/drivers/media/platform/msm/camera/cam_utils/cam_trace.c
new file mode 100644
index 0000000..08129f3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_trace.c
@@ -0,0 +1,16 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "cam_trace.h"
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
new file mode 100644
index 0000000..2e9e61f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_CAM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _CAM_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM camera
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cam_trace
+
+#include <linux/tracepoint.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_core.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_context.h"
+
+TRACE_EVENT(cam_context_state,
+ TP_PROTO(const char *name, struct cam_context *ctx),
+ TP_ARGS(name, ctx),
+ TP_STRUCT__entry(
+ __field(void*, ctx)
+ __field(uint32_t, state)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->state = ctx->state;
+ __assign_str(name, name);
+ ),
+ TP_printk(
+ "%s: State ctx=%p ctx_state=%u",
+ __get_str(name), __entry->ctx, __entry->state
+ )
+);
+
+TRACE_EVENT(cam_isp_activated_irq,
+ TP_PROTO(struct cam_context *ctx, unsigned int substate,
+ unsigned int event, uint64_t timestamp),
+ TP_ARGS(ctx, substate, event, timestamp),
+ TP_STRUCT__entry(
+ __field(void*, ctx)
+ __field(uint32_t, state)
+ __field(uint32_t, substate)
+ __field(uint32_t, event)
+ __field(uint64_t, ts)
+ ),
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->state = ctx->state;
+ __entry->substate = substate;
+ __entry->event = event;
+ __entry->ts = timestamp;
+ ),
+ TP_printk(
+ "ISP: IRQ ctx=%p ctx_state=%u substate=%u event=%u ts=%llu",
+ __entry->ctx, __entry->state, __entry->substate,
+ __entry->event, __entry->ts
+ )
+);
+
+TRACE_EVENT(cam_buf_done,
+ TP_PROTO(const char *ctx_type, struct cam_context *ctx,
+ struct cam_ctx_request *req),
+ TP_ARGS(ctx_type, ctx, req),
+ TP_STRUCT__entry(
+ __string(ctx_type, ctx_type)
+ __field(void*, ctx)
+ __field(uint64_t, request)
+ ),
+ TP_fast_assign(
+ __assign_str(ctx_type, ctx_type);
+ __entry->ctx = ctx;
+ __entry->request = req->request_id;
+ ),
+ TP_printk(
+ "%5s: BufDone ctx=%p request=%llu",
+ __get_str(ctx_type), __entry->ctx, __entry->request
+ )
+);
+
+TRACE_EVENT(cam_apply_req,
+ TP_PROTO(const char *entity, struct cam_req_mgr_apply_request *req),
+ TP_ARGS(entity, req),
+ TP_STRUCT__entry(
+ __string(entity, entity)
+ __field(uint64_t, req_id)
+ ),
+ TP_fast_assign(
+ __assign_str(entity, entity);
+ __entry->req_id = req->request_id;
+ ),
+ TP_printk(
+ "%8s: ApplyRequest request=%llu",
+ __get_str(entity), __entry->req_id
+ )
+);
+
+TRACE_EVENT(cam_flush_req,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_flush_info *info),
+ TP_ARGS(link, info),
+ TP_STRUCT__entry(
+ __field(uint32_t, type)
+ __field(int64_t, req_id)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __entry->type = info->flush_type;
+ __entry->req_id = info->req_id;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "FlushRequest type=%u request=%llu link=%pK session=%pK",
+ __entry->type, __entry->req_id, __entry->link,
+ __entry->session
+ )
+);
+
+TRACE_EVENT(cam_req_mgr_connect_device,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_device_info *info),
+ TP_ARGS(link, info),
+ TP_STRUCT__entry(
+ __string(name, info->name)
+ __field(uint32_t, id)
+ __field(uint32_t, delay)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __assign_str(name, info->name);
+ __entry->id = info->dev_id;
+ __entry->delay = info->p_delay;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "ReqMgr Connect name=%s id=%u pd=%d link=%pK session=%pK",
+ __get_str(name), __entry->id, __entry->delay,
+ __entry->link, __entry->session
+ )
+);
+
+TRACE_EVENT(cam_req_mgr_apply_request,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_apply_request *req,
+ struct cam_req_mgr_connected_device *dev),
+ TP_ARGS(link, req, dev),
+ TP_STRUCT__entry(
+ __string(name, dev->dev_info.name)
+ __field(uint32_t, dev_id)
+ __field(uint64_t, req_id)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev->dev_info.name);
+ __entry->dev_id = dev->dev_info.dev_id;
+ __entry->req_id = req->request_id;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "ReqMgr ApplyRequest devname=%s devid=%u request=%lld link=%pK session=%pK",
+ __get_str(name), __entry->dev_id, __entry->req_id,
+ __entry->link, __entry->session
+ )
+);
+
+TRACE_EVENT(cam_req_mgr_add_req,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		int idx, struct cam_req_mgr_add_request *add_req,
+		struct cam_req_mgr_req_tbl *tbl,
+		struct cam_req_mgr_connected_device *dev),
+	TP_ARGS(link, idx, add_req, tbl, dev),
+	TP_STRUCT__entry(
+		__string(name, dev->dev_info.name)
+		__field(uint32_t, dev_id)
+		__field(uint64_t, req_id)
+		__field(uint32_t, slot_id)
+		__field(uint32_t, delay)
+		__field(uint32_t, readymap)
+		__field(uint32_t, devicemap)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__assign_str(name, dev->dev_info.name);
+		__entry->dev_id = dev->dev_info.dev_id;
+		__entry->req_id = add_req->req_id;
+		__entry->slot_id = idx;
+		__entry->delay = tbl->pd;
+		__entry->readymap = tbl->slot[idx].req_ready_map;
+		__entry->devicemap = tbl->dev_mask;
+		__entry->link = link;
+		__entry->session = link->parent;
+	),
+	TP_printk(
+		"ReqMgr AddRequest devname=%s devid=%d request=%lld slot=%d pd=%d readymap=%x devicemap=%d link=%pK session=%pK",
+		__get_str(name), __entry->dev_id, __entry->req_id,
+		__entry->slot_id, __entry->delay, __entry->readymap,
+		__entry->devicemap, __entry->link, __entry->session
+	)
+);
+#endif /* _CAM_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index 15b8a2d..ae01baf 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -283,6 +283,59 @@
return;
}
+/*
+ * sde_mdp_set_vbif_memtype - set memtype output for the given xin port
+ * @mdata: pointer to global rotator data
+ * @xin_id: xin identifier
+ * @memtype: memtype output configuration
+ * return: none
+ */
+static void sde_mdp_set_vbif_memtype(struct sde_rot_data_type *mdata,
+ u32 xin_id, u32 memtype)
+{
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume 4 bits per bit field, 8 fields per 32-bit register.
+ */
+ if (xin_id >= 8)
+ return;
+
+ reg_off = MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0;
+
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = SDE_VBIF_READ(mdata, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (memtype & 0x7) << bit_off;
+ SDE_VBIF_WRITE(mdata, reg_off, reg_val);
+}
+
+/*
+ * sde_mdp_init_vbif - initialize static vbif configuration
+ * return: 0 if success; error code otherwise
+ */
+int sde_mdp_init_vbif(void)
+{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ int i;
+
+ if (!mdata)
+ return -EINVAL;
+
+ if (mdata->vbif_memtype_count && mdata->vbif_memtype) {
+ for (i = 0; i < mdata->vbif_memtype_count; i++)
+ sde_mdp_set_vbif_memtype(mdata, i,
+ mdata->vbif_memtype[i]);
+
+ SDEROT_DBG("amemtype=0x%x\n", SDE_VBIF_READ(mdata,
+ MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0));
+ }
+
+ return 0;
+}
+
struct reg_bus_client *sde_reg_bus_vote_client_create(char *client_name)
{
struct reg_bus_client *client;
@@ -398,6 +451,32 @@
return len;
}
+static void sde_mdp_parse_vbif_memtype(struct platform_device *pdev,
+ struct sde_rot_data_type *mdata)
+{
+ int rc;
+
+ mdata->vbif_memtype_count = sde_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-rot-vbif-memtype");
+ mdata->vbif_memtype = kzalloc(sizeof(u32) *
+ mdata->vbif_memtype_count, GFP_KERNEL);
+ if (!mdata->vbif_memtype) {
+ mdata->vbif_memtype_count = 0;
+ return;
+ }
+
+ rc = sde_mdp_parse_dt_handler(pdev,
+ "qcom,mdss-rot-vbif-memtype", mdata->vbif_memtype,
+ mdata->vbif_memtype_count);
+ if (rc) {
+ SDEROT_DBG("vbif memtype not found\n");
+ kfree(mdata->vbif_memtype);
+ mdata->vbif_memtype = NULL;
+ mdata->vbif_memtype_count = 0;
+ return;
+ }
+}
+
static void sde_mdp_parse_vbif_qos(struct platform_device *pdev,
struct sde_rot_data_type *mdata)
{
@@ -409,14 +488,19 @@
"qcom,mdss-rot-vbif-qos-setting");
mdata->vbif_nrt_qos = kzalloc(sizeof(u32) *
mdata->npriority_lvl, GFP_KERNEL);
- if (!mdata->vbif_nrt_qos)
+ if (!mdata->vbif_nrt_qos) {
+ mdata->npriority_lvl = 0;
return;
+ }
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-rot-vbif-qos-setting", mdata->vbif_nrt_qos,
mdata->npriority_lvl);
if (rc) {
SDEROT_DBG("vbif setting not found\n");
+ kfree(mdata->vbif_nrt_qos);
+ mdata->vbif_nrt_qos = NULL;
+ mdata->npriority_lvl = 0;
return;
}
}
@@ -579,6 +663,8 @@
sde_mdp_parse_vbif_qos(pdev, mdata);
+ sde_mdp_parse_vbif_memtype(pdev, mdata);
+
sde_mdp_parse_rot_lut_setting(pdev, mdata);
sde_mdp_parse_inline_rot_lut_setting(pdev, mdata);
@@ -588,6 +674,17 @@
return 0;
}
+static void sde_mdp_destroy_dt_misc(struct platform_device *pdev,
+ struct sde_rot_data_type *mdata)
+{
+ kfree(mdata->vbif_memtype);
+ mdata->vbif_memtype = NULL;
+ kfree(mdata->vbif_rt_qos);
+ mdata->vbif_rt_qos = NULL;
+ kfree(mdata->vbif_nrt_qos);
+ mdata->vbif_nrt_qos = NULL;
+}
+
#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
{ \
.src = MSM_BUS_MASTER_AMPSS_M0, \
@@ -742,6 +839,7 @@
sde_rot_res = NULL;
sde_mdp_bus_scale_unregister(mdata);
+ sde_mdp_destroy_dt_misc(pdev, mdata);
sde_rot_iounmap(&mdata->vbif_nrt_io);
sde_rot_iounmap(&mdata->sde_io);
devm_kfree(&pdev->dev, mdata);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 313c709..b1438d5 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -225,6 +225,9 @@
u32 *vbif_nrt_qos;
u32 npriority_lvl;
+ u32 vbif_memtype_count;
+ u32 *vbif_memtype;
+
int iommu_attached;
int iommu_ref_cnt;
@@ -271,6 +274,8 @@
void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params);
+int sde_mdp_init_vbif(void);
+
#define SDE_VBIF_WRITE(mdata, offset, value) \
(sde_reg_w(&mdata->vbif_nrt_io, offset, value, 0))
#define SDE_VBIF_READ(mdata, offset) \
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index df2642c..9d10b06 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -341,6 +341,8 @@
if (!on) {
mgr->minimum_bw_vote = 0;
sde_rotator_update_perf(mgr);
+ } else {
+ sde_mdp_init_vbif();
}
mgr->regulator_enable = on;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index 3e686e9..da36e38 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -1136,6 +1136,9 @@
if (sscanf(buf, "%5x %x", &off, &cnt) < 2)
return -EINVAL;
+ if (off % sizeof(u32))
+ return -EINVAL;
+
if (off > dbg->max_offset)
return -EINVAL;
@@ -1204,6 +1207,9 @@
if (cnt < 2)
return -EFAULT;
+ if (off % sizeof(u32))
+ return -EFAULT;
+
if (off >= dbg->max_offset)
return -EFAULT;
@@ -1252,6 +1258,9 @@
goto debug_read_error;
}
+ if (dbg->off % sizeof(u32))
+ return -EFAULT;
+
ptr = dbg->base + dbg->off;
tot = 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index d300de2..8727535 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -486,7 +486,7 @@
struct sde_rotator_vbinfo *vbinfo =
&ctx->vbinfo_cap[i];
- if (vbinfo->fence && vbinfo->fd < 0) {
+ if (vbinfo->fence) {
/* fence is not used */
SDEDEV_DBG(rot_dev->dev,
"put fence s:%d t:%d i:%d\n",
@@ -2158,7 +2158,7 @@
&& (buf->index < ctx->nbuf_cap)) {
int idx = buf->index;
- if (ctx->vbinfo_cap[idx].fence && ctx->vbinfo_cap[idx].fd < 0) {
+ if (ctx->vbinfo_cap[idx].fence) {
/* fence is not used */
SDEDEV_DBG(ctx->rot_dev->dev, "put fence s:%d i:%d\n",
ctx->session_id, idx);
@@ -2487,6 +2487,7 @@
struct msm_sde_rotator_fence *fence = arg;
struct msm_sde_rotator_comp_ratio *comp_ratio = arg;
struct sde_rotator_vbinfo *vbinfo;
+ int ret;
switch (cmd) {
case VIDIOC_S_SDE_ROTATOR_FENCE:
@@ -2545,17 +2546,37 @@
vbinfo = &ctx->vbinfo_cap[fence->index];
- if (vbinfo->fence == NULL) {
- vbinfo->fd = -1;
- } else {
- vbinfo->fd =
- sde_rotator_get_sync_fence_fd(vbinfo->fence);
- if (vbinfo->fd < 0) {
+ if (!vbinfo)
+ return -EINVAL;
+
+ if (vbinfo->fence) {
+ ret = sde_rotator_get_sync_fence_fd(vbinfo->fence);
+ if (ret < 0) {
SDEDEV_ERR(rot_dev->dev,
- "fail get fence fd s:%d\n",
- ctx->session_id);
- return vbinfo->fd;
+ "fail get fence fd s:%d\n",
+ ctx->session_id);
+ return ret;
}
+
+ /**
+ * Lose any reference to sync fence once we pass
+ * it to user. Driver does not clean up user
+ * unclosed fence descriptors.
+ */
+ vbinfo->fence = NULL;
+
+ /**
+ * Cache fence descriptor in case user calls this
+ * ioctl multiple times. Cached value would be stale
+ * if user duplicated and closed old descriptor.
+ */
+ vbinfo->fd = ret;
+ } else if (!sde_rotator_get_fd_sync_fence(vbinfo->fd)) {
+ /**
+ * User has closed cached fence descriptor.
+ * Invalidate descriptor cache.
+ */
+ vbinfo->fd = -1;
}
fence->fd = vbinfo->fd;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
index 573e0a8..27e9ba6 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
@@ -783,10 +783,15 @@
if (!fmt_found) {
for (i = 0; i < ARRAY_SIZE(sde_mdp_format_ubwc_map); i++) {
fmt = &sde_mdp_format_ubwc_map[i].mdp_format;
- if (format == fmt->format)
+ if (format == fmt->format) {
+ fmt_found = true;
break;
+ }
}
}
+ /* If format not supported then return NULL */
+ if (!fmt_found)
+ fmt = NULL;
return fmt;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
index de448a4..5593919 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
@@ -65,6 +65,7 @@
#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF2 0x00C8
#define MMSS_VBIF_NRT_VBIF_OUT_RD_LIM_CONF0 0x00D0
#define MMSS_VBIF_NRT_VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
#define MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 0x0550
#define MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 0x0590
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 743d2f7..205eeef 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -1446,6 +1446,9 @@
if (bw > 0xFF)
bw = 0xFF;
+ else if (bw == 0)
+ bw = 1;
+
SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
index 9f8b341..a7b1852 100644
--- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
@@ -846,10 +846,6 @@
if (!dev || !freq)
return -EINVAL;
- /* Start with highest frequecy and decide correct one later*/
-
- ab_kbps = INT_MAX;
-
gov = container_of(dev->governor,
struct governor, devfreq_gov);
dev->profile->get_dev_status(dev->dev.parent, &stats);
@@ -860,11 +856,11 @@
for (c = 0; c < vidc_data->data_count; ++c) {
if (vidc_data->data->power_mode == VIDC_POWER_TURBO) {
+ ab_kbps = INT_MAX;
goto exit;
}
}
- ab_kbps = 0;
for (c = 0; c < vidc_data->data_count; ++c)
ab_kbps += __calculate(&vidc_data->data[c], gov->mode);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 1991a34..32e79f2 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1360,11 +1360,15 @@
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR;
hfi = (struct hfi_conceal_color *) &pkt->rg_property_data[1];
- if (hfi)
- hfi->conceal_color =
+ if (hfi) {
+ hfi->conceal_color_8bit =
((struct hfi_conceal_color *) pdata)->
- conceal_color;
- pkt->size += sizeof(u32) * 2;
+ conceal_color_8bit;
+ hfi->conceal_color_10bit =
+ ((struct hfi_conceal_color *) pdata)->
+ conceal_color_10bit;
+ }
+ pkt->size += sizeof(u32) + sizeof(struct hfi_conceal_color);
break;
}
case HAL_PARAM_VPE_ROTATION:
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 6e28e70..988f79c 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -24,7 +24,8 @@
#define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS MIN_NUM_CAPTURE_BUFFERS
#define MIN_NUM_DEC_OUTPUT_BUFFERS 4
#define MIN_NUM_DEC_CAPTURE_BUFFERS 4
-#define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8010
+// Y=16(0-9bits), Cb(10-19bits)=Cr(20-29bits)=128, black by default
+#define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8020010
#define MB_SIZE_IN_PIXEL (16 * 16)
#define OPERATING_FRAME_RATE_STEP (1 << 16)
@@ -264,11 +265,20 @@
.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
},
{
- .id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR,
- .name = "Picture concealed color",
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT,
+ .name = "Picture concealed color 8bit",
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = 0x0,
- .maximum = 0xffffff,
+ .maximum = 0xff3fcff,
+ .default_value = DEFAULT_VIDEO_CONCEAL_COLOR_BLACK,
+ .step = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_10BIT,
+ .name = "Picture concealed color 10bit",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0x0,
+ .maximum = 0x3fffffff,
.default_value = DEFAULT_VIDEO_CONCEAL_COLOR_BLACK,
.step = 1,
},
@@ -991,11 +1001,6 @@
break;
}
break;
- case V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR:
- property_id = HAL_PARAM_VDEC_CONCEAL_COLOR;
- property_val = ctrl->val;
- pdata = &property_val;
- break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
property_id =
@@ -1056,9 +1061,6 @@
inst, inst->clk_data.operating_rate >> 16,
ctrl->val >> 16);
inst->clk_data.operating_rate = ctrl->val;
-
- msm_vidc_update_operating_rate(inst);
-
break;
default:
break;
@@ -1084,29 +1086,37 @@
int rc = 0, i = 0, fourcc = 0;
struct v4l2_ext_control *ext_control;
struct v4l2_control control;
+ struct hal_conceal_color conceal_color = {0};
+ struct hfi_device *hdev;
- if (!inst || !inst->core || !ctrl) {
+ if (!inst || !inst->core || !inst->core->device || !ctrl) {
dprintk(VIDC_ERR,
"%s invalid parameters\n", __func__);
return -EINVAL;
}
+ hdev = inst->core->device;
+
+ v4l2_try_ext_ctrls(&inst->ctrl_handler, ctrl);
+
ext_control = ctrl->controls;
- control.id =
- V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE;
for (i = 0; i < ctrl->count; i++) {
switch (ext_control[i].id) {
case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE:
control.value = ext_control[i].value;
-
+ control.id =
+ V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE;
rc = msm_comm_s_ctrl(inst, &control);
if (rc)
dprintk(VIDC_ERR,
"%s Failed setting stream output mode : %d\n",
__func__, rc);
+ rc = msm_vidc_update_host_buff_counts(inst);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT:
+ control.id =
+ V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE;
switch (ext_control[i].value) {
case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE:
if (!msm_comm_g_ctrl_for_id(inst, control.id)) {
@@ -1142,6 +1152,7 @@
break;
}
}
+ rc = msm_vidc_update_host_buff_counts(inst);
inst->clk_data.dpb_fourcc = fourcc;
break;
default:
@@ -1152,6 +1163,36 @@
break;
}
break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT:
+ conceal_color.conceal_color_8bit = ext_control[i].value;
+ i++;
+ switch (ext_control[i].id) {
+ case V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_10BIT:
+ conceal_color.conceal_color_10bit =
+ ext_control[i].value;
+ dprintk(VIDC_DBG,
+ "conceal color: 8bit=0x%x 10bit=0x%x",
+ conceal_color.conceal_color_8bit,
+ conceal_color.conceal_color_10bit);
+ rc = call_hfi_op(hdev, session_set_property,
+ inst->session,
+ HAL_PARAM_VDEC_CONCEAL_COLOR,
+ &conceal_color);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s Failed setting conceal color",
+ __func__);
+ }
+ break;
+ default:
+ dprintk(VIDC_ERR,
+ "%s Could not find CONCEAL_COLOR_10BIT ext_control",
+ __func__);
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ break;
default:
dprintk(VIDC_ERR
, "%s Unsupported set control %d",
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 90a38bb..5c587e2 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1748,8 +1748,6 @@
ctrl->val >> 16);
inst->clk_data.operating_rate = ctrl->val;
- msm_vidc_update_operating_rate(inst);
-
break;
case V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE:
{
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 98a59a5..427568c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -429,7 +429,7 @@
print_vidc_buffer(VIDC_DBG, "release buf", inst, mbuf);
msm_comm_unmap_vidc_buffer(inst, mbuf);
list_del(&mbuf->list);
- kfree(mbuf);
+ kref_put_mbuf(mbuf);
}
mutex_unlock(&inst->registeredbufs.lock);
@@ -998,7 +998,7 @@
}
msm_comm_unmap_vidc_buffer(inst, temp);
list_del(&temp->list);
- kfree(temp);
+ kref_put_mbuf(temp);
}
mutex_unlock(&inst->registeredbufs.lock);
}
@@ -1075,10 +1075,16 @@
inst, vb2);
return;
}
+ if (!kref_get_mbuf(inst, mbuf)) {
+ dprintk(VIDC_ERR, "%s: mbuf not found\n", __func__);
+ return;
+ }
rc = msm_comm_qbuf(inst, mbuf);
if (rc)
print_vidc_buffer(VIDC_ERR, "failed qbuf", inst, mbuf);
+
+ kref_put_mbuf(mbuf);
}
static const struct vb2_ops msm_vidc_vb2q_ops = {
@@ -1620,7 +1626,7 @@
print_vidc_buffer(VIDC_ERR, "undequeud buf", inst, temp);
msm_comm_unmap_vidc_buffer(inst, temp);
list_del(&temp->list);
- kfree(temp);
+ kref_put_mbuf(temp);
}
mutex_unlock(&inst->registeredbufs.lock);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 86dc973..4327309 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -791,8 +791,6 @@
msm_dcvs_print_dcvs_stats(dcvs);
- msm_vidc_update_operating_rate(inst);
-
rc = msm_comm_scale_clocks_and_bus(inst);
if (rc)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 844a4e1..ee538a9 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1492,6 +1492,7 @@
break;
case HAL_EVENT_RELEASE_BUFFER_REFERENCE:
{
+ struct msm_vidc_buffer *mbuf;
u32 planes[VIDEO_MAX_PLANES] = {0};
dprintk(VIDC_DBG,
@@ -1501,8 +1502,15 @@
planes[0] = event_notify->packet_buffer;
planes[1] = event_notify->extra_data_buffer;
- handle_release_buffer_reference(inst, planes);
-
+ mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
+ if (!mbuf || !kref_get_mbuf(inst, mbuf)) {
+ dprintk(VIDC_ERR,
+ "%s: data_addr %x, extradata_addr %x not found\n",
+ __func__, planes[0], planes[1]);
+ } else {
+ handle_release_buffer_reference(inst, mbuf);
+ kref_put_mbuf(mbuf);
+ }
goto err_bad_event;
}
default:
@@ -2176,7 +2184,7 @@
{
struct msm_vidc_cb_data_done *response = data;
struct msm_vidc_buffer *mbuf;
- struct vb2_buffer *vb;
+ struct vb2_buffer *vb, *vb2;
struct msm_vidc_inst *inst;
struct vidc_hal_ebd *empty_buf_done;
struct vb2_v4l2_buffer *vbuf;
@@ -2200,18 +2208,31 @@
planes[1] = empty_buf_done->extra_data_buffer;
mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
- if (!mbuf) {
+ if (!mbuf || !kref_get_mbuf(inst, mbuf)) {
dprintk(VIDC_ERR,
"%s: data_addr %x, extradata_addr %x not found\n",
__func__, planes[0], planes[1]);
goto exit;
}
+ vb2 = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
+
+ /*
+ * take registeredbufs.lock to update mbuf & vb2 variables together
+ * so that both are in sync else if mbuf and vb2 variables are not
+ * in sync msm_comm_compare_vb2_planes() returns false for the
+ * right buffer due to data_offset field mismatch.
+ */
+ mutex_lock(&inst->registeredbufs.lock);
vb = &mbuf->vvb.vb2_buf;
vb->planes[0].bytesused = response->input_done.filled_len;
if (vb->planes[0].bytesused > vb->planes[0].length)
dprintk(VIDC_INFO, "bytesused overflow length\n");
+ vb->planes[0].data_offset = response->input_done.offset;
+ if (vb->planes[0].data_offset > vb->planes[0].length)
+ dprintk(VIDC_INFO, "data_offset overflow length\n");
+
if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
dprintk(VIDC_INFO, "Failed : Unsupported input stream\n");
mbuf->vvb.flags |= V4L2_QCOM_BUF_INPUT_UNSUPPORTED;
@@ -2228,26 +2249,28 @@
if (extra_idx && extra_idx < VIDEO_MAX_PLANES)
vb->planes[extra_idx].bytesused = vb->planes[extra_idx].length;
+ if (vb2) {
+ vbuf = to_vb2_v4l2_buffer(vb2);
+ vbuf->flags |= mbuf->vvb.flags;
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+ vb2->planes[i].bytesused =
+ mbuf->vvb.vb2_buf.planes[i].bytesused;
+ vb2->planes[i].data_offset =
+ mbuf->vvb.vb2_buf.planes[i].data_offset;
+ }
+ }
+ mutex_unlock(&inst->registeredbufs.lock);
+
update_recon_stats(inst, &empty_buf_done->recon_stats);
msm_vidc_clear_freq_entry(inst, mbuf->smem[0].device_addr);
-
- vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
- if (vb) {
- vbuf = to_vb2_v4l2_buffer(vb);
- vbuf->flags |= mbuf->vvb.flags;
- for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++)
- vb->planes[i].bytesused =
- mbuf->vvb.vb2_buf.planes[i].bytesused;
- }
/*
* put_buffer should be done before vb2_buffer_done else
* client might queue the same buffer before it is unmapped
- * in put_buffer. also don't use mbuf after put_buffer
- * as it may be freed in put_buffer.
+ * in put_buffer.
*/
msm_comm_put_vidc_buffer(inst, mbuf);
- msm_comm_vb2_buffer_done(inst, vb);
-
+ msm_comm_vb2_buffer_done(inst, vb2);
+ kref_put_mbuf(mbuf);
exit:
put_inst(inst);
}
@@ -2299,7 +2322,7 @@
struct msm_vidc_cb_data_done *response = data;
struct msm_vidc_buffer *mbuf;
struct msm_vidc_inst *inst;
- struct vb2_buffer *vb = NULL;
+ struct vb2_buffer *vb, *vb2;
struct vidc_hal_fbd *fill_buf_done;
struct vb2_v4l2_buffer *vbuf;
enum hal_buffer buffer_type;
@@ -2326,12 +2349,13 @@
buffer_type = msm_comm_get_hal_output_buffer(inst);
if (fill_buf_done->buffer_type == buffer_type) {
mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
- if (!mbuf) {
+ if (!mbuf || !kref_get_mbuf(inst, mbuf)) {
dprintk(VIDC_ERR,
"%s: data_addr %x, extradata_addr %x not found\n",
__func__, planes[0], planes[1]);
goto exit;
}
+ vb2 = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
} else {
if (handle_multi_stream_buffers(inst,
fill_buf_done->packet_buffer1))
@@ -2340,6 +2364,14 @@
&fill_buf_done->packet_buffer1);
goto exit;
}
+
+ /*
+ * take registeredbufs.lock to update mbuf & vb2 variables together
+ * so that both are in sync else if mbuf and vb2 variables are not
+ * in sync msm_comm_compare_vb2_planes() returns false for the
+ * right buffer due to data_offset field mismatch.
+ */
+ mutex_lock(&inst->registeredbufs.lock);
vb = &mbuf->vvb.vb2_buf;
if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME ||
@@ -2351,10 +2383,12 @@
"fbd:Overflow bytesused = %d; length = %d\n",
vb->planes[0].bytesused,
vb->planes[0].length);
- if (vb->planes[0].data_offset != fill_buf_done->offset1)
- dprintk(VIDC_ERR, "%s: data_offset %d vs %d\n",
- __func__, vb->planes[0].data_offset,
- fill_buf_done->offset1);
+ vb->planes[0].data_offset = fill_buf_done->offset1;
+ if (vb->planes[0].data_offset > vb->planes[0].length)
+ dprintk(VIDC_INFO,
+ "fbd:Overflow data_offset = %d; length = %d\n",
+ vb->planes[0].data_offset,
+ vb->planes[0].length);
if (!(fill_buf_done->flags1 & HAL_BUFFERFLAG_TIMESTAMPINVALID)) {
time_usec = fill_buf_done->timestamp_hi;
time_usec = (time_usec << 32) | fill_buf_done->timestamp_lo;
@@ -2412,23 +2446,27 @@
break;
}
- vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
- if (vb) {
- vbuf = to_vb2_v4l2_buffer(vb);
+ if (vb2) {
+ vbuf = to_vb2_v4l2_buffer(vb2);
vbuf->flags = mbuf->vvb.flags;
- vb->timestamp = mbuf->vvb.vb2_buf.timestamp;
- for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++)
- vb->planes[i].bytesused =
+ vb2->timestamp = mbuf->vvb.vb2_buf.timestamp;
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+ vb2->planes[i].bytesused =
mbuf->vvb.vb2_buf.planes[i].bytesused;
+ vb2->planes[i].data_offset =
+ mbuf->vvb.vb2_buf.planes[i].data_offset;
+ }
}
+ mutex_unlock(&inst->registeredbufs.lock);
+
/*
* put_buffer should be done before vb2_buffer_done else
* client might queue the same buffer before it is unmapped
- * in put_buffer. also don't use mbuf after put_buffer
- * as it may be freed in put_buffer.
+ * in put_buffer.
*/
msm_comm_put_vidc_buffer(inst, mbuf);
- msm_comm_vb2_buffer_done(inst, vb);
+ msm_comm_vb2_buffer_done(inst, vb2);
+ kref_put_mbuf(mbuf);
exit:
put_inst(inst);
@@ -3174,6 +3212,11 @@
buffer_type);
return 0;
}
+
+ /* For DPB buffers, Always use FW count */
+ output_buf->buffer_count_actual = output_buf->buffer_count_min_host =
+ output_buf->buffer_count_min;
+
dprintk(VIDC_DBG,
"output: num = %d, size = %d\n",
output_buf->buffer_count_actual,
@@ -4024,9 +4067,14 @@
return -EINVAL;
}
extra_buffers = msm_vidc_get_extra_buff_count(inst, HAL_BUFFER_INPUT);
-
bufreq->buffer_count_min_host = bufreq->buffer_count_min +
extra_buffers;
+ bufreq = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_INPUT);
+ if (bufreq) {
+ if (bufreq->buffer_count_min)
+ bufreq->buffer_count_min_host =
+ bufreq->buffer_count_min + extra_buffers;
+ }
if (msm_comm_get_stream_output_mode(inst) ==
HAL_VIDEO_DECODER_SECONDARY) {
@@ -4749,8 +4797,7 @@
/* remove from list */
list_del(&mbuf->list);
- kfree(mbuf);
- mbuf = NULL;
+ kref_put_mbuf(mbuf);
}
mutex_unlock(&inst->registeredbufs.lock);
@@ -4940,6 +4987,7 @@
{
u32 x_min, x_max, y_min, y_max;
u32 input_height, input_width, output_height, output_width;
+ u32 rotation;
input_height = inst->prop.height[OUTPUT_PORT];
input_width = inst->prop.width[OUTPUT_PORT];
@@ -4975,6 +5023,20 @@
return 0;
}
+ rotation = msm_comm_g_ctrl_for_id(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
+
+ if ((output_width != output_height) &&
+ (rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90 ||
+ rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270)) {
+
+ output_width = inst->prop.height[CAPTURE_PORT];
+ output_height = inst->prop.width[CAPTURE_PORT];
+ dprintk(VIDC_DBG,
+ "Rotation=%u Swapped Output W=%u H=%u to check scaling",
+ rotation, output_width, output_height);
+ }
+
x_min = (1<<16)/inst->capability.scale_x.min;
y_min = (1<<16)/inst->capability.scale_y.min;
x_max = inst->capability.scale_x.max >> 16;
@@ -5923,6 +5985,7 @@
rc = -ENOMEM;
goto exit;
}
+ kref_init(&mbuf->kref);
}
vbuf = to_vb2_v4l2_buffer(vb2);
@@ -5986,11 +6049,11 @@
return mbuf;
exit:
- mutex_unlock(&inst->registeredbufs.lock);
dprintk(VIDC_ERR, "%s: rc %d\n", __func__, rc);
msm_comm_unmap_vidc_buffer(inst, mbuf);
if (!found)
- kfree(mbuf);
+ kref_put_mbuf(mbuf);
+ mutex_unlock(&inst->registeredbufs.lock);
return ERR_PTR(rc);
}
@@ -6042,24 +6105,26 @@
*/
if (!mbuf->smem[0].refcount) {
list_del(&mbuf->list);
- kfree(mbuf);
- mbuf = NULL;
+ kref_put_mbuf(mbuf);
}
unlock:
mutex_unlock(&inst->registeredbufs.lock);
}
-void handle_release_buffer_reference(struct msm_vidc_inst *inst, u32 *planes)
+void handle_release_buffer_reference(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf)
{
int rc = 0;
- struct msm_vidc_buffer *mbuf = NULL;
+ struct msm_vidc_buffer *temp;
bool found = false;
int i = 0;
mutex_lock(&inst->registeredbufs.lock);
found = false;
- list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
- if (msm_comm_compare_device_planes(mbuf, planes)) {
+ /* check if mbuf was not removed by any chance */
+ list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+ if (msm_comm_compare_vb2_planes(inst, mbuf,
+ &temp->vvb.vb2_buf)) {
found = true;
break;
}
@@ -6077,13 +6142,10 @@
/* refcount is not zero if client queued the same buffer */
if (!mbuf->smem[0].refcount) {
list_del(&mbuf->list);
- kfree(mbuf);
- mbuf = NULL;
+ kref_put_mbuf(mbuf);
}
} else {
- dprintk(VIDC_ERR,
- "%s: data_addr %x extradata_addr %x not found\n",
- __func__, planes[0], planes[1]);
+ print_vidc_buffer(VIDC_ERR, "mbuf not found", inst, mbuf);
goto unlock;
}
@@ -6097,8 +6159,9 @@
* and if found queue it to video hw (if not flushing).
*/
found = false;
- list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
- if (msm_comm_compare_device_plane(mbuf, planes, 0)) {
+ list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+ if (msm_comm_compare_vb2_plane(inst, mbuf,
+ &temp->vvb.vb2_buf, 0)) {
found = true;
break;
}
@@ -6113,8 +6176,7 @@
msm_comm_unmap_vidc_buffer(inst, mbuf);
/* remove from list */
list_del(&mbuf->list);
- kfree(mbuf);
- mbuf = NULL;
+ kref_put_mbuf(mbuf);
/* don't queue the buffer */
found = false;
@@ -6161,3 +6223,41 @@
return rc;
}
+static void kref_free_mbuf(struct kref *kref)
+{
+ struct msm_vidc_buffer *mbuf = container_of(kref,
+ struct msm_vidc_buffer, kref);
+
+ kfree(mbuf);
+}
+
+void kref_put_mbuf(struct msm_vidc_buffer *mbuf)
+{
+ if (!mbuf)
+ return;
+
+ kref_put(&mbuf->kref, kref_free_mbuf);
+}
+
+bool kref_get_mbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf)
+{
+ struct msm_vidc_buffer *temp;
+ bool matches = false;
+ bool ret = false;
+
+ if (!inst || !mbuf)
+ return false;
+
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+ if (temp == mbuf) {
+ matches = true;
+ break;
+ }
+ }
+ ret = (matches && kref_get_unless_zero(&mbuf->kref)) ? true : false;
+ mutex_unlock(&inst->registeredbufs.lock);
+
+ return ret;
+}
+
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index bc881a0..18ba4a5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -116,7 +116,8 @@
struct vb2_buffer *vb2);
void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst,
struct msm_vidc_buffer *mbuf);
-void handle_release_buffer_reference(struct msm_vidc_inst *inst, u32 *planes);
+void handle_release_buffer_reference(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf);
int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst,
struct vb2_buffer *vb);
int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst,
@@ -145,4 +146,7 @@
struct vb2_buffer *vb2);
void print_v4l2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
struct v4l2_buffer *v4l2);
+void kref_put_mbuf(struct msm_vidc_buffer *mbuf);
+bool kref_get_mbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
+
#endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 58c3b0f..5be1ee2 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -430,7 +430,8 @@
case MSM_VIDC_DEBUGFS_EVENT_FBD:
inst->count.fbd++;
inst->debug.samples++;
- if (inst->count.ebd && inst->count.fbd == inst->count.ftb) {
+ if (inst->count.fbd &&
+ inst->count.fbd == inst->count.ftb) {
toc(inst, FRAME_PROCESSING);
dprintk(VIDC_PROF, "FBD: FW needs output buffers\n");
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 49e6c3ec..e554a46 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -387,6 +387,7 @@
struct msm_vidc_buffer {
struct list_head list;
+ struct kref kref;
struct msm_smem smem[VIDEO_MAX_PLANES];
struct vb2_v4l2_buffer vvb;
bool deferred;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 0069e40..b430d14 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1681,9 +1681,16 @@
}
dev = device;
- mutex_lock(&dev->lock);
dprintk(VIDC_DBG, "Core initializing\n");
+
+ mutex_lock(&dev->lock);
+
+ dev->bus_vote.data =
+ kzalloc(sizeof(struct vidc_bus_vote_data), GFP_KERNEL);
+ dev->bus_vote.data_count = 1;
+ dev->bus_vote.data->power_mode = VIDC_POWER_TURBO;
+
rc = __load_fw(dev);
if (rc) {
dprintk(VIDC_ERR, "Failed to load Venus FW\n");
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 79ce858..fbd3b02 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -996,6 +996,11 @@
struct hal_buffer_requirements buffer[HAL_BUFFER_MAX];
};
+struct hal_conceal_color {
+ u32 conceal_color_8bit;
+ u32 conceal_color_10bit;
+};
+
union hal_get_property {
struct hal_frame_rate frame_rate;
struct hal_uncompressed_format_select format_select;
@@ -1045,6 +1050,7 @@
struct hal_buffer_alloc_mode buffer_alloc_mode;
struct buffer_requirements buf_req;
enum hal_h264_entropy h264_entropy;
+ struct hal_conceal_color conceal_color;
};
/* HAL Response */
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index d5624ce..a522918 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -478,7 +478,8 @@
};
struct hfi_conceal_color {
- u32 conceal_color;
+ u32 conceal_color_8bit;
+ u32 conceal_color_10bit;
};
struct hfi_intra_period {
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index b373acb..232c290 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -228,7 +228,7 @@
if (!wcd9xxx->dev_up) {
dev_dbg_ratelimited(
- wcd9xxx->dev, "%s: No read allowed. dev_up = %d\n",
+ wcd9xxx->dev, "%s: No read allowed. dev_up = %lu\n",
__func__, wcd9xxx->dev_up);
return 0;
}
@@ -268,7 +268,7 @@
if (!wcd9xxx->dev_up) {
dev_dbg_ratelimited(
- wcd9xxx->dev, "%s: No write allowed. dev_up = %d\n",
+ wcd9xxx->dev, "%s: No write allowed. dev_up = %lu\n",
__func__, wcd9xxx->dev_up);
return 0;
}
@@ -345,7 +345,7 @@
if (!wcd9xxx->dev_up) {
dev_dbg_ratelimited(
- wcd9xxx->dev, "%s: No write allowed. dev_up = %d\n",
+ wcd9xxx->dev, "%s: No write allowed. dev_up = %lu\n",
__func__, wcd9xxx->dev_up);
ret = 0;
goto done;
@@ -426,7 +426,7 @@
if (!wcd9xxx->dev_up) {
dev_dbg_ratelimited(
- wcd9xxx->dev, "%s: No write allowed. dev_up = %d\n",
+ wcd9xxx->dev, "%s: No write allowed. dev_up = %lu\n",
__func__, wcd9xxx->dev_up);
return 0;
}
@@ -1479,12 +1479,27 @@
return -EINVAL;
}
- dev_info(wcd9xxx->dev, "%s: device reset, dev_up = %d\n",
- __func__, wcd9xxx->dev_up);
- if (wcd9xxx->dev_up)
- return 0;
+ /*
+ * Wait for 500 ms for device down to complete. Observed delay
+ * of ~200ms for device down to complete after being called,
+ * due to context switch issue.
+ */
+ ret = wait_on_bit_timeout(&wcd9xxx->dev_up, 0,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(500));
+ if (ret)
+ pr_err("%s: slim device down not complete in 500 msec\n",
+ __func__);
mutex_lock(&wcd9xxx->reset_lock);
+
+ dev_info(wcd9xxx->dev, "%s: device reset, dev_up = %lu\n",
+ __func__, wcd9xxx->dev_up);
+ if (wcd9xxx->dev_up) {
+ mutex_unlock(&wcd9xxx->reset_lock);
+ return 0;
+ }
+
ret = wcd9xxx_reset(wcd9xxx->dev);
if (ret)
dev_err(wcd9xxx->dev, "%s: Resetting Codec failed\n", __func__);
@@ -1502,8 +1517,8 @@
pr_err("%s: wcd9xxx is NULL\n", __func__);
return -EINVAL;
}
- dev_info(wcd9xxx->dev, "%s: slim device up, dev_up = %d\n",
- __func__, wcd9xxx->dev_up);
+ dev_info(wcd9xxx->dev, "%s: slim device up, dev_up = %lu\n",
+ __func__, wcd9xxx->dev_up);
if (wcd9xxx->dev_up)
return 0;
@@ -1525,18 +1540,20 @@
return -EINVAL;
}
- dev_info(wcd9xxx->dev, "%s: device down, dev_up = %d\n",
- __func__, wcd9xxx->dev_up);
- if (!wcd9xxx->dev_up)
- return 0;
-
- wcd9xxx->dev_up = false;
-
mutex_lock(&wcd9xxx->reset_lock);
+
+ dev_info(wcd9xxx->dev, "%s: device down, dev_up = %lu\n",
+ __func__, wcd9xxx->dev_up);
+ if (!wcd9xxx->dev_up) {
+ mutex_unlock(&wcd9xxx->reset_lock);
+ return 0;
+ }
+
if (wcd9xxx->dev_down)
wcd9xxx->dev_down(wcd9xxx);
wcd9xxx_irq_exit(&wcd9xxx->core_res);
wcd9xxx_reset_low(wcd9xxx->dev);
+ wcd9xxx->dev_up = false;
mutex_unlock(&wcd9xxx->reset_lock);
return 0;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index e8b9b48..7077b30 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -281,6 +281,7 @@
wait_queue_head_t app_block_wq;
atomic_t qseecom_state;
int is_apps_region_protected;
+ bool smcinvoke_support;
};
struct qseecom_sec_buf_fd_info {
@@ -580,10 +581,12 @@
desc.args[1] = req_64bit->sb_ptr;
desc.args[2] = req_64bit->sb_len;
}
+ qseecom.smcinvoke_support = true;
smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
ret = scm_call2(smc_id, &desc);
if (ret) {
+ qseecom.smcinvoke_support = false;
smc_id = TZ_OS_REGISTER_LISTENER_ID;
__qseecom_reentrancy_check_if_no_app_blocked(
smc_id);
@@ -1012,10 +1015,14 @@
struct qseecom_continue_blocked_request_ireq *req =
(struct qseecom_continue_blocked_request_ireq *)
req_buf;
- smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
+ if (qseecom.smcinvoke_support)
+ smc_id =
+ TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
+ else
+ smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
desc.arginfo =
TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
- desc.args[0] = req->app_id;
+ desc.args[0] = req->app_or_session_id;
ret = scm_call2(smc_id, &desc);
break;
}
@@ -1842,7 +1849,7 @@
return ret;
}
-int __qseecom_process_reentrancy_blocked_on_listener(
+static int __qseecom_process_blocked_on_listener_legacy(
struct qseecom_command_scm_resp *resp,
struct qseecom_registered_app_list *ptr_app,
struct qseecom_dev_handle *data)
@@ -1851,9 +1858,8 @@
int ret = 0;
struct qseecom_continue_blocked_request_ireq ireq;
struct qseecom_command_scm_resp continue_resp;
- sigset_t new_sigset, old_sigset;
- unsigned long flags;
bool found_app = false;
+ unsigned long flags;
if (!resp || !data) {
pr_err("invalid resp or data pointer\n");
@@ -1893,32 +1899,30 @@
pr_debug("lsntr %d in_use = %d\n",
resp->data, list_ptr->listener_in_use);
ptr_app->blocked_on_listener_id = resp->data;
+
/* sleep until listener is available */
- do {
- qseecom.app_block_ref_cnt++;
- ptr_app->app_blocked = true;
- sigfillset(&new_sigset);
- sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
- mutex_unlock(&app_access_lock);
- do {
- if (!wait_event_freezable(
- list_ptr->listener_block_app_wq,
- !list_ptr->listener_in_use)) {
- break;
- }
- } while (1);
- mutex_lock(&app_access_lock);
- sigprocmask(SIG_SETMASK, &old_sigset, NULL);
- ptr_app->app_blocked = false;
- qseecom.app_block_ref_cnt--;
- } while (list_ptr->listener_in_use == true);
+ qseecom.app_block_ref_cnt++;
+ ptr_app->app_blocked = true;
+ mutex_unlock(&app_access_lock);
+	if (wait_event_freezable(list_ptr->listener_block_app_wq, !list_ptr->listener_in_use)) {
+		pr_err("Interrupted: lsnr %d, app %d\n", resp->data, ptr_app->app_id);
+		mutex_lock(&app_access_lock);
+		ptr_app->app_blocked = false;
+		qseecom.app_block_ref_cnt--;
+		ret = -ERESTARTSYS;
+		goto exit;
+	}
+ mutex_lock(&app_access_lock);
+ ptr_app->app_blocked = false;
+ qseecom.app_block_ref_cnt--;
+
ptr_app->blocked_on_listener_id = 0;
/* notify the blocked app that listener is available */
pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
resp->data, data->client.app_id,
data->client.app_name);
ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
- ireq.app_id = data->client.app_id;
+ ireq.app_or_session_id = data->client.app_id;
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
&ireq, sizeof(ireq),
&continue_resp, sizeof(continue_resp));
@@ -1937,6 +1941,73 @@
return ret;
}
+static int __qseecom_process_blocked_on_listener_smcinvoke(
+ struct qseecom_command_scm_resp *resp)
+{
+ struct qseecom_registered_listener_list *list_ptr;
+ int ret = 0;
+ struct qseecom_continue_blocked_request_ireq ireq;
+ struct qseecom_command_scm_resp continue_resp;
+ unsigned int session_id;
+
+ if (!resp) {
+ pr_err("invalid resp pointer\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ session_id = resp->resp_type;
+ list_ptr = __qseecom_find_svc(resp->data);
+ if (!list_ptr) {
+ pr_err("Invalid listener ID\n");
+ ret = -ENODATA;
+ goto exit;
+ }
+ pr_debug("lsntr %d in_use = %d\n",
+ resp->data, list_ptr->listener_in_use);
+ /* sleep until listener is available */
+ qseecom.app_block_ref_cnt++;
+ mutex_unlock(&app_access_lock);
+	if (wait_event_freezable(list_ptr->listener_block_app_wq,
+			!list_ptr->listener_in_use)) {
+		pr_err("Interrupted: lsnr %d, session %d\n", resp->data, session_id);
+		mutex_lock(&app_access_lock);
+		qseecom.app_block_ref_cnt--;
+		ret = -ERESTARTSYS;
+		goto exit;
+	}
+ mutex_lock(&app_access_lock);
+ qseecom.app_block_ref_cnt--;
+
+ /* notify TZ that listener is available */
+ pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
+ resp->data, session_id);
+ ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+ ireq.app_or_session_id = session_id;
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(ireq),
+ &continue_resp, sizeof(continue_resp));
+ if (ret) {
+ pr_err("scm_call for continue blocked req for session %d failed, ret %d\n",
+ session_id, ret);
+ goto exit;
+ }
+ resp->result = QSEOS_RESULT_INCOMPLETE;
+exit:
+ return ret;
+}
+
+static int __qseecom_process_reentrancy_blocked_on_listener(
+ struct qseecom_command_scm_resp *resp,
+ struct qseecom_registered_app_list *ptr_app,
+ struct qseecom_dev_handle *data)
+{
+ if (!qseecom.smcinvoke_support)
+ return __qseecom_process_blocked_on_listener_legacy(
+ resp, ptr_app, data);
+ else
+ return __qseecom_process_blocked_on_listener_smcinvoke(
+ resp);
+}
static int __qseecom_reentrancy_process_incomplete_cmd(
struct qseecom_dev_handle *data,
struct qseecom_command_scm_resp *resp)
@@ -4703,18 +4774,15 @@
}
resp.result = desc->ret[0]; /*req_cmd*/
- resp.resp_type = desc->ret[1]; /*app_id*/
+ resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
resp.data = desc->ret[2]; /*listener_id*/
- dummy_private_data.client.app_id = desc->ret[1];
- dummy_app_entry.app_id = desc->ret[1];
-
mutex_lock(&app_access_lock);
ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
&dummy_private_data);
mutex_unlock(&app_access_lock);
if (ret)
- pr_err("Failed to req cmd %d lsnr %d on app %d, ret = %d\n",
+ pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
(int)desc->ret[0], (int)desc->ret[2],
(int)desc->ret[1], ret);
desc->ret[0] = resp.result;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3e9448b..0bf89b4 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -495,11 +495,6 @@
if (!clk_scaling->enable)
goto out;
- if (*freq == UINT_MAX)
- *freq = clk_scaling->freq_table[1];
- else
- *freq = clk_scaling->freq_table[0];
-
pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
*freq, current->comm);
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index bfe1242..7c3638c 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -729,6 +729,15 @@
goto ring_doorbell;
}
+ if (cq_host->ops->crypto_cfg) {
+ err = cq_host->ops->crypto_cfg(mmc, mrq, tag);
+ if (err) {
+ pr_err("%s: failed to configure crypto: err %d tag %d\n",
+ mmc_hostname(mmc), err, tag);
+ goto out;
+ }
+ }
+
task_desc = (__le64 __force *)get_desc(cq_host, tag);
cmdq_prep_task_desc(mrq, &data, 1,
@@ -778,6 +787,8 @@
CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG);
cmdq_runtime_pm_put(cq_host);
+ if (cq_host->ops->crypto_cfg_reset)
+ cq_host->ops->crypto_cfg_reset(mmc, tag);
mrq->done(mrq);
}
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
index 5347b3ab..8e9f765 100644
--- a/drivers/mmc/host/cmdq_hci.h
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -208,6 +208,9 @@
void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set);
int (*reset)(struct mmc_host *mmc);
void (*post_cqe_halt)(struct mmc_host *mmc);
+ int (*crypto_cfg)(struct mmc_host *mmc, struct mmc_request *mrq,
+ u32 slot);
+ void (*crypto_cfg_reset)(struct mmc_host *mmc, unsigned int slot);
};
static inline void cmdq_writel(struct cmdq_host *host, u32 val, int reg)
diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c
index ba6e51c..d624b48 100644
--- a/drivers/mmc/host/sdhci-msm-ice.c
+++ b/drivers/mmc/host/sdhci-msm-ice.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,19 +13,6 @@
#include "sdhci-msm-ice.h"
-static void sdhci_msm_ice_success_cb(void *host_ctrl,
- enum ice_event_completion evt)
-{
- struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl;
-
- if ((msm_host->ice.state == SDHCI_MSM_ICE_STATE_DISABLED &&
- evt == ICE_INIT_COMPLETION) || (msm_host->ice.state ==
- SDHCI_MSM_ICE_STATE_SUSPENDED && evt == ICE_RESUME_COMPLETION))
- msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
-
- complete(&msm_host->ice.async_done);
-}
-
static void sdhci_msm_ice_error_cb(void *host_ctrl, u32 error)
{
struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl;
@@ -35,8 +22,6 @@
if (msm_host->ice.state == SDHCI_MSM_ICE_STATE_ACTIVE)
msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED;
-
- complete(&msm_host->ice.async_done);
}
static struct platform_device *sdhci_msm_ice_get_pdevice(struct device *dev)
@@ -194,89 +179,79 @@
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
- init_completion(&msm_host->ice.async_done);
- if (msm_host->ice.vops->config) {
+ if (msm_host->ice.vops->init) {
+ err = sdhci_msm_ice_pltfm_init(msm_host);
+ if (err)
+ goto out;
+
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, true);
+
err = msm_host->ice.vops->init(msm_host->ice.pdev,
msm_host,
- sdhci_msm_ice_success_cb,
sdhci_msm_ice_error_cb);
if (err) {
pr_err("%s: ice init err %d\n",
mmc_hostname(host->mmc), err);
+ sdhci_msm_ice_print_regs(host);
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, false);
+ goto out;
+ }
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
+ }
+
+out:
+ return err;
+}
+
+void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
+{
+ writel_relaxed(SDHCI_MSM_ICE_ENABLE_BYPASS,
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
+}
+
+static
+int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req,
+ unsigned int *bypass, short *key_index)
+{
+ int err = 0;
+ struct ice_data_setting ice_set;
+
+ memset(&ice_set, 0, sizeof(struct ice_data_setting));
+ if (msm_host->ice.vops->config_start) {
+ err = msm_host->ice.vops->config_start(
+ msm_host->ice.pdev,
+ req, &ice_set, false);
+ if (err) {
+ pr_err("%s: ice config failed %d\n",
+ mmc_hostname(msm_host->mmc), err);
return err;
}
}
-
- if (!wait_for_completion_timeout(&msm_host->ice.async_done,
- msecs_to_jiffies(SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS))) {
- pr_err("%s: ice init timedout after %d ms\n",
- mmc_hostname(host->mmc),
- SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS);
- sdhci_msm_ice_print_regs(host);
- return -ETIMEDOUT;
- }
-
- if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
- pr_err("%s: ice is in invalid state %d\n",
- mmc_hostname(host->mmc), msm_host->ice.state);
- return -EINVAL;
- }
- return 0;
+ /* if writing data command */
+ if (rq_data_dir(req) == WRITE)
+ *bypass = ice_set.encr_bypass ?
+ SDHCI_MSM_ICE_ENABLE_BYPASS :
+ SDHCI_MSM_ICE_DISABLE_BYPASS;
+ /* if reading data command */
+ else if (rq_data_dir(req) == READ)
+ *bypass = ice_set.decr_bypass ?
+ SDHCI_MSM_ICE_ENABLE_BYPASS :
+ SDHCI_MSM_ICE_DISABLE_BYPASS;
+ *key_index = ice_set.crypto_data.key_index;
+ return err;
}
-int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
- u32 slot)
+static
+void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba,
+ u32 slot, unsigned int bypass, short key_index)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_msm_host *msm_host = pltfm_host->priv;
- int err = 0;
- struct ice_data_setting ice_set;
- sector_t lba = 0;
unsigned int ctrl_info_val = 0;
- unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
- struct request *req;
-
- if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
- pr_err("%s: ice is in invalid state %d\n",
- mmc_hostname(host->mmc), msm_host->ice.state);
- return -EINVAL;
- }
-
- BUG_ON(!mrq);
- memset(&ice_set, 0, sizeof(struct ice_data_setting));
- req = mrq->req;
- if (req) {
- lba = req->__sector;
- if (msm_host->ice.vops->config) {
- err = msm_host->ice.vops->config(msm_host->ice.pdev,
- req, &ice_set);
- if (err) {
- pr_err("%s: ice config failed %d\n",
- mmc_hostname(host->mmc), err);
- return err;
- }
- }
- /* if writing data command */
- if (rq_data_dir(req) == WRITE)
- bypass = ice_set.encr_bypass ?
- SDHCI_MSM_ICE_ENABLE_BYPASS :
- SDHCI_MSM_ICE_DISABLE_BYPASS;
- /* if reading data command */
- else if (rq_data_dir(req) == READ)
- bypass = ice_set.decr_bypass ?
- SDHCI_MSM_ICE_ENABLE_BYPASS :
- SDHCI_MSM_ICE_DISABLE_BYPASS;
- pr_debug("%s: %s: slot %d encr_bypass %d bypass %d decr_bypass %d key_index %d\n",
- mmc_hostname(host->mmc),
- (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
- slot, ice_set.encr_bypass, bypass,
- ice_set.decr_bypass,
- ice_set.crypto_data.key_index);
- }
/* Configure ICE index */
ctrl_info_val =
- (ice_set.crypto_data.key_index &
+ (key_index &
MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX)
<< OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX;
@@ -297,9 +272,43 @@
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n + 16 * slot);
writel_relaxed(ctrl_info_val,
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
-
/* Ensure ICE registers are configured before issuing SDHCI request */
mb();
+}
+
+int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+ short key_index = 0;
+ sector_t lba = 0;
+ unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
+ struct request *req;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ WARN_ON(!mrq);
+ if (!mrq)
+ return -EINVAL;
+ req = mrq->req;
+ if (req) {
+ lba = req->__sector;
+ err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
+ if (err)
+ return err;
+ pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
+ mmc_hostname(host->mmc),
+ (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
+ slot, bypass, key_index);
+ }
+
+ sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
return 0;
}
@@ -315,25 +324,19 @@
return -EINVAL;
}
- init_completion(&msm_host->ice.async_done);
-
if (msm_host->ice.vops->reset) {
err = msm_host->ice.vops->reset(msm_host->ice.pdev);
if (err) {
pr_err("%s: ice reset failed %d\n",
mmc_hostname(host->mmc), err);
+ sdhci_msm_ice_print_regs(host);
return err;
}
}
- if (!wait_for_completion_timeout(&msm_host->ice.async_done,
- msecs_to_jiffies(SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS))) {
- pr_err("%s: ice reset timedout after %d ms\n",
- mmc_hostname(host->mmc),
- SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS);
- sdhci_msm_ice_print_regs(host);
- return -ETIMEDOUT;
- }
+ /* If ICE HCI support is present then re-enable it */
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, true);
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state after reset %d\n",
@@ -356,8 +359,6 @@
return -EINVAL;
}
- init_completion(&msm_host->ice.async_done);
-
if (msm_host->ice.vops->resume) {
err = msm_host->ice.vops->resume(msm_host->ice.pdev);
if (err) {
@@ -367,20 +368,7 @@
}
}
- if (!wait_for_completion_timeout(&msm_host->ice.async_done,
- msecs_to_jiffies(SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS))) {
- pr_err("%s: ice resume timedout after %d ms\n",
- mmc_hostname(host->mmc),
- SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS);
- sdhci_msm_ice_print_regs(host);
- return -ETIMEDOUT;
- }
-
- if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
- pr_err("%s: ice is in invalid state after resume %d\n",
- mmc_hostname(host->mmc), msm_host->ice.state);
- return -EINVAL;
- }
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
return 0;
}
diff --git a/drivers/mmc/host/sdhci-msm-ice.h b/drivers/mmc/host/sdhci-msm-ice.h
index 88ef0e2..23922cf 100644
--- a/drivers/mmc/host/sdhci-msm-ice.h
+++ b/drivers/mmc/host/sdhci-msm-ice.h
@@ -17,7 +17,6 @@
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/async.h>
#include <linux/blkdev.h>
#include <crypto/ice.h>
@@ -97,6 +96,7 @@
#ifdef CONFIG_MMC_SDHCI_MSM_ICE
int sdhci_msm_ice_get_dev(struct sdhci_host *host);
int sdhci_msm_ice_init(struct sdhci_host *host);
+void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot);
int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
u32 slot);
int sdhci_msm_ice_reset(struct sdhci_host *host);
@@ -120,6 +120,11 @@
{
return 0;
}
+
+inline void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
+{
+}
+
inline int sdhci_msm_ice_cfg(struct sdhci_host *host,
struct mmc_request *mrq, u32 slot)
{
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index f3f181d..4b45ea5 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -42,6 +42,7 @@
#include <trace/events/mmc.h>
#include "sdhci-msm.h"
+#include "sdhci-msm-ice.h"
#include "cmdq_hci.h"
#define QOS_REMOVE_DELAY_MS 10
@@ -690,7 +691,7 @@
mclk_freq = 5;
else if (host->clock <= 187000000)
mclk_freq = 6;
- else if (host->clock <= 200000000)
+ else if (host->clock <= 208000000)
mclk_freq = 7;
writel_relaxed(((readl_relaxed(host->ioaddr +
@@ -1814,6 +1815,8 @@
int len, i;
int clk_table_len;
u32 *clk_table = NULL;
+ int ice_clk_table_len;
+ u32 *ice_clk_table = NULL;
enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
const char *lower_bus_speed = NULL;
@@ -1871,6 +1874,20 @@
pdata->sup_clk_table = clk_table;
pdata->sup_clk_cnt = clk_table_len;
+ if (msm_host->ice.pdev) {
+ if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
+ &ice_clk_table, &ice_clk_table_len, 0)) {
+ dev_err(dev, "failed parsing supported ice clock rates\n");
+ goto out;
+ }
+ if (!ice_clk_table || !ice_clk_table_len) {
+ dev_err(dev, "Invalid clock table\n");
+ goto out;
+ }
+ pdata->sup_ice_clk_table = ice_clk_table;
+ pdata->sup_ice_clk_cnt = ice_clk_table_len;
+ }
+
pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
sdhci_msm_slot_reg_data),
GFP_KERNEL);
@@ -1940,6 +1957,8 @@
msm_host->core_3_0v_support = true;
pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
+ msm_host->regs_restore.is_supported =
+ of_property_read_bool(np, "qcom,restore-after-cx-collapse");
return pdata;
out:
@@ -2831,6 +2850,103 @@
return sel_clk;
}
+static void sdhci_msm_registers_save(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ if (!msm_host->regs_restore.is_supported)
+ return;
+
+ msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ msm_host->regs_restore.vendor_pwrctl_mask =
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_PWRCTL_MASK);
+ msm_host->regs_restore.vendor_func2 =
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ msm_host->regs_restore.vendor_func3 =
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3);
+ msm_host->regs_restore.hc_2c_2e =
+ sdhci_readl(host, SDHCI_CLOCK_CONTROL);
+ msm_host->regs_restore.hc_3c_3e =
+ sdhci_readl(host, SDHCI_AUTO_CMD_ERR);
+ msm_host->regs_restore.vendor_pwrctl_ctl =
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_PWRCTL_CTL);
+ msm_host->regs_restore.hc_38_3a =
+ sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
+ msm_host->regs_restore.hc_34_36 =
+ sdhci_readl(host, SDHCI_INT_ENABLE);
+ msm_host->regs_restore.hc_28_2a =
+ sdhci_readl(host, SDHCI_HOST_CONTROL);
+ msm_host->regs_restore.vendor_caps_0 =
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+ msm_host->regs_restore.hc_caps_1 =
+ sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ msm_host->regs_restore.is_valid = true;
+
+ pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
+ mmc_hostname(host->mmc), __func__,
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_PWRCTL_MASK));
+}
+
+static void sdhci_msm_registers_restore(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ if (!msm_host->regs_restore.is_supported ||
+ !msm_host->regs_restore.is_valid)
+ return;
+
+ writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
+ host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
+ writel_relaxed(msm_host->regs_restore.vendor_func2,
+ host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ writel_relaxed(msm_host->regs_restore.vendor_func3,
+ host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3);
+ sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
+ SDHCI_CLOCK_CONTROL);
+ sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
+ SDHCI_AUTO_CMD_ERR);
+ writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
+ host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
+ sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
+ SDHCI_SIGNAL_ENABLE);
+ sdhci_writel(host, msm_host->regs_restore.hc_34_36,
+ SDHCI_INT_ENABLE);
+ sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
+ SDHCI_HOST_CONTROL);
+ writel_relaxed(msm_host->regs_restore.vendor_caps_0,
+ host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+ sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
+ SDHCI_CAPABILITIES_1);
+ writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ msm_host->regs_restore.is_valid = false;
+
+ pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
+ mmc_hostname(host->mmc), __func__,
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_PWRCTL_MASK));
+}
+
static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -2858,11 +2974,23 @@
goto disable_pclk;
}
+ if (!IS_ERR(msm_host->ice_clk)) {
+ rc = clk_prepare_enable(msm_host->ice_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_host_clk;
+ }
+ }
atomic_set(&msm_host->controller_clock, 1);
pr_debug("%s: %s: enabled controller clock\n",
mmc_hostname(host->mmc), __func__);
+ sdhci_msm_registers_restore(host);
goto out;
+disable_host_clk:
+ if (!IS_ERR(msm_host->clk))
+ clk_disable_unprepare(msm_host->clk);
disable_pclk:
if (!IS_ERR(msm_host->pclk))
clk_disable_unprepare(msm_host->pclk);
@@ -2879,12 +3007,13 @@
struct sdhci_msm_host *msm_host = pltfm_host->priv;
if (atomic_read(&msm_host->controller_clock)) {
+ sdhci_msm_registers_save(host);
if (!IS_ERR(msm_host->clk))
clk_disable_unprepare(msm_host->clk);
- if (!IS_ERR(msm_host->pclk))
- clk_disable_unprepare(msm_host->pclk);
if (!IS_ERR(msm_host->ice_clk))
clk_disable_unprepare(msm_host->ice_clk);
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
sdhci_msm_bus_voting(host, 0);
atomic_set(&msm_host->controller_clock, 0);
pr_debug("%s: %s: disabled controller clock\n",
@@ -2957,14 +3086,9 @@
clk_disable_unprepare(msm_host->sleep_clk);
if (!IS_ERR_OR_NULL(msm_host->ff_clk))
clk_disable_unprepare(msm_host->ff_clk);
- clk_disable_unprepare(msm_host->clk);
- if (!IS_ERR(msm_host->pclk))
- clk_disable_unprepare(msm_host->pclk);
if (!IS_ERR_OR_NULL(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
-
- atomic_set(&msm_host->controller_clock, 0);
- sdhci_msm_bus_voting(host, 0);
+ sdhci_msm_disable_controller_clock(host);
}
atomic_set(&msm_host->clks_on, enable);
goto out;
@@ -2977,6 +3101,8 @@
disable_controller_clk:
if (!IS_ERR_OR_NULL(msm_host->clk))
clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->ice_clk))
+ clk_disable_unprepare(msm_host->ice_clk);
if (!IS_ERR_OR_NULL(msm_host->pclk))
clk_disable_unprepare(msm_host->pclk);
atomic_set(&msm_host->controller_clock, 0);
@@ -3282,6 +3408,7 @@
int i, index = 0;
u32 test_bus_val = 0;
u32 debug_reg[MAX_TEST_BUS] = {0};
+ u32 sts = 0;
sdhci_msm_cache_debug_data(host);
pr_info("----------- VENDOR REGISTER DUMP -----------\n");
@@ -3344,6 +3471,29 @@
pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, i + 3, debug_reg[i], debug_reg[i+1],
debug_reg[i+2], debug_reg[i+3]);
+ if (host->is_crypto_en) {
+ sdhci_msm_ice_get_status(host, &sts);
+ pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
+ sdhci_msm_ice_print_regs(host);
+ }
+}
+
+static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ /* Set ICE core to be reset in sync with SDHC core */
+ if (msm_host->ice.pdev) {
+ if (msm_host->ice_hci_support)
+ writel_relaxed(1, host->ioaddr +
+ HC_VENDOR_SPECIFIC_ICE_CTRL);
+ else
+ writel_relaxed(1,
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
+ }
+
+ sdhci_reset(host, mask);
}
/*
@@ -3926,6 +4076,8 @@
}
static struct sdhci_ops sdhci_msm_ops = {
+ .crypto_engine_cfg = sdhci_msm_ice_cfg,
+ .crypto_engine_reset = sdhci_msm_ice_reset,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.check_power_status = sdhci_msm_check_power_status,
.platform_execute_tuning = sdhci_msm_execute_tuning,
@@ -3939,7 +4091,7 @@
.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
.enable_controller_clock = sdhci_msm_enable_controller_clock,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = sdhci_msm_reset,
.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
.reset_workaround = sdhci_msm_reset_workaround,
@@ -4138,7 +4290,7 @@
host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
if (IS_ERR(host)) {
ret = PTR_ERR(host);
- goto out;
+ goto out_host_free;
}
pltfm_host = sdhci_priv(host);
@@ -4146,6 +4298,31 @@
msm_host->mmc = host->mmc;
msm_host->pdev = pdev;
+ /* get the ice device vops if present */
+ ret = sdhci_msm_ice_get_dev(host);
+ if (ret == -EPROBE_DEFER) {
+ /*
+ * SDHCI driver might be probed before ICE driver does.
+ * In that case we would like to return EPROBE_DEFER code
+ * in order to delay its probing.
+ */
+ dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
+ __func__, ret);
+ goto out_host_free;
+
+ } else if (ret == -ENODEV) {
+ /*
+ * ICE device is not enabled in DTS file. No need for further
+ * initialization of ICE driver.
+ */
+ dev_warn(&pdev->dev, "%s: ICE device is not enabled",
+ __func__);
+ } else if (ret) {
+ dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
+ __func__, ret);
+ goto out_host_free;
+ }
+
/* Extract platform data */
if (pdev->dev.of_node) {
ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
@@ -4205,6 +4382,28 @@
}
atomic_set(&msm_host->controller_clock, 1);
+ if (msm_host->ice.pdev) {
+ /* Setup SDC ICE clock */
+ msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
+ if (!IS_ERR(msm_host->ice_clk)) {
+ /* ICE core has only one clock frequency for now */
+ ret = clk_set_rate(msm_host->ice_clk,
+ msm_host->pdata->sup_ice_clk_table[0]);
+ if (ret) {
+ dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
+ ret,
+ msm_host->pdata->sup_ice_clk_table[0]);
+ goto pclk_disable;
+ }
+ ret = clk_prepare_enable(msm_host->ice_clk);
+ if (ret)
+ goto pclk_disable;
+
+ msm_host->ice_clk_rate =
+				msm_host->pdata->sup_ice_clk_table[0];
+ }
+ }
+
/* Setup SDC MMC clock */
msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(msm_host->clk)) {
@@ -4431,6 +4630,21 @@
msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
+ /* Initialize ICE if present */
+ if (msm_host->ice.pdev) {
+ ret = sdhci_msm_ice_init(host);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: SDHCi ICE init failed (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ ret = -EINVAL;
+ goto vreg_deinit;
+ }
+ host->is_crypto_en = true;
+ /* Packed commands cannot be encrypted/decrypted using ICE */
+ msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
+ MMC_CAP2_PACKED_WR_CONTROL);
+ }
+
init_completion(&msm_host->pwr_irq_completion);
if (gpio_is_valid(msm_host->pdata->status_gpio)) {
@@ -4570,6 +4784,8 @@
clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
sdhci_pltfm_free(pdev);
+out_host_free:
+ devm_kfree(&pdev->dev, msm_host);
out:
pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
return ret;
@@ -4659,6 +4875,7 @@
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
ktime_t start = ktime_get();
+ int ret;
if (host->mmc->card && mmc_card_sdio(host->mmc->card))
goto defer_disable_host_irq;
@@ -4680,6 +4897,12 @@
trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
ktime_to_us(ktime_sub(ktime_get(), start)));
+ if (host->is_crypto_en) {
+ ret = sdhci_msm_ice_suspend(host);
+ if (ret < 0)
+ pr_err("%s: failed to suspend crypto engine %d\n",
+ mmc_hostname(host->mmc), ret);
+ }
return 0;
}
@@ -4689,6 +4912,21 @@
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
ktime_t start = ktime_get();
+ int ret;
+
+ if (host->is_crypto_en) {
+ ret = sdhci_msm_enable_controller_clock(host);
+ if (ret) {
+ pr_err("%s: Failed to enable reqd clocks\n",
+ mmc_hostname(host->mmc));
+ goto skip_ice_resume;
+ }
+ ret = sdhci_msm_ice_resume(host);
+ if (ret)
+ pr_err("%s: failed to resume crypto engine %d\n",
+ mmc_hostname(host->mmc), ret);
+ }
+skip_ice_resume:
if (host->mmc->card && mmc_card_sdio(host->mmc->card))
goto defer_enable_host_irq;
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index c536a7d..cdbaaa9 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -165,11 +165,28 @@
struct sdhci_msm_ice_data {
struct qcom_ice_variant_ops *vops;
- struct completion async_done;
struct platform_device *pdev;
int state;
};
+struct sdhci_msm_regs_restore {
+ bool is_supported;
+ bool is_valid;
+ u32 vendor_pwrctl_mask;
+ u32 vendor_pwrctl_ctl;
+ u32 vendor_caps_0;
+ u32 vendor_func;
+ u32 vendor_func2;
+ u32 vendor_func3;
+ u32 hc_2c_2e;
+ u32 hc_28_2a;
+ u32 hc_34_36;
+ u32 hc_38_3a;
+ u32 hc_3c_3e;
+ u32 hc_caps_1;
+ u32 testbus_config;
+};
+
struct sdhci_msm_debug_data {
struct mmc_host copy_mmc;
struct mmc_card copy_card;
@@ -226,6 +243,7 @@
const struct sdhci_msm_offset *offset;
bool core_3_0v_support;
bool pltfm_init_done;
+ struct sdhci_msm_regs_restore regs_restore;
};
extern char *saved_command_line;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8fbcdae..3eada3b 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -305,6 +305,8 @@
/* Resetting the controller clears many */
host->preset_enabled = false;
}
+ if (host->is_crypto_en)
+ host->crypto_reset_reqd = true;
}
static void sdhci_init(struct sdhci_host *host, int soft)
@@ -1749,6 +1751,33 @@
return MMC_SEND_TUNING_BLOCK;
}
+static int sdhci_crypto_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot)
+{
+ int err = 0;
+
+ if (host->crypto_reset_reqd && host->ops->crypto_engine_reset) {
+ err = host->ops->crypto_engine_reset(host);
+ if (err) {
+ pr_err("%s: crypto reset failed\n",
+ mmc_hostname(host->mmc));
+ goto out;
+ }
+ host->crypto_reset_reqd = false;
+ }
+
+ if (host->ops->crypto_engine_cfg) {
+ err = host->ops->crypto_engine_cfg(host, mrq, slot);
+ if (err) {
+ pr_err("%s: failed to configure crypto\n",
+ mmc_hostname(host->mmc));
+ goto out;
+ }
+ }
+out:
+ return err;
+}
+
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host;
@@ -1815,6 +1844,13 @@
sdhci_get_tuning_cmd(host));
}
+ if (host->is_crypto_en) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ if (sdhci_crypto_cfg(host, mrq, 0))
+ goto end_req;
+ spin_lock_irqsave(&host->lock, flags);
+ }
+
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
sdhci_send_command(host, mrq->sbc);
else
@@ -1823,6 +1859,12 @@
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
+ return;
+end_req:
+ mrq->cmd->error = -EIO;
+ if (mrq->data)
+ mrq->data->error = -EIO;
+ mmc_request_done(host->mmc, mrq);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
@@ -3704,6 +3746,27 @@
SDHCI_INT_RESPONSE, SDHCI_INT_ENABLE);
sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
}
+static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
+ struct mmc_request *mrq, u32 slot)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!host->is_crypto_en)
+ return 0;
+
+ return sdhci_crypto_cfg(host, mrq, slot);
+}
+
+static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!host->is_crypto_en)
+ return;
+
+ if (host->ops->crypto_cfg_reset)
+ host->ops->crypto_cfg_reset(host, slot);
+}
#else
static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc)
{
@@ -3747,6 +3810,18 @@
static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc)
{
+
+}
+
+static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
+ struct mmc_request *mrq, u32 slot)
+{
+ return 0;
+}
+
+static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
+{
+
}
#endif
@@ -3759,6 +3834,8 @@
.enhanced_strobe_mask = sdhci_enhanced_strobe_mask,
.post_cqe_halt = sdhci_cmdq_post_cqe_halt,
.set_transfer_params = sdhci_cmdq_set_transfer_params,
+ .crypto_cfg = sdhci_cmdq_crypto_cfg,
+ .crypto_cfg_reset = sdhci_cmdq_crypto_cfg_reset,
};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index d9e656a..04e806c 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -628,6 +628,8 @@
enum sdhci_power_policy power_policy;
bool sdio_irq_async_status;
+ bool is_crypto_en;
+ bool crypto_reset_reqd;
u32 auto_cmd_err_sts;
struct ratelimit_state dbg_dump_rs;
@@ -666,6 +668,10 @@
unsigned int (*get_ro)(struct sdhci_host *host);
void (*reset)(struct sdhci_host *host, u8 mask);
int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+ int (*crypto_engine_cfg)(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot);
+ int (*crypto_engine_reset)(struct sdhci_host *host);
+ void (*crypto_cfg_reset)(struct sdhci_host *host, unsigned int slot);
void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct sdhci_host *host);
void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 6063cf4..410bcda 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3133,7 +3133,7 @@
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar);
- if (QCA_REV_6174(ar))
+ if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
ath10k_pci_override_ce_config(ar);
ret = ath10k_pci_alloc_pipes(ar);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 9afd6f2..bb36fe5 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -46,6 +46,12 @@
/* channel 4 not supported yet */
};
+#ifdef CONFIG_PM
+static struct wiphy_wowlan_support wil_wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
+};
+#endif
+
/* Vendor id to be used in vendor specific command and events
* to user space.
* NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID,
@@ -1883,6 +1889,10 @@
wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS;
wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
}
+
+#ifdef CONFIG_PM
+ wiphy->wowlan = &wil_wowlan_support;
+#endif
}
struct wireless_dev *wil_cfg80211_init(struct device *dev)
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
index b4c4d09..b91bf51 100644
--- a/drivers/net/wireless/ath/wil6210/sysfs.c
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -291,6 +291,8 @@
return err;
}
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
return 0;
}
@@ -299,4 +301,5 @@
struct device *dev = wil_to_dev(wil);
sysfs_remove_group(&dev->kobj, &wil6210_attribute_group);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 0f5dde1..01d44f9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -414,23 +414,24 @@
struct brcmf_cfg80211_vif *vif,
enum nl80211_iftype new_type)
{
- int iftype_num[NUM_NL80211_IFTYPES];
struct brcmf_cfg80211_vif *pos;
bool check_combos = false;
int ret = 0;
+ struct iface_combination_params params = {
+ .num_different_channels = 1,
+ };
- memset(&iftype_num[0], 0, sizeof(iftype_num));
list_for_each_entry(pos, &cfg->vif_list, list)
if (pos == vif) {
- iftype_num[new_type]++;
+ params.iftype_num[new_type]++;
} else {
/* concurrent interfaces so need check combinations */
check_combos = true;
- iftype_num[pos->wdev.iftype]++;
+ params.iftype_num[pos->wdev.iftype]++;
}
if (check_combos)
- ret = cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+		ret = cfg80211_check_combinations(cfg->wiphy, &params);
return ret;
}
@@ -438,15 +439,16 @@
static int brcmf_vif_add_validate(struct brcmf_cfg80211_info *cfg,
enum nl80211_iftype new_type)
{
- int iftype_num[NUM_NL80211_IFTYPES];
struct brcmf_cfg80211_vif *pos;
+ struct iface_combination_params params = {
+ .num_different_channels = 1,
+ };
- memset(&iftype_num[0], 0, sizeof(iftype_num));
list_for_each_entry(pos, &cfg->vif_list, list)
- iftype_num[pos->wdev.iftype]++;
+ params.iftype_num[pos->wdev.iftype]++;
- iftype_num[new_type]++;
- return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+ params.iftype_num[new_type]++;
+	return cfg80211_check_combinations(cfg->wiphy, &params);
}
static void convert_key_from_CPU(struct brcmf_wsec_key *key,
diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
index 3c89a73..c09e61f 100644
--- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
+++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
@@ -11,9 +11,14 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#ifdef CONFIG_WCNSS_SKB_PRE_ALLOC
+#include <linux/skbuff.h>
+#endif
static DEFINE_SPINLOCK(alloc_lock);
@@ -21,6 +26,11 @@
#define WCNSS_MAX_STACK_TRACE 64
#endif
+#define PRE_ALLOC_DEBUGFS_DIR "cnss-prealloc"
+#define PRE_ALLOC_DEBUGFS_FILE_OBJ "status"
+
+static struct dentry *debug_base;
+
struct wcnss_prealloc {
int occupied;
unsigned int size;
@@ -228,14 +238,89 @@
}
EXPORT_SYMBOL(wcnss_pre_alloc_reset);
+static int prealloc_memory_stats_show(struct seq_file *fp, void *data)
+{
+ int i = 0;
+ int used_slots = 0, free_slots = 0;
+ unsigned int tsize = 0, tused = 0, size = 0;
+
+ seq_puts(fp, "\nSlot_Size(Kb)\t\t[Used : Free]\n");
+ for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) {
+ tsize += wcnss_allocs[i].size;
+ if (size != wcnss_allocs[i].size) {
+ if (size) {
+ seq_printf(
+ fp, "[%d : %d]\n",
+ used_slots, free_slots);
+ }
+
+ size = wcnss_allocs[i].size;
+ used_slots = 0;
+ free_slots = 0;
+ seq_printf(fp, "%d Kb\t\t\t", size / 1024);
+ }
+
+ if (wcnss_allocs[i].occupied) {
+ tused += wcnss_allocs[i].size;
+ ++used_slots;
+ } else {
+ ++free_slots;
+ }
+ }
+ seq_printf(fp, "[%d : %d]\n", used_slots, free_slots);
+
+ /* Convert byte to Kb */
+ if (tsize)
+ tsize = tsize / 1024;
+ if (tused)
+ tused = tused / 1024;
+ seq_printf(fp, "\nMemory Status:\nTotal Memory: %dKb\n", tsize);
+ seq_printf(fp, "Used: %dKb\nFree: %dKb\n", tused, tsize - tused);
+
+ return 0;
+}
+
+static int prealloc_memory_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, prealloc_memory_stats_show, NULL);
+}
+
+static const struct file_operations prealloc_memory_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = prealloc_memory_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int __init wcnss_pre_alloc_init(void)
{
- return wcnss_prealloc_init();
+ int ret;
+
+ ret = wcnss_prealloc_init();
+ if (ret) {
+ pr_err("%s: Failed to init the prealloc pool\n", __func__);
+ return ret;
+ }
+
+ debug_base = debugfs_create_dir(PRE_ALLOC_DEBUGFS_DIR, NULL);
+ if (IS_ERR_OR_NULL(debug_base)) {
+ pr_err("%s: Failed to create debugfs dir\n", __func__);
+ } else if (IS_ERR_OR_NULL(debugfs_create_file(
+ PRE_ALLOC_DEBUGFS_FILE_OBJ,
+ 0644, debug_base, NULL,
+ &prealloc_memory_stats_fops))) {
+ pr_err("%s: Failed to create debugfs file\n", __func__);
+ debugfs_remove_recursive(debug_base);
+ }
+
+ return ret;
}
static void __exit wcnss_pre_alloc_exit(void)
{
wcnss_prealloc_deinit();
+ debugfs_remove_recursive(debug_base);
}
module_init(wcnss_pre_alloc_init);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d3bad57..0fd7d7e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2662,7 +2662,7 @@
tasklet_hrtimer_init(&data->beacon_timer,
mac80211_hwsim_beacon,
- CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
spin_lock_bh(&hwsim_radio_lock);
list_add_tail(&data->list, &hwsim_radios);
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.h b/drivers/phy/phy-qcom-ufs-qmp-v3.h
index f731aac..4d00878 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.h
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.h
@@ -150,11 +150,15 @@
#define UFS_PHY_MULTI_LANE_CTRL1 PHY_OFF(0x1C4)
/* UFS PHY TX registers */
+#define QSERDES_TX0_RES_CODE_LANE_OFFSET_TX TX_OFF(0, 0x44)
+#define QSERDES_TX0_RES_CODE_LANE_OFFSET_RX TX_OFF(0, 0x48)
#define QSERDES_TX0_TRANSCEIVER_BIAS_EN TX_OFF(0, 0x5C)
#define QSERDES_TX0_LANE_MODE_1 TX_OFF(0, 0x8C)
#define QSERDES_TX0_LANE_MODE_2 TX_OFF(0, 0x90)
#define QSERDES_TX0_LANE_MODE_3 TX_OFF(0, 0x94)
+#define QSERDES_TX1_RES_CODE_LANE_OFFSET_TX TX_OFF(1, 0x44)
+#define QSERDES_TX1_RES_CODE_LANE_OFFSET_RX TX_OFF(1, 0x48)
#define QSERDES_TX1_LANE_MODE_1 TX_OFF(1, 0x8C)
@@ -177,6 +181,7 @@
#define QSERDES_RX0_SIGDET_LVL RX_OFF(0, 0x108)
#define QSERDES_RX0_SIGDET_DEGLITCH_CNTRL RX_OFF(0, 0x10C)
#define QSERDES_RX0_RX_INTERFACE_MODE RX_OFF(0, 0x11C)
+#define QSERDES_RX0_RX_MODE_00 RX_OFF(0, 0x164)
#define QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF RX_OFF(1, 0x24)
#define QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER RX_OFF(1, 0x28)
@@ -193,6 +198,7 @@
#define QSERDES_RX1_SIGDET_LVL RX_OFF(1, 0x108)
#define QSERDES_RX1_SIGDET_DEGLITCH_CNTRL RX_OFF(1, 0x10C)
#define QSERDES_RX1_RX_INTERFACE_MODE RX_OFF(1, 0x11C)
+#define QSERDES_RX1_RX_MODE_00 RX_OFF(1, 0x164)
#define UFS_PHY_RX_LINECFG_DISABLE_BIT BIT(1)
@@ -223,7 +229,7 @@
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x07),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
@@ -255,13 +261,16 @@
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_TERM_BW, 0x5B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3, 0x04),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0x81),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_RES_CODE_LANE_OFFSET_TX, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_RES_CODE_LANE_OFFSET_RX, 0x07),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00, 0x59),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6E),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
@@ -281,7 +290,7 @@
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_TERM_BW, 0x5B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3, 0x04),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4, 0x1B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN, 0x04),
@@ -289,6 +298,9 @@
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0x81),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW, 0x80),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_RES_CODE_LANE_OFFSET_TX, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_RES_CODE_LANE_OFFSET_RX, 0x07),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00, 0x59),
};
static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 41b5b07..6852010 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -194,6 +194,16 @@
return 0;
}
+static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
+{
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~(mask << shift);
+ tmp |= value << shift;
+ writel(tmp, reg);
+}
+
static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
@@ -211,8 +221,7 @@
reg += bank * 0x20 + pin / 16 * 0x10;
shift = pin % 16 * 2;
- writel(0x3 << shift, reg + CLR);
- writel(g->muxsel[i] << shift, reg + SET);
+ mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
}
return 0;
@@ -279,8 +288,7 @@
/* mA */
if (config & MA_PRESENT) {
shift = pin % 8 * 4;
- writel(0x3 << shift, reg + CLR);
- writel(ma << shift, reg + SET);
+ mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
}
/* vol */
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index c43b1e9..0d34d8a4 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -13,6 +13,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -1524,10 +1525,32 @@
chained_irq_exit(chip, desc);
}
+/*
+ * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
+ * tables. Since we leave GPIOs that are not capable of generating
+ * interrupts out of the irqdomain the numbering will be different and
+ * cause devices using the hardcoded IRQ numbers fail. In order not to
+ * break such machines we will only mask pins from irqdomain if the machine
+ * is not listed below.
+ */
+static const struct dmi_system_id chv_no_valid_mask[] = {
+ {
+ /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
+ .ident = "Acer Chromebook (CYAN)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+ DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+ },
+ },
+ {}
+};
+
static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
{
const struct chv_gpio_pinrange *range;
struct gpio_chip *chip = &pctrl->chip;
+ bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
int ret, i, offset;
*chip = chv_gpio_chip;
@@ -1536,7 +1559,7 @@
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->base = -1;
- chip->irq_need_valid_mask = true;
+ chip->irq_need_valid_mask = need_valid_mask;
ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
if (ret) {
@@ -1567,7 +1590,7 @@
intsel &= CHV_PADCTRL0_INTSEL_MASK;
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
- if (intsel >= pctrl->community->nirqs)
+ if (need_valid_mask && intsel >= pctrl->community->nirqs)
clear_bit(i, chip->irq_valid_mask);
}
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 76f077f..f87ef5a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -739,8 +739,8 @@
static const char * const nand_groups[] = {
"nand_io", "nand_io_ce0", "nand_io_ce1",
"nand_io_rb0", "nand_ale", "nand_cle",
- "nand_wen_clk", "nand_ren_clk", "nand_dqs0",
- "nand_dqs1"
+ "nand_wen_clk", "nand_ren_clk", "nand_dqs_0",
+ "nand_dqs_1"
};
static const char * const nor_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b68ae42..743d1f4 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -405,6 +405,36 @@
PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
};
static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 9520166..db15141 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -951,7 +951,7 @@
ret = gpiochip_irqchip_add(chip,
&msm_gpio_irq_chip,
0,
- handle_edge_irq,
+ handle_fasteoi_irq,
IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "Failed to add irqchip to gpiochip\n");
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index c93628e..1f742f8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -28,6 +28,7 @@
#define NORTH 0x00500000
#define SOUTH 0x00900000
#define WEST 0x00100000
+#define DUMMY 0x0
#define REG_SIZE 0x1000
#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
{ \
@@ -254,12 +255,14 @@
PINCTRL_PIN(147, "GPIO_147"),
PINCTRL_PIN(148, "GPIO_148"),
PINCTRL_PIN(149, "GPIO_149"),
- PINCTRL_PIN(150, "SDC1_CLK"),
- PINCTRL_PIN(151, "SDC1_CMD"),
- PINCTRL_PIN(152, "SDC1_DATA"),
- PINCTRL_PIN(153, "SDC2_CLK"),
- PINCTRL_PIN(154, "SDC2_CMD"),
- PINCTRL_PIN(155, "SDC2_DATA"),
+ PINCTRL_PIN(150, "SDC1_RCLK"),
+ PINCTRL_PIN(151, "SDC1_CLK"),
+ PINCTRL_PIN(152, "SDC1_CMD"),
+ PINCTRL_PIN(153, "SDC1_DATA"),
+ PINCTRL_PIN(154, "SDC2_CLK"),
+ PINCTRL_PIN(155, "SDC2_CMD"),
+ PINCTRL_PIN(156, "SDC2_DATA"),
+ PINCTRL_PIN(157, "UFS_RESET"),
};
#define DECLARE_MSM_GPIO_PINS(pin) \
@@ -322,10 +325,23 @@
DECLARE_MSM_GPIO_PINS(55);
DECLARE_MSM_GPIO_PINS(56);
DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
DECLARE_MSM_GPIO_PINS(65);
DECLARE_MSM_GPIO_PINS(66);
DECLARE_MSM_GPIO_PINS(67);
DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
DECLARE_MSM_GPIO_PINS(75);
DECLARE_MSM_GPIO_PINS(76);
DECLARE_MSM_GPIO_PINS(77);
@@ -355,6 +371,7 @@
DECLARE_MSM_GPIO_PINS(101);
DECLARE_MSM_GPIO_PINS(102);
DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
DECLARE_MSM_GPIO_PINS(105);
DECLARE_MSM_GPIO_PINS(106);
DECLARE_MSM_GPIO_PINS(107);
@@ -401,243 +418,128 @@
DECLARE_MSM_GPIO_PINS(148);
DECLARE_MSM_GPIO_PINS(149);
-static const unsigned int sdc1_clk_pins[] = { 150 };
-static const unsigned int sdc1_cmd_pins[] = { 151 };
-static const unsigned int sdc1_data_pins[] = { 152 };
-static const unsigned int sdc2_clk_pins[] = { 153 };
-static const unsigned int sdc2_cmd_pins[] = { 154 };
-static const unsigned int sdc2_data_pins[] = { 155 };
+static const unsigned int sdc1_rclk_pins[] = { 150 };
+static const unsigned int sdc1_clk_pins[] = { 151 };
+static const unsigned int sdc1_cmd_pins[] = { 152 };
+static const unsigned int sdc1_data_pins[] = { 153 };
+static const unsigned int sdc2_clk_pins[] = { 154 };
+static const unsigned int sdc2_cmd_pins[] = { 155 };
+static const unsigned int sdc2_data_pins[] = { 156 };
+static const unsigned int ufs_reset_pins[] = { 157 };
enum sdm670_functions {
msm_mux_qup0,
msm_mux_gpio,
- msm_mux_reserved0,
- msm_mux_reserved1,
- msm_mux_reserved2,
- msm_mux_reserved3,
msm_mux_qup9,
msm_mux_qdss_cti,
- msm_mux_reserved4,
- msm_mux_reserved5,
msm_mux_ddr_pxi0,
- msm_mux_reserved6,
msm_mux_ddr_bist,
msm_mux_atest_tsens2,
msm_mux_vsense_trigger,
msm_mux_atest_usb1,
- msm_mux_reserved7,
msm_mux_qup_l4,
msm_mux_GP_PDM1,
- msm_mux_reserved8,
msm_mux_qup_l5,
- msm_mux_reserved9,
msm_mux_mdp_vsync,
msm_mux_qup_l6,
msm_mux_wlan2_adc1,
msm_mux_atest_usb11,
msm_mux_ddr_pxi2,
- msm_mux_reserved10,
msm_mux_edp_lcd,
msm_mux_dbg_out,
msm_mux_wlan2_adc0,
msm_mux_atest_usb10,
- msm_mux_reserved11,
msm_mux_m_voc,
msm_mux_tsif1_sync,
msm_mux_ddr_pxi3,
- msm_mux_reserved12,
msm_mux_cam_mclk,
msm_mux_pll_bypassnl,
msm_mux_qdss_gpio0,
- msm_mux_reserved13,
msm_mux_pll_reset,
msm_mux_qdss_gpio1,
- msm_mux_reserved14,
msm_mux_qdss_gpio2,
- msm_mux_reserved15,
msm_mux_qdss_gpio3,
- msm_mux_reserved16,
msm_mux_cci_i2c,
msm_mux_qup1,
msm_mux_qdss_gpio4,
- msm_mux_reserved17,
msm_mux_qdss_gpio5,
- msm_mux_reserved18,
msm_mux_qdss_gpio6,
- msm_mux_reserved19,
msm_mux_qdss_gpio7,
- msm_mux_reserved20,
msm_mux_cci_timer0,
msm_mux_gcc_gp2,
msm_mux_qdss_gpio8,
- msm_mux_reserved21,
msm_mux_cci_timer1,
msm_mux_gcc_gp3,
msm_mux_qdss_gpio,
- msm_mux_reserved22,
msm_mux_cci_timer2,
msm_mux_qdss_gpio9,
- msm_mux_reserved23,
msm_mux_cci_timer3,
msm_mux_cci_async,
msm_mux_qdss_gpio10,
- msm_mux_reserved24,
msm_mux_cci_timer4,
msm_mux_qdss_gpio11,
- msm_mux_reserved25,
msm_mux_qdss_gpio12,
msm_mux_JITTER_BIST,
- msm_mux_reserved26,
msm_mux_qup2,
msm_mux_qdss_gpio13,
msm_mux_PLL_BIST,
- msm_mux_reserved27,
msm_mux_qdss_gpio14,
msm_mux_AGERA_PLL,
- msm_mux_reserved28,
msm_mux_phase_flag1,
msm_mux_qdss_gpio15,
msm_mux_atest_tsens,
- msm_mux_reserved29,
msm_mux_phase_flag2,
- msm_mux_reserved30,
msm_mux_qup11,
msm_mux_qup14,
- msm_mux_reserved31,
- msm_mux_reserved32,
- msm_mux_reserved33,
- msm_mux_reserved34,
msm_mux_pci_e0,
msm_mux_QUP_L4,
- msm_mux_reserved35,
msm_mux_QUP_L5,
- msm_mux_reserved36,
msm_mux_QUP_L6,
- msm_mux_reserved37,
msm_mux_usb_phy,
- msm_mux_reserved38,
msm_mux_lpass_slimbus,
- msm_mux_reserved39,
msm_mux_sd_write,
msm_mux_tsif1_error,
- msm_mux_reserved40,
msm_mux_qup3,
- msm_mux_reserved41,
- msm_mux_reserved42,
- msm_mux_reserved43,
- msm_mux_reserved44,
- msm_mux_bt_reset,
msm_mux_qup6,
- msm_mux_reserved45,
- msm_mux_reserved46,
- msm_mux_reserved47,
- msm_mux_reserved124,
- msm_mux_reserved125,
- msm_mux_reserved126,
- msm_mux_reserved127,
- msm_mux_reserved128,
- msm_mux_reserved129,
- msm_mux_qlink_request,
- msm_mux_reserved130,
- msm_mux_qlink_enable,
- msm_mux_reserved131,
- msm_mux_reserved132,
- msm_mux_reserved133,
- msm_mux_reserved134,
- msm_mux_pa_indicator,
- msm_mux_reserved135,
- msm_mux_reserved136,
- msm_mux_phase_flag26,
- msm_mux_reserved137,
- msm_mux_phase_flag27,
- msm_mux_reserved138,
- msm_mux_phase_flag28,
- msm_mux_reserved139,
- msm_mux_phase_flag6,
- msm_mux_reserved140,
- msm_mux_phase_flag29,
- msm_mux_reserved141,
- msm_mux_phase_flag30,
- msm_mux_reserved142,
- msm_mux_phase_flag31,
- msm_mux_reserved143,
- msm_mux_mss_lte,
- msm_mux_reserved144,
- msm_mux_reserved145,
- msm_mux_reserved146,
- msm_mux_reserved147,
- msm_mux_reserved148,
- msm_mux_reserved149,
- msm_mux_reserved48,
msm_mux_qup12,
- msm_mux_reserved49,
- msm_mux_reserved50,
- msm_mux_reserved51,
msm_mux_phase_flag16,
- msm_mux_reserved52,
msm_mux_qup10,
msm_mux_phase_flag11,
- msm_mux_reserved53,
msm_mux_GP_PDM0,
msm_mux_phase_flag12,
msm_mux_wlan1_adc1,
msm_mux_atest_usb13,
msm_mux_ddr_pxi1,
- msm_mux_reserved54,
msm_mux_phase_flag13,
msm_mux_wlan1_adc0,
msm_mux_atest_usb12,
- msm_mux_reserved55,
msm_mux_phase_flag17,
- msm_mux_reserved56,
msm_mux_qua_mi2s,
msm_mux_gcc_gp1,
msm_mux_phase_flag18,
- msm_mux_reserved57,
msm_mux_pri_mi2s,
msm_mux_qup8,
msm_mux_wsa_clk,
- msm_mux_reserved65,
msm_mux_pri_mi2s_ws,
msm_mux_wsa_data,
- msm_mux_reserved66,
- msm_mux_wsa_en,
msm_mux_atest_usb2,
- msm_mux_reserved67,
msm_mux_atest_usb23,
- msm_mux_reserved68,
msm_mux_ter_mi2s,
msm_mux_phase_flag8,
msm_mux_atest_usb22,
- msm_mux_reserved75,
msm_mux_phase_flag9,
msm_mux_atest_usb21,
- msm_mux_reserved76,
msm_mux_phase_flag4,
msm_mux_atest_usb20,
- msm_mux_reserved77,
- msm_mux_ssc_irq,
- msm_mux_reserved78,
msm_mux_sec_mi2s,
msm_mux_GP_PDM2,
- msm_mux_reserved79,
- msm_mux_reserved80,
msm_mux_qup15,
- msm_mux_reserved81,
- msm_mux_reserved82,
- msm_mux_reserved83,
- msm_mux_reserved84,
msm_mux_qup5,
- msm_mux_reserved85,
msm_mux_copy_gp,
- msm_mux_reserved86,
- msm_mux_reserved87,
- msm_mux_reserved88,
msm_mux_tsif1_clk,
msm_mux_qup4,
msm_mux_tgu_ch3,
msm_mux_phase_flag10,
- msm_mux_reserved89,
msm_mux_tsif1_en,
msm_mux_mdp_vsync0,
msm_mux_mdp_vsync1,
@@ -645,83 +547,61 @@
msm_mux_mdp_vsync3,
msm_mux_tgu_ch0,
msm_mux_phase_flag0,
- msm_mux_reserved90,
msm_mux_tsif1_data,
msm_mux_sdc4_cmd,
msm_mux_tgu_ch1,
- msm_mux_reserved91,
msm_mux_tsif2_error,
msm_mux_sdc43,
msm_mux_vfr_1,
msm_mux_tgu_ch2,
- msm_mux_reserved92,
msm_mux_tsif2_clk,
msm_mux_sdc4_clk,
msm_mux_qup7,
- msm_mux_reserved93,
msm_mux_tsif2_en,
msm_mux_sdc42,
- msm_mux_reserved94,
msm_mux_tsif2_data,
msm_mux_sdc41,
- msm_mux_reserved95,
msm_mux_tsif2_sync,
msm_mux_sdc40,
msm_mux_phase_flag3,
- msm_mux_reserved96,
msm_mux_ldo_en,
- msm_mux_reserved97,
msm_mux_ldo_update,
- msm_mux_reserved98,
msm_mux_phase_flag14,
msm_mux_prng_rosc,
- msm_mux_reserved99,
msm_mux_phase_flag15,
- msm_mux_reserved100,
msm_mux_phase_flag5,
- msm_mux_reserved101,
msm_mux_pci_e1,
- msm_mux_reserved102,
msm_mux_COPY_PHASE,
- msm_mux_reserved103,
msm_mux_uim2_data,
msm_mux_qup13,
- msm_mux_reserved105,
msm_mux_uim2_clk,
- msm_mux_reserved106,
msm_mux_uim2_reset,
- msm_mux_reserved107,
msm_mux_uim2_present,
- msm_mux_reserved108,
msm_mux_uim1_data,
- msm_mux_reserved109,
msm_mux_uim1_clk,
- msm_mux_reserved110,
msm_mux_uim1_reset,
- msm_mux_reserved111,
msm_mux_uim1_present,
- msm_mux_reserved112,
msm_mux_uim_batt,
msm_mux_edp_hot,
- msm_mux_reserved113,
msm_mux_NAV_PPS,
msm_mux_GPS_TX,
- msm_mux_reserved114,
- msm_mux_reserved115,
- msm_mux_reserved116,
msm_mux_atest_char,
- msm_mux_reserved117,
msm_mux_adsp_ext,
msm_mux_atest_char3,
- msm_mux_reserved118,
msm_mux_atest_char2,
- msm_mux_reserved119,
msm_mux_atest_char1,
- msm_mux_reserved120,
msm_mux_atest_char0,
- msm_mux_reserved121,
- msm_mux_reserved122,
- msm_mux_reserved123,
+ msm_mux_qlink_request,
+ msm_mux_qlink_enable,
+ msm_mux_pa_indicator,
+ msm_mux_phase_flag26,
+ msm_mux_phase_flag27,
+ msm_mux_phase_flag28,
+ msm_mux_phase_flag6,
+ msm_mux_phase_flag29,
+ msm_mux_phase_flag30,
+ msm_mux_phase_flag31,
+ msm_mux_mss_lte,
msm_mux_NA,
};
@@ -735,31 +615,21 @@
"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
- "gpio43", "gpio44", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50",
- "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", "gpio57",
- "gpio65", "gpio66", "gpio75", "gpio76", "gpio77", "gpio81", "gpio82",
- "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
- "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
- "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
- "gpio103", "gpio105", "gpio106", "gpio107", "gpio108", "gpio109",
- "gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115",
- "gpio116", "gpio126", "gpio127", "gpio128", "gpio129", "gpio130",
- "gpio131", "gpio132", "gpio133", "gpio134", "gpio135", "gpio136",
- "gpio137", "gpio138", "gpio139", "gpio140", "gpio141", "gpio142",
- "gpio143", "gpio144", "gpio145", "gpio146", "gpio147", "gpio148",
- "gpio149",
-};
-static const char * const reserved0_groups[] = {
- "gpio0",
-};
-static const char * const reserved1_groups[] = {
- "gpio1",
-};
-static const char * const reserved2_groups[] = {
- "gpio2",
-};
-static const char * const reserved3_groups[] = {
- "gpio3",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio65", "gpio66", "gpio67", "gpio68", "gpio75", "gpio76",
+ "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90",
+ "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97",
+ "gpio98", "gpio99", "gpio100", "gpio101", "gpio102", "gpio103",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149",
};
static const char * const qup9_groups[] = {
"gpio4", "gpio5", "gpio6", "gpio7",
@@ -767,18 +637,9 @@
static const char * const qdss_cti_groups[] = {
"gpio4", "gpio5", "gpio51", "gpio52", "gpio90", "gpio91",
};
-static const char * const reserved4_groups[] = {
- "gpio4",
-};
-static const char * const reserved5_groups[] = {
- "gpio5",
-};
static const char * const ddr_pxi0_groups[] = {
"gpio6", "gpio7",
};
-static const char * const reserved6_groups[] = {
- "gpio6",
-};
static const char * const ddr_bist_groups[] = {
"gpio7", "gpio8", "gpio9", "gpio10",
};
@@ -791,24 +652,15 @@
static const char * const atest_usb1_groups[] = {
"gpio7",
};
-static const char * const reserved7_groups[] = {
- "gpio7",
-};
static const char * const qup_l4_groups[] = {
"gpio8", "gpio105", "gpio123",
};
static const char * const GP_PDM1_groups[] = {
"gpio8", "gpio66",
};
-static const char * const reserved8_groups[] = {
- "gpio8",
-};
static const char * const qup_l5_groups[] = {
"gpio9", "gpio106", "gpio124",
};
-static const char * const reserved9_groups[] = {
- "gpio9",
-};
static const char * const mdp_vsync_groups[] = {
"gpio10", "gpio11", "gpio12", "gpio97", "gpio98",
};
@@ -824,9 +676,6 @@
static const char * const ddr_pxi2_groups[] = {
"gpio10", "gpio11",
};
-static const char * const reserved10_groups[] = {
- "gpio10",
-};
static const char * const edp_lcd_groups[] = {
"gpio11",
};
@@ -839,9 +688,6 @@
static const char * const atest_usb10_groups[] = {
"gpio11",
};
-static const char * const reserved11_groups[] = {
- "gpio11",
-};
static const char * const m_voc_groups[] = {
"gpio12",
};
@@ -851,9 +697,6 @@
static const char * const ddr_pxi3_groups[] = {
"gpio12", "gpio13",
};
-static const char * const reserved12_groups[] = {
- "gpio12",
-};
static const char * const cam_mclk_groups[] = {
"gpio13", "gpio14", "gpio15", "gpio16",
};
@@ -863,30 +706,18 @@
static const char * const qdss_gpio0_groups[] = {
"gpio13", "gpio117",
};
-static const char * const reserved13_groups[] = {
- "gpio13",
-};
static const char * const pll_reset_groups[] = {
"gpio14",
};
static const char * const qdss_gpio1_groups[] = {
"gpio14", "gpio118",
};
-static const char * const reserved14_groups[] = {
- "gpio14",
-};
static const char * const qdss_gpio2_groups[] = {
"gpio15", "gpio119",
};
-static const char * const reserved15_groups[] = {
- "gpio15",
-};
static const char * const qdss_gpio3_groups[] = {
"gpio16", "gpio120",
};
-static const char * const reserved16_groups[] = {
- "gpio16",
-};
static const char * const cci_i2c_groups[] = {
"gpio17", "gpio18", "gpio19", "gpio20",
};
@@ -896,27 +727,15 @@
static const char * const qdss_gpio4_groups[] = {
"gpio17", "gpio121",
};
-static const char * const reserved17_groups[] = {
- "gpio17",
-};
static const char * const qdss_gpio5_groups[] = {
"gpio18", "gpio122",
};
-static const char * const reserved18_groups[] = {
- "gpio18",
-};
static const char * const qdss_gpio6_groups[] = {
"gpio19", "gpio41",
};
-static const char * const reserved19_groups[] = {
- "gpio19",
-};
static const char * const qdss_gpio7_groups[] = {
"gpio20", "gpio42",
};
-static const char * const reserved20_groups[] = {
- "gpio20",
-};
static const char * const cci_timer0_groups[] = {
"gpio21",
};
@@ -926,9 +745,6 @@
static const char * const qdss_gpio8_groups[] = {
"gpio21", "gpio75",
};
-static const char * const reserved21_groups[] = {
- "gpio21",
-};
static const char * const cci_timer1_groups[] = {
"gpio22",
};
@@ -938,18 +754,12 @@
static const char * const qdss_gpio_groups[] = {
"gpio22", "gpio30", "gpio123", "gpio124",
};
-static const char * const reserved22_groups[] = {
- "gpio22",
-};
static const char * const cci_timer2_groups[] = {
"gpio23",
};
static const char * const qdss_gpio9_groups[] = {
"gpio23", "gpio76",
};
-static const char * const reserved23_groups[] = {
- "gpio23",
-};
static const char * const cci_timer3_groups[] = {
"gpio24",
};
@@ -959,27 +769,18 @@
static const char * const qdss_gpio10_groups[] = {
"gpio24", "gpio77",
};
-static const char * const reserved24_groups[] = {
- "gpio24",
-};
static const char * const cci_timer4_groups[] = {
"gpio25",
};
static const char * const qdss_gpio11_groups[] = {
"gpio25", "gpio79",
};
-static const char * const reserved25_groups[] = {
- "gpio25",
-};
static const char * const qdss_gpio12_groups[] = {
"gpio26", "gpio80",
};
static const char * const JITTER_BIST_groups[] = {
"gpio26", "gpio35",
};
-static const char * const reserved26_groups[] = {
- "gpio26",
-};
static const char * const qup2_groups[] = {
"gpio27", "gpio28", "gpio29", "gpio30",
};
@@ -989,18 +790,12 @@
static const char * const PLL_BIST_groups[] = {
"gpio27", "gpio36",
};
-static const char * const reserved27_groups[] = {
- "gpio27",
-};
static const char * const qdss_gpio14_groups[] = {
"gpio28", "gpio43",
};
static const char * const AGERA_PLL_groups[] = {
"gpio28", "gpio37",
};
-static const char * const reserved28_groups[] = {
- "gpio28",
-};
static const char * const phase_flag1_groups[] = {
"gpio29",
};
@@ -1010,246 +805,57 @@
static const char * const atest_tsens_groups[] = {
"gpio29",
};
-static const char * const reserved29_groups[] = {
- "gpio29",
-};
static const char * const phase_flag2_groups[] = {
"gpio30",
};
-static const char * const reserved30_groups[] = {
- "gpio30",
-};
static const char * const qup11_groups[] = {
"gpio31", "gpio32", "gpio33", "gpio34",
};
static const char * const qup14_groups[] = {
"gpio31", "gpio32", "gpio33", "gpio34",
};
-static const char * const reserved31_groups[] = {
- "gpio31",
-};
-static const char * const reserved32_groups[] = {
- "gpio32",
-};
-static const char * const reserved33_groups[] = {
- "gpio33",
-};
-static const char * const reserved34_groups[] = {
- "gpio34",
-};
static const char * const pci_e0_groups[] = {
"gpio35", "gpio36",
};
static const char * const QUP_L4_groups[] = {
"gpio35", "gpio75",
};
-static const char * const reserved35_groups[] = {
- "gpio35",
-};
static const char * const QUP_L5_groups[] = {
"gpio36", "gpio76",
};
-static const char * const reserved36_groups[] = {
- "gpio36",
-};
static const char * const QUP_L6_groups[] = {
"gpio37", "gpio77",
};
-static const char * const reserved37_groups[] = {
- "gpio37",
-};
static const char * const usb_phy_groups[] = {
"gpio38",
};
-static const char * const reserved38_groups[] = {
- "gpio38",
-};
static const char * const lpass_slimbus_groups[] = {
"gpio39",
};
-static const char * const reserved39_groups[] = {
- "gpio39",
-};
static const char * const sd_write_groups[] = {
"gpio40",
};
static const char * const tsif1_error_groups[] = {
"gpio40",
};
-static const char * const reserved40_groups[] = {
- "gpio40",
-};
static const char * const qup3_groups[] = {
"gpio41", "gpio42", "gpio43", "gpio44",
};
-static const char * const reserved41_groups[] = {
- "gpio41",
-};
-static const char * const reserved42_groups[] = {
- "gpio42",
-};
-static const char * const reserved43_groups[] = {
- "gpio43",
-};
-static const char * const reserved44_groups[] = {
- "gpio44",
-};
-static const char * const bt_reset_groups[] = {
- "gpio45",
-};
static const char * const qup6_groups[] = {
"gpio45", "gpio46", "gpio47", "gpio48",
};
-static const char * const reserved45_groups[] = {
- "gpio45",
-};
-static const char * const reserved46_groups[] = {
- "gpio46",
-};
-static const char * const reserved47_groups[] = {
- "gpio47",
-};
-static const char * const reserved124_groups[] = {
- "gpio124",
-};
-static const char * const reserved125_groups[] = {
- "gpio125",
-};
-static const char * const reserved126_groups[] = {
- "gpio126",
-};
-static const char * const reserved127_groups[] = {
- "gpio127",
-};
-static const char * const reserved128_groups[] = {
- "gpio128",
-};
-static const char * const reserved129_groups[] = {
- "gpio129",
-};
-static const char * const qlink_request_groups[] = {
- "gpio130",
-};
-static const char * const reserved130_groups[] = {
- "gpio130",
-};
-static const char * const qlink_enable_groups[] = {
- "gpio131",
-};
-static const char * const reserved131_groups[] = {
- "gpio131",
-};
-static const char * const reserved132_groups[] = {
- "gpio132",
-};
-static const char * const reserved133_groups[] = {
- "gpio133",
-};
-static const char * const reserved134_groups[] = {
- "gpio134",
-};
-static const char * const pa_indicator_groups[] = {
- "gpio135",
-};
-static const char * const reserved135_groups[] = {
- "gpio135",
-};
-static const char * const reserved136_groups[] = {
- "gpio136",
-};
-static const char * const phase_flag26_groups[] = {
- "gpio137",
-};
-static const char * const reserved137_groups[] = {
- "gpio137",
-};
-static const char * const phase_flag27_groups[] = {
- "gpio138",
-};
-static const char * const reserved138_groups[] = {
- "gpio138",
-};
-static const char * const phase_flag28_groups[] = {
- "gpio139",
-};
-static const char * const reserved139_groups[] = {
- "gpio139",
-};
-static const char * const phase_flag6_groups[] = {
- "gpio140",
-};
-static const char * const reserved140_groups[] = {
- "gpio140",
-};
-static const char * const phase_flag29_groups[] = {
- "gpio141",
-};
-static const char * const reserved141_groups[] = {
- "gpio141",
-};
-static const char * const phase_flag30_groups[] = {
- "gpio142",
-};
-static const char * const reserved142_groups[] = {
- "gpio142",
-};
-static const char * const phase_flag31_groups[] = {
- "gpio143",
-};
-static const char * const reserved143_groups[] = {
- "gpio143",
-};
-static const char * const mss_lte_groups[] = {
- "gpio144", "gpio145",
-};
-static const char * const reserved144_groups[] = {
- "gpio144",
-};
-static const char * const reserved145_groups[] = {
- "gpio145",
-};
-static const char * const reserved146_groups[] = {
- "gpio146",
-};
-static const char * const reserved147_groups[] = {
- "gpio147",
-};
-static const char * const reserved148_groups[] = {
- "gpio148",
-};
-static const char * const reserved149_groups[] = {
- "gpio149", "gpio149",
-};
-static const char * const reserved48_groups[] = {
- "gpio48",
-};
static const char * const qup12_groups[] = {
"gpio49", "gpio50", "gpio51", "gpio52",
};
-static const char * const reserved49_groups[] = {
- "gpio49",
-};
-static const char * const reserved50_groups[] = {
- "gpio50",
-};
-static const char * const reserved51_groups[] = {
- "gpio51",
-};
static const char * const phase_flag16_groups[] = {
"gpio52",
};
-static const char * const reserved52_groups[] = {
- "gpio52",
-};
static const char * const qup10_groups[] = {
"gpio53", "gpio54", "gpio55", "gpio56",
};
static const char * const phase_flag11_groups[] = {
"gpio53",
};
-static const char * const reserved53_groups[] = {
- "gpio53",
-};
static const char * const GP_PDM0_groups[] = {
"gpio54", "gpio95",
};
@@ -1265,9 +871,6 @@
static const char * const ddr_pxi1_groups[] = {
"gpio54", "gpio55",
};
-static const char * const reserved54_groups[] = {
- "gpio54",
-};
static const char * const phase_flag13_groups[] = {
"gpio55",
};
@@ -1277,15 +880,9 @@
static const char * const atest_usb12_groups[] = {
"gpio55",
};
-static const char * const reserved55_groups[] = {
- "gpio55",
-};
static const char * const phase_flag17_groups[] = {
"gpio56",
};
-static const char * const reserved56_groups[] = {
- "gpio56",
-};
static const char * const qua_mi2s_groups[] = {
"gpio57",
};
@@ -1295,9 +892,6 @@
static const char * const phase_flag18_groups[] = {
"gpio57",
};
-static const char * const reserved57_groups[] = {
- "gpio57",
-};
static const char * const pri_mi2s_groups[] = {
"gpio65", "gpio67", "gpio68",
};
@@ -1307,33 +901,18 @@
static const char * const wsa_clk_groups[] = {
"gpio65",
};
-static const char * const reserved65_groups[] = {
- "gpio65",
-};
static const char * const pri_mi2s_ws_groups[] = {
"gpio66",
};
static const char * const wsa_data_groups[] = {
"gpio66",
};
-static const char * const reserved66_groups[] = {
- "gpio66",
-};
-static const char * const wsa_en_groups[] = {
- "gpio67", "gpio68",
-};
static const char * const atest_usb2_groups[] = {
"gpio67",
};
-static const char * const reserved67_groups[] = {
- "gpio67",
-};
static const char * const atest_usb23_groups[] = {
"gpio68",
};
-static const char * const reserved68_groups[] = {
- "gpio68",
-};
static const char * const ter_mi2s_groups[] = {
"gpio75", "gpio76", "gpio77", "gpio78",
};
@@ -1343,79 +922,33 @@
static const char * const atest_usb22_groups[] = {
"gpio75",
};
-static const char * const reserved75_groups[] = {
- "gpio75",
-};
static const char * const phase_flag9_groups[] = {
"gpio76",
};
static const char * const atest_usb21_groups[] = {
"gpio76",
};
-static const char * const reserved76_groups[] = {
- "gpio76",
-};
static const char * const phase_flag4_groups[] = {
"gpio77",
};
static const char * const atest_usb20_groups[] = {
"gpio77",
};
-static const char * const reserved77_groups[] = {
- "gpio77",
-};
-static const char * const ssc_irq_groups[] = {
- "gpio78", "gpio79", "gpio80", "gpio117", "gpio118", "gpio119",
- "gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
-};
-static const char * const reserved78_groups[] = {
- "gpio78",
-};
static const char * const sec_mi2s_groups[] = {
"gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
};
static const char * const GP_PDM2_groups[] = {
"gpio79",
};
-static const char * const reserved79_groups[] = {
- "gpio79",
-};
-static const char * const reserved80_groups[] = {
- "gpio80",
-};
static const char * const qup15_groups[] = {
"gpio81", "gpio82", "gpio83", "gpio84",
};
-static const char * const reserved81_groups[] = {
- "gpio81",
-};
-static const char * const reserved82_groups[] = {
- "gpio82",
-};
-static const char * const reserved83_groups[] = {
- "gpio83",
-};
-static const char * const reserved84_groups[] = {
- "gpio84",
-};
static const char * const qup5_groups[] = {
"gpio85", "gpio86", "gpio87", "gpio88",
};
-static const char * const reserved85_groups[] = {
- "gpio85",
-};
static const char * const copy_gp_groups[] = {
"gpio86",
};
-static const char * const reserved86_groups[] = {
- "gpio86",
-};
-static const char * const reserved87_groups[] = {
- "gpio87",
-};
-static const char * const reserved88_groups[] = {
- "gpio88",
-};
static const char * const tsif1_clk_groups[] = {
"gpio89",
};
@@ -1428,9 +961,6 @@
static const char * const phase_flag10_groups[] = {
"gpio89",
};
-static const char * const reserved89_groups[] = {
- "gpio89",
-};
static const char * const tsif1_en_groups[] = {
"gpio90",
};
@@ -1452,9 +982,6 @@
static const char * const phase_flag0_groups[] = {
"gpio90",
};
-static const char * const reserved90_groups[] = {
- "gpio90",
-};
static const char * const tsif1_data_groups[] = {
"gpio91",
};
@@ -1464,9 +991,6 @@
static const char * const tgu_ch1_groups[] = {
"gpio91",
};
-static const char * const reserved91_groups[] = {
- "gpio91",
-};
static const char * const tsif2_error_groups[] = {
"gpio92",
};
@@ -1479,9 +1003,6 @@
static const char * const tgu_ch2_groups[] = {
"gpio92",
};
-static const char * const reserved92_groups[] = {
- "gpio92",
-};
static const char * const tsif2_clk_groups[] = {
"gpio93",
};
@@ -1491,27 +1012,18 @@
static const char * const qup7_groups[] = {
"gpio93", "gpio94", "gpio95", "gpio96",
};
-static const char * const reserved93_groups[] = {
- "gpio93",
-};
static const char * const tsif2_en_groups[] = {
"gpio94",
};
static const char * const sdc42_groups[] = {
"gpio94",
};
-static const char * const reserved94_groups[] = {
- "gpio94",
-};
static const char * const tsif2_data_groups[] = {
"gpio95",
};
static const char * const sdc41_groups[] = {
"gpio95",
};
-static const char * const reserved95_groups[] = {
- "gpio95",
-};
static const char * const tsif2_sync_groups[] = {
"gpio96",
};
@@ -1521,114 +1033,63 @@
static const char * const phase_flag3_groups[] = {
"gpio96",
};
-static const char * const reserved96_groups[] = {
- "gpio96",
-};
static const char * const ldo_en_groups[] = {
"gpio97",
};
-static const char * const reserved97_groups[] = {
- "gpio97",
-};
static const char * const ldo_update_groups[] = {
"gpio98",
};
-static const char * const reserved98_groups[] = {
- "gpio98",
-};
static const char * const phase_flag14_groups[] = {
"gpio99",
};
static const char * const prng_rosc_groups[] = {
"gpio99", "gpio102",
};
-static const char * const reserved99_groups[] = {
- "gpio99",
-};
static const char * const phase_flag15_groups[] = {
"gpio100",
};
-static const char * const reserved100_groups[] = {
- "gpio100",
-};
static const char * const phase_flag5_groups[] = {
"gpio101",
};
-static const char * const reserved101_groups[] = {
- "gpio101",
-};
static const char * const pci_e1_groups[] = {
"gpio102", "gpio103",
};
-static const char * const reserved102_groups[] = {
- "gpio102",
-};
static const char * const COPY_PHASE_groups[] = {
"gpio103",
};
-static const char * const reserved103_groups[] = {
- "gpio103",
-};
static const char * const uim2_data_groups[] = {
"gpio105",
};
static const char * const qup13_groups[] = {
"gpio105", "gpio106", "gpio107", "gpio108",
};
-static const char * const reserved105_groups[] = {
- "gpio105",
-};
static const char * const uim2_clk_groups[] = {
"gpio106",
};
-static const char * const reserved106_groups[] = {
- "gpio106",
-};
static const char * const uim2_reset_groups[] = {
"gpio107",
};
-static const char * const reserved107_groups[] = {
- "gpio107",
-};
static const char * const uim2_present_groups[] = {
"gpio108",
};
-static const char * const reserved108_groups[] = {
- "gpio108",
-};
static const char * const uim1_data_groups[] = {
"gpio109",
};
-static const char * const reserved109_groups[] = {
- "gpio109",
-};
static const char * const uim1_clk_groups[] = {
"gpio110",
};
-static const char * const reserved110_groups[] = {
- "gpio110",
-};
static const char * const uim1_reset_groups[] = {
"gpio111",
};
-static const char * const reserved111_groups[] = {
- "gpio111",
-};
static const char * const uim1_present_groups[] = {
"gpio112",
};
-static const char * const reserved112_groups[] = {
- "gpio112",
-};
static const char * const uim_batt_groups[] = {
"gpio113",
};
static const char * const edp_hot_groups[] = {
"gpio113",
};
-static const char * const reserved113_groups[] = {
- "gpio113",
-};
static const char * const NAV_PPS_groups[] = {
"gpio114", "gpio114", "gpio115", "gpio115", "gpio128", "gpio128",
"gpio129", "gpio129", "gpio143", "gpio143",
@@ -1636,285 +1097,171 @@
static const char * const GPS_TX_groups[] = {
"gpio114", "gpio115", "gpio128", "gpio129", "gpio143", "gpio145",
};
-static const char * const reserved114_groups[] = {
- "gpio114",
-};
-static const char * const reserved115_groups[] = {
- "gpio115",
-};
-static const char * const reserved116_groups[] = {
- "gpio116",
-};
static const char * const atest_char_groups[] = {
"gpio117",
};
-static const char * const reserved117_groups[] = {
- "gpio117",
-};
static const char * const adsp_ext_groups[] = {
"gpio118",
};
static const char * const atest_char3_groups[] = {
"gpio118",
};
-static const char * const reserved118_groups[] = {
- "gpio118",
-};
static const char * const atest_char2_groups[] = {
"gpio119",
};
-static const char * const reserved119_groups[] = {
- "gpio119",
-};
static const char * const atest_char1_groups[] = {
"gpio120",
};
-static const char * const reserved120_groups[] = {
- "gpio120",
-};
static const char * const atest_char0_groups[] = {
"gpio121",
};
-static const char * const reserved121_groups[] = {
- "gpio121",
+static const char * const qlink_request_groups[] = {
+ "gpio130",
};
-static const char * const reserved122_groups[] = {
- "gpio122",
+static const char * const qlink_enable_groups[] = {
+ "gpio131",
};
-static const char * const reserved123_groups[] = {
- "gpio123",
+static const char * const pa_indicator_groups[] = {
+ "gpio135",
+};
+static const char * const phase_flag26_groups[] = {
+ "gpio137",
+};
+static const char * const phase_flag27_groups[] = {
+ "gpio138",
+};
+static const char * const phase_flag28_groups[] = {
+ "gpio139",
+};
+static const char * const phase_flag6_groups[] = {
+ "gpio140",
+};
+static const char * const phase_flag29_groups[] = {
+ "gpio141",
+};
+static const char * const phase_flag30_groups[] = {
+ "gpio142",
+};
+static const char * const phase_flag31_groups[] = {
+ "gpio143",
+};
+static const char * const mss_lte_groups[] = {
+ "gpio144", "gpio145",
};
static const struct msm_function sdm670_functions[] = {
FUNCTION(qup0),
FUNCTION(gpio),
- FUNCTION(reserved0),
- FUNCTION(reserved1),
- FUNCTION(reserved2),
- FUNCTION(reserved3),
FUNCTION(qup9),
FUNCTION(qdss_cti),
- FUNCTION(reserved4),
- FUNCTION(reserved5),
FUNCTION(ddr_pxi0),
- FUNCTION(reserved6),
FUNCTION(ddr_bist),
FUNCTION(atest_tsens2),
FUNCTION(vsense_trigger),
FUNCTION(atest_usb1),
- FUNCTION(reserved7),
FUNCTION(qup_l4),
FUNCTION(GP_PDM1),
- FUNCTION(reserved8),
FUNCTION(qup_l5),
- FUNCTION(reserved9),
FUNCTION(mdp_vsync),
FUNCTION(qup_l6),
FUNCTION(wlan2_adc1),
FUNCTION(atest_usb11),
FUNCTION(ddr_pxi2),
- FUNCTION(reserved10),
FUNCTION(edp_lcd),
FUNCTION(dbg_out),
FUNCTION(wlan2_adc0),
FUNCTION(atest_usb10),
- FUNCTION(reserved11),
FUNCTION(m_voc),
FUNCTION(tsif1_sync),
FUNCTION(ddr_pxi3),
- FUNCTION(reserved12),
FUNCTION(cam_mclk),
FUNCTION(pll_bypassnl),
FUNCTION(qdss_gpio0),
- FUNCTION(reserved13),
FUNCTION(pll_reset),
FUNCTION(qdss_gpio1),
- FUNCTION(reserved14),
FUNCTION(qdss_gpio2),
- FUNCTION(reserved15),
FUNCTION(qdss_gpio3),
- FUNCTION(reserved16),
FUNCTION(cci_i2c),
FUNCTION(qup1),
FUNCTION(qdss_gpio4),
- FUNCTION(reserved17),
FUNCTION(qdss_gpio5),
- FUNCTION(reserved18),
FUNCTION(qdss_gpio6),
- FUNCTION(reserved19),
FUNCTION(qdss_gpio7),
- FUNCTION(reserved20),
FUNCTION(cci_timer0),
FUNCTION(gcc_gp2),
FUNCTION(qdss_gpio8),
- FUNCTION(reserved21),
FUNCTION(cci_timer1),
FUNCTION(gcc_gp3),
FUNCTION(qdss_gpio),
- FUNCTION(reserved22),
FUNCTION(cci_timer2),
FUNCTION(qdss_gpio9),
- FUNCTION(reserved23),
FUNCTION(cci_timer3),
FUNCTION(cci_async),
FUNCTION(qdss_gpio10),
- FUNCTION(reserved24),
FUNCTION(cci_timer4),
FUNCTION(qdss_gpio11),
- FUNCTION(reserved25),
FUNCTION(qdss_gpio12),
FUNCTION(JITTER_BIST),
- FUNCTION(reserved26),
FUNCTION(qup2),
FUNCTION(qdss_gpio13),
FUNCTION(PLL_BIST),
- FUNCTION(reserved27),
FUNCTION(qdss_gpio14),
FUNCTION(AGERA_PLL),
- FUNCTION(reserved28),
FUNCTION(phase_flag1),
FUNCTION(qdss_gpio15),
FUNCTION(atest_tsens),
- FUNCTION(reserved29),
FUNCTION(phase_flag2),
- FUNCTION(reserved30),
FUNCTION(qup11),
FUNCTION(qup14),
- FUNCTION(reserved31),
- FUNCTION(reserved32),
- FUNCTION(reserved33),
- FUNCTION(reserved34),
FUNCTION(pci_e0),
FUNCTION(QUP_L4),
- FUNCTION(reserved35),
FUNCTION(QUP_L5),
- FUNCTION(reserved36),
FUNCTION(QUP_L6),
- FUNCTION(reserved37),
FUNCTION(usb_phy),
- FUNCTION(reserved38),
FUNCTION(lpass_slimbus),
- FUNCTION(reserved39),
FUNCTION(sd_write),
FUNCTION(tsif1_error),
- FUNCTION(reserved40),
FUNCTION(qup3),
- FUNCTION(reserved41),
- FUNCTION(reserved42),
- FUNCTION(reserved43),
- FUNCTION(reserved44),
- FUNCTION(bt_reset),
FUNCTION(qup6),
- FUNCTION(reserved45),
- FUNCTION(reserved46),
- FUNCTION(reserved47),
- FUNCTION(reserved124),
- FUNCTION(reserved125),
- FUNCTION(reserved126),
- FUNCTION(reserved127),
- FUNCTION(reserved128),
- FUNCTION(reserved129),
- FUNCTION(qlink_request),
- FUNCTION(reserved130),
- FUNCTION(qlink_enable),
- FUNCTION(reserved131),
- FUNCTION(reserved132),
- FUNCTION(reserved133),
- FUNCTION(reserved134),
- FUNCTION(pa_indicator),
- FUNCTION(reserved135),
- FUNCTION(reserved136),
- FUNCTION(phase_flag26),
- FUNCTION(reserved137),
- FUNCTION(phase_flag27),
- FUNCTION(reserved138),
- FUNCTION(phase_flag28),
- FUNCTION(reserved139),
- FUNCTION(phase_flag6),
- FUNCTION(reserved140),
- FUNCTION(phase_flag29),
- FUNCTION(reserved141),
- FUNCTION(phase_flag30),
- FUNCTION(reserved142),
- FUNCTION(phase_flag31),
- FUNCTION(reserved143),
- FUNCTION(mss_lte),
- FUNCTION(reserved144),
- FUNCTION(reserved145),
- FUNCTION(reserved146),
- FUNCTION(reserved147),
- FUNCTION(reserved148),
- FUNCTION(reserved149),
- FUNCTION(reserved48),
FUNCTION(qup12),
- FUNCTION(reserved49),
- FUNCTION(reserved50),
- FUNCTION(reserved51),
FUNCTION(phase_flag16),
- FUNCTION(reserved52),
FUNCTION(qup10),
FUNCTION(phase_flag11),
- FUNCTION(reserved53),
FUNCTION(GP_PDM0),
FUNCTION(phase_flag12),
FUNCTION(wlan1_adc1),
FUNCTION(atest_usb13),
FUNCTION(ddr_pxi1),
- FUNCTION(reserved54),
FUNCTION(phase_flag13),
FUNCTION(wlan1_adc0),
FUNCTION(atest_usb12),
- FUNCTION(reserved55),
FUNCTION(phase_flag17),
- FUNCTION(reserved56),
FUNCTION(qua_mi2s),
FUNCTION(gcc_gp1),
FUNCTION(phase_flag18),
- FUNCTION(reserved57),
FUNCTION(pri_mi2s),
FUNCTION(qup8),
FUNCTION(wsa_clk),
- FUNCTION(reserved65),
FUNCTION(pri_mi2s_ws),
FUNCTION(wsa_data),
- FUNCTION(reserved66),
- FUNCTION(wsa_en),
FUNCTION(atest_usb2),
- FUNCTION(reserved67),
FUNCTION(atest_usb23),
- FUNCTION(reserved68),
FUNCTION(ter_mi2s),
FUNCTION(phase_flag8),
FUNCTION(atest_usb22),
- FUNCTION(reserved75),
FUNCTION(phase_flag9),
FUNCTION(atest_usb21),
- FUNCTION(reserved76),
FUNCTION(phase_flag4),
FUNCTION(atest_usb20),
- FUNCTION(reserved77),
- FUNCTION(ssc_irq),
- FUNCTION(reserved78),
FUNCTION(sec_mi2s),
FUNCTION(GP_PDM2),
- FUNCTION(reserved79),
- FUNCTION(reserved80),
FUNCTION(qup15),
- FUNCTION(reserved81),
- FUNCTION(reserved82),
- FUNCTION(reserved83),
- FUNCTION(reserved84),
FUNCTION(qup5),
- FUNCTION(reserved85),
FUNCTION(copy_gp),
- FUNCTION(reserved86),
- FUNCTION(reserved87),
- FUNCTION(reserved88),
FUNCTION(tsif1_clk),
FUNCTION(qup4),
FUNCTION(tgu_ch3),
FUNCTION(phase_flag10),
- FUNCTION(reserved89),
FUNCTION(tsif1_en),
FUNCTION(mdp_vsync0),
FUNCTION(mdp_vsync1),
@@ -1922,326 +1269,320 @@
FUNCTION(mdp_vsync3),
FUNCTION(tgu_ch0),
FUNCTION(phase_flag0),
- FUNCTION(reserved90),
FUNCTION(tsif1_data),
FUNCTION(sdc4_cmd),
FUNCTION(tgu_ch1),
- FUNCTION(reserved91),
FUNCTION(tsif2_error),
FUNCTION(sdc43),
FUNCTION(vfr_1),
FUNCTION(tgu_ch2),
- FUNCTION(reserved92),
FUNCTION(tsif2_clk),
FUNCTION(sdc4_clk),
FUNCTION(qup7),
- FUNCTION(reserved93),
FUNCTION(tsif2_en),
FUNCTION(sdc42),
- FUNCTION(reserved94),
FUNCTION(tsif2_data),
FUNCTION(sdc41),
- FUNCTION(reserved95),
FUNCTION(tsif2_sync),
FUNCTION(sdc40),
FUNCTION(phase_flag3),
- FUNCTION(reserved96),
FUNCTION(ldo_en),
- FUNCTION(reserved97),
FUNCTION(ldo_update),
- FUNCTION(reserved98),
FUNCTION(phase_flag14),
FUNCTION(prng_rosc),
- FUNCTION(reserved99),
FUNCTION(phase_flag15),
- FUNCTION(reserved100),
FUNCTION(phase_flag5),
- FUNCTION(reserved101),
FUNCTION(pci_e1),
- FUNCTION(reserved102),
FUNCTION(COPY_PHASE),
- FUNCTION(reserved103),
FUNCTION(uim2_data),
FUNCTION(qup13),
- FUNCTION(reserved105),
FUNCTION(uim2_clk),
- FUNCTION(reserved106),
FUNCTION(uim2_reset),
- FUNCTION(reserved107),
FUNCTION(uim2_present),
- FUNCTION(reserved108),
FUNCTION(uim1_data),
- FUNCTION(reserved109),
FUNCTION(uim1_clk),
- FUNCTION(reserved110),
FUNCTION(uim1_reset),
- FUNCTION(reserved111),
FUNCTION(uim1_present),
- FUNCTION(reserved112),
FUNCTION(uim_batt),
FUNCTION(edp_hot),
- FUNCTION(reserved113),
FUNCTION(NAV_PPS),
FUNCTION(GPS_TX),
- FUNCTION(reserved114),
- FUNCTION(reserved115),
- FUNCTION(reserved116),
FUNCTION(atest_char),
- FUNCTION(reserved117),
FUNCTION(adsp_ext),
FUNCTION(atest_char3),
- FUNCTION(reserved118),
FUNCTION(atest_char2),
- FUNCTION(reserved119),
FUNCTION(atest_char1),
- FUNCTION(reserved120),
FUNCTION(atest_char0),
- FUNCTION(reserved121),
- FUNCTION(reserved122),
- FUNCTION(reserved123),
+ FUNCTION(qlink_request),
+ FUNCTION(qlink_enable),
+ FUNCTION(pa_indicator),
+ FUNCTION(phase_flag26),
+ FUNCTION(phase_flag27),
+ FUNCTION(phase_flag28),
+ FUNCTION(phase_flag6),
+ FUNCTION(phase_flag29),
+ FUNCTION(phase_flag30),
+ FUNCTION(phase_flag31),
+ FUNCTION(mss_lte),
};
+/* Every pin is maintained as a single group, and missing or non-existing pin
+ * would be maintained as dummy group to synchronize pin group index with
+ * pin descriptor registered with pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
static const struct msm_pingroup sdm670_groups[] = {
- PINGROUP(0, SOUTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
- PINGROUP(1, SOUTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
- PINGROUP(2, SOUTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
- PINGROUP(3, SOUTH, qup0, NA, reserved3, NA, NA, NA, NA, NA, NA),
- PINGROUP(4, NORTH, qup9, qdss_cti, reserved4, NA, NA, NA, NA, NA, NA),
- PINGROUP(5, NORTH, qup9, qdss_cti, reserved5, NA, NA, NA, NA, NA, NA),
- PINGROUP(6, NORTH, qup9, NA, ddr_pxi0, reserved6, NA, NA, NA, NA, NA),
- PINGROUP(7, NORTH, qup9, ddr_bist, NA, atest_tsens2, vsense_trigger,
- atest_usb1, ddr_pxi0, reserved7, NA),
- PINGROUP(8, WEST, qup_l4, GP_PDM1, ddr_bist, NA, reserved8, NA, NA, NA,
- NA),
- PINGROUP(9, WEST, qup_l5, ddr_bist, reserved9, NA, NA, NA, NA, NA, NA),
- PINGROUP(10, NORTH, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
- atest_usb11, ddr_pxi2, reserved10, NA, NA),
- PINGROUP(11, NORTH, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
- atest_usb10, ddr_pxi2, reserved11, NA, NA),
- PINGROUP(12, SOUTH, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, reserved12,
- NA, NA, NA, NA),
- PINGROUP(13, WEST, cam_mclk, pll_bypassnl, qdss_gpio0, ddr_pxi3,
- reserved13, NA, NA, NA, NA),
- PINGROUP(14, WEST, cam_mclk, pll_reset, qdss_gpio1, reserved14, NA, NA,
- NA, NA, NA),
- PINGROUP(15, WEST, cam_mclk, qdss_gpio2, reserved15, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(16, WEST, cam_mclk, qdss_gpio3, reserved16, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(17, WEST, cci_i2c, qup1, qdss_gpio4, reserved17, NA, NA, NA,
- NA, NA),
- PINGROUP(18, WEST, cci_i2c, qup1, NA, qdss_gpio5, reserved18, NA, NA,
- NA, NA),
- PINGROUP(19, WEST, cci_i2c, qup1, NA, qdss_gpio6, reserved19, NA, NA,
- NA, NA),
- PINGROUP(20, WEST, cci_i2c, qup1, NA, qdss_gpio7, reserved20, NA, NA,
- NA, NA),
- PINGROUP(21, WEST, cci_timer0, gcc_gp2, qdss_gpio8, NA, reserved21, NA,
- NA, NA, NA),
- PINGROUP(22, WEST, cci_timer1, gcc_gp3, qdss_gpio, NA, reserved22, NA,
- NA, NA, NA),
- PINGROUP(23, WEST, cci_timer2, qdss_gpio9, NA, reserved23, NA, NA, NA,
- NA, NA),
- PINGROUP(24, WEST, cci_timer3, cci_async, qdss_gpio10, reserved24, NA,
- NA, NA, NA, NA),
- PINGROUP(25, WEST, cci_timer4, cci_async, qdss_gpio11, NA, reserved25,
- NA, NA, NA, NA),
- PINGROUP(26, WEST, cci_async, qdss_gpio12, JITTER_BIST, NA, reserved26,
- NA, NA, NA, NA),
- PINGROUP(27, WEST, qup2, qdss_gpio13, PLL_BIST, NA, reserved27, NA, NA,
- NA, NA),
- PINGROUP(28, WEST, qup2, qdss_gpio14, AGERA_PLL, NA, reserved28, NA,
- NA, NA, NA),
- PINGROUP(29, WEST, qup2, NA, phase_flag1, qdss_gpio15, atest_tsens,
- reserved29, NA, NA, NA),
- PINGROUP(30, WEST, qup2, phase_flag2, qdss_gpio, reserved30, NA, NA,
- NA, NA, NA),
- PINGROUP(31, WEST, qup11, qup14, reserved31, NA, NA, NA, NA, NA, NA),
- PINGROUP(32, WEST, qup11, qup14, NA, reserved32, NA, NA, NA, NA, NA),
- PINGROUP(33, WEST, qup11, qup14, NA, reserved33, NA, NA, NA, NA, NA),
- PINGROUP(34, WEST, qup11, qup14, NA, reserved34, NA, NA, NA, NA, NA),
- PINGROUP(35, NORTH, pci_e0, QUP_L4, JITTER_BIST, NA, reserved35, NA,
- NA, NA, NA),
- PINGROUP(36, NORTH, pci_e0, QUP_L5, PLL_BIST, NA, reserved36, NA, NA,
- NA, NA),
- PINGROUP(37, NORTH, QUP_L6, AGERA_PLL, NA, reserved37, NA, NA, NA, NA,
- NA),
- PINGROUP(38, NORTH, usb_phy, NA, reserved38, NA, NA, NA, NA, NA, NA),
- PINGROUP(39, NORTH, lpass_slimbus, NA, reserved39, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(40, NORTH, sd_write, tsif1_error, NA, reserved40, NA, NA, NA,
- NA, NA),
- PINGROUP(41, SOUTH, qup3, NA, qdss_gpio6, reserved41, NA, NA, NA, NA,
- NA),
- PINGROUP(42, SOUTH, qup3, NA, qdss_gpio7, reserved42, NA, NA, NA, NA,
- NA),
- PINGROUP(43, SOUTH, qup3, NA, qdss_gpio14, reserved43, NA, NA, NA, NA,
- NA),
- PINGROUP(44, SOUTH, qup3, NA, qdss_gpio15, reserved44, NA, NA, NA, NA,
- NA),
- PINGROUP(45, SOUTH, qup6, NA, reserved45, NA, NA, NA, NA, NA, NA),
- PINGROUP(46, SOUTH, qup6, NA, reserved46, NA, NA, NA, NA, NA, NA),
- PINGROUP(47, SOUTH, qup6, reserved47, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(48, SOUTH, qup6, reserved48, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(49, NORTH, qup12, reserved49, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(50, NORTH, qup12, reserved50, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(51, NORTH, qup12, qdss_cti, reserved51, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(52, NORTH, qup12, phase_flag16, qdss_cti, reserved52, NA, NA,
- NA, NA, NA),
- PINGROUP(53, NORTH, qup10, phase_flag11, reserved53, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(54, NORTH, qup10, GP_PDM0, phase_flag12, NA, wlan1_adc1,
- atest_usb13, ddr_pxi1, reserved54, NA),
- PINGROUP(55, NORTH, qup10, phase_flag13, NA, wlan1_adc0, atest_usb12,
- ddr_pxi1, reserved55, NA, NA),
- PINGROUP(56, NORTH, qup10, phase_flag17, reserved56, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(57, NORTH, qua_mi2s, gcc_gp1, phase_flag18, reserved57, NA,
- NA, NA, NA, NA),
- PINGROUP(65, NORTH, pri_mi2s, qup8, wsa_clk, NA, reserved65, NA, NA,
- NA, NA),
- PINGROUP(66, NORTH, pri_mi2s_ws, qup8, wsa_data, GP_PDM1, NA,
- reserved66, NA, NA, NA),
- PINGROUP(67, NORTH, pri_mi2s, qup8, NA, atest_usb2, reserved67, NA, NA,
- NA, NA),
- PINGROUP(68, NORTH, pri_mi2s, qup8, NA, atest_usb23, reserved68, NA,
- NA, NA, NA),
- PINGROUP(75, NORTH, ter_mi2s, phase_flag8, qdss_gpio8, atest_usb22,
- QUP_L4, reserved75, NA, NA, NA),
- PINGROUP(76, NORTH, ter_mi2s, phase_flag9, qdss_gpio9, atest_usb21,
- QUP_L5, reserved76, NA, NA, NA),
- PINGROUP(77, NORTH, ter_mi2s, phase_flag4, qdss_gpio10, atest_usb20,
- QUP_L6, reserved77, NA, NA, NA),
- PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, NA, reserved78, NA, NA, NA, NA,
- NA),
- PINGROUP(79, NORTH, sec_mi2s, GP_PDM2, NA, qdss_gpio11, NA, reserved79,
- NA, NA, NA),
- PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, reserved80, NA, NA, NA,
- NA, NA),
- PINGROUP(81, NORTH, sec_mi2s, qup15, NA, reserved81, NA, NA, NA, NA,
- NA),
- PINGROUP(82, NORTH, sec_mi2s, qup15, NA, reserved82, NA, NA, NA, NA,
- NA),
- PINGROUP(83, NORTH, sec_mi2s, qup15, NA, reserved83, NA, NA, NA, NA,
- NA),
- PINGROUP(84, NORTH, qup15, NA, reserved84, NA, NA, NA, NA, NA, NA),
- PINGROUP(85, SOUTH, qup5, NA, reserved85, NA, NA, NA, NA, NA, NA),
- PINGROUP(86, SOUTH, qup5, copy_gp, NA, reserved86, NA, NA, NA, NA, NA),
- PINGROUP(87, SOUTH, qup5, NA, reserved87, NA, NA, NA, NA, NA, NA),
- PINGROUP(88, SOUTH, qup5, NA, reserved88, NA, NA, NA, NA, NA, NA),
- PINGROUP(89, SOUTH, tsif1_clk, qup4, tgu_ch3, phase_flag10, reserved89,
- NA, NA, NA, NA),
- PINGROUP(90, SOUTH, tsif1_en, mdp_vsync0, qup4, mdp_vsync1, mdp_vsync2,
- mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti),
- PINGROUP(91, SOUTH, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA, qdss_cti,
- reserved91, NA, NA),
- PINGROUP(92, SOUTH, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2, NA,
- reserved92, NA, NA),
- PINGROUP(93, SOUTH, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
- reserved93, NA, NA, NA),
- PINGROUP(94, SOUTH, tsif2_en, sdc42, qup7, NA, reserved94, NA, NA, NA,
- NA),
- PINGROUP(95, SOUTH, tsif2_data, sdc41, qup7, GP_PDM0, NA, reserved95,
- NA, NA, NA),
- PINGROUP(96, SOUTH, tsif2_sync, sdc40, qup7, phase_flag3, reserved96,
- NA, NA, NA, NA),
- PINGROUP(97, WEST, NA, NA, mdp_vsync, ldo_en, reserved97, NA, NA, NA,
- NA),
- PINGROUP(98, WEST, NA, mdp_vsync, ldo_update, reserved98, NA, NA, NA,
- NA, NA),
- PINGROUP(99, NORTH, phase_flag14, prng_rosc, reserved99, NA, NA, NA,
- NA, NA, NA),
- PINGROUP(100, WEST, phase_flag15, reserved100, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(101, WEST, NA, phase_flag5, reserved101, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(102, WEST, pci_e1, prng_rosc, reserved102, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(103, WEST, pci_e1, COPY_PHASE, reserved103, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(105, NORTH, uim2_data, qup13, qup_l4, NA, reserved105, NA, NA,
- NA, NA),
- PINGROUP(106, NORTH, uim2_clk, qup13, qup_l5, NA, reserved106, NA, NA,
- NA, NA),
- PINGROUP(107, NORTH, uim2_reset, qup13, qup_l6, reserved107, NA, NA,
- NA, NA, NA),
- PINGROUP(108, NORTH, uim2_present, qup13, reserved108, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(109, NORTH, uim1_data, reserved109, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(110, NORTH, uim1_clk, reserved110, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(111, NORTH, uim1_reset, reserved111, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(112, NORTH, uim1_present, reserved112, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(113, NORTH, uim_batt, edp_hot, reserved113, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(114, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved114, NA,
- NA, NA),
- PINGROUP(115, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved115, NA,
- NA, NA),
- PINGROUP(116, SOUTH, NA, reserved116, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(117, NORTH, NA, qdss_gpio0, atest_char, reserved117, NA, NA,
- NA, NA, NA),
- PINGROUP(118, NORTH, adsp_ext, NA, qdss_gpio1, atest_char3,
- reserved118, NA, NA, NA, NA),
- PINGROUP(119, NORTH, NA, qdss_gpio2, atest_char2, reserved119, NA, NA,
- NA, NA, NA),
- PINGROUP(120, NORTH, NA, qdss_gpio3, atest_char1, reserved120, NA, NA,
- NA, NA, NA),
- PINGROUP(121, NORTH, NA, qdss_gpio4, atest_char0, reserved121, NA, NA,
- NA, NA, NA),
- PINGROUP(122, NORTH, NA, qdss_gpio5, reserved122, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(123, NORTH, qup_l4, NA, qdss_gpio, reserved123, NA, NA, NA,
- NA, NA),
- PINGROUP(124, NORTH, qup_l5, NA, qdss_gpio, reserved124, NA, NA, NA,
- NA, NA),
- PINGROUP(125, NORTH, qup_l6, NA, reserved125, NA, NA, NA, NA, NA, NA),
- PINGROUP(126, NORTH, NA, reserved126, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(127, WEST, NA, NA, reserved127, NA, NA, NA, NA, NA, NA),
- PINGROUP(128, WEST, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved128, NA, NA,
- NA, NA),
- PINGROUP(129, WEST, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved129, NA, NA,
- NA, NA),
- PINGROUP(130, WEST, qlink_request, NA, reserved130, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(131, WEST, qlink_enable, NA, reserved131, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(132, WEST, NA, NA, reserved132, NA, NA, NA, NA, NA, NA),
- PINGROUP(133, NORTH, NA, reserved133, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(134, NORTH, NA, reserved134, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(135, WEST, NA, pa_indicator, NA, reserved135, NA, NA, NA, NA,
- NA),
- PINGROUP(136, WEST, NA, NA, reserved136, NA, NA, NA, NA, NA, NA),
- PINGROUP(137, WEST, NA, NA, phase_flag26, reserved137, NA, NA, NA, NA,
- NA),
- PINGROUP(138, WEST, NA, NA, phase_flag27, reserved138, NA, NA, NA, NA,
- NA),
- PINGROUP(139, WEST, NA, phase_flag28, reserved139, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(140, WEST, NA, NA, phase_flag6, reserved140, NA, NA, NA, NA,
- NA),
- PINGROUP(141, WEST, NA, phase_flag29, reserved141, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(142, WEST, NA, phase_flag30, reserved142, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(143, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, phase_flag31,
- reserved143, NA, NA, NA),
- PINGROUP(144, SOUTH, mss_lte, reserved144, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(145, SOUTH, mss_lte, GPS_TX, reserved145, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(146, WEST, NA, NA, reserved146, NA, NA, NA, NA, NA, NA),
- PINGROUP(147, WEST, NA, NA, reserved147, NA, NA, NA, NA, NA, NA),
- PINGROUP(148, WEST, NA, reserved148, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(149, WEST, NA, reserved149, NA, NA, NA, NA, NA, NA, NA),
- SDC_QDSD_PINGROUP(sdc1_clk, 0x599000, 13, 6),
- SDC_QDSD_PINGROUP(sdc1_cmd, 0x599000, 11, 3),
- SDC_QDSD_PINGROUP(sdc1_data, 0x599000, 9, 0),
- SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
- SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
- SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
+ [0] = PINGROUP(0, SOUTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [1] = PINGROUP(1, SOUTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [2] = PINGROUP(2, SOUTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [3] = PINGROUP(3, SOUTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [4] = PINGROUP(4, NORTH, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ [5] = PINGROUP(5, NORTH, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ [6] = PINGROUP(6, NORTH, qup9, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA),
+ [7] = PINGROUP(7, NORTH, qup9, ddr_bist, NA, atest_tsens2,
+ vsense_trigger, atest_usb1, ddr_pxi0, NA, NA),
+ [8] = PINGROUP(8, WEST, qup_l4, GP_PDM1, ddr_bist, NA, NA, NA, NA, NA,
+ NA),
+ [9] = PINGROUP(9, WEST, qup_l5, ddr_bist, NA, NA, NA, NA, NA, NA, NA),
+ [10] = PINGROUP(10, NORTH, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
+ atest_usb11, ddr_pxi2, NA, NA, NA),
+ [11] = PINGROUP(11, NORTH, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
+ atest_usb10, ddr_pxi2, NA, NA, NA),
+ [12] = PINGROUP(12, SOUTH, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, NA,
+ NA, NA, NA, NA),
+ [13] = PINGROUP(13, WEST, cam_mclk, pll_bypassnl, qdss_gpio0, ddr_pxi3,
+ NA, NA, NA, NA, NA),
+ [14] = PINGROUP(14, WEST, cam_mclk, pll_reset, qdss_gpio1, NA, NA, NA,
+ NA, NA, NA),
+ [15] = PINGROUP(15, WEST, cam_mclk, qdss_gpio2, NA, NA, NA, NA, NA, NA,
+ NA),
+ [16] = PINGROUP(16, WEST, cam_mclk, qdss_gpio3, NA, NA, NA, NA, NA, NA,
+ NA),
+ [17] = PINGROUP(17, WEST, cci_i2c, qup1, qdss_gpio4, NA, NA, NA, NA,
+ NA, NA),
+ [18] = PINGROUP(18, WEST, cci_i2c, qup1, NA, qdss_gpio5, NA, NA, NA,
+ NA, NA),
+ [19] = PINGROUP(19, WEST, cci_i2c, qup1, NA, qdss_gpio6, NA, NA, NA,
+ NA, NA),
+ [20] = PINGROUP(20, WEST, cci_i2c, qup1, NA, qdss_gpio7, NA, NA, NA,
+ NA, NA),
+ [21] = PINGROUP(21, WEST, cci_timer0, gcc_gp2, qdss_gpio8, NA, NA, NA,
+ NA, NA, NA),
+ [22] = PINGROUP(22, WEST, cci_timer1, gcc_gp3, qdss_gpio, NA, NA, NA,
+ NA, NA, NA),
+ [23] = PINGROUP(23, WEST, cci_timer2, qdss_gpio9, NA, NA, NA, NA, NA,
+ NA, NA),
+ [24] = PINGROUP(24, WEST, cci_timer3, cci_async, qdss_gpio10, NA, NA,
+ NA, NA, NA, NA),
+ [25] = PINGROUP(25, WEST, cci_timer4, cci_async, qdss_gpio11, NA, NA,
+ NA, NA, NA, NA),
+ [26] = PINGROUP(26, WEST, cci_async, qdss_gpio12, JITTER_BIST, NA, NA,
+ NA, NA, NA, NA),
+ [27] = PINGROUP(27, WEST, qup2, qdss_gpio13, PLL_BIST, NA, NA, NA, NA,
+ NA, NA),
+ [28] = PINGROUP(28, WEST, qup2, qdss_gpio14, AGERA_PLL, NA, NA, NA, NA,
+ NA, NA),
+ [29] = PINGROUP(29, WEST, qup2, NA, phase_flag1, qdss_gpio15,
+ atest_tsens, NA, NA, NA, NA),
+ [30] = PINGROUP(30, WEST, qup2, phase_flag2, qdss_gpio, NA, NA, NA, NA,
+ NA, NA),
+ [31] = PINGROUP(31, WEST, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [32] = PINGROUP(32, WEST, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [33] = PINGROUP(33, WEST, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [34] = PINGROUP(34, WEST, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [35] = PINGROUP(35, NORTH, pci_e0, QUP_L4, JITTER_BIST, NA, NA, NA, NA,
+ NA, NA),
+ [36] = PINGROUP(36, NORTH, pci_e0, QUP_L5, PLL_BIST, NA, NA, NA, NA,
+ NA, NA),
+ [37] = PINGROUP(37, NORTH, QUP_L6, AGERA_PLL, NA, NA, NA, NA, NA, NA,
+ NA),
+ [38] = PINGROUP(38, NORTH, usb_phy, NA, NA, NA, NA, NA, NA, NA, NA),
+ [39] = PINGROUP(39, NORTH, lpass_slimbus, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [40] = PINGROUP(40, NORTH, sd_write, tsif1_error, NA, NA, NA, NA, NA,
+ NA, NA),
+ [41] = PINGROUP(41, SOUTH, qup3, NA, qdss_gpio6, NA, NA, NA, NA, NA,
+ NA),
+ [42] = PINGROUP(42, SOUTH, qup3, NA, qdss_gpio7, NA, NA, NA, NA, NA,
+ NA),
+ [43] = PINGROUP(43, SOUTH, qup3, NA, qdss_gpio14, NA, NA, NA, NA, NA,
+ NA),
+ [44] = PINGROUP(44, SOUTH, qup3, NA, qdss_gpio15, NA, NA, NA, NA, NA,
+ NA),
+ [45] = PINGROUP(45, SOUTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [46] = PINGROUP(46, SOUTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [47] = PINGROUP(47, SOUTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [48] = PINGROUP(48, SOUTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [49] = PINGROUP(49, NORTH, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
+ [50] = PINGROUP(50, NORTH, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
+ [51] = PINGROUP(51, NORTH, qup12, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ [52] = PINGROUP(52, NORTH, qup12, phase_flag16, qdss_cti, NA, NA, NA,
+ NA, NA, NA),
+ [53] = PINGROUP(53, NORTH, qup10, phase_flag11, NA, NA, NA, NA, NA, NA,
+ NA),
+ [54] = PINGROUP(54, NORTH, qup10, GP_PDM0, phase_flag12, NA,
+ wlan1_adc1, atest_usb13, ddr_pxi1, NA, NA),
+ [55] = PINGROUP(55, NORTH, qup10, phase_flag13, NA, wlan1_adc0,
+ atest_usb12, ddr_pxi1, NA, NA, NA),
+ [56] = PINGROUP(56, NORTH, qup10, phase_flag17, NA, NA, NA, NA, NA, NA,
+ NA),
+ [57] = PINGROUP(57, NORTH, qua_mi2s, gcc_gp1, phase_flag18, NA, NA, NA,
+ NA, NA, NA),
+ [58] = PINGROUP(58, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [59] = PINGROUP(59, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [60] = PINGROUP(60, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [61] = PINGROUP(61, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [62] = PINGROUP(62, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [63] = PINGROUP(63, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [64] = PINGROUP(64, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [65] = PINGROUP(65, NORTH, pri_mi2s, qup8, wsa_clk, NA, NA, NA, NA, NA,
+ NA),
+ [66] = PINGROUP(66, NORTH, pri_mi2s_ws, qup8, wsa_data, GP_PDM1, NA,
+ NA, NA, NA, NA),
+ [67] = PINGROUP(67, NORTH, pri_mi2s, qup8, NA, atest_usb2, NA, NA, NA,
+ NA, NA),
+ [68] = PINGROUP(68, NORTH, pri_mi2s, qup8, NA, atest_usb23, NA, NA, NA,
+ NA, NA),
+ [69] = PINGROUP(69, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [70] = PINGROUP(70, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [71] = PINGROUP(71, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [72] = PINGROUP(72, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [73] = PINGROUP(73, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [74] = PINGROUP(74, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [75] = PINGROUP(75, NORTH, ter_mi2s, phase_flag8, qdss_gpio8,
+ atest_usb22, QUP_L4, NA, NA, NA, NA),
+ [76] = PINGROUP(76, NORTH, ter_mi2s, phase_flag9, qdss_gpio9,
+ atest_usb21, QUP_L5, NA, NA, NA, NA),
+ [77] = PINGROUP(77, NORTH, ter_mi2s, phase_flag4, qdss_gpio10,
+ atest_usb20, QUP_L6, NA, NA, NA, NA),
+ [78] = PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, NA, NA, NA, NA, NA, NA,
+ NA),
+ [79] = PINGROUP(79, NORTH, sec_mi2s, GP_PDM2, NA, qdss_gpio11, NA, NA,
+ NA, NA, NA),
+ [80] = PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, NA, NA, NA, NA,
+ NA, NA),
+ [81] = PINGROUP(81, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+ [82] = PINGROUP(82, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+ [83] = PINGROUP(83, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+ [84] = PINGROUP(84, NORTH, qup15, NA, NA, NA, NA, NA, NA, NA, NA),
+ [85] = PINGROUP(85, SOUTH, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+ [86] = PINGROUP(86, SOUTH, qup5, copy_gp, NA, NA, NA, NA, NA, NA, NA),
+ [87] = PINGROUP(87, SOUTH, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+ [88] = PINGROUP(88, SOUTH, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+ [89] = PINGROUP(89, SOUTH, tsif1_clk, qup4, tgu_ch3, phase_flag10, NA,
+ NA, NA, NA, NA),
+ [90] = PINGROUP(90, SOUTH, tsif1_en, mdp_vsync0, qup4, mdp_vsync1,
+ mdp_vsync2, mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti),
+ [91] = PINGROUP(91, SOUTH, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA,
+ qdss_cti, NA, NA, NA),
+ [92] = PINGROUP(92, SOUTH, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2,
+ NA, NA, NA, NA),
+ [93] = PINGROUP(93, SOUTH, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
+ NA, NA, NA, NA),
+ [94] = PINGROUP(94, SOUTH, tsif2_en, sdc42, qup7, NA, NA, NA, NA, NA,
+ NA),
+ [95] = PINGROUP(95, SOUTH, tsif2_data, sdc41, qup7, GP_PDM0, NA, NA,
+ NA, NA, NA),
+ [96] = PINGROUP(96, SOUTH, tsif2_sync, sdc40, qup7, phase_flag3, NA,
+ NA, NA, NA, NA),
+ [97] = PINGROUP(97, WEST, NA, NA, mdp_vsync, ldo_en, NA, NA, NA, NA,
+ NA),
+ [98] = PINGROUP(98, WEST, NA, mdp_vsync, ldo_update, NA, NA, NA, NA,
+ NA, NA),
+ [99] = PINGROUP(99, NORTH, phase_flag14, prng_rosc, NA, NA, NA, NA, NA,
+ NA, NA),
+ [100] = PINGROUP(100, WEST, phase_flag15, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [101] = PINGROUP(101, WEST, NA, phase_flag5, NA, NA, NA, NA, NA, NA,
+ NA),
+ [102] = PINGROUP(102, WEST, pci_e1, prng_rosc, NA, NA, NA, NA, NA, NA,
+ NA),
+ [103] = PINGROUP(103, WEST, pci_e1, COPY_PHASE, NA, NA, NA, NA, NA, NA,
+ NA),
+ [104] = PINGROUP(104, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [105] = PINGROUP(105, NORTH, uim2_data, qup13, qup_l4, NA, NA, NA, NA,
+ NA, NA),
+ [106] = PINGROUP(106, NORTH, uim2_clk, qup13, qup_l5, NA, NA, NA, NA,
+ NA, NA),
+ [107] = PINGROUP(107, NORTH, uim2_reset, qup13, qup_l6, NA, NA, NA, NA,
+ NA, NA),
+ [108] = PINGROUP(108, NORTH, uim2_present, qup13, NA, NA, NA, NA, NA,
+ NA, NA),
+ [109] = PINGROUP(109, NORTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ [110] = PINGROUP(110, NORTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ [111] = PINGROUP(111, NORTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [112] = PINGROUP(112, NORTH, uim1_present, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [113] = PINGROUP(113, NORTH, uim_batt, edp_hot, NA, NA, NA, NA, NA, NA,
+ NA),
+ [114] = PINGROUP(114, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA,
+ NA, NA),
+ [115] = PINGROUP(115, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA,
+ NA, NA),
+ [116] = PINGROUP(116, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [117] = PINGROUP(117, NORTH, NA, qdss_gpio0, atest_char, NA, NA, NA,
+ NA, NA, NA),
+ [118] = PINGROUP(118, NORTH, adsp_ext, NA, qdss_gpio1, atest_char3, NA,
+ NA, NA, NA, NA),
+ [119] = PINGROUP(119, NORTH, NA, qdss_gpio2, atest_char2, NA, NA, NA,
+ NA, NA, NA),
+ [120] = PINGROUP(120, NORTH, NA, qdss_gpio3, atest_char1, NA, NA, NA,
+ NA, NA, NA),
+ [121] = PINGROUP(121, NORTH, NA, qdss_gpio4, atest_char0, NA, NA, NA,
+ NA, NA, NA),
+ [122] = PINGROUP(122, NORTH, NA, qdss_gpio5, NA, NA, NA, NA, NA, NA,
+ NA),
+ [123] = PINGROUP(123, NORTH, qup_l4, NA, qdss_gpio, NA, NA, NA, NA, NA,
+ NA),
+ [124] = PINGROUP(124, NORTH, qup_l5, NA, qdss_gpio, NA, NA, NA, NA, NA,
+ NA),
+ [125] = PINGROUP(125, NORTH, qup_l6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [126] = PINGROUP(126, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [127] = PINGROUP(127, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [128] = PINGROUP(128, WEST, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, NA,
+ NA, NA),
+ [129] = PINGROUP(129, WEST, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, NA,
+ NA, NA),
+ [130] = PINGROUP(130, WEST, qlink_request, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [131] = PINGROUP(131, WEST, qlink_enable, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [132] = PINGROUP(132, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [133] = PINGROUP(133, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [134] = PINGROUP(134, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [135] = PINGROUP(135, WEST, NA, pa_indicator, NA, NA, NA, NA, NA, NA,
+ NA),
+ [136] = PINGROUP(136, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [137] = PINGROUP(137, WEST, NA, NA, phase_flag26, NA, NA, NA, NA, NA,
+ NA),
+ [138] = PINGROUP(138, WEST, NA, NA, phase_flag27, NA, NA, NA, NA, NA,
+ NA),
+ [139] = PINGROUP(139, WEST, NA, phase_flag28, NA, NA, NA, NA, NA, NA,
+ NA),
+ [140] = PINGROUP(140, WEST, NA, NA, phase_flag6, NA, NA, NA, NA, NA,
+ NA),
+ [141] = PINGROUP(141, WEST, NA, phase_flag29, NA, NA, NA, NA, NA, NA,
+ NA),
+ [142] = PINGROUP(142, WEST, NA, phase_flag30, NA, NA, NA, NA, NA, NA,
+ NA),
+ [143] = PINGROUP(143, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, phase_flag31,
+ NA, NA, NA, NA),
+ [144] = PINGROUP(144, SOUTH, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA),
+ [145] = PINGROUP(145, SOUTH, mss_lte, GPS_TX, NA, NA, NA, NA, NA, NA,
+ NA),
+ [146] = PINGROUP(146, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [147] = PINGROUP(147, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [148] = PINGROUP(148, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [149] = PINGROUP(149, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [150] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x599000, 15, 0),
+ [151] = SDC_QDSD_PINGROUP(sdc1_clk, 0x599000, 13, 6),
+ [152] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x599000, 11, 3),
+ [153] = SDC_QDSD_PINGROUP(sdc1_data, 0x599000, 9, 0),
+ [154] = SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
+ [155] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
+ [156] = SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
+ [157] = UFS_RESET(ufs_reset, 0x99f000),
};
static const struct msm_pinctrl_soc_data sdm670_pinctrl = {
@@ -2251,7 +1592,7 @@
.nfunctions = ARRAY_SIZE(sdm670_functions),
.groups = sdm670_groups,
.ngroups = ARRAY_SIZE(sdm670_groups),
- .ngpios = 136,
+ .ngpios = 150,
};
static int sdm670_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index cf80ce1..4a5a0fe 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -586,6 +586,9 @@
ret = info->ops->init(pfc);
if (ret < 0)
return ret;
+
+ /* .init() may have overridden pfc->info */
+ info = pfc->info;
}
/* Enable dummy states for those platforms without pinctrl support */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 7ca37c3..baa98d7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -203,7 +203,7 @@
/* IPSR6 */
FN_AUDIO_CLKB, FN_STP_OPWM_0_B, FN_MSIOF1_SCK_B,
- FN_SCIF_CLK, FN_BPFCLK_E,
+ FN_SCIF_CLK, FN_DVC_MUTE, FN_BPFCLK_E,
FN_AUDIO_CLKC, FN_SCIFB0_SCK_C, FN_MSIOF1_SYNC_B, FN_RX2,
FN_SCIFA2_RXD, FN_FMIN_E,
FN_AUDIO_CLKOUT, FN_MSIOF1_SS1_B, FN_TX2, FN_SCIFA2_TXD,
@@ -573,7 +573,7 @@
/* IPSR6 */
AUDIO_CLKB_MARK, STP_OPWM_0_B_MARK, MSIOF1_SCK_B_MARK,
- SCIF_CLK_MARK, BPFCLK_E_MARK,
+ SCIF_CLK_MARK, DVC_MUTE_MARK, BPFCLK_E_MARK,
AUDIO_CLKC_MARK, SCIFB0_SCK_C_MARK, MSIOF1_SYNC_B_MARK, RX2_MARK,
SCIFA2_RXD_MARK, FMIN_E_MARK,
AUDIO_CLKOUT_MARK, MSIOF1_SS1_B_MARK, TX2_MARK, SCIFA2_TXD_MARK,
@@ -1010,14 +1010,17 @@
PINMUX_IPSR_MSEL(IP4_12_10, SCL2, SEL_IIC2_0),
PINMUX_IPSR_MSEL(IP4_12_10, GPS_CLK_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_12_10, GLO_Q0_D, SEL_GPS_3),
+ PINMUX_IPSR_MSEL(IP4_12_10, HSCK1_E, SEL_HSCIF1_4),
PINMUX_IPSR_GPSR(IP4_15_13, SSI_WS2),
PINMUX_IPSR_MSEL(IP4_15_13, SDA2, SEL_IIC2_0),
PINMUX_IPSR_MSEL(IP4_15_13, GPS_SIGN_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_15_13, RX2_E, SEL_SCIF2_4),
PINMUX_IPSR_MSEL(IP4_15_13, GLO_Q1_D, SEL_GPS_3),
+ PINMUX_IPSR_MSEL(IP4_15_13, HCTS1_N_E, SEL_HSCIF1_4),
PINMUX_IPSR_GPSR(IP4_18_16, SSI_SDATA2),
PINMUX_IPSR_MSEL(IP4_18_16, GPS_MAG_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_18_16, TX2_E, SEL_SCIF2_4),
+ PINMUX_IPSR_MSEL(IP4_18_16, HRTS1_N_E, SEL_HSCIF1_4),
PINMUX_IPSR_GPSR(IP4_19, SSI_SCK34),
PINMUX_IPSR_GPSR(IP4_20, SSI_WS34),
PINMUX_IPSR_GPSR(IP4_21, SSI_SDATA3),
@@ -1090,6 +1093,7 @@
PINMUX_IPSR_MSEL(IP6_2_0, STP_OPWM_0_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP6_2_0, MSIOF1_SCK_B, SEL_SOF1_1),
PINMUX_IPSR_MSEL(IP6_2_0, SCIF_CLK, SEL_SCIF_0),
+ PINMUX_IPSR_GPSR(IP6_2_0, DVC_MUTE),
PINMUX_IPSR_MSEL(IP6_2_0, BPFCLK_E, SEL_FM_4),
PINMUX_IPSR_GPSR(IP6_5_3, AUDIO_CLKC),
PINMUX_IPSR_MSEL(IP6_5_3, SCIFB0_SCK_C, SEL_SCIFB_2),
@@ -1099,7 +1103,7 @@
PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4),
PINMUX_IPSR_GPSR(IP6_7_6, AUDIO_CLKOUT),
PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
- PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0),
+ PINMUX_IPSR_MSEL(IP6_7_6, TX2, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
PINMUX_IPSR_GPSR(IP6_9_8, IRQ0),
PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),
@@ -5810,7 +5814,7 @@
0, 0,
/* IP6_2_0 [3] */
FN_AUDIO_CLKB, FN_STP_OPWM_0_B, FN_MSIOF1_SCK_B,
- FN_SCIF_CLK, 0, FN_BPFCLK_E,
+ FN_SCIF_CLK, FN_DVC_MUTE, FN_BPFCLK_E,
0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index ed734f56..ef093ac 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -281,8 +281,8 @@
FN_AVB_AVTP_CAPTURE, FN_ETH_CRS_DV_B, FN_SSI_WS1, FN_SCIF1_TXD_B,
FN_IIC1_SDA_C, FN_VI1_DATA0, FN_CAN0_TX_D, FN_AVB_AVTP_MATCH,
FN_ETH_RX_ER_B, FN_SSI_SDATA1, FN_HSCIF1_HRX_B, FN_SDATA, FN_VI1_DATA1,
- FN_ATAG0_N, FN_ETH_RXD0_B, FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2,
- FN_MDATA, FN_ATAWR0_N, FN_ETH_RXD1_B,
+ FN_ATAWR0_N, FN_ETH_RXD0_B, FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2,
+ FN_MDATA, FN_ATAG0_N, FN_ETH_RXD1_B,
/* IPSR13 */
FN_SSI_WS2, FN_HSCIF1_HCTS_N_B, FN_SCIFA0_RXD_D, FN_VI1_DATA3, FN_SCKZ,
@@ -575,8 +575,8 @@
ETH_CRS_DV_B_MARK, SSI_WS1_MARK, SCIF1_TXD_B_MARK, IIC1_SDA_C_MARK,
VI1_DATA0_MARK, CAN0_TX_D_MARK, AVB_AVTP_MATCH_MARK, ETH_RX_ER_B_MARK,
SSI_SDATA1_MARK, HSCIF1_HRX_B_MARK, VI1_DATA1_MARK, SDATA_MARK,
- ATAG0_N_MARK, ETH_RXD0_B_MARK, SSI_SCK2_MARK, HSCIF1_HTX_B_MARK,
- VI1_DATA2_MARK, MDATA_MARK, ATAWR0_N_MARK, ETH_RXD1_B_MARK,
+ ATAWR0_N_MARK, ETH_RXD0_B_MARK, SSI_SCK2_MARK, HSCIF1_HTX_B_MARK,
+ VI1_DATA2_MARK, MDATA_MARK, ATAG0_N_MARK, ETH_RXD1_B_MARK,
/* IPSR13 */
SSI_WS2_MARK, HSCIF1_HCTS_N_B_MARK, SCIFA0_RXD_D_MARK, VI1_DATA3_MARK,
@@ -1413,13 +1413,13 @@
PINMUX_IPSR_MSEL(IP12_26_24, HSCIF1_HRX_B, SEL_HSCIF1_1),
PINMUX_IPSR_GPSR(IP12_26_24, VI1_DATA1),
PINMUX_IPSR_MSEL(IP12_26_24, SDATA, SEL_FSN_0),
- PINMUX_IPSR_GPSR(IP12_26_24, ATAG0_N),
+ PINMUX_IPSR_GPSR(IP12_26_24, ATAWR0_N),
PINMUX_IPSR_MSEL(IP12_26_24, ETH_RXD0_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP12_29_27, SSI_SCK2, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP12_29_27, HSCIF1_HTX_B, SEL_HSCIF1_1),
PINMUX_IPSR_GPSR(IP12_29_27, VI1_DATA2),
PINMUX_IPSR_MSEL(IP12_29_27, MDATA, SEL_FSN_0),
- PINMUX_IPSR_GPSR(IP12_29_27, ATAWR0_N),
+ PINMUX_IPSR_GPSR(IP12_29_27, ATAG0_N),
PINMUX_IPSR_MSEL(IP12_29_27, ETH_RXD1_B, SEL_ETH_1),
/* IPSR13 */
@@ -4938,10 +4938,10 @@
0, 0, 0, 0,
/* IP12_29_27 [3] */
FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2, FN_MDATA,
- FN_ATAWR0_N, FN_ETH_RXD1_B, 0, 0,
+ FN_ATAG0_N, FN_ETH_RXD1_B, 0, 0,
/* IP12_26_24 [3] */
FN_SSI_SDATA1, FN_HSCIF1_HRX_B, FN_VI1_DATA1, FN_SDATA,
- FN_ATAG0_N, FN_ETH_RXD0_B, 0, 0,
+ FN_ATAWR0_N, FN_ETH_RXD0_B, 0, 0,
/* IP12_23_21 [3] */
FN_SSI_WS1, FN_SCIF1_TXD_B, FN_IIC1_SDA_C, FN_VI1_DATA0,
FN_CAN0_TX_D, FN_AVB_AVTP_MATCH, FN_ETH_RX_ER_B, 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 84cee66..0acb0a7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -2056,7 +2056,7 @@
RCAR_GP_PIN(6, 21),
};
static const unsigned int hscif2_clk_b_mux[] = {
- HSCK1_B_MARK,
+ HSCK2_B_MARK,
};
static const unsigned int hscif2_ctrl_b_pins[] = {
/* RTS, CTS */
@@ -2129,7 +2129,7 @@
RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
};
static const unsigned int hscif4_ctrl_mux[] = {
- HRTS4_N_MARK, HCTS3_N_MARK,
+ HRTS4_N_MARK, HCTS4_N_MARK,
};
static const unsigned int hscif4_data_b_pins[] = {
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index efc4371..a36fd4b 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -798,7 +798,7 @@
break;
case PIN_CONFIG_OUTPUT:
__stm32_gpio_set(bank, offset, arg);
- ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+ ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
break;
default:
ret = -EINVAL;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 90b973e..a7c81e9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -394,7 +394,7 @@
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "owa")), /* DOUT */
+ SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out")),
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 808391f..9f417bb 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -2971,6 +2971,25 @@
}
EXPORT_SYMBOL(ipa_get_pdev);
+int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
+ void *user_data)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_ntn_uc_reg_rdyCB,
+ ipauc_ready_cb, user_data);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_ntn_uc_reg_rdyCB);
+
+void ipa_ntn_uc_dereg_rdyCB(void)
+{
+ IPA_API_DISPATCH(ipa_ntn_uc_dereg_rdyCB);
+}
+EXPORT_SYMBOL(ipa_ntn_uc_dereg_rdyCB);
+
+
static const struct dev_pm_ops ipa_pm_ops = {
.suspend_noirq = ipa_ap_suspend,
.resume_noirq = ipa_ap_resume,
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index d3d4178..133e058 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -378,6 +378,11 @@
int ipa_ep_idx_dl);
struct device *(*ipa_get_pdev)(void);
+
+ int (*ipa_ntn_uc_reg_rdyCB)(void (*ipauc_ready_cb)(void *user_data),
+ void *user_data);
+
+ void (*ipa_ntn_uc_dereg_rdyCB)(void);
};
#ifdef CONFIG_IPA
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index 2dd82c1..a15a9d8 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -620,3 +620,41 @@
return ret;
}
EXPORT_SYMBOL(ipa_uc_offload_cleanup);
+
+/**
+ * ipa_uc_offload_reg_rdyCB() - To register uC ready CB if uC not
+ * ready
+ * @inp: [in/out] input/output parameters
+ * from/to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *inp)
+{
+ int ret = 0;
+
+ if (!inp) {
+ IPA_UC_OFFLOAD_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ if (inp->proto == IPA_UC_NTN)
+ ret = ipa_ntn_uc_reg_rdyCB(inp->notify, inp->priv);
+
+ if (ret == -EEXIST) {
+ inp->is_uC_ready = true;
+ ret = 0;
+ } else
+ inp->is_uC_ready = false;
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_reg_rdyCB);
+
+void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
+{
+ if (proto == IPA_UC_NTN)
+ ipa_ntn_uc_dereg_rdyCB();
+}
+EXPORT_SYMBOL(ipa_uc_offload_dereg_rdyCB);
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 69c83d4..07bca0c 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -374,13 +374,15 @@
struct ipa_ntn_conn_out_params *outp);
int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
-
u8 *ipa_write_64(u64 w, u8 *dest);
u8 *ipa_write_32(u32 w, u8 *dest);
u8 *ipa_write_16(u16 hw, u8 *dest);
u8 *ipa_write_8(u8 b, u8 *dest);
u8 *ipa_pad_to_64(u8 *dest);
u8 *ipa_pad_to_32(u8 *dest);
+int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
+ void *user_data);
+void ipa_ntn_uc_dereg_rdyCB(void);
const char *ipa_get_version_string(enum ipa_hw_type ver);
#endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
index ae6cfc4..0bc4b76 100644
--- a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,4 +21,7 @@
struct ipa_ntn_conn_out_params *outp);
int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
+ void *user_data);
+void ipa_ntn_uc_dereg_rdyCB(void);
#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 53ab299..bfd0446 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -641,7 +641,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_nat_dma_cmd *)param)->entries,
pre_entry);
retval = -EFAULT;
@@ -688,7 +688,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr *)param)->num_hdrs,
pre_entry);
retval = -EFAULT;
@@ -727,7 +727,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -767,7 +767,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule *)param)->
num_rules,
pre_entry);
@@ -807,7 +807,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_rt_rule *)param)->
num_rules,
pre_entry);
@@ -847,7 +847,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -886,7 +886,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule *)param)->
num_rules,
pre_entry);
@@ -926,7 +926,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_flt_rule *)param)->
num_hdls,
pre_entry);
@@ -966,7 +966,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1104,7 +1104,7 @@
if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props, pre_entry);
retval = -EFAULT;
@@ -1149,7 +1149,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props, pre_entry);
retval = -EFAULT;
@@ -1194,7 +1194,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props, pre_entry);
retval = -EFAULT;
@@ -1232,7 +1232,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_msg_meta *)param)->msg_len
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_msg_meta *)param)->msg_len,
pre_entry);
retval = -EFAULT;
@@ -1372,7 +1372,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs, pre_entry);
retval = -EFAULT;
@@ -1411,7 +1411,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
param)->num_hdls != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr_proc_ctx *)param)->
num_hdls,
pre_entry);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index 3418896..f7b0864 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1015,25 +1015,25 @@
if (rule->action != IPA_PASS_TO_EXCEPTION) {
if (!rule->eq_attrib_type) {
if (!rule->rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa_id_find(rule->rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1046,7 +1046,7 @@
}
INIT_LIST_HEAD(&entry->link);
entry->rule = *rule;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_FLT_COOKIE;
entry->rt_tbl = rt_tbl;
entry->tbl = tbl;
if (add_rear) {
@@ -1065,13 +1065,19 @@
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
*rule_hdl = id;
entry->id = id;
IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
-
+ipa_insert_failed:
+ tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ list_del(&entry->link);
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
error:
return -EPERM;
}
@@ -1083,12 +1089,12 @@
entry = ipa_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
id = entry->id;
@@ -1115,12 +1121,12 @@
entry = ipa_id_find(frule->rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1130,25 +1136,25 @@
if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
if (!frule->rule.eq_attrib_type) {
if (!frule->rule.rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1172,7 +1178,7 @@
struct ipa_flt_tbl *tbl;
if (rule == NULL || rule_hdl == NULL) {
- IPAERR("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
return -EINVAL;
}
@@ -1191,14 +1197,14 @@
int ipa_ep_idx;
if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
rule_hdl, ep);
return -EINVAL;
}
ipa_ep_idx = ipa2_get_ep_mapping(ep);
if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
- IPAERR("ep not valid ep=%d\n", ep);
+ IPAERR_RL("ep not valid ep=%d\n", ep);
return -EINVAL;
}
if (ipa_ctx->ep[ipa_ep_idx].valid == 0)
@@ -1225,7 +1231,7 @@
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1243,7 +1249,7 @@
rules->rules[i].at_rear,
&rules->rules[i].flt_rule_hdl);
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1276,14 +1282,14 @@
int result;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1316,14 +1322,14 @@
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
@@ -1357,7 +1363,7 @@
int result;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1393,7 +1399,7 @@
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index bb6f8ec..c8663c9 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -547,7 +547,7 @@
{
struct ipa_hdr_entry *hdr_entry;
struct ipa_hdr_proc_ctx_entry *entry;
- struct ipa_hdr_proc_ctx_offset_entry *offset;
+ struct ipa_hdr_proc_ctx_offset_entry *offset = NULL;
u32 bin;
struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
int id;
@@ -558,13 +558,13 @@
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
- IPAERR("invalid processing type %d\n", proc_ctx->type);
+ IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
return -EINVAL;
}
hdr_entry = ipa_id_find(proc_ctx->hdr_hdl);
- if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
- IPAERR("hdr_hdl is invalid\n");
+ if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("hdr_hdl is invalid\n");
return -EINVAL;
}
@@ -580,7 +580,7 @@
entry->hdr = hdr_entry;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_PROC_HDR_COOKIE;
needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ?
sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) :
@@ -592,7 +592,7 @@
ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
bin = IPA_HDR_PROC_CTX_BIN1;
} else {
- IPAERR("unexpected needed len %d\n", needed_len);
+ IPAERR_RL("unexpected needed len %d\n", needed_len);
WARN_ON(1);
goto bad_len;
}
@@ -602,7 +602,7 @@
IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
if (list_empty(&htbl->head_free_offset_list[bin])) {
if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
- IPAERR("hdr proc ctx table overflow\n");
+ IPAERR_RL("hdr proc ctx table overflow\n");
goto bad_len;
}
@@ -640,6 +640,7 @@
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
proc_ctx->proc_ctx_hdl = id;
@@ -647,6 +648,14 @@
return 0;
+ipa_insert_failed:
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ list_del(&entry->link);
+ htbl->proc_ctx_cnt--;
+
bad_len:
if (add_ref_hdr)
hdr_entry->ref_cnt--;
@@ -659,7 +668,7 @@
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
struct ipa_hdr_entry *entry;
- struct ipa_hdr_offset_entry *offset;
+ struct ipa_hdr_offset_entry *offset = NULL;
u32 bin;
struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
int id;
@@ -667,12 +676,12 @@
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
goto error;
}
if (!HDR_TYPE_IS_VALID(hdr->type)) {
- IPAERR("invalid hdr type %d\n", hdr->type);
+ IPAERR_RL("invalid hdr type %d\n", hdr->type);
goto error;
}
@@ -691,7 +700,7 @@
entry->type = hdr->type;
entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
entry->eth2_ofst = hdr->eth2_ofst;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_HDR_COOKIE;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
@@ -704,7 +713,7 @@
else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
bin = IPA_HDR_BIN4;
else {
- IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
goto bad_hdr_len;
}
@@ -780,6 +789,7 @@
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
hdr->hdr_hdl = id;
@@ -804,10 +814,19 @@
entry->ref_cnt--;
hdr->hdr_hdl = 0;
ipa_id_remove(id);
+ipa_insert_failed:
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
+ entry->hdr_len, DMA_TO_DEVICE);
+ } else {
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ }
htbl->hdr_cnt--;
list_del(&entry->link);
- dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
- entry->hdr_len, DMA_TO_DEVICE);
+
fail_dma_mapping:
entry->is_hdr_proc_ctx = false;
bad_hdr_len:
@@ -824,8 +843,8 @@
struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
entry = ipa_id_find(proc_ctx_hdl);
- if (!entry || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parm\n");
+ if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -833,7 +852,7 @@
htbl->proc_ctx_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -871,12 +890,12 @@
entry = ipa_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad parm\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -888,7 +907,7 @@
htbl->hdr_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("hdr already deleted by user\n");
+ IPAERR_RL("hdr already deleted by user\n");
return -EINVAL;
}
@@ -937,12 +956,12 @@
int result = -EFAULT;
if (unlikely(!ipa_ctx)) {
- IPAERR("IPA driver was not initialized\n");
+ IPAERR_RL("IPA driver was not initialized\n");
return -EINVAL;
}
if (hdrs == NULL || hdrs->num_hdrs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -951,7 +970,7 @@
hdrs->num_hdrs);
for (i = 0; i < hdrs->num_hdrs; i++) {
if (__ipa_add_hdr(&hdrs->hdr[i])) {
- IPAERR("failed to add hdr %d\n", i);
+ IPAERR_RL("failed to add hdr %d\n", i);
hdrs->hdr[i].status = -1;
} else {
hdrs->hdr[i].status = 0;
@@ -992,14 +1011,14 @@
}
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_hdr(hdls->hdl[i].hdl, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1048,13 +1067,13 @@
if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
- IPAERR("Processing context not supported on IPA HW %d\n",
+ IPAERR_RL("Processing context not supported on IPA HW %d\n",
ipa_ctx->ipa_hw_type);
return -EFAULT;
}
if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1063,7 +1082,7 @@
proc_ctxs->num_proc_ctxs);
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
- IPAERR("failed to add hdr pric ctx %d\n", i);
+ IPAERR_RL("failed to add hdr pric ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
proc_ctxs->proc_ctx[i].status = 0;
@@ -1108,14 +1127,14 @@
}
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1352,7 +1371,7 @@
}
if (lookup == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
@@ -1439,13 +1458,13 @@
entry = ipa_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("invalid header entry\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("invalid header entry\n");
result = -EINVAL;
goto bail;
}
@@ -1474,7 +1493,7 @@
int result = -EFAULT;
if (copy == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 5568f8b..a85addb 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -37,7 +37,15 @@
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
+
#define IPA_COOKIE 0x57831603
+#define IPA_RT_RULE_COOKIE 0x57831604
+#define IPA_RT_TBL_COOKIE 0x57831605
+#define IPA_FLT_COOKIE 0x57831606
+#define IPA_HDR_COOKIE 0x57831607
+#define IPA_PROC_HDR_COOKIE 0x57831608
+
+
#define MTU_BYTE 1500
#define IPA_MAX_NUM_PIPES 0x14
@@ -60,6 +68,18 @@
#define IPAERR(fmt, args...) \
pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPAERR_RL(fmt, args...) \
+ do { \
+ pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__, \
+ __LINE__, ## args);\
+ if (ipa_ctx) { \
+ IPA_IPC_LOGGING(ipa_ctx->logbuf, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ } \
+ } while (0)
+
#define WLAN_AMPDU_TX_EP 15
#define WLAN_PROD_TX_EP 19
#define WLAN1_CONS_RX_EP 14
@@ -196,8 +216,8 @@
*/
struct ipa_flt_entry {
struct list_head link;
- struct ipa_flt_rule rule;
u32 cookie;
+ struct ipa_flt_rule rule;
struct ipa_flt_tbl *tbl;
struct ipa_rt_tbl *rt_tbl;
u32 hw_len;
@@ -222,13 +242,13 @@
*/
struct ipa_rt_tbl {
struct list_head link;
+ u32 cookie;
struct list_head head_rt_rule_list;
char name[IPA_RESOURCE_NAME_MAX];
u32 idx;
u32 rule_cnt;
u32 ref_cnt;
struct ipa_rt_tbl_set *set;
- u32 cookie;
bool in_sys;
u32 sz;
struct ipa_mem_buffer curr_mem;
@@ -259,6 +279,7 @@
*/
struct ipa_hdr_entry {
struct list_head link;
+ u32 cookie;
u8 hdr[IPA_HDR_MAX_SIZE];
u32 hdr_len;
char name[IPA_RESOURCE_NAME_MAX];
@@ -268,7 +289,6 @@
dma_addr_t phys_base;
struct ipa_hdr_proc_ctx_entry *proc_ctx;
struct ipa_hdr_offset_entry *offset_entry;
- u32 cookie;
u32 ref_cnt;
int id;
u8 is_eth2_ofst_valid;
@@ -341,10 +361,10 @@
*/
struct ipa_hdr_proc_ctx_entry {
struct list_head link;
+ u32 cookie;
enum ipa_hdr_proc_type type;
struct ipa_hdr_proc_ctx_offset_entry *offset_entry;
struct ipa_hdr_entry *hdr;
- u32 cookie;
u32 ref_cnt;
int id;
bool user_deleted;
@@ -400,8 +420,8 @@
*/
struct ipa_rt_entry {
struct list_head link;
- struct ipa_rt_rule rule;
u32 cookie;
+ struct ipa_rt_rule rule;
struct ipa_rt_tbl *tbl;
struct ipa_hdr_entry *hdr;
struct ipa_hdr_proc_ctx_entry *proc_ctx;
@@ -1512,6 +1532,8 @@
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp);
int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+int ipa2_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv);
+void ipa2_ntn_uc_dereg_rdyCB(void);
/*
* To retrieve doorbell physical address of
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index dc27636..e6954b7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -271,6 +271,14 @@
mutex_lock(&ipa_ctx->lock);
list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
if (!strcmp(entry->name, tx->name)) {
+ /* add the entry check */
+ if (entry->num_tx_props != tx->num_tx_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_tx_props,
+ tx->num_tx_props);
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+ }
memcpy(tx->tx, entry->tx, entry->num_tx_props *
sizeof(struct ipa_ioc_tx_intf_prop));
result = 0;
@@ -304,6 +312,14 @@
mutex_lock(&ipa_ctx->lock);
list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
if (!strcmp(entry->name, rx->name)) {
+ /* add the entry check */
+ if (entry->num_rx_props != rx->num_rx_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_rx_props,
+ rx->num_rx_props);
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+ }
memcpy(rx->rx, entry->rx, entry->num_rx_props *
sizeof(struct ipa_ioc_rx_intf_prop));
result = 0;
@@ -337,6 +353,14 @@
mutex_lock(&ipa_ctx->lock);
list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
if (!strcmp(entry->name, ext->name)) {
+ /* add the entry check */
+ if (entry->num_ext_props != ext->num_ext_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_ext_props,
+ ext->num_ext_props);
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+ }
memcpy(ext->ext, entry->ext, entry->num_ext_props *
sizeof(struct ipa_ioc_ext_intf_prop));
result = 0;
@@ -380,13 +404,13 @@
if (meta == NULL || (buff == NULL && callback != NULL) ||
(buff != NULL && callback == NULL)) {
- IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+ IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
meta, buff, callback);
return -EINVAL;
}
if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
- IPAERR("unsupported message type %d\n", meta->msg_type);
+ IPAERR_RL("unsupported message type %d\n", meta->msg_type);
return -EINVAL;
}
@@ -609,7 +633,7 @@
int result = -EINVAL;
if (meta == NULL || buff == NULL || !count) {
- IPAERR("invalid param name=%p buff=%p count=%zu\n",
+ IPAERR_RL("invalid param name=%p buff=%p count=%zu\n",
meta, buff, count);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index a7f983e..50b2706 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -252,8 +252,8 @@
mutex_lock(&nat_ctx->lock);
if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
- IPAERR("Nat device name mismatch\n");
- IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+ IPAERR_RL("Nat device name mismatch\n");
+ IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
result = -EPERM;
goto bail;
}
@@ -272,7 +272,7 @@
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
- IPAERR("Invalid Parameters or device is already init\n");
+ IPAERR_RL("Invalid Parameters or device is already init\n");
result = -EPERM;
goto bail;
}
@@ -335,8 +335,8 @@
/* check for integer overflow */
if (init->ipv4_rules_offset >
- UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
+ (UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1)))) {
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Table Entry offset is not
@@ -345,8 +345,8 @@
tmp = init->ipv4_rules_offset +
(TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->ipv4_rules_offset, (init->table_entries + 1),
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -355,7 +355,7 @@
/* check for integer overflow */
if (init->expn_rules_offset >
UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Expn Table Entry offset is not
@@ -364,8 +364,8 @@
tmp = init->expn_rules_offset +
(TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->expn_rules_offset, init->expn_table_entries,
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -374,7 +374,7 @@
/* check for integer overflow */
if (init->index_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Indx Table Entry offset is not
@@ -383,8 +383,8 @@
tmp = init->index_offset +
(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Indx Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_offset, (init->table_entries + 1),
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -392,8 +392,8 @@
/* check for integer overflow */
if (init->index_expn_offset >
- UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
+ (UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries))) {
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Expn Table entry offset is not
@@ -402,8 +402,8 @@
tmp = init->index_expn_offset +
(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Indx Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_expn_offset, init->expn_table_entries,
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -448,16 +448,16 @@
(init->expn_rules_offset > offset) ||
(init->index_offset > offset) ||
(init->index_expn_offset > offset)) {
- IPAERR("Failed due to integer overflow\n");
- IPAERR("nat.mem.dma_handle: 0x%pa\n",
+ IPAERR_RL("Failed due to integer overflow\n");
+ IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
&ipa_ctx->nat_mem.dma_handle);
- IPAERR("ipv4_rules_offset: 0x%x\n",
+ IPAERR_RL("ipv4_rules_offset: 0x%x\n",
init->ipv4_rules_offset);
- IPAERR("expn_rules_offset: 0x%x\n",
+ IPAERR_RL("expn_rules_offset: 0x%x\n",
init->expn_rules_offset);
- IPAERR("index_offset: 0x%x\n",
+ IPAERR_RL("index_offset: 0x%x\n",
init->index_offset);
- IPAERR("index_expn_offset: 0x%x\n",
+ IPAERR_RL("index_expn_offset: 0x%x\n",
init->index_expn_offset);
result = -EPERM;
goto free_mem;
@@ -513,7 +513,7 @@
desc[1].len = size;
IPADBG("posting v4 init command\n");
if (ipa_send_cmd(2, desc)) {
- IPAERR("Fail to send immediate command\n");
+ IPAERR_RL("Fail to send immediate command\n");
result = -EPERM;
goto free_mem;
}
@@ -578,7 +578,7 @@
IPADBG("\n");
if (dma->entries <= 0) {
- IPAERR("Invalid number of commands %d\n",
+ IPAERR_RL("Invalid number of commands %d\n",
dma->entries);
ret = -EPERM;
goto bail;
@@ -586,7 +586,7 @@
for (cnt = 0; cnt < dma->entries; cnt++) {
if (dma->dma[cnt].table_index >= 1) {
- IPAERR("Invalid table index %d\n",
+ IPAERR_RL("Invalid table index %d\n",
dma->dma[cnt].table_index);
ret = -EPERM;
goto bail;
@@ -597,7 +597,7 @@
if (dma->dma[cnt].offset >=
(ipa_ctx->nat_mem.size_base_tables + 1) *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -609,7 +609,7 @@
if (dma->dma[cnt].offset >=
ipa_ctx->nat_mem.size_expansion_tables *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -621,7 +621,7 @@
if (dma->dma[cnt].offset >=
(ipa_ctx->nat_mem.size_base_tables + 1) *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -633,7 +633,7 @@
if (dma->dma[cnt].offset >=
ipa_ctx->nat_mem.size_expansion_tables *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -642,7 +642,7 @@
break;
default:
- IPAERR("Invalid base_addr %d\n",
+ IPAERR_RL("Invalid base_addr %d\n",
dma->dma[cnt].base_addr);
ret = -EPERM;
goto bail;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 5b70853..0a3c0e5 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -850,16 +850,20 @@
struct ipa_rt_tbl *entry;
if (in->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
+ mutex_lock(&ipa_ctx->lock);
/* check if this table exists */
entry = __ipa_find_rt_tbl(in->ip, in->name);
- if (!entry)
+ if (!entry) {
+ mutex_unlock(&ipa_ctx->lock);
return -EFAULT;
+ }
in->idx = entry->idx;
+ mutex_unlock(&ipa_ctx->lock);
return 0;
}
@@ -902,7 +906,7 @@
INIT_LIST_HEAD(&entry->link);
strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
entry->set = set;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_TBL_COOKIE;
entry->in_sys = (ip == IPA_IP_v4) ?
!ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
set->tbl_cnt++;
@@ -915,12 +919,16 @@
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
}
return entry;
+ipa_insert_failed:
+ set->tbl_cnt--;
+ list_del(&entry->link);
fail_rt_idx_alloc:
entry->cookie = 0;
kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
@@ -933,13 +941,13 @@
enum ipa_ip_type ip = IPA_IP_MAX;
u32 id;
- if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parms\n");
+ if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("bad parms\n");
return -EINVAL;
}
id = entry->id;
if (ipa_id_find(id) == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EPERM;
}
@@ -947,8 +955,11 @@
ip = IPA_IP_v4;
else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ return -EPERM;
+ }
+
if (!entry->in_sys) {
list_del(&entry->link);
@@ -987,13 +998,14 @@
if (rule->hdr_hdl) {
hdr = ipa_id_find(rule->hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid hdr\n");
goto error;
}
} else if (rule->hdr_proc_ctx_hdl) {
proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl);
- if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
+ if ((proc_ctx == NULL) ||
+ (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid proc ctx\n");
goto error;
}
@@ -1001,7 +1013,7 @@
tbl = __ipa_add_rt_tbl(ip, name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
IPAERR("bad params\n");
goto error;
}
@@ -1022,7 +1034,7 @@
goto error;
}
INIT_LIST_HEAD(&entry->link);
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_RULE_COOKIE;
entry->rule = *rule;
entry->tbl = tbl;
entry->hdr = hdr;
@@ -1074,7 +1086,7 @@
int ret;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1084,7 +1096,7 @@
&rules->rules[i].rule,
rules->rules[i].at_rear,
&rules->rules[i].rt_rule_hdl)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1111,12 +1123,12 @@
entry = ipa_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
@@ -1130,7 +1142,7 @@
entry->tbl->rule_cnt);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
}
entry->cookie = 0;
id = entry->id;
@@ -1157,14 +1169,14 @@
int ret;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1197,7 +1209,7 @@
int ret;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1241,7 +1253,7 @@
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1259,7 +1271,7 @@
* filtering rules point to routing tables
*/
if (ipa2_reset_flt(ip))
- IPAERR("fail to reset flt ip=%d\n", ip);
+ IPAERR_RL("fail to reset flt ip=%d\n", ip);
set = &ipa_ctx->rt_tbl_set[ip];
rset = &ipa_ctx->reap_rt_tbl_set[ip];
@@ -1345,18 +1357,18 @@
int result = -EFAULT;
if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
entry = __ipa_find_rt_tbl(lookup->ip, lookup->name);
- if (entry && entry->cookie == IPA_COOKIE) {
+ if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
entry->ref_cnt++;
lookup->hdl = entry->id;
/* commit for get */
if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
result = 0;
}
@@ -1382,13 +1394,13 @@
mutex_lock(&ipa_ctx->lock);
entry = ipa_id_find(rt_tbl_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto ret;
}
- if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
- IPAERR("bad parms\n");
+ if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
+ IPAERR_RL("bad parms\n");
result = -EINVAL;
goto ret;
}
@@ -1397,16 +1409,19 @@
ip = IPA_IP_v4;
else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ result = -EINVAL;
+ goto ret;
+ }
entry->ref_cnt--;
if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
if (__ipa_del_rt_tbl(entry))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
/* commit for put */
if (ipa_ctx->ctrl->ipa_commit_rt(ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
}
result = 0;
@@ -1425,20 +1440,20 @@
if (rtrule->rule.hdr_hdl) {
hdr = ipa_id_find(rtrule->rule.hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid hdr\n");
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid hdr\n");
goto error;
}
}
entry = ipa_id_find(rtrule->rt_rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1471,14 +1486,14 @@
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
index 6f59ebd..d4116eb 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -165,6 +165,17 @@
return -EEXIST;
}
+int ipa2_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv)
+{
+ return ipa2_register_ipa_ready_cb(ipauc_ready_cb, priv);
+}
+
+void ipa2_ntn_uc_dereg_rdyCB(void)
+{
+ ipa_ctx->uc_ntn_ctx.uc_ready_cb = NULL;
+ ipa_ctx->uc_ntn_ctx.priv = NULL;
+}
+
static void ipa_uc_ntn_loaded_handler(void)
{
if (!ipa_ctx) {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index b7815cb..a454382 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -1673,7 +1673,7 @@
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm, %d\n", clnt_hdl);
+ IPAERR_RL("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1686,7 +1686,7 @@
ep = &ipa_ctx->ep[clnt_hdl];
if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 4652fc8..2c88244 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -847,7 +847,7 @@
}
if (client >= IPA_CLIENT_MAX || client < 0) {
- IPAERR("Bad client number! client =%d\n", client);
+ IPAERR_RL("Bad client number! client =%d\n", client);
return INVALID_EP_MAPPING_INDEX;
}
@@ -1700,7 +1700,7 @@
if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
IPA_FLT_FLOW_LABEL) {
- IPAERR("v6 attrib's specified for v4 rule\n");
+ IPAERR_RL("v6 attrib's specified for v4 rule\n");
return -EPERM;
}
@@ -1712,7 +1712,7 @@
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1732,7 +1732,7 @@
if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1746,7 +1746,7 @@
if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1760,11 +1760,11 @@
if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
+ IPAERR_RL("bad src port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1778,11 +1778,11 @@
if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
+ IPAERR_RL("bad dst port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1796,7 +1796,7 @@
if (attrib->attrib_mask & IPA_FLT_TYPE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1809,7 +1809,7 @@
if (attrib->attrib_mask & IPA_FLT_CODE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1822,7 +1822,7 @@
if (attrib->attrib_mask & IPA_FLT_SPI) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1836,7 +1836,7 @@
if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1850,7 +1850,7 @@
if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1877,7 +1877,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1892,7 +1892,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1907,7 +1907,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1922,7 +1922,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1937,7 +1937,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1955,7 +1955,7 @@
/* error check */
if (attrib->attrib_mask & IPA_FLT_TOS ||
attrib->attrib_mask & IPA_FLT_PROTOCOL) {
- IPAERR("v4 attrib's specified for v6 rule\n");
+ IPAERR_RL("v4 attrib's specified for v6 rule\n");
return -EPERM;
}
@@ -1967,7 +1967,7 @@
if (attrib->attrib_mask & IPA_FLT_TYPE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1980,7 +1980,7 @@
if (attrib->attrib_mask & IPA_FLT_CODE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1993,7 +1993,7 @@
if (attrib->attrib_mask & IPA_FLT_SPI) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -2007,7 +2007,7 @@
if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2021,7 +2021,7 @@
if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2035,11 +2035,11 @@
if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
+ IPAERR_RL("bad src port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2053,11 +2053,11 @@
if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
+ IPAERR_RL("bad dst port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2071,7 +2071,7 @@
if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2097,7 +2097,7 @@
if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2129,7 +2129,7 @@
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2174,7 +2174,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2189,7 +2189,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2204,7 +2204,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2219,7 +2219,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2234,7 +2234,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -2247,7 +2247,7 @@
}
} else {
- IPAERR("unsupported ip %d\n", ip);
+ IPAERR_RL("unsupported ip %d\n", ip);
return -EPERM;
}
@@ -2257,7 +2257,7 @@
*/
if (attrib->attrib_mask == 0) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -3548,19 +3548,19 @@
}
if (param_in->client >= IPA_CLIENT_MAX) {
- IPAERR("bad parm client:%d\n", param_in->client);
+ IPAERR_RL("bad parm client:%d\n", param_in->client);
goto fail;
}
ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
if (ipa_ep_idx == -1) {
- IPAERR("Invalid client.\n");
+ IPAERR_RL("Invalid client.\n");
goto fail;
}
ep = &ipa_ctx->ep[ipa_ep_idx];
if (!ep->valid) {
- IPAERR("EP not allocated.\n");
+ IPAERR_RL("EP not allocated.\n");
goto fail;
}
@@ -3574,7 +3574,7 @@
ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
if (result)
- IPAERR("qmap_id %d write failed on ep=%d\n",
+ IPAERR_RL("qmap_id %d write failed on ep=%d\n",
meta.qmap_id, ipa_ep_idx);
result = 0;
}
@@ -5086,6 +5086,8 @@
api_ctrl->ipa_tear_down_uc_offload_pipes =
ipa2_tear_down_uc_offload_pipes;
api_ctrl->ipa_get_pdev = ipa2_get_pdev;
+ api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa2_ntn_uc_reg_rdyCB;
+ api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa2_ntn_uc_dereg_rdyCB;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 11eeb2f..0bdfea9 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -2856,6 +2856,10 @@
kfree(req);
kfree(resp);
return rc;
+ } else if (data == NULL) {
+ kfree(req);
+ kfree(resp);
+ return 0;
}
if (resp->dl_dst_pipe_stats_list_valid) {
@@ -3037,8 +3041,11 @@
int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
{
enum ipa_upstream_type upstream_type;
+ struct wan_ioctl_query_tether_stats tether_stats;
int rc = 0;
+ memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3056,7 +3063,7 @@
} else {
IPAWANDBG(" reset modem-backhaul stats\n");
rc = rmnet_ipa_query_tethering_stats_modem(
- NULL, true);
+ &tether_stats, true);
if (rc) {
IPAWANERR("reset MODEM stats failed\n");
return rc;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index a5fb576..cd4e016 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -235,6 +235,10 @@
static void ipa3_post_init_wq(struct work_struct *work);
static DECLARE_WORK(ipa3_post_init_work, ipa3_post_init_wq);
+static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
+static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
+ ipa_dec_clients_disable_clks_on_wq);
+
static struct ipa3_plat_drv_res ipa3_res = {0, };
struct msm_bus_scale_pdata *ipa3_bus_scale_table;
@@ -314,7 +318,7 @@
}
cnt += scnprintf(buf + cnt, size - cnt,
"\nTotal active clients count: %d\n",
- ipa3_ctx->ipa3_active_clients.cnt);
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
return cnt;
}
@@ -322,11 +326,11 @@
static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
- ipa3_active_clients_lock();
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
ipa3_active_clients_log_print_table(active_clients_table_buf,
IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
IPAERR("%s", active_clients_table_buf);
- ipa3_active_clients_unlock();
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
return NOTIFY_DONE;
}
@@ -395,11 +399,11 @@
void ipa3_active_clients_log_clear(void)
{
- ipa3_active_clients_lock();
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
ipa3_ctx->ipa3_active_clients_logging.log_tail =
IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
- ipa3_active_clients_unlock();
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
}
static void ipa3_active_clients_log_destroy(void)
@@ -538,7 +542,7 @@
msg_meta.msg_len = sizeof(struct ipa_wan_msg);
retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
if (retval) {
- IPAERR("ipa3_send_msg failed: %d\n", retval);
+ IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
kfree(wan_msg);
return retval;
}
@@ -715,7 +719,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_nat_dma_cmd *)param)->entries,
pre_entry);
retval = -EFAULT;
@@ -774,7 +778,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr *)param)->num_hdrs,
pre_entry);
retval = -EFAULT;
@@ -813,7 +817,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -853,7 +857,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule *)param)->
num_rules,
pre_entry);
@@ -893,7 +897,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
num_rules != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule_after *)param)->
num_rules,
pre_entry);
@@ -935,7 +939,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_rt_rule *)param)->
num_rules,
pre_entry);
@@ -975,7 +979,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -1014,7 +1018,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1056,7 +1060,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
num_rules != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule_after *)param)->
num_rules,
pre_entry);
@@ -1097,7 +1101,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_flt_rule *)param)->
num_hdls,
pre_entry);
@@ -1137,7 +1141,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1275,7 +1279,7 @@
if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props, pre_entry);
retval = -EFAULT;
@@ -1320,7 +1324,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props, pre_entry);
retval = -EFAULT;
@@ -1365,7 +1369,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props, pre_entry);
retval = -EFAULT;
@@ -1403,7 +1407,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_msg_meta *)param)->msg_len
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_msg_meta *)param)->msg_len,
pre_entry);
retval = -EFAULT;
@@ -1543,7 +1547,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs, pre_entry);
retval = -EFAULT;
@@ -1582,7 +1586,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
param)->num_hdls != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr_proc_ctx *)param)->
num_hdls,
pre_entry);
@@ -3267,7 +3271,6 @@
}
ipa3_uc_notify_clk_state(true);
- ipa3_suspend_apps_pipes(false);
}
static unsigned int ipa3_get_bus_vote(void)
@@ -3466,13 +3469,33 @@
*/
void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
{
- ipa3_active_clients_lock();
+ int ret;
+
ipa3_active_clients_log_inc(id, false);
- ipa3_ctx->ipa3_active_clients.cnt++;
- if (ipa3_ctx->ipa3_active_clients.cnt == 1)
- ipa3_enable_clks();
- IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
- ipa3_active_clients_unlock();
+ ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
+ if (ret) {
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ return;
+ }
+
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+
+	/* somebody might have voted for clocks meanwhile */
+ ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
+ if (ret) {
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ return;
+ }
+
+ ipa3_enable_clks();
+ atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ ipa3_suspend_apps_pipes(false);
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
}
/**
@@ -3486,23 +3509,57 @@
int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
*id)
{
- int res = 0;
- unsigned long flags;
+ int ret;
- if (ipa3_active_clients_trylock(&flags) == 0)
- return -EPERM;
-
- if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
- res = -EPERM;
- goto bail;
+ ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
+ if (ret) {
+ ipa3_active_clients_log_inc(id, true);
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ return 0;
}
- ipa3_active_clients_log_inc(id, true);
- ipa3_ctx->ipa3_active_clients.cnt++;
- IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
-bail:
- ipa3_active_clients_trylock_unlock(&flags);
- return res;
+ return -EPERM;
+}
+
+static void __ipa3_dec_client_disable_clks(void)
+{
+ int ret;
+
+ if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) {
+ IPAERR("trying to disable clocks with refcnt is 0!\n");
+ ipa_assert();
+ return;
+ }
+
+ ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
+ if (ret)
+ goto bail;
+
+ /* seems like this is the only client holding the clocks */
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+ if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
+ ipa3_ctx->tag_process_before_gating) {
+ ipa3_ctx->tag_process_before_gating = false;
+ /*
+ * When TAG process ends, active clients will be
+ * decreased
+ */
+ queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
+ goto unlock_mutex;
+ }
+
+ /* a different context might increase the clock reference meanwhile */
+ ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
+ if (ret > 0)
+ goto unlock_mutex;
+ ipa3_disable_clks();
+
+unlock_mutex:
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+bail:
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
}
/**
@@ -3518,29 +3575,39 @@
*/
void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
{
- struct ipa_active_client_logging_info log_info;
-
- ipa3_active_clients_lock();
ipa3_active_clients_log_dec(id, false);
- ipa3_ctx->ipa3_active_clients.cnt--;
- IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
- if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
- if (ipa3_ctx->tag_process_before_gating) {
- ipa3_ctx->tag_process_before_gating = false;
- /*
- * When TAG process ends, active clients will be
- * decreased
- */
- IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
- "TAG_PROCESS");
- ipa3_active_clients_log_inc(&log_info, false);
- ipa3_ctx->ipa3_active_clients.cnt = 1;
- queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
- } else {
- ipa3_disable_clks();
- }
+ __ipa3_dec_client_disable_clks();
+}
+
+static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work)
+{
+ __ipa3_dec_client_disable_clks();
+}
+
+/**
+ * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter
+ * if possible without blocking. If this is the last client then the decrease
+ * will happen from work queue context.
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_client_disable_clks_no_block(
+ struct ipa_active_client_logging_info *id)
+{
+ int ret;
+
+ ipa3_active_clients_log_dec(id, true);
+ ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
+ if (ret) {
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ return;
}
- ipa3_active_clients_unlock();
+
+ /* seems like this is the only client holding the clocks */
+ queue_work(ipa3_ctx->power_mgmt_wq,
+ &ipa_dec_clients_disable_clks_on_wq_work);
}
/**
@@ -3636,34 +3703,20 @@
return 0;
}
- ipa3_active_clients_lock();
+ /* Hold the mutex to avoid race conditions with ipa3_enable_clocks() */
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
ipa3_ctx->curr_ipa_clk_rate = clk_rate;
IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
- if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
- struct ipa_active_client_logging_info log_info;
-
- /*
- * clk_set_rate should be called with unlocked lock to allow
- * clients to get a reference to IPA clock synchronously.
- * Hold a reference to IPA clock here to make sure clock
- * state does not change during set_rate.
- */
- IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
- ipa3_ctx->ipa3_active_clients.cnt++;
- ipa3_active_clients_log_inc(&log_info, false);
- ipa3_active_clients_unlock();
-
+ if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
if (ipa3_clk)
clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
- ipa3_get_bus_vote()))
+ ipa3_get_bus_vote()))
WARN_ON(1);
- /* remove the vote added here */
- IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
} else {
IPADBG_LOW("clocks are gated, not setting rate\n");
- ipa3_active_clients_unlock();
}
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
IPADBG_LOW("Done\n");
return 0;
@@ -4625,10 +4678,9 @@
}
mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
- spin_lock_init(&ipa3_ctx->ipa3_active_clients.spinlock);
IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
ipa3_active_clients_log_inc(&log_info, false);
- ipa3_ctx->ipa3_active_clients.cnt = 1;
+ atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
/* Create workqueues for power management */
ipa3_ctx->power_mgmt_wq =
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 24c3d62..f172dc4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -363,14 +363,14 @@
{
int nbytes;
- ipa3_active_clients_lock();
- if (ipa3_ctx->ipa3_active_clients.cnt)
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+ if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt))
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA APPS power state is ON\n");
else
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA APPS power state is OFF\n");
- ipa3_active_clients_unlock();
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
@@ -1064,7 +1064,7 @@
ipa3_ctx->stats.stat_compl,
ipa3_ctx->stats.aggr_close,
ipa3_ctx->stats.wan_aggr_close,
- ipa3_ctx->ipa3_active_clients.cnt,
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt),
connect,
ipa3_ctx->stats.wan_rx_empty,
ipa3_ctx->stats.wan_repl_rx_empty,
@@ -1257,8 +1257,6 @@
if (!ipa3_get_ntn_stats(&stats)) {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"TX num_pkts_processed=%u\n"
- "TX tail_ptr_val=%u\n"
- "TX num_db_fired=%u\n"
"TX ringFull=%u\n"
"TX ringEmpty=%u\n"
"TX ringUsageHigh=%u\n"
@@ -1270,27 +1268,25 @@
"TX bamFifoUsageLow=%u\n"
"TX bamUtilCount=%u\n"
"TX num_db=%u\n"
- "TX num_qmb_int_handled=%u\n",
+ "TX num_qmb_int_handled=%u\n"
+ "TX ipa_pipe_number=%u\n",
TX_STATS(num_pkts_processed),
- TX_STATS(tail_ptr_val),
- TX_STATS(num_db_fired),
- TX_STATS(tx_comp_ring_stats.ringFull),
- TX_STATS(tx_comp_ring_stats.ringEmpty),
- TX_STATS(tx_comp_ring_stats.ringUsageHigh),
- TX_STATS(tx_comp_ring_stats.ringUsageLow),
- TX_STATS(tx_comp_ring_stats.RingUtilCount),
- TX_STATS(bam_stats.bamFifoFull),
- TX_STATS(bam_stats.bamFifoEmpty),
- TX_STATS(bam_stats.bamFifoUsageHigh),
- TX_STATS(bam_stats.bamFifoUsageLow),
- TX_STATS(bam_stats.bamUtilCount),
+ TX_STATS(ring_stats.ringFull),
+ TX_STATS(ring_stats.ringEmpty),
+ TX_STATS(ring_stats.ringUsageHigh),
+ TX_STATS(ring_stats.ringUsageLow),
+ TX_STATS(ring_stats.RingUtilCount),
+ TX_STATS(gsi_stats.bamFifoFull),
+ TX_STATS(gsi_stats.bamFifoEmpty),
+ TX_STATS(gsi_stats.bamFifoUsageHigh),
+ TX_STATS(gsi_stats.bamFifoUsageLow),
+ TX_STATS(gsi_stats.bamUtilCount),
TX_STATS(num_db),
- TX_STATS(num_qmb_int_handled));
+ TX_STATS(num_qmb_int_handled),
+ TX_STATS(ipa_pipe_number));
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
- "RX max_outstanding_pkts=%u\n"
"RX num_pkts_processed=%u\n"
- "RX rx_ring_rp_value=%u\n"
"RX ringFull=%u\n"
"RX ringEmpty=%u\n"
"RX ringUsageHigh=%u\n"
@@ -1301,21 +1297,23 @@
"RX bamFifoUsageHigh=%u\n"
"RX bamFifoUsageLow=%u\n"
"RX bamUtilCount=%u\n"
- "RX num_db=%u\n",
- RX_STATS(max_outstanding_pkts),
+ "RX num_db=%u\n"
+ "RX num_qmb_int_handled=%u\n"
+ "RX ipa_pipe_number=%u\n",
RX_STATS(num_pkts_processed),
- RX_STATS(rx_ring_rp_value),
- RX_STATS(rx_ind_ring_stats.ringFull),
- RX_STATS(rx_ind_ring_stats.ringEmpty),
- RX_STATS(rx_ind_ring_stats.ringUsageHigh),
- RX_STATS(rx_ind_ring_stats.ringUsageLow),
- RX_STATS(rx_ind_ring_stats.RingUtilCount),
- RX_STATS(bam_stats.bamFifoFull),
- RX_STATS(bam_stats.bamFifoEmpty),
- RX_STATS(bam_stats.bamFifoUsageHigh),
- RX_STATS(bam_stats.bamFifoUsageLow),
- RX_STATS(bam_stats.bamUtilCount),
- RX_STATS(num_db));
+ RX_STATS(ring_stats.ringFull),
+ RX_STATS(ring_stats.ringEmpty),
+ RX_STATS(ring_stats.ringUsageHigh),
+ RX_STATS(ring_stats.ringUsageLow),
+ RX_STATS(ring_stats.RingUtilCount),
+ RX_STATS(gsi_stats.bamFifoFull),
+ RX_STATS(gsi_stats.bamFifoEmpty),
+ RX_STATS(gsi_stats.bamFifoUsageHigh),
+ RX_STATS(gsi_stats.bamFifoUsageLow),
+ RX_STATS(gsi_stats.bamUtilCount),
+ RX_STATS(num_db),
+ RX_STATS(num_qmb_int_handled),
+ RX_STATS(ipa_pipe_number));
cnt += nbytes;
} else {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -1785,12 +1783,12 @@
return 0;
}
memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENT_BUF_SIZE);
- ipa3_active_clients_lock();
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
cnt = ipa3_active_clients_log_print_buffer(active_clients_buf,
IPA_DBG_ACTIVE_CLIENT_BUF_SIZE - IPA_MAX_MSG_LEN);
table_size = ipa3_active_clients_log_print_table(active_clients_buf
+ cnt, IPA_MAX_MSG_LEN);
- ipa3_active_clients_unlock();
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
return simple_read_from_buffer(ubuf, count, ppos,
active_clients_buf, cnt + table_size);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 4fb4da8..018467a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -759,6 +759,15 @@
trace_idle_sleep_enter3(sys->ep->client);
usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX);
trace_idle_sleep_exit3(sys->ep->client);
+
+ /*
+ * if pipe is out of buffers there is no point polling for
+ * completed descs; release the worker so delayed work can
+ * run in a timely manner
+ */
+ if (sys->len - sys->len_pending_xfer == 0)
+ break;
+
} while (inactive_cycles <= POLLING_INACTIVITY_RX);
trace_poll_to_intr3(sys->ep->client);
@@ -775,8 +784,8 @@
sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
if (sys->ep->napi_enabled) {
- ipa3_rx_switch_to_intr_mode(sys);
- IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+ /* interrupt mode is done in ipa3_rx_poll context */
+ ipa_assert();
} else
ipa3_handle_rx(sys);
}
@@ -1549,6 +1558,8 @@
struct ipa3_rx_pkt_wrapper *rx_pkt;
struct ipa3_rx_pkt_wrapper *tmp;
+ spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
list_for_each_entry_safe(rx_pkt, tmp,
&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
list_del(&rx_pkt->link);
@@ -1569,6 +1580,8 @@
IPAERR("wlan comm buff total cnt: %d\n",
ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+ spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
}
static void ipa3_alloc_wlan_rx_common_cache(u32 size)
@@ -1606,11 +1619,13 @@
goto fail_dma_mapping;
}
+ spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
list_add_tail(&rx_pkt->link,
&ipa3_ctx->wc_memb.wlan_comm_desc_list);
rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;
ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+ spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
}
@@ -3255,6 +3270,7 @@
{
struct ipa3_sys_context *sys;
struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
+ int clk_off;
if (!notify) {
IPAERR("gsi notify is NULL.\n");
@@ -3286,7 +3302,20 @@
GSI_CHAN_MODE_POLL);
ipa3_inc_acquire_wakelock();
atomic_set(&sys->curr_polling_state, 1);
- queue_work(sys->wq, &sys->work);
+ if (sys->ep->napi_enabled) {
+ struct ipa_active_client_logging_info log;
+
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
+ clk_off = ipa3_inc_client_enable_clks_no_block(
+ &log);
+ if (!clk_off)
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ else
+ queue_work(sys->wq, &sys->work);
+ } else {
+ queue_work(sys->wq, &sys->work);
+ }
}
break;
default:
@@ -3654,6 +3683,9 @@
int cnt = 0;
struct ipa_mem_buffer mem_info = {0};
static int total_cnt;
+ struct ipa_active_client_logging_info log;
+
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -3666,6 +3698,7 @@
while (cnt < weight &&
atomic_read(&ep->sys->curr_polling_state)) {
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
if (ret)
break;
@@ -3683,7 +3716,8 @@
if (cnt < weight) {
ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
- queue_work(ep->sys->wq, &ep->sys->switch_to_intr_work.work);
+ ipa3_rx_switch_to_intr_mode(ep->sys);
+ ipa3_dec_client_disable_clks_no_block(&log);
}
return cnt;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index bfcaa2b..827fbe2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -741,7 +741,7 @@
goto error;
}
- if ((*rt_tbl)->cookie != IPA_COOKIE) {
+ if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) {
IPAERR("RT table cookie is invalid\n");
goto error;
}
@@ -800,7 +800,7 @@
}
INIT_LIST_HEAD(&((*entry)->link));
(*entry)->rule = *rule;
- (*entry)->cookie = IPA_COOKIE;
+ (*entry)->cookie = IPA_FLT_COOKIE;
(*entry)->rt_tbl = rt_tbl;
(*entry)->tbl = tbl;
if (rule->rule_id) {
@@ -835,12 +835,18 @@
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
*rule_hdl = id;
entry->id = id;
IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
+ipa_insert_failed:
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ tbl->rule_cnt--;
+ return -EPERM;
}
static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
@@ -866,9 +872,16 @@
list_add(&entry->link, &tbl->head_flt_rule_list);
}
- __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+ if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
+ goto ipa_insert_failed;
return 0;
+ipa_insert_failed:
+ list_del(&entry->link);
+ /* if rule id was allocated from idr, remove it */
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
error:
return -EPERM;
@@ -887,7 +900,7 @@
goto error;
if (rule == NULL || rule_hdl == NULL) {
- IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule,
rule_hdl);
goto error;
}
@@ -900,7 +913,8 @@
list_add(&entry->link, &((*add_after_entry)->link));
- __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+ if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
+ goto ipa_insert_failed;
/*
* prepare for next insertion
@@ -909,6 +923,13 @@
return 0;
+ipa_insert_failed:
+ list_del(&entry->link);
+ /* if rule id was allocated from idr, remove it */
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
error:
*add_after_entry = NULL;
return -EPERM;
@@ -921,12 +942,12 @@
entry = ipa3_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
id = entry->id;
@@ -958,12 +979,12 @@
entry = ipa3_id_find(frule->rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -973,25 +994,25 @@
if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
if (!frule->rule.eq_attrib_type) {
if (!frule->rule.rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1036,7 +1057,7 @@
int ipa_ep_idx;
if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
rule_hdl, ep);
return -EINVAL;
@@ -1066,7 +1087,7 @@
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1081,7 +1102,7 @@
result = -1;
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1089,7 +1110,7 @@
}
if (rules->global) {
- IPAERR("no support for global filter rules\n");
+ IPAERR_RL("no support for global filter rules\n");
result = -EPERM;
goto bail;
}
@@ -1124,12 +1145,12 @@
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
if (rules->ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms ep=%d\n", rules->ep);
+ IPAERR_RL("bad parms ep=%d\n", rules->ep);
return -EINVAL;
}
@@ -1144,20 +1165,20 @@
entry = ipa3_id_find(rules->add_after_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
if (entry->tbl != tbl) {
- IPAERR("given entry does not match the table\n");
+ IPAERR_RL("given entry does not match the table\n");
result = -EINVAL;
goto bail;
}
if (tbl->sticky_rear)
if (&entry->link == tbl->head_flt_rule_list.prev) {
- IPAERR("cannot add rule at end of a sticky table");
+ IPAERR_RL("cannot add rule at end of a sticky table");
result = -EINVAL;
goto bail;
}
@@ -1179,7 +1200,7 @@
&entry);
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1213,14 +1234,14 @@
int result;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del flt rule %i\n", i);
+ IPAERR_RL("failed to del flt rule %i\n", i);
hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1253,14 +1274,14 @@
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
- IPAERR("failed to mdfy flt rule %i\n", i);
+ IPAERR_RL("failed to mdfy flt rule %i\n", i);
hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
@@ -1294,7 +1315,7 @@
int result;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1330,7 +1351,7 @@
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 14d776e..da7bcd0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -327,17 +327,17 @@
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
- IPAERR("invalid processing type %d\n", proc_ctx->type);
+ IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
return -EINVAL;
}
hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
if (!hdr_entry) {
- IPAERR("hdr_hdl is invalid\n");
+ IPAERR_RL("hdr_hdl is invalid\n");
return -EINVAL;
}
- if (hdr_entry->cookie != IPA_COOKIE) {
- IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
+ if (hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
WARN_ON(1);
return -EINVAL;
}
@@ -357,7 +357,7 @@
entry->l2tp_params = proc_ctx->l2tp_params;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_PROC_HDR_COOKIE;
needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
@@ -367,7 +367,7 @@
ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
bin = IPA_HDR_PROC_CTX_BIN1;
} else {
- IPAERR("unexpected needed len %d\n", needed_len);
+ IPAERR_RL("unexpected needed len %d\n", needed_len);
WARN_ON(1);
goto bad_len;
}
@@ -377,7 +377,7 @@
IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
if (list_empty(&htbl->head_free_offset_list[bin])) {
if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
- IPAERR("hdr proc ctx table overflow\n");
+ IPAERR_RL("hdr proc ctx table overflow\n");
goto bad_len;
}
@@ -415,6 +415,7 @@
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
proc_ctx->proc_ctx_hdl = id;
@@ -422,6 +423,14 @@
return 0;
+ipa_insert_failed:
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ list_del(&entry->link);
+ htbl->proc_ctx_cnt--;
+
bad_len:
if (add_ref_hdr)
hdr_entry->ref_cnt--;
@@ -434,19 +443,19 @@
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
struct ipa3_hdr_entry *entry;
- struct ipa_hdr_offset_entry *offset;
+ struct ipa_hdr_offset_entry *offset = NULL;
u32 bin;
struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
int id;
int mem_size;
if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
goto error;
}
if (!HDR_TYPE_IS_VALID(hdr->type)) {
- IPAERR("invalid hdr type %d\n", hdr->type);
+ IPAERR_RL("invalid hdr type %d\n", hdr->type);
goto error;
}
@@ -465,7 +474,7 @@
entry->type = hdr->type;
entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
entry->eth2_ofst = hdr->eth2_ofst;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_HDR_COOKIE;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
@@ -478,7 +487,7 @@
else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
bin = IPA_HDR_BIN4;
else {
- IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
goto bad_hdr_len;
}
@@ -544,6 +553,7 @@
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
hdr->hdr_hdl = id;
@@ -568,10 +578,19 @@
entry->ref_cnt--;
hdr->hdr_hdl = 0;
ipa3_id_remove(id);
+ipa_insert_failed:
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
+ entry->hdr_len, DMA_TO_DEVICE);
+ } else {
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ }
htbl->hdr_cnt--;
list_del(&entry->link);
- dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
- entry->hdr_len, DMA_TO_DEVICE);
+
fail_dma_mapping:
entry->is_hdr_proc_ctx = false;
@@ -589,8 +608,8 @@
struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
entry = ipa3_id_find(proc_ctx_hdl);
- if (!entry || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parm\n");
+ if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -598,7 +617,7 @@
htbl->proc_ctx_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -636,12 +655,12 @@
entry = ipa3_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad parm\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -654,7 +673,7 @@
entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -703,7 +722,7 @@
int result = -EFAULT;
if (hdrs == NULL || hdrs->num_hdrs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -712,7 +731,7 @@
hdrs->num_hdrs);
for (i = 0; i < hdrs->num_hdrs; i++) {
if (__ipa_add_hdr(&hdrs->hdr[i])) {
- IPAERR("failed to add hdr %d\n", i);
+ IPAERR_RL("failed to add hdr %d\n", i);
hdrs->hdr[i].status = -1;
} else {
hdrs->hdr[i].status = 0;
@@ -748,14 +767,14 @@
int result = -EFAULT;
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -803,7 +822,7 @@
int result = -EFAULT;
if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -812,7 +831,7 @@
proc_ctxs->num_proc_ctxs);
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
- IPAERR("failed to add hdr pric ctx %d\n", i);
+ IPAERR_RL("failed to add hdr pric ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
proc_ctxs->proc_ctx[i].status = 0;
@@ -850,14 +869,14 @@
int result;
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1064,7 +1083,7 @@
struct ipa3_hdr_entry *entry;
if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Header name too long: %s\n", name);
+ IPAERR_RL("Header name too long: %s\n", name);
return NULL;
}
@@ -1094,7 +1113,7 @@
int result = -1;
if (lookup == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
@@ -1181,13 +1200,13 @@
entry = ipa3_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("invalid header entry\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("invalid header entry\n");
result = -EINVAL;
goto bail;
}
@@ -1216,7 +1235,7 @@
int result = -EFAULT;
if (copy == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index fcb7c72..ed31423 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -39,6 +39,12 @@
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
#define IPA_COOKIE 0x57831603
+#define IPA_RT_RULE_COOKIE 0x57831604
+#define IPA_RT_TBL_COOKIE 0x57831605
+#define IPA_FLT_COOKIE 0x57831606
+#define IPA_HDR_COOKIE 0x57831607
+#define IPA_PROC_HDR_COOKIE 0x57831608
+
#define MTU_BYTE 1500
#define IPA_EP_NOT_ALLOCATED (-1)
@@ -94,6 +100,18 @@
} \
} while (0)
+#define IPAERR_RL(fmt, args...) \
+ do { \
+ pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args);\
+ if (ipa3_ctx) { \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ } \
+ } while (0)
+
#define WLAN_AMPDU_TX_EP 15
#define WLAN_PROD_TX_EP 19
#define WLAN1_CONS_RX_EP 14
@@ -207,8 +225,8 @@
*/
struct ipa3_flt_entry {
struct list_head link;
- struct ipa_flt_rule rule;
u32 cookie;
+ struct ipa_flt_rule rule;
struct ipa3_flt_tbl *tbl;
struct ipa3_rt_tbl *rt_tbl;
u32 hw_len;
@@ -236,13 +254,13 @@
*/
struct ipa3_rt_tbl {
struct list_head link;
+ u32 cookie;
struct list_head head_rt_rule_list;
char name[IPA_RESOURCE_NAME_MAX];
u32 idx;
u32 rule_cnt;
u32 ref_cnt;
struct ipa3_rt_tbl_set *set;
- u32 cookie;
bool in_sys[IPA_RULE_TYPE_MAX];
u32 sz[IPA_RULE_TYPE_MAX];
struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
@@ -274,6 +292,7 @@
*/
struct ipa3_hdr_entry {
struct list_head link;
+ u32 cookie;
u8 hdr[IPA_HDR_MAX_SIZE];
u32 hdr_len;
char name[IPA_RESOURCE_NAME_MAX];
@@ -283,7 +302,6 @@
dma_addr_t phys_base;
struct ipa3_hdr_proc_ctx_entry *proc_ctx;
struct ipa_hdr_offset_entry *offset_entry;
- u32 cookie;
u32 ref_cnt;
int id;
u8 is_eth2_ofst_valid;
@@ -333,11 +351,11 @@
*/
struct ipa3_hdr_proc_ctx_entry {
struct list_head link;
+ u32 cookie;
enum ipa_hdr_proc_type type;
union ipa_l2tp_hdr_proc_ctx_params l2tp_params;
struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
struct ipa3_hdr_entry *hdr;
- u32 cookie;
u32 ref_cnt;
int id;
bool user_deleted;
@@ -399,8 +417,8 @@
*/
struct ipa3_rt_entry {
struct list_head link;
- struct ipa_rt_rule rule;
u32 cookie;
+ struct ipa_rt_rule rule;
struct ipa3_rt_tbl *tbl;
struct ipa3_hdr_entry *hdr;
struct ipa3_hdr_proc_ctx_entry *proc_ctx;
@@ -844,9 +862,7 @@
struct ipa3_active_clients {
struct mutex mutex;
- spinlock_t spinlock;
- bool mutex_locked;
- int cnt;
+ atomic_t cnt;
};
struct ipa3_wakelock_ref_cnt {
@@ -1675,6 +1691,8 @@
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp);
int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+int ipa3_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv);
+void ipa3_ntn_uc_dereg_rdyCB(void);
/*
* To retrieve doorbell physical address of
@@ -1845,6 +1863,8 @@
int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
*id);
void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+void ipa3_dec_client_disable_clks_no_block(
+ struct ipa_active_client_logging_info *id);
void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
bool int_ctx);
void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
@@ -1921,10 +1941,7 @@
bool ipa3_should_pipe_be_suspended(enum ipa_client_type client);
int ipa3_tag_aggr_force_close(int pipe_num);
-void ipa3_active_clients_lock(void);
-int ipa3_active_clients_trylock(unsigned long *flags);
void ipa3_active_clients_unlock(void);
-void ipa3_active_clients_trylock_unlock(unsigned long *flags);
int ipa3_wdi_init(void);
int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
int ipa3_tag_process(struct ipa3_desc *desc, int num_descs,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
index e7f8acd..6d82da2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -322,27 +322,21 @@
static irqreturn_t ipa3_isr(int irq, void *ctxt)
{
- unsigned long flags;
+ struct ipa_active_client_logging_info log_info;
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
IPADBG_LOW("Enter\n");
/* defer interrupt handling in case IPA is not clocked on */
- if (ipa3_active_clients_trylock(&flags) == 0) {
+ if (ipa3_inc_client_enable_clks_no_block(&log_info)) {
IPADBG("defer interrupt processing\n");
queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
return IRQ_HANDLED;
}
- if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
- IPADBG("defer interrupt processing\n");
- queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
- goto bail;
- }
-
ipa3_process_interrupts(true);
IPADBG_LOW("Exit\n");
-bail:
- ipa3_active_clients_trylock_unlock(&flags);
+ ipa3_dec_client_disable_clks(&log_info);
return IRQ_HANDLED;
}
/**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index fe6d245..2bd7b79 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -227,7 +227,7 @@
if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) ==
IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", lookup->name);
+ IPAERR_RL("Interface name too long. (%s)\n", lookup->name);
return result;
}
@@ -268,13 +268,21 @@
}
if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", tx->name);
+ IPAERR_RL("Interface name too long. (%s)\n", tx->name);
return result;
}
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, tx->name)) {
+ /* add the entry check */
+ if (entry->num_tx_props != tx->num_tx_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_tx_props,
+ tx->num_tx_props);
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+ }
memcpy(tx->tx, entry->tx, entry->num_tx_props *
sizeof(struct ipa_ioc_tx_intf_prop));
result = 0;
@@ -307,13 +315,21 @@
}
if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", rx->name);
+ IPAERR_RL("Interface name too long. (%s)\n", rx->name);
return result;
}
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, rx->name)) {
+ /* add the entry check */
+ if (entry->num_rx_props != rx->num_rx_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_rx_props,
+ rx->num_rx_props);
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+ }
memcpy(rx->rx, entry->rx, entry->num_rx_props *
sizeof(struct ipa_ioc_rx_intf_prop));
result = 0;
@@ -348,6 +364,14 @@
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, ext->name)) {
+ /* add the entry check */
+ if (entry->num_ext_props != ext->num_ext_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_ext_props,
+ ext->num_ext_props);
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+ }
memcpy(ext->ext, entry->ext, entry->num_ext_props *
sizeof(struct ipa_ioc_ext_intf_prop));
result = 0;
@@ -386,13 +410,13 @@
if (meta == NULL || (buff == NULL && callback != NULL) ||
(buff != NULL && callback == NULL)) {
- IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+ IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
meta, buff, callback);
return -EINVAL;
}
if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
- IPAERR("unsupported message type %d\n", meta->msg_type);
+ IPAERR_RL("unsupported message type %d\n", meta->msg_type);
return -EINVAL;
}
@@ -616,7 +640,7 @@
int result = -EINVAL;
if (meta == NULL || buff == NULL || !count) {
- IPAERR("invalid param name=%p buff=%p count=%zu\n",
+ IPAERR_RL("invalid param name=%p buff=%p count=%zu\n",
meta, buff, count);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index a153f2d..958fc6c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -253,8 +253,8 @@
mutex_lock(&nat_ctx->lock);
if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
- IPAERR("Nat device name mismatch\n");
- IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+ IPAERR_RL("Nat device name mismatch\n");
+ IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
result = -EPERM;
goto bail;
}
@@ -273,7 +273,7 @@
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
- IPAERR("Invalid Parameters or device is already init\n");
+ IPAERR_RL("Invalid Parameters or device is already init\n");
result = -EPERM;
goto bail;
}
@@ -371,7 +371,7 @@
/* check for integer overflow */
if (init->ipv4_rules_offset >
UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Table Entry offset is not
@@ -380,8 +380,8 @@
tmp = init->ipv4_rules_offset +
(TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->ipv4_rules_offset, (init->table_entries + 1),
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -389,8 +389,8 @@
/* check for integer overflow */
if (init->expn_rules_offset >
- UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
+ (UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries))) {
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Expn Table Entry offset is not
@@ -399,8 +399,8 @@
tmp = init->expn_rules_offset +
(TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->expn_rules_offset, init->expn_table_entries,
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -409,7 +409,7 @@
/* check for integer overflow */
if (init->index_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Indx Table Entry offset is not
@@ -418,8 +418,8 @@
tmp = init->index_offset +
(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Indx Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_offset, (init->table_entries + 1),
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -428,7 +428,7 @@
/* check for integer overflow */
if (init->index_expn_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Expn Table entry offset is not
@@ -437,8 +437,8 @@
tmp = init->index_expn_offset +
(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Indx Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_expn_offset, init->expn_table_entries,
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -476,16 +476,16 @@
(init->expn_rules_offset > offset) ||
(init->index_offset > offset) ||
(init->index_expn_offset > offset)) {
- IPAERR("Failed due to integer overflow\n");
- IPAERR("nat.mem.dma_handle: 0x%pa\n",
+ IPAERR_RL("Failed due to integer overflow\n");
+ IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
&ipa3_ctx->nat_mem.dma_handle);
- IPAERR("ipv4_rules_offset: 0x%x\n",
+ IPAERR_RL("ipv4_rules_offset: 0x%x\n",
init->ipv4_rules_offset);
- IPAERR("expn_rules_offset: 0x%x\n",
+ IPAERR_RL("expn_rules_offset: 0x%x\n",
init->expn_rules_offset);
- IPAERR("index_offset: 0x%x\n",
+ IPAERR_RL("index_offset: 0x%x\n",
init->index_offset);
- IPAERR("index_expn_offset: 0x%x\n",
+ IPAERR_RL("index_expn_offset: 0x%x\n",
init->index_expn_offset);
result = -EPERM;
goto destroy_imm_cmd;
@@ -544,7 +544,7 @@
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
if (!cmd_pyld[num_cmd]) {
- IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
result = -EPERM;
goto destroy_imm_cmd;
}
@@ -747,7 +747,7 @@
IPADBG("\n");
if (dma->entries <= 0) {
- IPAERR("Invalid number of commands %d\n",
+ IPAERR_RL("Invalid number of commands %d\n",
dma->entries);
ret = -EPERM;
goto bail;
@@ -755,7 +755,7 @@
for (cnt = 0; cnt < dma->entries; cnt++) {
if (dma->dma[cnt].table_index >= 1) {
- IPAERR("Invalid table index %d\n",
+ IPAERR_RL("Invalid table index %d\n",
dma->dma[cnt].table_index);
ret = -EPERM;
goto bail;
@@ -766,7 +766,7 @@
if (dma->dma[cnt].offset >=
(ipa3_ctx->nat_mem.size_base_tables + 1) *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -778,7 +778,7 @@
if (dma->dma[cnt].offset >=
ipa3_ctx->nat_mem.size_expansion_tables *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -790,7 +790,7 @@
if (dma->dma[cnt].offset >=
(ipa3_ctx->nat_mem.size_base_tables + 1) *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -802,7 +802,7 @@
if (dma->dma[cnt].offset >=
ipa3_ctx->nat_mem.size_expansion_tables *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -811,7 +811,7 @@
break;
default:
- IPAERR("Invalid base_addr %d\n",
+ IPAERR_RL("Invalid base_addr %d\n",
dma->dma[cnt].base_addr);
ret = -EPERM;
goto bail;
@@ -853,7 +853,7 @@
cmd.data = dma->dma[cnt].data;
cmd_pyld = ipahal_construct_imm_cmd(cmd_name, &cmd, false);
if (!cmd_pyld) {
- IPAERR("Fail to construct nat_dma imm cmd\n");
+ IPAERR_RL("Fail to construct nat_dma imm cmd\n");
continue;
}
desc[1].type = IPA_IMM_CMD_DESC;
@@ -1016,7 +1016,7 @@
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
if (!cmd_pyld) {
- IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
result = -EPERM;
goto destroy_regwrt_imm_cmd;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index cf28986..5f14032 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -694,7 +694,7 @@
struct ipa3_rt_tbl_set *set;
if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Name too long: %s\n", name);
+ IPAERR_RL("Name too long: %s\n", name);
return NULL;
}
@@ -720,16 +720,19 @@
struct ipa3_rt_tbl *entry;
if (in->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
+ mutex_lock(&ipa3_ctx->lock);
/* check if this table exists */
entry = __ipa3_find_rt_tbl(in->ip, in->name);
- if (!entry)
+ if (!entry) {
+ mutex_unlock(&ipa3_ctx->lock);
return -EFAULT;
-
+ }
in->idx = entry->idx;
+ mutex_unlock(&ipa3_ctx->lock);
return 0;
}
@@ -743,7 +746,7 @@
int max_tbl_indx;
if (name == NULL) {
- IPAERR("no tbl name\n");
+ IPAERR_RL("no tbl name\n");
goto error;
}
@@ -756,7 +759,7 @@
max(IPA_MEM_PART(v6_modem_rt_index_hi),
IPA_MEM_PART(v6_apps_rt_index_hi));
} else {
- IPAERR("bad ip family type\n");
+ IPAERR_RL("bad ip family type\n");
goto error;
}
@@ -790,7 +793,7 @@
INIT_LIST_HEAD(&entry->link);
strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
entry->set = set;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_TBL_COOKIE;
entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ?
!ipa3_ctx->ip4_rt_tbl_hash_lcl :
!ipa3_ctx->ip6_rt_tbl_hash_lcl;
@@ -808,12 +811,16 @@
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
}
return entry;
-
+ipa_insert_failed:
+ set->tbl_cnt--;
+ list_del(&entry->link);
+ idr_destroy(&entry->rule_ids);
fail_rt_idx_alloc:
entry->cookie = 0;
kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
@@ -827,13 +834,13 @@
u32 id;
struct ipa3_rt_tbl_set *rset;
- if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parms\n");
+ if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("bad parms\n");
return -EINVAL;
}
id = entry->id;
if (ipa3_id_find(id) == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EPERM;
}
@@ -841,8 +848,10 @@
ip = IPA_IP_v4;
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ return -EPERM;
+ }
rset = &ipa3_ctx->reap_rt_tbl_set[ip];
@@ -879,14 +888,14 @@
if (rule->hdr_hdl) {
*hdr = ipa3_id_find(rule->hdr_hdl);
- if ((*hdr == NULL) || ((*hdr)->cookie != IPA_COOKIE)) {
+ if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid hdr\n");
return -EPERM;
}
} else if (rule->hdr_proc_ctx_hdl) {
*proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl);
if ((*proc_ctx == NULL) ||
- ((*proc_ctx)->cookie != IPA_COOKIE)) {
+ ((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid proc ctx\n");
return -EPERM;
@@ -909,7 +918,7 @@
goto error;
}
INIT_LIST_HEAD(&(*entry)->link);
- (*(entry))->cookie = IPA_COOKIE;
+ (*(entry))->cookie = IPA_RT_RULE_COOKIE;
(*(entry))->rule = *rule;
(*(entry))->tbl = tbl;
(*(entry))->hdr = hdr;
@@ -977,8 +986,8 @@
tbl = __ipa_add_rt_tbl(ip, name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
- IPAERR("failed adding rt tbl name = %s\n",
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("failed adding rt tbl name = %s\n",
name ? name : "");
goto error;
}
@@ -988,8 +997,8 @@
*/
if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
(tbl->rule_cnt > 0) && (at_rear != 0)) {
- IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
- tbl->rule_cnt, at_rear);
+ IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d at_rear=%d"
+ , tbl->rule_cnt, at_rear);
goto error;
}
@@ -1059,7 +1068,7 @@
int ret;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1069,7 +1078,7 @@
&rules->rules[i].rule,
rules->rules[i].at_rear,
&rules->rules[i].rt_rule_hdl)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1105,36 +1114,36 @@
struct ipa3_rt_entry *entry = NULL;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
- IPAERR("failed finding rt tbl name = %s\n",
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("failed finding rt tbl name = %s\n",
rules->rt_tbl_name ? rules->rt_tbl_name : "");
ret = -EINVAL;
goto bail;
}
if (tbl->rule_cnt <= 0) {
- IPAERR("tbl->rule_cnt <= 0");
+ IPAERR_RL("tbl->rule_cnt <= 0");
ret = -EINVAL;
goto bail;
}
entry = ipa3_id_find(rules->add_after_hdl);
if (!entry) {
- IPAERR("failed finding rule %d in rt tbls\n",
+ IPAERR_RL("failed finding rule %d in rt tbls\n",
rules->add_after_hdl);
ret = -EINVAL;
goto bail;
}
if (entry->tbl != tbl) {
- IPAERR("given rt rule does not match the table\n");
+ IPAERR_RL("given rt rule does not match the table\n");
ret = -EINVAL;
goto bail;
}
@@ -1145,7 +1154,7 @@
*/
if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
(&entry->link == tbl->head_rt_rule_list.prev)) {
- IPAERR("cannot add rule at end of tbl rule_cnt=%d\n",
+ IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d\n",
tbl->rule_cnt);
ret = -EINVAL;
goto bail;
@@ -1162,7 +1171,7 @@
&rules->rules[i].rule,
&rules->rules[i].rt_rule_hdl,
&entry)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1171,7 +1180,7 @@
if (rules->commit)
if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
- IPAERR("failed to commit\n");
+ IPAERR_RL("failed to commit\n");
ret = -EPERM;
goto bail;
}
@@ -1192,12 +1201,12 @@
entry = ipa3_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
@@ -1213,7 +1222,7 @@
idr_remove(&entry->tbl->rule_ids, entry->rule_id);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
}
entry->cookie = 0;
id = entry->id;
@@ -1240,14 +1249,14 @@
int ret;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1280,7 +1289,7 @@
int ret;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1324,7 +1333,7 @@
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1340,7 +1349,7 @@
* filtering rules point to routing tables
*/
if (ipa3_reset_flt(ip))
- IPAERR("fail to reset flt ip=%d\n", ip);
+ IPAERR_RL("fail to reset flt ip=%d\n", ip);
set = &ipa3_ctx->rt_tbl_set[ip];
rset = &ipa3_ctx->reap_rt_tbl_set[ip];
@@ -1429,18 +1438,18 @@
int result = -EFAULT;
if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
- if (entry && entry->cookie == IPA_COOKIE) {
+ if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
entry->ref_cnt++;
lookup->hdl = entry->id;
/* commit for get */
if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
result = 0;
}
@@ -1466,13 +1475,13 @@
mutex_lock(&ipa3_ctx->lock);
entry = ipa3_id_find(rt_tbl_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto ret;
}
- if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
- IPAERR("bad parms\n");
+ if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
+ IPAERR_RL("bad parms\n");
result = -EINVAL;
goto ret;
}
@@ -1481,18 +1490,21 @@
ip = IPA_IP_v4;
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ result = -EINVAL;
+ goto ret;
+ }
entry->ref_cnt--;
if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n",
entry->idx);
if (__ipa_del_rt_tbl(entry))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
/* commit for put */
if (ipa3_ctx->ctrl->ipa3_commit_rt(ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
}
result = 0;
@@ -1512,26 +1524,27 @@
if (rtrule->rule.hdr_hdl) {
hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid hdr\n");
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid hdr\n");
goto error;
}
} else if (rtrule->rule.hdr_proc_ctx_hdl) {
proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl);
- if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid proc ctx\n");
+ if ((proc_ctx == NULL) ||
+ (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid proc ctx\n");
goto error;
}
}
entry = ipa3_id_find(rtrule->rt_rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1572,14 +1585,14 @@
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index ce47623..b6427d0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -104,41 +104,83 @@
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
TX_STATS(num_pkts_processed);
- TX_STATS(tail_ptr_val);
- TX_STATS(num_db_fired);
- TX_STATS(tx_comp_ring_stats.ringFull);
- TX_STATS(tx_comp_ring_stats.ringEmpty);
- TX_STATS(tx_comp_ring_stats.ringUsageHigh);
- TX_STATS(tx_comp_ring_stats.ringUsageLow);
- TX_STATS(tx_comp_ring_stats.RingUtilCount);
- TX_STATS(bam_stats.bamFifoFull);
- TX_STATS(bam_stats.bamFifoEmpty);
- TX_STATS(bam_stats.bamFifoUsageHigh);
- TX_STATS(bam_stats.bamFifoUsageLow);
- TX_STATS(bam_stats.bamUtilCount);
+ TX_STATS(ring_stats.ringFull);
+ TX_STATS(ring_stats.ringEmpty);
+ TX_STATS(ring_stats.ringUsageHigh);
+ TX_STATS(ring_stats.ringUsageLow);
+ TX_STATS(ring_stats.RingUtilCount);
+ TX_STATS(gsi_stats.bamFifoFull);
+ TX_STATS(gsi_stats.bamFifoEmpty);
+ TX_STATS(gsi_stats.bamFifoUsageHigh);
+ TX_STATS(gsi_stats.bamFifoUsageLow);
+ TX_STATS(gsi_stats.bamUtilCount);
TX_STATS(num_db);
TX_STATS(num_qmb_int_handled);
+ TX_STATS(ipa_pipe_number);
- RX_STATS(max_outstanding_pkts);
RX_STATS(num_pkts_processed);
- RX_STATS(rx_ring_rp_value);
- RX_STATS(rx_ind_ring_stats.ringFull);
- RX_STATS(rx_ind_ring_stats.ringEmpty);
- RX_STATS(rx_ind_ring_stats.ringUsageHigh);
- RX_STATS(rx_ind_ring_stats.ringUsageLow);
- RX_STATS(rx_ind_ring_stats.RingUtilCount);
- RX_STATS(bam_stats.bamFifoFull);
- RX_STATS(bam_stats.bamFifoEmpty);
- RX_STATS(bam_stats.bamFifoUsageHigh);
- RX_STATS(bam_stats.bamFifoUsageLow);
- RX_STATS(bam_stats.bamUtilCount);
+ RX_STATS(ring_stats.ringFull);
+ RX_STATS(ring_stats.ringEmpty);
+ RX_STATS(ring_stats.ringUsageHigh);
+ RX_STATS(ring_stats.ringUsageLow);
+ RX_STATS(ring_stats.RingUtilCount);
+ RX_STATS(gsi_stats.bamFifoFull);
+ RX_STATS(gsi_stats.bamFifoEmpty);
+ RX_STATS(gsi_stats.bamFifoUsageHigh);
+ RX_STATS(gsi_stats.bamFifoUsageLow);
+ RX_STATS(gsi_stats.bamUtilCount);
RX_STATS(num_db);
+ RX_STATS(num_qmb_int_handled);
+ RX_STATS(ipa_pipe_number);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
+
+int ipa3_ntn_uc_reg_rdyCB(void (*ipa_ready_cb)(void *), void *user_data)
+{
+ int ret;
+
+ if (!ipa3_ctx) {
+ IPAERR("IPA ctx is null\n");
+ return -ENXIO;
+ }
+
+ ret = ipa3_uc_state_check();
+ if (ret) {
+ ipa3_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
+ ipa3_ctx->uc_ntn_ctx.priv = user_data;
+ return 0;
+ }
+
+ return -EEXIST;
+}
+
+void ipa3_ntn_uc_dereg_rdyCB(void)
+{
+ ipa3_ctx->uc_ntn_ctx.uc_ready_cb = NULL;
+ ipa3_ctx->uc_ntn_ctx.priv = NULL;
+}
+
+static void ipa3_uc_ntn_loaded_handler(void)
+{
+ if (!ipa3_ctx) {
+ IPAERR("IPA ctx is null\n");
+ return;
+ }
+
+ if (ipa3_ctx->uc_ntn_ctx.uc_ready_cb) {
+ ipa3_ctx->uc_ntn_ctx.uc_ready_cb(
+ ipa3_ctx->uc_ntn_ctx.priv);
+
+ ipa3_ctx->uc_ntn_ctx.uc_ready_cb =
+ NULL;
+ ipa3_ctx->uc_ntn_ctx.priv = NULL;
+ }
+}
+
int ipa3_ntn_init(void)
{
struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
@@ -146,6 +188,8 @@
uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler;
uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
ipa3_uc_ntn_event_log_info_handler;
+ uc_ntn_cbs.ipa_uc_loaded_hdlr =
+ ipa3_uc_ntn_loaded_handler;
ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
index 79f0973..2e5a832 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -383,22 +383,21 @@
* struct NTN3RxInfoData_t - NTN Structure holding the Rx pipe
* information
*
- *@max_outstanding_pkts: Number of outstanding packets in Rx
- * Ring
*@num_pkts_processed: Number of packets processed - cumulative
- *@rx_ring_rp_value: Read pointer last advertized to the WLAN FW
*
- *@rx_ind_ring_stats:
- *@bam_stats:
+ *@ring_stats:
+ *@gsi_stats:
*@num_db: Number of times the doorbell was rung
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@ipa_pipe_number: The IPA Rx/Tx pipe number.
*/
struct NTN3RxInfoData_t {
- u32 max_outstanding_pkts;
u32 num_pkts_processed;
- u32 rx_ring_rp_value;
- struct IpaHwRingStats_t rx_ind_ring_stats;
- struct IpaHwBamStats_t bam_stats;
- u32 num_db;
+ struct IpaHwRingStats_t ring_stats;
+ struct IpaHwBamStats_t gsi_stats;
+ u32 num_db;
+ u32 num_qmb_int_handled;
+ u32 ipa_pipe_number;
} __packed;
@@ -417,12 +416,11 @@
*/
struct NTN3TxInfoData_t {
u32 num_pkts_processed;
- u32 tail_ptr_val;
- u32 num_db_fired;
- struct IpaHwRingStats_t tx_comp_ring_stats;
- struct IpaHwBamStats_t bam_stats;
- u32 num_db;
- u32 num_qmb_int_handled;
+ struct IpaHwRingStats_t ring_stats;
+ struct IpaHwBamStats_t gsi_stats;
+ u32 num_db;
+ u32 num_qmb_int_handled;
+ u32 ipa_pipe_number;
} __packed;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 60dc04f..c97d2b3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1668,7 +1668,7 @@
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm, %d\n", clnt_hdl);
+ IPAERR_RL("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1681,7 +1681,7 @@
ep = &ipa3_ctx->ep[clnt_hdl];
if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 11da6b3..9ca4b7d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -26,6 +26,15 @@
#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
#define IPA_V3_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
+
+#define IPA_V3_5_CLK_RATE_SVS (200 * 1000 * 1000UL)
+#define IPA_V3_5_CLK_RATE_NOMINAL (400 * 1000 * 1000UL)
+#define IPA_V3_5_CLK_RATE_TURBO (42640 * 10 * 1000UL)
+
+#define IPA_V4_0_CLK_RATE_SVS (125 * 1000 * 1000UL)
+#define IPA_V4_0_CLK_RATE_NOMINAL (220 * 1000 * 1000UL)
+#define IPA_V4_0_CLK_RATE_TURBO (250 * 1000 * 1000UL)
+
#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1)
#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
@@ -1538,43 +1547,6 @@
.name = "ipa",
};
-void ipa3_active_clients_lock(void)
-{
- unsigned long flags;
-
- mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
- spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
- ipa3_ctx->ipa3_active_clients.mutex_locked = true;
- spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
-}
-
-int ipa3_active_clients_trylock(unsigned long *flags)
-{
- spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
- if (ipa3_ctx->ipa3_active_clients.mutex_locked) {
- spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock,
- *flags);
- return 0;
- }
-
- return 1;
-}
-
-void ipa3_active_clients_trylock_unlock(unsigned long *flags)
-{
- spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
-}
-
-void ipa3_active_clients_unlock(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
- ipa3_ctx->ipa3_active_clients.mutex_locked = false;
- spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
- mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
-}
-
/**
* ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an
* IPA_RM resource
@@ -1818,16 +1790,8 @@
enum ipa_client_type client;
struct ipa_ep_cfg_ctrl suspend;
int ipa_ep_idx;
- unsigned long flags;
struct ipa_active_client_logging_info log_info;
- if (ipa3_active_clients_trylock(&flags) == 0)
- return -EPERM;
- if (ipa3_ctx->ipa3_active_clients.cnt == 1) {
- res = -EPERM;
- goto bail;
- }
-
memset(&clients, 0, sizeof(clients));
res = ipa3_get_clients_from_rm_resource(resource, &clients);
if (res) {
@@ -1866,14 +1830,11 @@
if (res == 0) {
IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
ipa_rm_resource_str(resource));
- ipa3_active_clients_log_dec(&log_info, true);
- ipa3_ctx->ipa3_active_clients.cnt--;
- IPADBG("active clients = %d\n",
- ipa3_ctx->ipa3_active_clients.cnt);
+ /* before gating IPA clocks do TAG process */
+ ipa3_ctx->tag_process_before_gating = true;
+ ipa3_dec_client_disable_clks_no_block(&log_info);
}
bail:
- ipa3_active_clients_trylock_unlock(&flags);
-
return res;
}
@@ -2027,7 +1988,7 @@
*/
int ipa3_cfg_filter(u32 disable)
{
- IPAERR("Filter disable is not supported!\n");
+ IPAERR_RL("Filter disable is not supported!\n");
return -EPERM;
}
@@ -2162,7 +2123,7 @@
int ipa_ep_idx;
if (client >= IPA_CLIENT_MAX || client < 0) {
- IPAERR("Bad client number! client =%d\n", client);
+ IPAERR_RL("Bad client number! client =%d\n", client);
return IPA_EP_NOT_ALLOCATED;
}
@@ -3320,19 +3281,19 @@
int result = -EINVAL;
if (param_in->client >= IPA_CLIENT_MAX) {
- IPAERR("bad parm client:%d\n", param_in->client);
+ IPAERR_RL("bad parm client:%d\n", param_in->client);
goto fail;
}
ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
if (ipa_ep_idx == -1) {
- IPAERR("Invalid client.\n");
+ IPAERR_RL("Invalid client.\n");
goto fail;
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (!ep->valid) {
- IPAERR("EP not allocated.\n");
+ IPAERR_RL("EP not allocated.\n");
goto fail;
}
@@ -3346,7 +3307,7 @@
ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
if (result)
- IPAERR("qmap_id %d write failed on ep=%d\n",
+ IPAERR_RL("qmap_id %d write failed on ep=%d\n",
meta.qmap_id, ipa_ep_idx);
result = 0;
}
@@ -3767,13 +3728,24 @@
int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
enum ipa_hw_type hw_type)
{
+ if (hw_type >= IPA_HW_v4_0) {
+ ctrl->ipa_clk_rate_turbo = IPA_V4_0_CLK_RATE_TURBO;
+ ctrl->ipa_clk_rate_nominal = IPA_V4_0_CLK_RATE_NOMINAL;
+ ctrl->ipa_clk_rate_svs = IPA_V4_0_CLK_RATE_SVS;
+ } else if (hw_type >= IPA_HW_v3_5) {
+ ctrl->ipa_clk_rate_turbo = IPA_V3_5_CLK_RATE_TURBO;
+ ctrl->ipa_clk_rate_nominal = IPA_V3_5_CLK_RATE_NOMINAL;
+ ctrl->ipa_clk_rate_svs = IPA_V3_5_CLK_RATE_SVS;
+ } else {
+ ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
+ ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
+ ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
+ }
+
ctrl->ipa_init_rt4 = _ipa_init_rt4_v3;
ctrl->ipa_init_rt6 = _ipa_init_rt6_v3;
ctrl->ipa_init_flt4 = _ipa_init_flt4_v3;
ctrl->ipa_init_flt6 = _ipa_init_flt6_v3;
- ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
- ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
- ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0;
ctrl->ipa3_commit_flt = __ipa_commit_flt_v3;
ctrl->ipa3_commit_rt = __ipa_commit_rt_v3;
@@ -4502,6 +4474,8 @@
api_ctrl->ipa_tear_down_uc_offload_pipes =
ipa3_tear_down_uc_offload_pipes;
api_ctrl->ipa_get_pdev = ipa3_get_pdev;
+ api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa3_ntn_uc_reg_rdyCB;
+ api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa3_ntn_uc_dereg_rdyCB;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 16585a2..b19c71a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -2966,6 +2966,10 @@
kfree(req);
kfree(resp);
return rc;
+ } else if (data == NULL) {
+ kfree(req);
+ kfree(resp);
+ return 0;
}
if (resp->dl_dst_pipe_stats_list_valid) {
@@ -3149,8 +3153,11 @@
int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
{
enum ipa_upstream_type upstream_type;
+ struct wan_ioctl_query_tether_stats tether_stats;
int rc = 0;
+ memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3168,7 +3175,7 @@
} else {
IPAWANERR(" reset modem-backhaul stats\n");
rc = rmnet_ipa3_query_tethering_stats_modem(
- NULL, true);
+ &tether_stats, true);
if (rc) {
IPAWANERR("reset MODEM stats failed\n");
return rc;
diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c
index e55260d..204142b 100644
--- a/drivers/platform/msm/seemp_core/seemp_logk.c
+++ b/drivers/platform/msm/seemp_core/seemp_logk.c
@@ -624,16 +624,17 @@
/* determine legitimacy of report */
if (report->report_valid &&
- report->sequence_number <=
- header->num_incidents &&
(last_sequence_number == 0
|| report->sequence_number >
last_sequence_number)) {
seemp_logk_rtic(report->report_type,
- report->report.incident.actor,
- report->report.incident.asset_id,
- report->report.incident.asset_category,
- report->report.incident.response);
+ report->actor,
+ /* leave this empty until
+ * asset id is provided
+ */
+ "",
+ report->asset_category,
+ report->response);
last_sequence_number = report->sequence_number;
} else {
last_pos = cur_pos - 1;
diff --git a/drivers/platform/msm/seemp_core/seemp_logk.h b/drivers/platform/msm/seemp_core/seemp_logk.h
index 871de0e..eecf4f7 100644
--- a/drivers/platform/msm/seemp_core/seemp_logk.h
+++ b/drivers/platform/msm/seemp_core/seemp_logk.h
@@ -164,39 +164,23 @@
__u64 report_version; /* Version of the EL2 report */
__u64 mp_catalog_version;
/* Version of MP catalogue used for kernel protection */
+ __u64 num_incidents; /* Number of Incidents Observed by EL2 */
__u8 protection_enabled; /* Kernel Assets protected by EL2 */
__u8 pad1;
__u8 pad2;
__u8 pad3;
__u32 pad4;
- __u64 num_incidents; /* Number of Incidents Observed by EL2 */
-};
-
-/* individual report contents */
-union el2_report {
- struct {
- __u8 asset_id[0x20]; /* Asset Identifier */
- __u64 actor;
- /* Actor that caused the Incident. */
- __u8 asset_category; /* Asset Category */
- __u8 response; /* Response From EL2 */
- __u16 pad1;
- __u32 pad2;
- } incident;
- struct {
- __u64 reserved; /* TBD */
- } info;
};
/* individual report */
struct el2_report_data_t {
+ __u64 sequence_number; /* Sequence number of the report */
+ __u64 actor; /* Actor that caused the Incident. */
__u8 report_valid;
/* Flag to indicate whether report instance is valid */
__u8 report_type; /* Report Type */
- __u8 pad1;
- __u8 pad2;
- __u64 sequence_number; /* Sequence number of the report */
- union el2_report report; /* Report Contents */
+ __u8 asset_category; /* Asset Category */
+ __u8 response; /* Response From EL2 */
};
#endif
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 8641a45..486e8c3 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -41,6 +41,7 @@
#define ICL_CHANGE_VOTER "ICL_CHANGE_VOTER"
#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
#define USBIN_I_VOTER "USBIN_I_VOTER"
+#define FCC_CHANGE_VOTER "FCC_CHANGE_VOTER"
struct pl_data {
int pl_mode;
@@ -288,69 +289,11 @@
__ATTR_NULL,
};
-/***********
- * TAPER *
- ************/
-#define MINIMUM_PARALLEL_FCC_UA 500000
-#define PL_TAPER_WORK_DELAY_MS 100
-#define TAPER_RESIDUAL_PCT 75
-static void pl_taper_work(struct work_struct *work)
-{
- struct pl_data *chip = container_of(work, struct pl_data,
- pl_taper_work.work);
- union power_supply_propval pval = {0, };
- int rc;
-
- /* exit immediately if parallel is disabled */
- if (get_effective_result(chip->pl_disable_votable)) {
- pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
- goto done;
- }
-
- pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
- chip->slave_fcc_ua);
- if (chip->slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
- pl_dbg(chip, PR_PARALLEL, "terminating parallel's share lower than 500mA\n");
- vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
- goto done;
- }
-
- rc = power_supply_get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
- if (rc < 0) {
- pr_err("Couldn't get batt charge type rc=%d\n", rc);
- goto done;
- }
-
- chip->charge_type = pval.intval;
- if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
- pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
-
- vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
- /* Reduce the taper percent by 25 percent */
- chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
- rerun_election(chip->fcc_votable);
- pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
- PL_TAPER_WORK_DELAY_MS);
- schedule_delayed_work(&chip->pl_taper_work,
- msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
- return;
- }
-
- /*
- * Master back to Fast Charge, get out of this round of taper reduction
- */
- pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
-
-done:
- vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
-}
-
/*********
* FCC *
**********/
#define EFFICIENCY_PCT 80
-static void split_fcc(struct pl_data *chip, int total_ua,
+static void get_fcc_split(struct pl_data *chip, int total_ua,
int *master_ua, int *slave_ua)
{
int rc, effective_total_ua, slave_limited_ua, hw_cc_delta_ua = 0,
@@ -389,7 +332,7 @@
effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
slave_limited_ua = min(effective_total_ua, bcl_ua);
*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
- *slave_ua = (*slave_ua * chip->taper_pct) / 100;
+
/*
* In USBIN_USBIN configuration with internal rsense parallel
* charger's current goes through main charger's BATFET, keep
@@ -399,14 +342,75 @@
*master_ua = max(0, total_ua);
else
*master_ua = max(0, total_ua - *slave_ua);
+
+ /* further reduce slave's share in accordance with taper reductions */
+ *slave_ua = (*slave_ua * chip->taper_pct) / 100;
+}
+
+#define MINIMUM_PARALLEL_FCC_UA 500000
+#define PL_TAPER_WORK_DELAY_MS 100
+#define TAPER_RESIDUAL_PCT 90
+static void pl_taper_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work, struct pl_data,
+ pl_taper_work.work);
+ union power_supply_propval pval = {0, };
+ int total_fcc_ua, master_fcc_ua, slave_fcc_ua;
+ int rc;
+
+ /* exit immediately if parallel is disabled */
+ if (get_effective_result(chip->pl_disable_votable)) {
+ pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
+ goto done;
+ }
+
+ total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
+ get_fcc_split(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
+ if (slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
+ pl_dbg(chip, PR_PARALLEL, "terminating parallel's share lower than 500mA\n");
+ vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
+ goto done;
+ }
+
+ pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
+ slave_fcc_ua);
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get batt charge type rc=%d\n", rc);
+ goto done;
+ }
+
+ chip->charge_type = pval.intval;
+ if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+ pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
+
+ vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
+ /* Reduce the taper percent by 10 percent */
+ chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
+ rerun_election(chip->fcc_votable);
+ pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
+ PL_TAPER_WORK_DELAY_MS);
+ schedule_delayed_work(&chip->pl_taper_work,
+ msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
+ return;
+ }
+
+ /*
+ * Master back to Fast Charge, get out of this round of taper reduction
+ */
+ pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
+
+done:
+ vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
}
static int pl_fcc_vote_callback(struct votable *votable, void *data,
int total_fcc_ua, const char *client)
{
struct pl_data *chip = data;
- union power_supply_propval pval = {0, };
- int rc, master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
+ int master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
if (total_fcc_ua < 0)
return 0;
@@ -414,41 +418,23 @@
if (!chip->main_psy)
return 0;
- if (chip->pl_mode == POWER_SUPPLY_PL_NONE
- || get_effective_result_locked(chip->pl_disable_votable)) {
- pval.intval = total_fcc_ua;
- rc = power_supply_set_property(chip->main_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
- &pval);
- if (rc < 0)
- pr_err("Couldn't set main fcc, rc=%d\n", rc);
- return rc;
- }
-
if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
- split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
+ get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+ &slave_fcc_ua);
- pval.intval = slave_fcc_ua;
- rc = power_supply_set_property(chip->pl_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
- &pval);
- if (rc < 0) {
- pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
- return rc;
- }
-
- chip->slave_fcc_ua = slave_fcc_ua;
-
- pval.intval = master_fcc_ua;
- rc = power_supply_set_property(chip->main_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
- &pval);
- if (rc < 0) {
- pr_err("Could not set main fcc, rc=%d\n", rc);
- return rc;
+ if (slave_fcc_ua > 500000) {
+ chip->slave_fcc_ua = slave_fcc_ua;
+ vote(chip->pl_disable_votable, FCC_CHANGE_VOTER,
+ false, 0);
+ } else {
+ chip->slave_fcc_ua = 0;
+ vote(chip->pl_disable_votable, FCC_CHANGE_VOTER,
+ true, 0);
}
}
+ rerun_election(chip->pl_disable_votable);
+
pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
master_fcc_ua, slave_fcc_ua,
(master_fcc_ua * 100) / total_fcc_ua,
@@ -577,18 +563,34 @@
vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
}
+static bool is_main_available(struct pl_data *chip)
+{
+ if (chip->main_psy)
+ return true;
+
+ chip->main_psy = power_supply_get_by_name("main");
+
+ return !!chip->main_psy;
+}
+
static int pl_disable_vote_callback(struct votable *votable,
void *data, int pl_disable, const char *client)
{
struct pl_data *chip = data;
union power_supply_propval pval = {0, };
+ int master_fcc_ua, total_fcc_ua, slave_fcc_ua;
int rc;
- chip->taper_pct = 100;
chip->total_settled_ua = 0;
chip->pl_settled_ua = 0;
- if (!pl_disable) { /* enable */
+ if (!is_main_available(chip))
+ return -ENODEV;
+
+ total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
+
+ if (chip->pl_mode != POWER_SUPPLY_PL_NONE && !pl_disable) {
+ /* enable parallel charging */
rc = power_supply_get_property(chip->pl_psy,
POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
if (rc == -ENODEV) {
@@ -602,7 +604,30 @@
}
rerun_election(chip->fv_votable);
- rerun_election(chip->fcc_votable);
+
+ get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+ &slave_fcc_ua);
+
+ chip->slave_fcc_ua = slave_fcc_ua;
+
+ pval.intval = master_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n", rc);
+ return rc;
+ }
+
+ pval.intval = slave_fcc_ua;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
+ return rc;
+ }
+
/*
* Enable will be called with a valid pl_psy always. The
* PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
@@ -647,7 +672,17 @@
pr_err("Couldn't change slave suspend state rc=%d\n",
rc);
}
- rerun_election(chip->fcc_votable);
+
+ /* main psy gets all share */
+ pval.intval = total_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n", rc);
+ return rc;
+ }
+
rerun_election(chip->fv_votable);
}
@@ -681,16 +716,6 @@
return 0;
}
-static bool is_main_available(struct pl_data *chip)
-{
- if (chip->main_psy)
- return true;
-
- chip->main_psy = power_supply_get_by_name("main");
-
- return !!chip->main_psy;
-}
-
static bool is_batt_available(struct pl_data *chip)
{
if (!chip->batt_psy)
@@ -835,6 +860,7 @@
else
vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
+ rerun_election(chip->fcc_votable);
if (get_effective_result(chip->pl_disable_votable))
return;
@@ -856,8 +882,6 @@
if (abs(new_total_settled_ua - chip->total_settled_ua)
> MIN_ICL_CHANGE_DELTA_UA)
split_settled(chip);
- } else {
- rerun_election(chip->fcc_votable);
}
}
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index cdd09dd..c77b808 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -67,6 +67,7 @@
#define MAX_LINE_LENGTH (ADDR_LEN + (ITEMS_PER_LINE * \
CHARS_PER_ITEM) + 1) \
+#define NUM_PARTITIONS 3
#define FG_SRAM_ADDRESS_MAX 255
#define FG_SRAM_LEN 504
#define PROFILE_LEN 224
@@ -192,6 +193,18 @@
int val);
};
+struct fg_dma_address {
+ /* Starting word address of the partition */
+ u16 partition_start;
+ /* Last word address of the partition */
+ u16 partition_end;
+ /*
+ * Byte offset in the FG_DMA peripheral that maps to the partition_start
+ * in SRAM
+ */
+ u16 spmi_addr_base;
+};
+
enum fg_alg_flag_id {
ALG_FLAG_SOC_LT_OTG_MIN = 0,
ALG_FLAG_SOC_LT_RECHARGE,
@@ -360,12 +373,12 @@
struct power_supply *parallel_psy;
struct iio_channel *batt_id_chan;
struct iio_channel *die_temp_chan;
- struct fg_memif *sram;
struct fg_irq_info *irqs;
struct votable *awake_votable;
struct votable *delta_bsoc_irq_en_votable;
struct votable *batt_miss_irq_en_votable;
struct fg_sram_param *sp;
+ struct fg_dma_address *addr_map;
struct fg_alg_flag *alg_flags;
int *debug_mask;
char batt_profile[PROFILE_LEN];
@@ -409,8 +422,10 @@
bool esr_flt_cold_temp_en;
bool slope_limit_en;
bool use_ima_single_mode;
+ bool use_dma;
struct completion soc_update;
struct completion soc_ready;
+ struct completion mem_grant;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
struct work_struct cycle_count_work;
@@ -459,10 +474,15 @@
u8 offset, u8 *val, int len);
extern int fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
u8 offset, u8 *val, int len, bool atomic_access);
+extern int fg_direct_mem_read(struct fg_chip *chip, u16 address,
+ u8 offset, u8 *val, int len);
+extern int fg_direct_mem_write(struct fg_chip *chip, u16 address,
+ u8 offset, u8 *val, int len, bool atomic_access);
extern int fg_read(struct fg_chip *chip, int addr, u8 *val, int len);
extern int fg_write(struct fg_chip *chip, int addr, u8 *val, int len);
extern int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val);
extern int fg_ima_init(struct fg_chip *chip);
+extern int fg_dma_init(struct fg_chip *chip);
extern int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts);
extern int fg_clear_dma_errors_if_any(struct fg_chip *chip);
extern int fg_debugfs_create(struct fg_chip *chip);
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index 8a949bf..0abc9df 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -746,6 +746,257 @@
return rc;
}
+#define MEM_GRANT_WAIT_MS 200
+static int fg_direct_mem_request(struct fg_chip *chip, bool request)
+{
+ int rc, ret;
+ u8 val, mask;
+ bool tried_again = false;
+
+ if (request)
+ reinit_completion(&chip->mem_grant);
+
+ mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
+ val = request ? MEM_ACCESS_REQ_BIT : 0;
+ rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), mask, val);
+ if (rc < 0) {
+ pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT;
+ val = request ? mask : 0;
+ rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask, val);
+ if (rc < 0) {
+ pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n", rc);
+ return rc;
+ }
+
+ if (request)
+ pr_debug("requesting access\n");
+ else
+ pr_debug("releasing access\n");
+
+ if (!request)
+ return 0;
+
+wait:
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->mem_grant, msecs_to_jiffies(MEM_GRANT_WAIT_MS));
+ /* If we were interrupted wait again one more time. */
+ if (ret <= 0) {
+ if ((ret == -ERESTARTSYS || ret == 0) && !tried_again) {
+ pr_debug("trying again, ret=%d\n", ret);
+ tried_again = true;
+ goto wait;
+ } else {
+ pr_err("wait for mem_grant timed out ret=%d\n",
+ ret);
+ }
+ }
+
+ if (ret <= 0) {
+ val = 0;
+ mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
+ rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), mask,
+ val);
+ if (rc < 0) {
+ pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT;
+ rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask,
+ val);
+ if (rc < 0) {
+ pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n",
+ rc);
+ return rc;
+ }
+
+ return -ETIMEDOUT;
+ }
+
+ return rc;
+}
+
+static int fg_get_dma_address(struct fg_chip *chip, u16 sram_addr, u8 offset,
+ u16 *addr)
+{
+ int i;
+ u16 start_sram_addr, end_sram_addr;
+
+ for (i = 0; i < NUM_PARTITIONS; i++) {
+ start_sram_addr = chip->addr_map[i].partition_start;
+ end_sram_addr = chip->addr_map[i].partition_end;
+ if (sram_addr >= start_sram_addr &&
+ sram_addr <= end_sram_addr) {
+ *addr = chip->addr_map[i].spmi_addr_base + offset +
+ (sram_addr - start_sram_addr) *
+ BYTES_PER_SRAM_WORD;
+ return 0;
+ }
+ }
+
+ pr_err("Couldn't find address for %d from address map\n", sram_addr);
+ return -ENXIO;
+}
+
+static int fg_get_partition_count(struct fg_chip *chip, u16 sram_addr, int len,
+				int *count)
+{
+	int i, num = 0;
+	u16 end_addr, last_addr = 0;
+
+	end_addr = sram_addr + len / BYTES_PER_SRAM_WORD;
+	if (!(len % BYTES_PER_SRAM_WORD))
+		end_addr -= 1;
+
+	if (sram_addr == end_addr) {
+		*count = 1;
+		return 0;
+	}
+
+	for (i = 0; i < NUM_PARTITIONS; i++) {
+		pr_debug("address: %d last_addr: %d\n", sram_addr, last_addr);
+		if (sram_addr >= chip->addr_map[i].partition_start
+			&& sram_addr <= chip->addr_map[i].partition_end
+			&& last_addr < end_addr) {
+			num++;
+			last_addr = chip->addr_map[i].partition_end;
+			sram_addr = i < NUM_PARTITIONS - 1 ? chip->addr_map[i + 1].partition_start : end_addr;
+		}
+	}
+
+	if (num > 0) {
+		*count = num;
+		return 0;
+	}
+
+	pr_err("Couldn't find number of partitions for address %d\n",
+		sram_addr);
+	return -ENXIO;
+}
+
+static int fg_get_partition_avail_bytes(struct fg_chip *chip, u16 sram_addr,
+ int len, int *rem_len)
+{
+ int i, part_len = 0, temp;
+ u16 end_addr;
+
+ for (i = 0; i < NUM_PARTITIONS; i++) {
+ if (sram_addr >= chip->addr_map[i].partition_start
+ && sram_addr <= chip->addr_map[i].partition_end) {
+ part_len = (chip->addr_map[i].partition_end -
+ chip->addr_map[i].partition_start + 1);
+ part_len *= BYTES_PER_SRAM_WORD;
+ end_addr = chip->addr_map[i].partition_end;
+ break;
+ }
+ }
+
+ if (part_len <= 0) {
+ pr_err("Bad address? total_len=%d\n", part_len);
+ return -ENXIO;
+ }
+
+ temp = (end_addr - sram_addr + 1) * BYTES_PER_SRAM_WORD;
+ if (temp > part_len || !temp) {
+ pr_err("Bad length=%d\n", temp);
+ return -ENXIO;
+ }
+
+ *rem_len = temp;
+ pr_debug("address %d len %d rem_len %d\n", sram_addr, len, *rem_len);
+ return 0;
+}
+
+static int __fg_direct_mem_rw(struct fg_chip *chip, u16 sram_addr, u8 offset,
+				u8 *val, int len, bool access)
+{
+	int rc, ret, num_partitions, num_bytes = 0;
+	u16 addr;
+	u8 *ptr = val;
+	char *temp_str;
+
+	if (offset > 3) {
+		pr_err("offset too large %d\n", offset);
+		return -EINVAL;
+	}
+
+	rc = fg_get_partition_count(chip, sram_addr, len, &num_partitions);
+	if (rc < 0)
+		return rc;
+
+	pr_debug("number of partitions: %d\n", num_partitions);
+
+	rc = fg_direct_mem_request(chip, true);
+	if (rc < 0) {
+		pr_err("Error in requesting direct_mem access rc=%d\n", rc);
+		return rc;
+	}
+
+	while (num_partitions-- && len) {
+		rc = fg_get_dma_address(chip, sram_addr, offset, &addr);
+		if (rc < 0) {
+			pr_err("Incorrect address %d/offset %d\n", sram_addr,
+				offset);
+			break;
+		}
+
+		rc = fg_get_partition_avail_bytes(chip, sram_addr + offset, len,
+			&num_bytes);
+		if (rc < 0)
+			break;
+
+		if (num_bytes > len)
+			num_bytes = len;
+
+		pr_debug("accessing address: [%d %d] dma_address = %x\n",
+			sram_addr, offset, addr);
+
+		if (access == FG_READ) {
+			rc = fg_read(chip, addr, ptr, num_bytes);
+			temp_str = "read";
+		} else {
+			rc = fg_write(chip, addr, ptr, num_bytes);
+			temp_str = "write";
+		}
+
+		if (rc < 0) {
+			pr_err("Error in %sing address %d rc=%d\n", temp_str,
+				sram_addr, rc);
+			break;
+		}
+
+		ptr += num_bytes;
+		len -= num_bytes;
+		sram_addr += (num_bytes / BYTES_PER_SRAM_WORD);
+		offset = 0;
+	}
+
+	ret = fg_direct_mem_request(chip, false);
+	if (ret < 0) {
+		pr_err("Error in releasing direct_mem access rc=%d\n", ret);
+		return ret;
+	}
+
+	return rc;
+}
+
+int fg_direct_mem_read(struct fg_chip *chip, u16 sram_addr, u8 offset,
+ u8 *val, int len)
+{
+ return __fg_direct_mem_rw(chip, sram_addr, offset, val, len, FG_READ);
+}
+
+int fg_direct_mem_write(struct fg_chip *chip, u16 sram_addr, u8 offset,
+ u8 *val, int len, bool atomic_access)
+{
+ return __fg_direct_mem_rw(chip, sram_addr, offset, val, len, FG_WRITE);
+}
+
int fg_ima_init(struct fg_chip *chip)
{
int rc;
@@ -778,3 +1029,59 @@
return 0;
}
+
+/*
+ * This SRAM partition to DMA address partition mapping remains identical for
+ * PMICs that use GEN3 FG.
+ */
+static struct fg_dma_address fg_gen3_addr_map[NUM_PARTITIONS] = {
+ /* system partition */
+ {
+ .partition_start = 0,
+ .partition_end = 23,
+ .spmi_addr_base = FG_DMA0_BASE + SRAM_ADDR_OFFSET,
+ },
+ /* battery profile partition */
+ {
+ .partition_start = 24,
+ .partition_end = 79,
+ .spmi_addr_base = FG_DMA1_BASE + SRAM_ADDR_OFFSET,
+ },
+ /* scratch pad partition */
+ {
+ .partition_start = 80,
+ .partition_end = 125,
+ .spmi_addr_base = FG_DMA2_BASE + SRAM_ADDR_OFFSET,
+ },
+};
+int fg_dma_init(struct fg_chip *chip)
+{
+ int rc;
+
+ chip->addr_map = fg_gen3_addr_map;
+
+ /* Clear DMA errors if any before clearing IMA errors */
+ rc = fg_clear_dma_errors_if_any(chip);
+ if (rc < 0) {
+ pr_err("Error in checking DMA errors rc:%d\n", rc);
+ return rc;
+ }
+
+ /* Configure the DMA peripheral addressing to partition */
+ rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip), ADDR_KIND_BIT,
+ ADDR_KIND_BIT);
+ if (rc < 0) {
+ pr_err("failed to configure DMA_CTL rc:%d\n", rc);
+ return rc;
+ }
+
+ /* Release the DMA initially so that request can happen */
+ rc = fg_direct_mem_request(chip, false);
+ if (rc < 0) {
+ pr_err("Error in releasing direct_mem access rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h
index cd0b2fb..8ca4376 100644
--- a/drivers/power/supply/qcom/fg-reg.h
+++ b/drivers/power/supply/qcom/fg-reg.h
@@ -31,6 +31,7 @@
#define BATT_SOC_LOW_PWR_STS(chip) (chip->batt_soc_base + 0x56)
/* BATT_SOC_INT_RT_STS */
+#define SOC_READY_BIT BIT(1)
#define MSOC_EMPTY_BIT BIT(5)
/* BATT_SOC_EN_CTL */
@@ -266,6 +267,7 @@
/* FG_MEM_IF register and bit definitions */
#define MEM_IF_INT_RT_STS(chip) ((chip->mem_if_base) + 0x10)
+#define MEM_IF_MEM_ARB_CFG(chip) ((chip->mem_if_base) + 0x40)
#define MEM_IF_MEM_INTF_CFG(chip) ((chip->mem_if_base) + 0x50)
#define MEM_IF_IMA_CTL(chip) ((chip->mem_if_base) + 0x51)
#define MEM_IF_IMA_CFG(chip) ((chip->mem_if_base) + 0x52)
@@ -286,6 +288,11 @@
/* MEM_IF_INT_RT_STS */
#define MEM_XCP_BIT BIT(1)
+#define MEM_GNT_BIT BIT(2)
+
+/* MEM_IF_MEM_ARB_CFG */
+#define MEM_ARB_LO_LATENCY_EN_BIT BIT(1)
+#define MEM_ARB_REQ_BIT BIT(0)
/* MEM_IF_MEM_INTF_CFG */
#define MEM_ACCESS_REQ_BIT BIT(7)
@@ -325,5 +332,13 @@
#define DMA_READ_ERROR_BIT BIT(2)
/* MEM_IF_DMA_CTL */
+#define ADDR_KIND_BIT BIT(1)
#define DMA_CLEAR_LOG_BIT BIT(0)
+
+/* FG_DMAx */
+#define FG_DMA0_BASE 0x4800
+#define FG_DMA1_BASE 0x4900
+#define FG_DMA2_BASE 0x4A00
+#define FG_DMA3_BASE 0x4B00
+#define SRAM_ADDR_OFFSET 0x20
#endif
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index 38d9594..d9ca47c 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -255,8 +255,6 @@
reinit_completion(&chip->soc_update);
enable_irq(chip->irqs[SOC_UPDATE_IRQ].irq);
atomic_access = true;
- } else {
- flags = FG_IMA_DEFAULT;
}
wait:
/*
@@ -282,11 +280,17 @@
}
}
- rc = fg_interleaved_mem_write(chip, address, offset, val, len,
- atomic_access);
+ if (chip->use_dma)
+ rc = fg_direct_mem_write(chip, address, offset, val, len,
+ false);
+ else
+ rc = fg_interleaved_mem_write(chip, address, offset, val, len,
+ atomic_access);
+
if (rc < 0)
pr_err("Error in writing SRAM address 0x%x[%d], rc=%d\n",
address, offset, rc);
+
out:
if (atomic_access)
disable_irq_nosync(chip->irqs[SOC_UPDATE_IRQ].irq);
@@ -313,9 +317,14 @@
if (!(flags & FG_IMA_NO_WLOCK))
vote(chip->awake_votable, SRAM_READ, true, 0);
+
mutex_lock(&chip->sram_rw_lock);
- rc = fg_interleaved_mem_read(chip, address, offset, val, len);
+ if (chip->use_dma)
+ rc = fg_direct_mem_read(chip, address, offset, val, len);
+ else
+ rc = fg_interleaved_mem_read(chip, address, offset, val, len);
+
if (rc < 0)
pr_err("Error in reading SRAM address 0x%x[%d], rc=%d\n",
address, offset, rc);
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index e5a3a07..42a16d6 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -533,7 +533,7 @@
rc = fg_sram_read(chip, chip->sp[id].addr_word, chip->sp[id].addr_byte,
buf, chip->sp[id].len, FG_IMA_DEFAULT);
if (rc < 0) {
- pr_err("Error reading address 0x%04x[%d] rc=%d\n",
+ pr_err("Error reading address %d[%d] rc=%d\n",
chip->sp[id].addr_word, chip->sp[id].addr_byte, rc);
return rc;
}
@@ -3503,6 +3503,9 @@
static int fg_memif_init(struct fg_chip *chip)
{
+ if (chip->use_dma)
+ return fg_dma_init(chip);
+
return fg_ima_init(chip);
}
@@ -3542,6 +3545,26 @@
/* INTERRUPT HANDLERS STAY HERE */
+static irqreturn_t fg_dma_grant_irq_handler(int irq, void *data)
+{
+ struct fg_chip *chip = data;
+ u8 status;
+ int rc;
+
+ rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &status, 1);
+ if (rc < 0) {
+ pr_err("failed to read addr=0x%04x, rc=%d\n",
+ MEM_IF_INT_RT_STS(chip), rc);
+ return IRQ_HANDLED;
+ }
+
+ fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
+ if (status & MEM_GNT_BIT)
+ complete_all(&chip->mem_grant);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
@@ -3822,7 +3845,8 @@
/* MEM_IF irqs */
[DMA_GRANT_IRQ] = {
.name = "dma-grant",
- .handler = fg_dummy_irq_handler,
+ .handler = fg_dma_grant_irq_handler,
+ .wakeable = true,
},
[MEM_XCP_IRQ] = {
.name = "mem-xcp",
@@ -4046,6 +4070,7 @@
switch (chip->pmic_rev_id->pmic_subtype) {
case PMI8998_SUBTYPE:
+ chip->use_dma = true;
if (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4) {
chip->sp = pmi8998_v1_sram_params;
chip->alg_flags = pmi8998_v1_alg_flags;
@@ -4466,6 +4491,7 @@
mutex_init(&chip->charge_full_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
+ init_completion(&chip->mem_grant);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
INIT_WORK(&chip->status_change_work, status_change_work);
INIT_WORK(&chip->cycle_count_work, cycle_count_work);
@@ -4479,6 +4505,25 @@
goto exit;
}
+ platform_set_drvdata(pdev, chip);
+
+ rc = fg_register_interrupts(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
+ rc);
+ goto exit;
+ }
+
+ /* Keep SOC_UPDATE irq disabled until we require it */
+ if (fg_irqs[SOC_UPDATE_IRQ].irq)
+ disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
+
+ /* Keep BSOC_DELTA_IRQ disabled until we require it */
+ vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0);
+
+ /* Keep BATT_MISSING_IRQ disabled until we require it */
+ vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0);
+
rc = fg_hw_init(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in initializing FG hardware, rc:%d\n",
@@ -4486,8 +4531,6 @@
goto exit;
}
- platform_set_drvdata(pdev, chip);
-
/* Register the power supply */
fg_psy_cfg.drv_data = chip;
fg_psy_cfg.of_node = NULL;
@@ -4508,23 +4551,6 @@
goto exit;
}
- rc = fg_register_interrupts(chip);
- if (rc < 0) {
- dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
- rc);
- goto exit;
- }
-
- /* Keep SOC_UPDATE_IRQ disabled until we require it */
- if (fg_irqs[SOC_UPDATE_IRQ].irq)
- disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
-
- /* Keep BSOC_DELTA_IRQ disabled until we require it */
- vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0);
-
- /* Keep BATT_MISSING_IRQ disabled until we require it */
- vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0);
-
rc = fg_debugfs_create(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n",
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index ae44f01..3f26e5e 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -3705,6 +3705,7 @@
/* reset parallel voters */
vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+ vote(chg->pl_disable_votable, FCC_CHANGE_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 5c7819e..c08d404 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -66,6 +66,7 @@
#define USBIN_I_VOTER "USBIN_I_VOTER"
#define WEAK_CHARGER_VOTER "WEAK_CHARGER_VOTER"
#define OTG_VOTER "OTG_VOTER"
+#define FCC_CHANGE_VOTER "FCC_CHANGE_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index b2c0059..4e1bb17 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -62,6 +62,10 @@
#define CHGR_BATTOV_CFG_REG (CHGR_BASE + 0x70)
#define BATTOV_SETTING_MASK GENMASK(7, 0)
+#define POWER_MODE_HICCUP_CFG (BATIF_BASE + 0x72)
+#define MAX_HICCUP_DUETO_BATDIS_MASK GENMASK(5, 2)
+#define HICCUP_TIMEOUT_CFG_MASK GENMASK(1, 0)
+
#define TEMP_COMP_STATUS_REG (MISC_BASE + 0x07)
#define SKIN_TEMP_RST_HOT_BIT BIT(6)
#define SKIN_TEMP_UB_HOT_BIT BIT(5)
@@ -580,6 +584,16 @@
return rc;
}
+ /* HICCUP setting, unlimited retry with 250ms interval */
+ rc = smb1355_masked_write(chip, POWER_MODE_HICCUP_CFG,
+ HICCUP_TIMEOUT_CFG_MASK | MAX_HICCUP_DUETO_BATDIS_MASK,
+ 0);
+ if (rc < 0) {
+		pr_err("Couldn't configure power mode hiccup rc=%d\n",
+ rc);
+ return rc;
+ }
+
/* enable parallel current sensing */
rc = smb1355_masked_write(chip, CFG_REG,
VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a2b5ea0..c78bb9e 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1487,7 +1487,7 @@
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- queue_work(hba->clk_gating.ungating_workq,
+ queue_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
@@ -1755,7 +1755,8 @@
struct ufs_hba *hba = container_of(timer, struct ufs_hba,
clk_gating.gate_hrtimer);
- schedule_work(&hba->clk_gating.gate_work);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.gate_work);
return HRTIMER_NORESTART;
}
@@ -1763,7 +1764,7 @@
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
struct ufs_clk_gating *gating = &hba->clk_gating;
- char wq_name[sizeof("ufs_clk_ungating_00")];
+ char wq_name[sizeof("ufs_clk_gating_00")];
hba->clk_gating.state = CLKS_ON;
@@ -1792,9 +1793,10 @@
hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_ungating_%d",
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
hba->host->host_no);
- hba->clk_gating.ungating_workq = create_singlethread_workqueue(wq_name);
+ hba->clk_gating.clk_gating_workq =
+ create_singlethread_workqueue(wq_name);
gating->is_enabled = true;
@@ -1858,7 +1860,7 @@
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
ufshcd_cancel_gate_work(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
- destroy_workqueue(hba->clk_gating.ungating_workq);
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
@@ -7833,6 +7835,11 @@
ufshcd_set_auto_hibern8_timer(hba,
hba->hibern8_on_idle.delay_ms);
out:
+ if (ret) {
+ ufshcd_set_ufs_dev_poweroff(hba);
+ ufshcd_set_link_off(hba);
+ }
+
/*
* If we failed to initialize the device or the device is not
* present, turn off the power/clocks etc.
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 343f327..eaed1b3 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -446,7 +446,7 @@
struct device_attribute enable_attr;
bool is_enabled;
int active_reqs;
- struct workqueue_struct *ungating_workq;
+ struct workqueue_struct *clk_gating_workq;
};
/* Hibern8 state */
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index e6c2aa3..5be06ba 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -632,6 +632,14 @@
between MSM and WCD DSP over glink transport protocol. This driver
provides read and write interface via char device.
+config QCOM_SMCINVOKE
+ bool "Secure QSEE Support"
+ help
+ Enable SMCInvoke driver which supports capability based secure
+ communication between QTI Secure Execution Environment (QSEE)
+ and high level operating system. It exposes APIs for both
+ userspace and kernel clients.
+
config MSM_EVENT_TIMER
bool "Event timer"
help
@@ -735,4 +743,12 @@
kernel panic. On certain MSM SoCs, this provides us
additional debugging information.
+config QMP_DEBUGFS_CLIENT
+ bool "Debugfs Client to communicate with AOP using QMP protocol"
+ depends on DEBUG_FS
+ default n
+ help
+	  This option enables a driver which allows clients to send messages
+	  to the Always On processor using the QMP transport.
+
source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 64fb7a0..9736cdc 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -77,3 +77,5 @@
obj-$(CONFIG_APSS_CORE_EA) += msm-core.o debug_core.o
obj-$(CONFIG_QCOM_DCC_V2) += dcc_v2.o
obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_stats.o
+obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
+obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index d31bf8d..1d605e3 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -30,7 +30,6 @@
#include "glink_private.h"
#include "glink_xprt_if.h"
-#define GLINK_CTX_CANARY 0x58544324 /* "$CTX" */
/* Number of internal IPC Logging log pages */
#define NUM_LOG_PAGES 10
#define GLINK_PM_QOS_HOLDOFF_MS 10
@@ -40,7 +39,6 @@
#define GLINK_KTHREAD_PRIO 1
-static rwlock_t magic_lock;
/**
* struct glink_qos_priority_bin - Packet Scheduler's priority bucket
* @max_rate_kBps: Maximum rate supported by the priority bucket.
@@ -232,6 +230,8 @@
* @req_rate_kBps: Current QoS request by the channel.
* @tx_intent_cnt: Intent count to transmit soon in future.
* @tx_cnt: Packets to be picked by tx scheduler.
+ * @rt_vote_on: Number of times RT vote on is called.
+ * @rt_vote_off: Number of times RT vote off is called.
*/
struct channel_ctx {
struct rwref_lock ch_state_lhb2;
@@ -312,7 +312,9 @@
unsigned long req_rate_kBps;
uint32_t tx_intent_cnt;
uint32_t tx_cnt;
- uint32_t magic_number;
+
+ uint32_t rt_vote_on;
+ uint32_t rt_vote_off;
};
static struct glink_core_if core_impl;
@@ -443,33 +445,15 @@
static int glink_get_ch_ctx(struct channel_ctx *ctx)
{
- unsigned long flags;
-
if (!ctx)
return -EINVAL;
- read_lock_irqsave(&magic_lock, flags);
- if (ctx->magic_number != GLINK_CTX_CANARY) {
- read_unlock_irqrestore(&magic_lock, flags);
- return -EINVAL;
- }
rwref_get(&ctx->ch_state_lhb2);
- read_unlock_irqrestore(&magic_lock, flags);
return 0;
}
-static int glink_put_ch_ctx(struct channel_ctx *ctx, bool update_magic)
+static void glink_put_ch_ctx(struct channel_ctx *ctx)
{
- unsigned long flags;
-
- if (!update_magic) {
- rwref_put(&ctx->ch_state_lhb2);
- return 0;
- }
- write_lock_irqsave(&magic_lock, flags);
- ctx->magic_number = 0;
rwref_put(&ctx->ch_state_lhb2);
- write_unlock_irqrestore(&magic_lock, flags);
- return 0;
}
/**
@@ -1931,13 +1915,13 @@
}
ctx->transport_ptr = xprt_ctx;
+ rwref_get(&ctx->ch_state_lhb2);
list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
"%s: local:GLINK_CHANNEL_CLOSED\n",
__func__);
}
- rwref_get(&ctx->ch_state_lhb2);
spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
rwref_write_put(&xprt_ctx->xprt_state_lhb0);
mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb4);
@@ -2419,6 +2403,25 @@
}
/**
+ * dummy_rx_rt_vote() - Dummy RX Realtime thread vote
+ * @if_ptr: The transport to transmit on.
+
+ */
+static int dummy_rx_rt_vote(struct glink_transport_if *if_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_rx_rt_unvote() - Dummy RX Realtime thread unvote
+ * @if_ptr: The transport to transmit on.
+ */
+static int dummy_rx_rt_unvote(struct glink_transport_if *if_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* notif_if_up_all_xprts() - Check and notify existing transport state if up
* @notif_info: Data structure containing transport information to be notified.
*
@@ -2600,7 +2603,6 @@
ctx->notify_tx_abort = cfg->notify_tx_abort;
ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
- ctx->magic_number = GLINK_CTX_CANARY;
if (!ctx->notify_rx_intent_req)
ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
@@ -2742,13 +2744,13 @@
GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
if (ctx->local_open_state == GLINK_CHANNEL_CLOSED) {
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return 0;
}
if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
/* close already pending */
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
@@ -2813,7 +2815,7 @@
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&xprt_ctx->xprt_state_lhb0);
- glink_put_ch_ctx(ctx, true);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_close);
@@ -3029,13 +3031,13 @@
xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
rwref_read_put(&ctx->ch_state_lhb2);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
glink_tx_common_err:
rwref_read_put(&ctx->ch_state_lhb2);
glink_tx_common_err_2:
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
kfree(tx_info);
return ret;
}
@@ -3085,7 +3087,7 @@
/* Can only queue rx intents if channel is fully opened */
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
@@ -3094,14 +3096,14 @@
GLINK_ERR_CH(ctx,
"%s: Intent pointer allocation failed size[%zu]\n",
__func__, size);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -ENOMEM;
}
GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
intent_ptr->intent_size);
if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
@@ -3111,7 +3113,7 @@
if (ret)
/* unable to transmit, dequeue intent */
ch_remove_local_rx_intent(ctx, intent_ptr->id);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_queue_rx_intent);
@@ -3143,12 +3145,12 @@
if (size <= intent->intent_size) {
spin_unlock_irqrestore(
&ctx->local_rx_intent_lst_lock_lhc1, flags);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return true;
}
}
spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return false;
}
EXPORT_SYMBOL(glink_rx_intent_exists);
@@ -3177,7 +3179,7 @@
if (IS_ERR_OR_NULL(liid_ptr)) {
/* invalid pointer */
GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EINVAL;
}
@@ -3203,7 +3205,7 @@
/* send rx done */
ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
ctx->lcid, id, reuse);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_rx_done);
@@ -3257,7 +3259,7 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
@@ -3267,7 +3269,7 @@
ctx->lcid, ctx->lsigs);
GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_sigs_set);
@@ -3293,12 +3295,12 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
*sigs = ctx->lsigs;
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return 0;
}
EXPORT_SYMBOL(glink_sigs_local_get);
@@ -3325,12 +3327,12 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
*sigs = ctx->rsigs;
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return 0;
}
EXPORT_SYMBOL(glink_sigs_remote_get);
@@ -3434,7 +3436,7 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
@@ -3444,7 +3446,7 @@
if (ret < 0)
GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
__func__, latency_us, pkt_size);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_qos_latency);
@@ -3468,12 +3470,12 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
ret = glink_qos_reset_priority(ctx);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_qos_cancel);
@@ -3500,7 +3502,7 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
@@ -3509,7 +3511,7 @@
ret = glink_qos_add_ch_tx_intent(ctx);
spin_unlock(&ctx->tx_lists_lock_lhc3);
spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_qos_start);
@@ -3537,11 +3539,11 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return (unsigned long)-EBUSY;
}
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ctx->transport_ptr->ops->get_power_vote_ramp_time(
ctx->transport_ptr->ops,
glink_prio_to_power_state(ctx->transport_ptr,
@@ -3549,6 +3551,61 @@
}
EXPORT_SYMBOL(glink_qos_get_ramp_time);
+
+/**
+ * glink_start_rx_rt() - Vote for RT thread priority on RX.
+ * @handle: Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_start_rx_rt(void *handle)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
+
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ glink_put_ch_ctx(ctx);
+ return -EBUSY;
+ }
+ ret = ctx->transport_ptr->ops->rx_rt_vote(ctx->transport_ptr->ops);
+ ctx->rt_vote_on++;
+ GLINK_INFO_CH(ctx, "%s: Voting RX Realtime Thread %d", __func__, ret);
+ glink_put_ch_ctx(ctx);
+ return ret;
+}
+
+/**
+ * glink_end_rx_rt() - Vote for RT thread priority on RX.
+ * @handle: Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_end_rx_rt(void *handle)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
+
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ glink_put_ch_ctx(ctx);
+ return -EBUSY;
+ }
+ ret = ctx->transport_ptr->ops->rx_rt_unvote(ctx->transport_ptr->ops);
+ ctx->rt_vote_off++;
+ GLINK_INFO_CH(ctx, "%s: Unvoting RX Realtime Thread %d", __func__, ret);
+ glink_put_ch_ctx(ctx);
+ return ret;
+}
+
/**
* glink_rpm_rx_poll() - Poll and receive any available events
* @handle: Channel handle in which this operation is performed.
@@ -3631,10 +3688,10 @@
if (ret)
return ret;
if (!ctx->transport_ptr) {
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EOPNOTSUPP;
}
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
}
EXPORT_SYMBOL(glink_wait_link_down);
@@ -3956,6 +4013,10 @@
if_ptr->power_vote = dummy_power_vote;
if (!if_ptr->power_unvote)
if_ptr->power_unvote = dummy_power_unvote;
+ if (!if_ptr->rx_rt_vote)
+ if_ptr->rx_rt_vote = dummy_rx_rt_vote;
+ if (!if_ptr->rx_rt_unvote)
+ if_ptr->rx_rt_unvote = dummy_rx_rt_unvote;
xprt_ptr->capabilities = 0;
xprt_ptr->ops = if_ptr;
spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
@@ -6175,7 +6236,6 @@
static int glink_init(void)
{
log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
- rwlock_init(&magic_lock);
if (!log_ctx)
GLINK_ERR("%s: unable to create log context\n", __func__);
glink_debugfs_init();
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 94dffa5..384347d 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -182,6 +182,8 @@
* @deferred_cmds: List of deferred commands that need to be
* processed in process context.
* @deferred_cmds_cnt: Number of deferred commands in queue.
+ * @rt_vote_lock: Serialize access to RT rx votes
+ * @rt_votes: Vote count for RT rx thread priority
* @num_pw_states: Size of @ramp_time_us.
* @ramp_time_us: Array of ramp times in microseconds where array
* index position represents a power state.
@@ -221,6 +223,8 @@
spinlock_t rx_lock;
struct list_head deferred_cmds;
uint32_t deferred_cmds_cnt;
+ spinlock_t rt_vote_lock;
+ uint32_t rt_votes;
uint32_t num_pw_states;
unsigned long *ramp_time_us;
struct mailbox_config_info *mailbox;
@@ -2125,6 +2129,52 @@
}
/**
+ * rx_rt_vote() - Increment an RX thread RT vote
+ * @if_ptr: The transport interface on which power voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int rx_rt_vote(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+ struct sched_param param = { .sched_priority = 1 };
+ int ret = 0;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->rt_vote_lock, flags);
+ if (!einfo->rt_votes)
+ ret = sched_setscheduler_nocheck(einfo->task, SCHED_FIFO,
+ &amp;param);
+ einfo->rt_votes++;
+ spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
+ return ret;
+}
+
+/**
+ * rx_rt_unvote() - Remove a RX thread RT vote
+ * @if_ptr: The transport interface on which power voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int rx_rt_unvote(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+ struct sched_param param = { .sched_priority = 0 };
+ int ret = 0;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->rt_vote_lock, flags);
+ einfo->rt_votes--;
+ if (!einfo->rt_votes)
+ ret = sched_setscheduler_nocheck(einfo->task, SCHED_NORMAL,
+ &amp;param);
+ spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
+ return ret;
+}
+
+/**
* negotiate_features_v1() - determine what features of a version can be used
* @if_ptr: The transport for which features are negotiated for.
* @version: The version negotiated.
@@ -2169,6 +2219,8 @@
einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
einfo->xprt_if.power_vote = power_vote;
einfo->xprt_if.power_unvote = power_unvote;
+ einfo->xprt_if.rx_rt_vote = rx_rt_vote;
+ einfo->xprt_if.rx_rt_unvote = rx_rt_unvote;
}
/**
@@ -2341,6 +2393,8 @@
init_srcu_struct(&einfo->use_ref);
spin_lock_init(&einfo->rx_lock);
INIT_LIST_HEAD(&einfo->deferred_cmds);
+ spin_lock_init(&einfo->rt_vote_lock);
+ einfo->rt_votes = 0;
mutex_lock(&probe_lock);
if (edge_infos[einfo->remote_proc_id]) {
diff --git a/drivers/soc/qcom/glink_xprt_if.h b/drivers/soc/qcom/glink_xprt_if.h
index f4d5a3b..47c1580 100644
--- a/drivers/soc/qcom/glink_xprt_if.h
+++ b/drivers/soc/qcom/glink_xprt_if.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -141,6 +141,8 @@
struct glink_transport_if *if_ptr, uint32_t state);
int (*power_vote)(struct glink_transport_if *if_ptr, uint32_t state);
int (*power_unvote)(struct glink_transport_if *if_ptr);
+ int (*rx_rt_vote)(struct glink_transport_if *if_ptr);
+ int (*rx_rt_unvote)(struct glink_transport_if *if_ptr);
/*
* Keep data pointers at the end of the structure after all function
* pointer to allow for in-place initialization.
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index cded512..0b6c80d 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -269,6 +269,7 @@
ICNSS_WLFW_EXISTS,
ICNSS_WDOG_BITE,
ICNSS_SHUTDOWN_DONE,
+ ICNSS_HOST_TRIGGERED_PDR,
};
struct ce_irq_list {
@@ -321,6 +322,13 @@
uint32_t disable;
} ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
+ struct {
+ uint32_t pdr_fw_crash;
+ uint32_t pdr_host_error;
+ uint32_t root_pd_crash;
+ uint32_t root_pd_shutdown;
+ } recovery;
+
uint32_t pm_suspend;
uint32_t pm_suspend_err;
uint32_t pm_resume;
@@ -358,10 +366,10 @@
uint32_t vbatt_req;
uint32_t vbatt_resp;
uint32_t vbatt_req_err;
+ u32 rejuvenate_ind;
uint32_t rejuvenate_ack_req;
uint32_t rejuvenate_ack_resp;
uint32_t rejuvenate_ack_err;
- uint32_t trigger_recovery;
};
#define MAX_NO_OF_MAC_ADDR 4
@@ -370,6 +378,20 @@
uint32_t no_of_mac_addr_set;
};
+enum icnss_pdr_cause_index {
+ ICNSS_FW_CRASH,
+ ICNSS_ROOT_PD_CRASH,
+ ICNSS_ROOT_PD_SHUTDOWN,
+ ICNSS_HOST_ERROR,
+};
+
+static const char * const icnss_pdr_cause[] = {
+ [ICNSS_FW_CRASH] = "FW crash",
+ [ICNSS_ROOT_PD_CRASH] = "Root PD crashed",
+ [ICNSS_ROOT_PD_SHUTDOWN] = "Root PD shutdown",
+ [ICNSS_HOST_ERROR] = "Host error",
+};
+
struct service_notifier_context {
void *handle;
uint32_t instance_id;
@@ -435,6 +457,11 @@
bool is_wlan_mac_set;
struct icnss_wlan_mac_addr wlan_mac_addr;
bool bypass_s1_smmu;
+ u8 cause_for_rejuvenation;
+ u8 requesting_sub_system;
+ u16 line_number;
+ char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+ struct mutex dev_lock;
} *penv;
#ifdef CONFIG_ICNSS_DEBUG
@@ -1670,6 +1697,60 @@
return ret;
}
+static int icnss_decode_rejuvenate_ind(void *msg, unsigned int msg_len)
+{
+ struct msg_desc ind_desc;
+ struct wlfw_rejuvenate_ind_msg_v01 ind_msg;
+ int ret = 0;
+
+ if (!penv || !penv->wlfw_clnt) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ memset(&ind_msg, 0, sizeof(ind_msg));
+
+ ind_desc.msg_id = QMI_WLFW_REJUVENATE_IND_V01;
+ ind_desc.max_msg_len = WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN;
+ ind_desc.ei_array = wlfw_rejuvenate_ind_msg_v01_ei;
+
+ ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+ if (ret < 0) {
+ icnss_pr_err("Failed to decode rejuvenate ind message: ret %d, msg_len %u\n",
+ ret, msg_len);
+ goto out;
+ }
+
+ if (ind_msg.cause_for_rejuvenation_valid)
+ penv->cause_for_rejuvenation = ind_msg.cause_for_rejuvenation;
+ else
+ penv->cause_for_rejuvenation = 0;
+ if (ind_msg.requesting_sub_system_valid)
+ penv->requesting_sub_system = ind_msg.requesting_sub_system;
+ else
+ penv->requesting_sub_system = 0;
+ if (ind_msg.line_number_valid)
+ penv->line_number = ind_msg.line_number;
+ else
+ penv->line_number = 0;
+ if (ind_msg.function_name_valid)
+ memcpy(penv->function_name, ind_msg.function_name,
+ QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1);
+ else
+ memset(penv->function_name, 0,
+ QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1);
+
+ icnss_pr_info("Cause for rejuvenation: 0x%x, requesting sub-system: 0x%x, line number: %u, function name: %s\n",
+ penv->cause_for_rejuvenation,
+ penv->requesting_sub_system,
+ penv->line_number,
+ penv->function_name);
+
+ penv->stats.rejuvenate_ind++;
+out:
+ return ret;
+}
+
static int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv)
{
int ret;
@@ -1863,6 +1944,7 @@
msg_id, penv->state);
icnss_ignore_qmi_timeout(true);
+ icnss_decode_rejuvenate_ind(msg, msg_len);
event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
if (event_data == NULL)
return;
@@ -2400,6 +2482,11 @@
icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
priv->state, notif->crashed);
+ if (notif->crashed)
+ priv->stats.recovery.root_pd_crash++;
+ else
+ priv->stats.recovery.root_pd_shutdown++;
+
icnss_ignore_qmi_timeout(true);
event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
@@ -2479,6 +2566,7 @@
enum pd_subsys_state *state = data;
struct icnss_event_pd_service_down_data *event_data;
struct icnss_uevent_fw_down_data fw_down_data;
+ enum icnss_pdr_cause_index cause = ICNSS_ROOT_PD_CRASH;
icnss_pr_dbg("PD service notification: 0x%lx state: 0x%lx\n",
notification, priv->state);
@@ -2491,28 +2579,42 @@
if (event_data == NULL)
return notifier_from_errno(-ENOMEM);
+ event_data->crashed = true;
+
if (state == NULL) {
- event_data->crashed = true;
+ priv->stats.recovery.root_pd_crash++;
goto event_post;
}
- icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx\n",
- *state, priv->state);
-
switch (*state) {
case ROOT_PD_WDOG_BITE:
- event_data->crashed = true;
event_data->wdog_bite = true;
+ priv->stats.recovery.root_pd_crash++;
break;
case ROOT_PD_SHUTDOWN:
+ cause = ICNSS_ROOT_PD_SHUTDOWN;
+ priv->stats.recovery.root_pd_shutdown++;
+ event_data->crashed = false;
+ break;
+ case USER_PD_STATE_CHANGE:
+ if (test_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state)) {
+ cause = ICNSS_HOST_ERROR;
+ priv->stats.recovery.pdr_host_error++;
+ } else {
+ cause = ICNSS_FW_CRASH;
+ priv->stats.recovery.pdr_fw_crash++;
+ }
break;
default:
- event_data->crashed = true;
+ priv->stats.recovery.root_pd_crash++;
break;
}
+ icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx: cause: %s\n",
+ *state, priv->state, icnss_pdr_cause[cause]);
event_post:
icnss_ignore_qmi_timeout(true);
+ clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
fw_down_data.crashed = event_data->crashed;
icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
@@ -3255,7 +3357,6 @@
WARN_ON(1);
icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
priv->state);
- priv->stats.trigger_recovery++;
/*
* Initiate PDR, required only for the first instance
@@ -3263,6 +3364,9 @@
ret = service_notif_pd_restart(priv->service_notifier[0].name,
priv->service_notifier[0].instance_id);
+ if (!ret)
+ set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
+
out:
return ret;
}
@@ -3274,6 +3378,7 @@
struct dma_iommu_mapping *mapping;
int atomic_ctx = 1;
int s1_bypass = 1;
+ int fast = 1;
int ret = 0;
icnss_pr_dbg("Initializing SMMU\n");
@@ -3287,7 +3392,17 @@
goto map_fail;
}
- if (!priv->bypass_s1_smmu) {
+ if (priv->bypass_s1_smmu) {
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS,
+ &s1_bypass);
+ if (ret < 0) {
+ icnss_pr_err("Set s1_bypass attribute failed, err = %d\n",
+ ret);
+ goto set_attr_fail;
+ }
+ icnss_pr_dbg("SMMU S1 BYPASS\n");
+ } else {
ret = iommu_domain_set_attr(mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
@@ -3296,14 +3411,17 @@
ret);
goto set_attr_fail;
}
- }
+ icnss_pr_dbg("SMMU ATTR ATOMIC\n");
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_S1_BYPASS,
- &s1_bypass);
- if (ret < 0) {
- icnss_pr_err("Set s1_bypass attribute failed, err = %d\n", ret);
- goto set_attr_fail;
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast);
+ if (ret < 0) {
+ icnss_pr_err("Set fast map attribute failed, err = %d\n",
+ ret);
+ goto set_attr_fail;
+ }
+ icnss_pr_dbg("SMMU FAST map set\n");
}
ret = arm_iommu_attach_device(&priv->pdev->dev, mapping);
@@ -3617,9 +3735,6 @@
if (ret)
return ret;
- if (ret == 0)
- memset(&priv->stats, 0, sizeof(priv->stats));
-
return count;
}
@@ -3713,6 +3828,9 @@
case ICNSS_SHUTDOWN_DONE:
seq_puts(s, "SHUTDOWN DONE");
continue;
+ case ICNSS_HOST_TRIGGERED_PDR:
+ seq_puts(s, "HOST TRIGGERED PDR");
+ continue;
}
seq_printf(s, "UNKNOWN-%d", i);
@@ -3743,6 +3861,26 @@
return 0;
}
+static int icnss_stats_show_rejuvenate_info(struct seq_file *s,
+ struct icnss_priv *priv)
+{
+ if (priv->stats.rejuvenate_ind) {
+ seq_puts(s, "\n<---------------- Rejuvenate Info ----------------->\n");
+ seq_printf(s, "Number of Rejuvenations: %u\n",
+ priv->stats.rejuvenate_ind);
+ seq_printf(s, "Cause for Rejuvenation: 0x%x\n",
+ priv->cause_for_rejuvenation);
+ seq_printf(s, "Requesting Sub-System: 0x%x\n",
+ priv->requesting_sub_system);
+ seq_printf(s, "Line Number: %u\n",
+ priv->line_number);
+ seq_printf(s, "Function Name: %s\n",
+ priv->function_name);
+ }
+
+ return 0;
+}
+
static int icnss_stats_show_events(struct seq_file *s, struct icnss_priv *priv)
{
int i;
@@ -3808,10 +3946,14 @@
ICNSS_STATS_DUMP(s, priv, vbatt_req);
ICNSS_STATS_DUMP(s, priv, vbatt_resp);
ICNSS_STATS_DUMP(s, priv, vbatt_req_err);
+ ICNSS_STATS_DUMP(s, priv, rejuvenate_ind);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_req);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_resp);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_err);
- ICNSS_STATS_DUMP(s, priv, trigger_recovery);
+ ICNSS_STATS_DUMP(s, priv, recovery.pdr_fw_crash);
+ ICNSS_STATS_DUMP(s, priv, recovery.pdr_host_error);
+ ICNSS_STATS_DUMP(s, priv, recovery.root_pd_crash);
+ ICNSS_STATS_DUMP(s, priv, recovery.root_pd_shutdown);
seq_puts(s, "\n<------------------ PM stats ------------------->\n");
ICNSS_STATS_DUMP(s, priv, pm_suspend);
@@ -3829,6 +3971,8 @@
icnss_stats_show_capability(s, priv);
+ icnss_stats_show_rejuvenate_info(s, priv);
+
icnss_stats_show_events(s, priv);
icnss_stats_show_state(s, priv);
@@ -3940,12 +4084,14 @@
{
struct icnss_priv *priv = s->private;
+ mutex_lock(&priv->dev_lock);
if (!priv->diag_reg_read_buf) {
seq_puts(s, "Usage: echo <mem_type> <offset> <data_len> > <debugfs>/icnss/reg_read\n");
if (!test_bit(ICNSS_FW_READY, &priv->state))
seq_puts(s, "Firmware is not ready yet!, wait for FW READY\n");
+ mutex_unlock(&priv->dev_lock);
return 0;
}
@@ -3959,6 +4105,7 @@
priv->diag_reg_read_len = 0;
kfree(priv->diag_reg_read_buf);
priv->diag_reg_read_buf = NULL;
+ mutex_unlock(&priv->dev_lock);
return 0;
}
@@ -4019,18 +4166,22 @@
data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
return -EINVAL;
+ mutex_lock(&priv->dev_lock);
kfree(priv->diag_reg_read_buf);
priv->diag_reg_read_buf = NULL;
reg_buf = kzalloc(data_len, GFP_KERNEL);
- if (!reg_buf)
+ if (!reg_buf) {
+ mutex_unlock(&priv->dev_lock);
return -ENOMEM;
+ }
ret = wlfw_athdiag_read_send_sync_msg(priv, reg_offset,
mem_type, data_len,
reg_buf);
if (ret) {
kfree(reg_buf);
+ mutex_unlock(&priv->dev_lock);
return ret;
}
@@ -4038,6 +4189,7 @@
priv->diag_reg_read_mem_type = mem_type;
priv->diag_reg_read_len = data_len;
priv->diag_reg_read_buf = reg_buf;
+ mutex_unlock(&priv->dev_lock);
return count;
}
@@ -4317,6 +4469,7 @@
spin_lock_init(&priv->event_lock);
spin_lock_init(&priv->on_off_lock);
+ mutex_init(&priv->dev_lock);
priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
if (!priv->event_wq) {
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
index be2b7da..739c053 100644
--- a/drivers/soc/qcom/llcc-sdm845.c
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -57,23 +57,23 @@
}
static struct llcc_slice_config sdm845_data[] = {
- SCT_ENTRY("cpuss", 1, 1, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1),
- SCT_ENTRY("vidsc0", 2, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("vidsc1", 3, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("rotator", 4, 4, 563, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("voice", 5, 5, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("audio", 6, 6, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("modemhp_grow", 7, 7, 1024, 2, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("modem", 8, 8, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("compute", 10, 10, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("gpuhtw", 11, 11, 515, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("gpu", 12, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("mmuhwt", 13, 13, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1),
- SCT_ENTRY("compute_dma", 15, 15, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("display", 16, 16, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("modemhp_fix", 20, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("modem_paging", 21, 21, 1024, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("audiohw", 22, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+ SCT_ENTRY("cpuss", 1, 1, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 1),
+ SCT_ENTRY("vidsc0", 2, 2, 512, 2, 1, 0x0, 0x0F0, 0, 0, 1, 1, 0),
+ SCT_ENTRY("vidsc1", 3, 3, 512, 2, 1, 0x0, 0x0F0, 0, 0, 1, 1, 0),
+ SCT_ENTRY("rotator", 4, 4, 563, 2, 1, 0x0, 0x00F, 2, 0, 1, 1, 0),
+ SCT_ENTRY("voice", 5, 5, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("audio", 6, 6, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("modemhp_grow", 7, 7, 1024, 2, 0, 0x0F0, 0xF0F, 0, 0, 1, 1, 0),
+ SCT_ENTRY("modem", 8, 8, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("compute", 10, 10, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("gpuhtw", 11, 11, 512, 1, 1, 0x0, 0xC, 0, 0, 1, 1, 0),
+ SCT_ENTRY("gpu", 12, 12, 2560, 1, 0, 0xFF0, 0x3, 0, 0, 1, 1, 0),
+ SCT_ENTRY("mmuhwt", 13, 13, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 0, 1),
+ SCT_ENTRY("compute_dma", 15, 15, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("display", 16, 16, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("modemhp_fix", 20, 20, 1024, 2, 1, 0x0, 0xF00, 0, 0, 1, 1, 0),
+ SCT_ENTRY("modem_paging", 21, 21, 1024, 0, 1, 0x0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("audiohw", 22, 22, 1024, 1, 1, 0xFF0, 0xF, 0, 0, 1, 1, 0),
};
static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index e90012d..aa6c5d7 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -40,8 +40,6 @@
};
static struct handle_type handle_list;
-static LIST_HEAD(input_list);
-static LIST_HEAD(apply_list);
static LIST_HEAD(commit_list);
static LIST_HEAD(late_init_clist);
static LIST_HEAD(query_list);
@@ -780,77 +778,9 @@
return;
}
-static void del_inp_list(struct list_head *list)
-{
- struct rule_update_path_info *rule_node;
- struct rule_update_path_info *rule_node_tmp;
-
- list_for_each_entry_safe(rule_node, rule_node_tmp, list, link) {
- list_del(&rule_node->link);
- rule_node->added = false;
- }
-}
-
-static void del_op_list(struct list_head *list)
-{
- struct rule_apply_rcm_info *rule;
- struct rule_apply_rcm_info *rule_tmp;
-
- list_for_each_entry_safe(rule, rule_tmp, list, link)
- list_del(&rule->link);
-}
-
-static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit)
-{
- struct rule_apply_rcm_info *rule;
- struct device *dev = NULL;
- struct msm_bus_node_device_type *dev_info = NULL;
- int ret = 0;
-
- list_for_each_entry(rule, list, link) {
- if (!rule)
- continue;
-
- if (rule && (rule->after_clk_commit != after_clk_commit))
- continue;
-
- dev = bus_find_device(&msm_bus_type, NULL,
- (void *) &rule->id,
- msm_bus_device_match_adhoc);
-
- if (!dev) {
- MSM_BUS_ERR("Can't find dev node for %d", rule->id);
- continue;
- }
- dev_info = to_msm_bus_node(dev);
-
- ret = msm_bus_enable_limiter(dev_info, rule->throttle,
- rule->lim_bw);
- if (ret)
- MSM_BUS_ERR("Failed to set limiter for %d", rule->id);
- }
-
- return ret;
-}
-
static void commit_data(void)
{
- bool rules_registered = msm_rule_are_rules_registered();
-
- if (rules_registered) {
- msm_rules_update_path(&input_list, &apply_list);
- msm_bus_apply_rules(&apply_list, false);
- }
-
msm_bus_commit_data(&commit_list);
-
- if (rules_registered) {
- msm_bus_apply_rules(&apply_list, true);
- del_inp_list(&input_list);
- del_op_list(&apply_list);
- }
- INIT_LIST_HEAD(&input_list);
- INIT_LIST_HEAD(&apply_list);
INIT_LIST_HEAD(&commit_list);
}
@@ -909,8 +839,6 @@
struct msm_bus_node_device_type *dev_info = NULL;
int curr_idx;
int ret = 0;
- struct rule_update_path_info *rule_node;
- bool rules_registered = msm_rule_are_rules_registered();
if (IS_ERR_OR_NULL(src_dev)) {
MSM_BUS_ERR("%s: No source device", __func__);
@@ -958,19 +886,6 @@
add_node_to_clist(dev_info);
- if (rules_registered) {
- rule_node = &dev_info->node_info->rule;
- rule_node->id = dev_info->node_info->id;
- rule_node->ib = dev_info->node_bw[ACTIVE_CTX].max_ib;
- rule_node->ab = dev_info->node_bw[ACTIVE_CTX].sum_ab;
- rule_node->clk =
- dev_info->node_bw[ACTIVE_CTX].cur_clk_hz;
- if (!rule_node->added) {
- list_add_tail(&rule_node->link, &input_list);
- rule_node->added = true;
- }
- }
-
next_dev = lnode->next_dev;
curr_idx = lnode->next;
}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
index 3f8b52c..6c69bec 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -133,7 +133,7 @@
return 0;
}
- if (sscanf(buf, "%s %llu", name, &vote_khz) != 2) {
+ if (sscanf(buf, "%9s %llu", name, &vote_khz) != 2) {
pr_err("%s:return error", __func__);
return -EINVAL;
}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 36c0154..185d862 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -596,8 +596,16 @@
}
n_active = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
+ if (!n_active)
+ return -ENOMEM;
+
n_wake = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
+ if (!n_wake)
+ return -ENOMEM;
+
n_sleep = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
+ if (!n_sleep)
+ return -ENOMEM;
if (cnt_active)
cmdlist_active = kcalloc(cnt_active, sizeof(struct tcs_cmd),
@@ -612,18 +620,32 @@
cmdlist_wake, cmdlist_sleep, cur_bcm_clist);
ret = rpmh_invalidate(cur_mbox);
+ if (ret)
+ MSM_BUS_ERR("%s: Error invalidating mbox: %d\n",
+ __func__, ret);
+
if (cur_rsc->rscdev->req_state == RPMH_AWAKE_STATE)
ret = rpmh_write(cur_mbox, cur_rsc->rscdev->req_state,
cmdlist_active, cnt_active);
else
ret = rpmh_write_passthru(cur_mbox, cur_rsc->rscdev->req_state,
cmdlist_active, n_active);
+ if (ret)
+ MSM_BUS_ERR("%s: error sending active/awake sets: %d\n",
+ __func__, ret);
+
ret = rpmh_write_passthru(cur_mbox, RPMH_WAKE_ONLY_STATE,
cmdlist_wake, n_wake);
+ if (ret)
+ MSM_BUS_ERR("%s: error sending wake sets: %d\n",
+ __func__, ret);
ret = rpmh_write_passthru(cur_mbox, RPMH_SLEEP_STATE,
cmdlist_sleep, n_sleep);
+ if (ret)
+ MSM_BUS_ERR("%s: error sending sleep sets: %d\n",
+ __func__, ret);
list_for_each_entry_safe(node, node_tmp, clist, link) {
bcm_clist_clean(node);
@@ -746,18 +768,16 @@
static int msm_bus_disable_node_qos_clk(struct msm_bus_node_device_type *node)
{
- struct msm_bus_node_device_type *bus_node = NULL;
int i;
int ret = 0;
- if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+ if (!node) {
ret = -ENXIO;
goto exit_disable_node_qos_clk;
}
- bus_node = to_msm_bus_node(node->node_info->bus_device);
- for (i = 0; i < bus_node->num_node_qos_clks; i++)
- ret = disable_nodeclk(&bus_node->node_qos_clks[i]);
+ for (i = 0; i < node->num_node_qos_clks; i++)
+ ret = disable_nodeclk(&node->node_qos_clks[i]);
exit_disable_node_qos_clk:
return ret;
@@ -766,7 +786,7 @@
static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node)
{
int i;
- int ret;
+ int ret = 0;
long rounded_rate;
for (i = 0; i < node->num_node_qos_clks; i++) {
@@ -1345,7 +1365,7 @@
node_info->bcm_dev_ids = devm_kzalloc(bus_dev,
sizeof(int) * pdata_node_info->num_bcm_devs,
GFP_KERNEL);
- if (!node_info->bcm_devs) {
+ if (!node_info->bcm_dev_ids) {
MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
devm_kfree(bus_dev, node_info->bcm_devs);
ret = -ENOMEM;
@@ -1369,7 +1389,7 @@
node_info->rsc_dev_ids = devm_kzalloc(bus_dev,
sizeof(int) * pdata_node_info->num_rsc_devs,
GFP_KERNEL);
- if (!node_info->rsc_devs) {
+ if (!node_info->rsc_dev_ids) {
MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
devm_kfree(bus_dev, node_info->rsc_devs);
ret = -ENOMEM;
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
index 42a6f58..77cbbf1 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
@@ -47,7 +47,7 @@
}
arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if ((size > 0) && ZERO_OR_NULL_PTR(arr)) {
+ if (ZERO_OR_NULL_PTR(arr)) {
dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n",
prop);
return NULL;
diff --git a/drivers/soc/qcom/qmp-debugfs-client.c b/drivers/soc/qcom/qmp-debugfs-client.c
new file mode 100644
index 0000000..578e7f0
--- /dev/null
+++ b/drivers/soc/qcom/qmp-debugfs-client.c
@@ -0,0 +1,105 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mailbox_client.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox/qmp.h>
+#include <linux/uaccess.h>
+
+#define MAX_MSG_SIZE 96 /* Imposed by the remote */
+
+static struct mbox_chan *chan;
+static struct mbox_client *cl;
+
+static ssize_t aop_msg_write(struct file *file, const char __user *userstr,
+ size_t len, loff_t *pos)
+{
+ char buf[MAX_MSG_SIZE + 1] = {0};
+ struct qmp_pkt pkt;
+ int rc;
+
+ if (!len || (len > MAX_MSG_SIZE))
+ return len;
+
+ rc = copy_from_user(buf, userstr, len);
+ if (rc) {
+ pr_err("%s copy from user failed, rc=%d\n", __func__, rc);
+ return len;
+ }
+
+ /*
+ * Controller expects a 4 byte aligned buffer
+ */
+ pkt.size = (len + 0x3) & ~0x3;
+ pkt.data = buf;
+
+ if (mbox_send_message(chan, &pkt) < 0)
+ pr_err("Failed to send qmp request\n");
+
+ return len;
+}
+
+static const struct file_operations aop_msg_fops = {
+ .write = aop_msg_write,
+};
+
+static int qmp_msg_probe(struct platform_device *pdev)
+{
+ struct dentry *file;
+
+ cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->dev = &pdev->dev;
+ cl->tx_block = true;
+ cl->tx_tout = 100;
+ cl->knows_txdone = false;
+
+ chan = mbox_request_channel(cl, 0);
+ if (IS_ERR(chan)) {
+ dev_err(&pdev->dev, "Failed to mbox channel\n");
+ return PTR_ERR(chan);
+ }
+
+ file = debugfs_create_file("aop_send_message", 0220, NULL, NULL,
+ &aop_msg_fops);
+ if (!file)
+ goto err;
+ return 0;
+err:
+ mbox_free_channel(chan);
+ chan = NULL;
+ return -ENOMEM;
+}
+
+static const struct of_device_id aop_qmp_match_tbl[] = {
+ {.compatible = "qcom,debugfs-qmp-client"},
+ {},
+};
+
+static struct platform_driver aop_qmp_msg_driver = {
+ .probe = qmp_msg_probe,
+ .driver = {
+ .name = "debugfs-qmp-client",
+ .owner = THIS_MODULE,
+ .of_match_table = aop_qmp_match_tbl,
+ },
+};
+
+builtin_platform_driver(aop_qmp_msg_driver);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 9a98063..1b41269 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -32,6 +32,7 @@
#define RPMH_MAX_MBOXES 2
#define RPMH_MAX_FAST_RES 32
#define RPMH_MAX_REQ_IN_BATCH 10
+#define RPMH_TIMEOUT msecs_to_jiffies(10000)
#define DEFINE_RPMH_MSG_ONSTACK(rc, s, q, c, name) \
struct rpmh_msg name = { \
@@ -76,6 +77,8 @@
DECLARE_BITMAP(fast_req, RPMH_MAX_FAST_RES);
bool dirty;
bool in_solver_mode;
+ /* Cache sleep and wake requests sent as passthru */
+ struct rpmh_msg *passthru_cache[2 * RPMH_MAX_REQ_IN_BATCH];
};
struct rpmh_client {
@@ -110,17 +113,24 @@
return msg;
}
+static void __free_msg_to_pool(struct rpmh_msg *rpm_msg)
+{
+ struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
+
+ /* If we allocated the pool, set it as available */
+ if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
+ bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
+ }
+}
+
static void free_msg_to_pool(struct rpmh_msg *rpm_msg)
{
struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
unsigned long flags;
- /* If we allocated the pool, set it as available */
- if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
- spin_lock_irqsave(&rpm->lock, flags);
- bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
- spin_unlock_irqrestore(&rpm->lock, flags);
- }
+ spin_lock_irqsave(&rpm->lock, flags);
+ __free_msg_to_pool(rpm_msg);
+ spin_unlock_irqrestore(&rpm->lock, flags);
}
static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
@@ -166,6 +176,46 @@
complete(compl);
}
+/**
+ * wait_for_tx_done: Wait forever until the response is received.
+ *
+ * @rc: The RPMH client
+ * @compl: The completion object
+ * @addr: An addr that we sent in that request
+ * @data: The data for the address in that request
+ *
+ */
+static inline void wait_for_tx_done(struct rpmh_client *rc,
+ struct completion *compl, u32 addr, u32 data)
+{
+ int ret;
+ int count = 4;
+ int skip = 0;
+
+ do {
+ ret = wait_for_completion_timeout(compl, RPMH_TIMEOUT);
+ if (ret) {
+ if (count != 4)
+ dev_notice(rc->dev,
+ "RPMH response received addr=0x%x data=0x%x\n",
+ addr, data);
+ return;
+ }
+ if (!count) {
+ if (skip++ % 100)
+ continue;
+ dev_err(rc->dev,
+ "RPMH waiting for interrupt from AOSS\n");
+ mbox_chan_debug(rc->chan);
+ } else {
+ dev_err(rc->dev,
+ "RPMH response timeout (%d) addr=0x%x,data=0x%x\n",
+ count, addr, data);
+ count--;
+ }
+ } while (true);
+}
+
static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
{
struct rpmh_req *p, *req = NULL;
@@ -365,7 +415,7 @@
if (ret < 0)
return ret;
- wait_for_completion(&compl);
+ wait_for_tx_done(rc, &compl, addr, data);
return rpm_msg.err;
}
@@ -469,12 +519,76 @@
if (ret)
return ret;
- wait_for_completion(&compl);
+ wait_for_tx_done(rc, &compl, cmd[0].addr, cmd[0].data);
return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write);
+static int cache_passthru(struct rpmh_client *rc, struct rpmh_msg **rpm_msg,
+ int count)
+{
+ struct rpmh_mbox *rpm = rc->rpmh;
+ unsigned long flags;
+ int ret = 0;
+ int index = 0;
+ int i;
+
+ spin_lock_irqsave(&rpm->lock, flags);
+ while (rpm->passthru_cache[index])
+ index++;
+ if (index + count >= 2 * RPMH_MAX_REQ_IN_BATCH) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < count; i++)
+ rpm->passthru_cache[index + i] = rpm_msg[i];
+fail:
+ spin_unlock_irqrestore(&rpm->lock, flags);
+
+ return ret;
+}
+
+static int flush_passthru(struct rpmh_client *rc)
+{
+ struct rpmh_mbox *rpm = rc->rpmh;
+ struct rpmh_msg *rpm_msg;
+ unsigned long flags;
+ int ret = 0;
+ int i;
+
+ /* Send Sleep/Wake requests to the controller, expect no response */
+ spin_lock_irqsave(&rpm->lock, flags);
+ for (i = 0; rpm->passthru_cache[i]; i++) {
+ rpm_msg = rpm->passthru_cache[i];
+ ret = mbox_send_controller_data(rc->chan, &rpm_msg->msg);
+ if (ret)
+ goto fail;
+ }
+fail:
+ spin_unlock_irqrestore(&rpm->lock, flags);
+
+ return ret;
+}
+
+static void invalidate_passthru(struct rpmh_client *rc)
+{
+ struct rpmh_mbox *rpm = rc->rpmh;
+ unsigned long flags;
+ int index = 0;
+ int i;
+
+ spin_lock_irqsave(&rpm->lock, flags);
+ while (rpm->passthru_cache[index])
+ index++;
+ for (i = 0; i < index; i++) {
+ __free_msg_to_pool(rpm->passthru_cache[i]);
+ rpm->passthru_cache[i] = NULL;
+ }
+ spin_unlock_irqrestore(&rpm->lock, flags);
+}
+
/**
* rpmh_write_passthru: Write multiple batches of RPMH commands without caching
*
@@ -494,12 +608,13 @@
int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
struct tcs_cmd *cmd, int *n)
{
- struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH];
+ struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH] = { NULL };
DECLARE_COMPLETION_ONSTACK(compl);
atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
int count = 0;
int ret, i, j, k;
bool complete_set;
+ u32 addr, data;
if (IS_ERR_OR_NULL(rc) || !cmd || !n)
return -EINVAL;
@@ -511,7 +626,7 @@
if (ret)
return ret;
- while (n[count++])
+ while (n[count++] > 0)
;
count--;
if (!count || count > RPMH_MAX_REQ_IN_BATCH)
@@ -537,6 +652,8 @@
}
}
+ addr = cmd[0].addr;
+ data = cmd[0].data;
/* Create async request batches */
for (i = 0; i < count; i++) {
rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
@@ -566,16 +683,13 @@
/* For those unsent requests, spoof tx_done */
for (j = i; j < count; j++)
rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, ret);
- wait_for_completion(&compl);
+ wait_for_tx_done(rc, &compl, addr, data);
} else {
- /* Send Sleep requests to the controller, expect no response */
- for (i = 0; i < count; i++) {
- rpm_msg[i]->completion = NULL;
- ret = mbox_send_controller_data(rc->chan,
- &rpm_msg[i]->msg);
- free_msg_to_pool(rpm_msg[i]);
- }
- return 0;
+ /*
+ * Cache sleep/wake data in store.
+ * But flush passthru first before flushing all other data.
+ */
+ return cache_passthru(rc, rpm_msg, count);
}
return 0;
@@ -635,7 +749,7 @@
{
DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
- if (IS_ERR_OR_NULL(rc) || n > MAX_RPMH_PAYLOAD)
+ if (IS_ERR_OR_NULL(rc) || n <= 0 || n > MAX_RPMH_PAYLOAD)
return -EINVAL;
if (rpmh_standalone)
@@ -671,6 +785,8 @@
if (rpmh_standalone)
return 0;
+ invalidate_passthru(rc);
+
rpm = rc->rpmh;
rpm_msg.msg.invalidate = true;
rpm_msg.msg.is_complete = false;
@@ -719,7 +835,7 @@
return ret;
/* Wait until the response is received from RPMH */
- wait_for_completion(&compl);
+ wait_for_tx_done(rc, &compl, addr, 0);
/* Read the data back from the tcs_mbox_msg structrure */
*resp = rpm_msg.cmd[0].data;
@@ -784,6 +900,11 @@
}
spin_unlock_irqrestore(&rpm->lock, flags);
+ /* First flush the cached passthru's */
+ ret = flush_passthru(rc);
+ if (ret)
+ return ret;
+
/*
* Nobody else should be calling this function other than sleep,
* hence we can run without locks.
@@ -867,8 +988,10 @@
rpmh->msg_pool = kzalloc(sizeof(struct rpmh_msg) *
RPMH_MAX_FAST_RES, GFP_KERNEL);
- if (!rpmh->msg_pool)
+ if (!rpmh->msg_pool) {
+ of_node_put(spec.np);
return ERR_PTR(-ENOMEM);
+ }
rpmh->mbox_dn = spec.np;
INIT_LIST_HEAD(&rpmh->resources);
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 63cce5c..49fd7fe 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -380,6 +380,7 @@
sg_free_table(&table);
return ret;
}
+EXPORT_SYMBOL(hyp_assign_phys);
const char *msm_secure_vmid_to_string(int secure_vmid)
{
@@ -416,6 +417,8 @@
return "VMID_CP_SPSS_SP";
case VMID_CP_SPSS_SP_SHARED:
return "VMID_CP_SPSS_SP_SHARED";
+ case VMID_CP_SPSS_HLOS_SHARED:
+ return "VMID_CP_SPSS_HLOS_SHARED";
case VMID_INVAL:
return "VMID_INVAL";
default:
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
new file mode 100644
index 0000000..3f31fb1
--- /dev/null
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -0,0 +1,575 @@
+/*
+ * SMC Invoke driver
+ *
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/smcinvoke.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include <soc/qcom/scm.h>
+#include <asm/cacheflush.h>
+#include <soc/qcom/qseecomi.h>
+
+#include "smcinvoke_object.h"
+#include "../../misc/qseecom_kernel.h"
+
+#define SMCINVOKE_DEV "smcinvoke"
+#define SMCINVOKE_TZ_PARAM_ID 0x224
+#define SMCINVOKE_TZ_CMD 0x32000600
+#define SMCINVOKE_TZ_ROOT_OBJ 1
+#define SMCINVOKE_TZ_MIN_BUF_SIZE 4096
+#define SMCINVOKE_ARGS_ALIGN_SIZE (sizeof(uint64_t))
+#define SMCINVOKE_TZ_OBJ_NULL 0
+
+#define FOR_ARGS(ndxvar, counts, section) \
+ for (ndxvar = object_counts_index_##section(counts); \
+ ndxvar < (object_counts_index_##section(counts) \
+ + object_counts_num_##section(counts)); \
+ ++ndxvar)
+
+static long smcinvoke_ioctl(struct file *, unsigned int, unsigned long);
+static int smcinvoke_open(struct inode *, struct file *);
+static int smcinvoke_release(struct inode *, struct file *);
+
+static const struct file_operations smcinvoke_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = smcinvoke_ioctl,
+ .compat_ioctl = smcinvoke_ioctl,
+ .open = smcinvoke_open,
+ .release = smcinvoke_release,
+};
+
+struct smcinvoke_buf_hdr {
+ uint32_t offset;
+ uint32_t size;
+};
+
+union smcinvoke_tz_args {
+ struct smcinvoke_buf_hdr b;
+ uint32_t tzhandle;
+};
+struct smcinvoke_msg_hdr {
+ uint32_t tzhandle;
+ uint32_t op;
+ uint32_t counts;
+};
+
+struct smcinvoke_tzobj_context {
+ uint32_t tzhandle;
+};
+
+static dev_t smcinvoke_device_no;
+struct cdev smcinvoke_cdev;
+struct class *driver_class;
+struct device *class_dev;
+
+/*
+ * size_add saturates at SIZE_MAX. If integer overflow is detected,
+ * this function returns SIZE_MAX; otherwise the normal a+b is returned.
+ */
+static inline size_t size_add(size_t a, size_t b)
+{
+ return (b > (SIZE_MAX - a)) ? SIZE_MAX : a + b;
+}
+
+/*
+ * pad_size is used along with size_align to define a buffer overflow
+ * protected version of ALIGN
+ */
+static inline size_t pad_size(size_t a, size_t b)
+{
+ return (~a + 1) % b;
+}
+
+/*
+ * size_align saturates at SIZE_MAX. If integer overflow is detected, this
+ * function returns SIZE_MAX; otherwise the next aligned size is returned.
+ */
+static inline size_t size_align(size_t a, size_t b)
+{
+ return size_add(a, pad_size(a, b));
+}
+
+/*
+ * This function retrieves file pointer corresponding to FD provided. It stores
+ * retrieved file pointer until IOCTL call is concluded. Once call is completed,
+ * all stored file pointers are released. file pointers are stored to prevent
+ * other threads from releasing that FD while IOCTL is in progress.
+ */
+static int get_tzhandle_from_fd(int64_t fd, struct file **filp,
+ uint32_t *tzhandle)
+{
+ int ret = -EBADF;
+ struct file *tmp_filp = NULL;
+ struct smcinvoke_tzobj_context *tzobj = NULL;
+
+ if (fd == SMCINVOKE_USERSPACE_OBJ_NULL) {
+ *tzhandle = SMCINVOKE_TZ_OBJ_NULL;
+ ret = 0;
+ goto out;
+ } else if (fd < SMCINVOKE_USERSPACE_OBJ_NULL) {
+ goto out;
+ }
+
+ tmp_filp = fget(fd);
+ if (!tmp_filp)
+ goto out;
+
+ /* Verify if filp is smcinvoke device's file pointer */
+ if (!tmp_filp->f_op || !tmp_filp->private_data ||
+ (tmp_filp->f_op != &smcinvoke_fops)) {
+ fput(tmp_filp);
+ goto out;
+ }
+
+ tzobj = tmp_filp->private_data;
+ *tzhandle = tzobj->tzhandle;
+ *filp = tmp_filp;
+ ret = 0;
+out:
+ return ret;
+}
+
+static int get_fd_from_tzhandle(uint32_t tzhandle, int64_t *fd)
+{
+ int unused_fd = -1, ret = -1;
+ struct file *f = NULL;
+ struct smcinvoke_tzobj_context *cxt = NULL;
+
+ if (tzhandle == SMCINVOKE_TZ_OBJ_NULL) {
+ *fd = SMCINVOKE_USERSPACE_OBJ_NULL;
+ ret = 0;
+ goto out;
+ }
+
+ cxt = kzalloc(sizeof(*cxt), GFP_KERNEL);
+ if (!cxt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ unused_fd = get_unused_fd_flags(O_RDWR);
+ if (unused_fd < 0)
+ goto out;
+
+ f = anon_inode_getfile(SMCINVOKE_DEV, &smcinvoke_fops, cxt, O_RDWR);
+ if (IS_ERR(f))
+ goto out;
+
+ *fd = unused_fd;
+ fd_install(*fd, f);
+ ((struct smcinvoke_tzobj_context *)
+ (f->private_data))->tzhandle = tzhandle;
+ return 0;
+out:
+ if (unused_fd >= 0)
+ put_unused_fd(unused_fd);
+ kfree(cxt);
+
+ return ret;
+}
+
+static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len,
+ const uint8_t *out_buf, size_t out_buf_len,
+ int32_t *smcinvoke_result)
+{
+ int ret = 0;
+ struct scm_desc desc = {0};
+ size_t inbuf_flush_size = (1UL << get_order(in_buf_len)) * PAGE_SIZE;
+ size_t outbuf_flush_size = (1UL << get_order(out_buf_len)) * PAGE_SIZE;
+
+ desc.arginfo = SMCINVOKE_TZ_PARAM_ID;
+ desc.args[0] = (uint64_t)virt_to_phys(in_buf);
+ desc.args[1] = inbuf_flush_size;
+ desc.args[2] = (uint64_t)virt_to_phys(out_buf);
+ desc.args[3] = outbuf_flush_size;
+
+ dmac_flush_range(in_buf, in_buf + inbuf_flush_size);
+ dmac_flush_range(out_buf, out_buf + outbuf_flush_size);
+
+ ret = scm_call2(SMCINVOKE_TZ_CMD, &desc);
+
+ /* process listener request */
+ if (!ret && (desc.ret[0] == QSEOS_RESULT_INCOMPLETE ||
+ desc.ret[0] == QSEOS_RESULT_BLOCKED_ON_LISTENER))
+ ret = qseecom_process_listener_from_smcinvoke(&desc);
+
+ *smcinvoke_result = (int32_t)desc.ret[1];
+ if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0])
+ pr_err("SCM call failed with ret val = %d %d %d %d\n",
+ ret, (int)desc.ret[0],
+ (int)desc.ret[1], (int)desc.ret[2]);
+
+ dmac_inv_range(in_buf, in_buf + inbuf_flush_size);
+ dmac_inv_range(out_buf, out_buf + outbuf_flush_size);
+ return ret;
+}
+
+static int marshal_out(void *buf, uint32_t buf_size,
+ struct smcinvoke_cmd_req *req,
+ union smcinvoke_arg *args_buf)
+{
+ int ret = -EINVAL, i = 0;
+ union smcinvoke_tz_args *tz_args = NULL;
+ size_t offset = sizeof(struct smcinvoke_msg_hdr) +
+ object_counts_total(req->counts) *
+ sizeof(union smcinvoke_tz_args);
+
+ if (offset > buf_size)
+ goto out;
+
+ tz_args = (union smcinvoke_tz_args *)
+ (buf + sizeof(struct smcinvoke_msg_hdr));
+
+ tz_args += object_counts_num_BI(req->counts);
+
+ FOR_ARGS(i, req->counts, BO) {
+ args_buf[i].b.size = tz_args->b.size;
+ if ((buf_size - tz_args->b.offset < tz_args->b.size) ||
+ tz_args->b.offset > buf_size) {
+ pr_err("%s: buffer overflow detected\n", __func__);
+ goto out;
+ }
+ if (copy_to_user((void __user *)(uintptr_t)(args_buf[i].b.addr),
+ (uint8_t *)(buf) + tz_args->b.offset,
+ tz_args->b.size)) {
+ pr_err("Error %d copying ctxt to user\n", ret);
+ goto out;
+ }
+ tz_args++;
+ }
+ tz_args += object_counts_num_OI(req->counts);
+
+ FOR_ARGS(i, req->counts, OO) {
+ /*
+ * create a new FD and assign to output object's
+ * context
+ */
+ ret = get_fd_from_tzhandle(tz_args->tzhandle,
+ &(args_buf[i].o.fd));
+ if (ret)
+ goto out;
+ tz_args++;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+/*
+ * SMC expects arguments in following format
+ * ---------------------------------------------------------------------------
+ * | cxt | op | counts | ptr|size |ptr|size...|ORef|ORef|...| rest of payload |
+ * ---------------------------------------------------------------------------
+ * cxt: target, op: operation, counts: total arguments
+ * offset: offset is from beginning of buffer i.e. cxt
+ * size: size is 8 bytes aligned value
+ */
+static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req,
+ const union smcinvoke_arg *args_buf)
+{
+ uint32_t i = 0;
+
+ size_t total_size = sizeof(struct smcinvoke_msg_hdr) +
+ object_counts_total(req->counts) *
+ sizeof(union smcinvoke_tz_args);
+
+ /* Computed total_size should be 8 bytes aligned from start of buf */
+ total_size = ALIGN(total_size, SMCINVOKE_ARGS_ALIGN_SIZE);
+
+ /* each buffer has to be 8 bytes aligned */
+ while (i < object_counts_num_buffers(req->counts))
+ total_size = size_add(total_size,
+ size_align(args_buf[i++].b.size, SMCINVOKE_ARGS_ALIGN_SIZE));
+
+ /* Since we're using get_free_pages, no need for explicit PAGE align */
+ return total_size;
+}
+
+static int marshal_in(const struct smcinvoke_cmd_req *req,
+ const union smcinvoke_arg *args_buf, uint32_t tzhandle,
+ uint8_t *buf, size_t buf_size, struct file **arr_filp)
+{
+ int ret = -EINVAL, i = 0;
+ union smcinvoke_tz_args *tz_args = NULL;
+ struct smcinvoke_msg_hdr msg_hdr = {tzhandle, req->op, req->counts};
+ uint32_t offset = sizeof(struct smcinvoke_msg_hdr) +
+ sizeof(union smcinvoke_tz_args) *
+ object_counts_total(req->counts);
+
+ if (buf_size < offset)
+ goto out;
+
+ *(struct smcinvoke_msg_hdr *)buf = msg_hdr;
+ tz_args = (union smcinvoke_tz_args *)
+ (buf + sizeof(struct smcinvoke_msg_hdr));
+
+ FOR_ARGS(i, req->counts, BI) {
+ offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+ if ((offset > buf_size) ||
+ (args_buf[i].b.size > (buf_size - offset)))
+ goto out;
+
+ tz_args->b.offset = offset;
+ tz_args->b.size = args_buf[i].b.size;
+ tz_args++;
+
+ if (copy_from_user(buf+offset,
+ (void __user *)(uintptr_t)(args_buf[i].b.addr),
+ args_buf[i].b.size))
+ goto out;
+
+ offset += args_buf[i].b.size;
+ }
+ FOR_ARGS(i, req->counts, BO) {
+ offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+ if ((offset > buf_size) ||
+ (args_buf[i].b.size > (buf_size - offset)))
+ goto out;
+
+ tz_args->b.offset = offset;
+ tz_args->b.size = args_buf[i].b.size;
+ tz_args++;
+
+ offset += args_buf[i].b.size;
+ }
+ FOR_ARGS(i, req->counts, OI) {
+ if (get_tzhandle_from_fd(args_buf[i].o.fd,
+ &arr_filp[i], &(tz_args->tzhandle)))
+ goto out;
+ tz_args++;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+long smcinvoke_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret = -1, i = 0, nr_args = 0;
+ struct smcinvoke_cmd_req req = {0};
+ void *in_msg = NULL;
+ size_t inmsg_size = 0;
+ void *out_msg = NULL;
+ union smcinvoke_arg *args_buf = NULL;
+ struct file *filp_to_release[object_counts_max_OO] = {NULL};
+ struct smcinvoke_tzobj_context *tzobj = filp->private_data;
+
+ switch (cmd) {
+ case SMCINVOKE_IOCTL_INVOKE_REQ:
+ if (_IOC_SIZE(cmd) != sizeof(req)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ nr_args = object_counts_num_buffers(req.counts) +
+ object_counts_num_objects(req.counts);
+
+ if (req.argsize != sizeof(union smcinvoke_arg)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (nr_args) {
+
+ args_buf = kzalloc(nr_args * req.argsize, GFP_KERNEL);
+ if (!args_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = copy_from_user(args_buf,
+ (void __user *)(uintptr_t)(req.args),
+ nr_args * req.argsize);
+
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ inmsg_size = compute_in_msg_size(&req, args_buf);
+ in_msg = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(inmsg_size));
+ if (!in_msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ out_msg = (void *)__get_free_page(GFP_KERNEL);
+ if (!out_msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = marshal_in(&req, args_buf, tzobj->tzhandle, in_msg,
+ inmsg_size, filp_to_release);
+ if (ret)
+ goto out;
+
+ ret = prepare_send_scm_msg(in_msg, inmsg_size, out_msg,
+ SMCINVOKE_TZ_MIN_BUF_SIZE, &req.result);
+ if (ret)
+ goto out;
+
+ /*
+ * if invoke op results in an err, no need to marshal_out and
+ * copy args buf to user space
+ */
+ if (!req.result) {
+ ret = marshal_out(in_msg, inmsg_size, &req, args_buf);
+
+ ret |= copy_to_user(
+ (void __user *)(uintptr_t)(req.args),
+ args_buf, nr_args * req.argsize);
+ }
+ ret |= copy_to_user((void __user *)arg, &req, sizeof(req));
+ if (ret)
+ goto out;
+
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+out:
+ free_page((long)out_msg);
+ free_pages((long)in_msg, get_order(inmsg_size));
+ kfree(args_buf);
+ for (i = 0; i < object_counts_max_OO; i++) {
+ if (filp_to_release[i])
+ fput(filp_to_release[i]);
+ }
+
+ return ret;
+}
+
+static int smcinvoke_open(struct inode *nodp, struct file *filp)
+{
+ struct smcinvoke_tzobj_context *tzcxt = NULL;
+
+ tzcxt = kzalloc(sizeof(*tzcxt), GFP_KERNEL);
+ if (!tzcxt)
+ return -ENOMEM;
+
+ tzcxt->tzhandle = SMCINVOKE_TZ_ROOT_OBJ;
+ filp->private_data = tzcxt;
+
+ return 0;
+}
+
+static int smcinvoke_release(struct inode *nodp, struct file *filp)
+{
+ int ret = 0, smcinvoke_result = 0;
+ uint8_t *in_buf = NULL;
+ uint8_t *out_buf = NULL;
+ struct smcinvoke_msg_hdr hdr = {0};
+ struct smcinvoke_tzobj_context *tzobj = filp->private_data;
+ uint32_t tzhandle = tzobj->tzhandle;
+
+ /* Root object is special in sense it is indestructible */
+ if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ)
+ goto out;
+
+ in_buf = (uint8_t *)__get_free_page(GFP_KERNEL);
+ out_buf = (uint8_t *)__get_free_page(GFP_KERNEL);
+ if (!in_buf || !out_buf)
+ goto out;
+
+ hdr.tzhandle = tzhandle;
+ hdr.op = object_op_RELEASE;
+ hdr.counts = 0;
+ *(struct smcinvoke_msg_hdr *)in_buf = hdr;
+
+ ret = prepare_send_scm_msg(in_buf, SMCINVOKE_TZ_MIN_BUF_SIZE,
+ out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE, &smcinvoke_result);
+out:
+ kfree(filp->private_data);
+ free_page((long)in_buf);
+ free_page((long)out_buf);
+
+ return ret;
+}
+
+static int __init smcinvoke_init(void)
+{
+ unsigned int baseminor = 0;
+ unsigned int count = 1;
+ int rc = 0;
+
+ rc = alloc_chrdev_region(&smcinvoke_device_no, baseminor, count,
+ SMCINVOKE_DEV);
+ if (rc < 0) {
+ pr_err("chrdev_region failed %d for %s\n", rc, SMCINVOKE_DEV);
+ return rc;
+ }
+ driver_class = class_create(THIS_MODULE, SMCINVOKE_DEV);
+ if (IS_ERR(driver_class)) {
+ rc = -ENOMEM;
+ pr_err("class_create failed %d\n", rc);
+ goto exit_unreg_chrdev_region;
+ }
+ class_dev = device_create(driver_class, NULL, smcinvoke_device_no,
+ NULL, SMCINVOKE_DEV);
+ if (!class_dev) {
+ pr_err("class_device_create failed %d\n", rc);
+ rc = -ENOMEM;
+ goto exit_destroy_class;
+ }
+
+ cdev_init(&smcinvoke_cdev, &smcinvoke_fops);
+ smcinvoke_cdev.owner = THIS_MODULE;
+
+ rc = cdev_add(&smcinvoke_cdev, MKDEV(MAJOR(smcinvoke_device_no), 0),
+ count);
+ if (rc < 0) {
+ pr_err("cdev_add failed %d for %s\n", rc, SMCINVOKE_DEV);
+ goto exit_destroy_device;
+ }
+ return 0;
+
+exit_destroy_device:
+ device_destroy(driver_class, smcinvoke_device_no);
+exit_destroy_class:
+ class_destroy(driver_class);
+exit_unreg_chrdev_region:
+ unregister_chrdev_region(smcinvoke_device_no, count);
+
+ return rc;
+}
+
+static void __exit smcinvoke_exit(void)
+{
+ int count = 1;
+
+ cdev_del(&smcinvoke_cdev);
+ device_destroy(driver_class, smcinvoke_device_no);
+ class_destroy(driver_class);
+ unregister_chrdev_region(smcinvoke_device_no, count);
+}
+device_initcall(smcinvoke_init);
+module_exit(smcinvoke_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SMC Invoke driver");
diff --git a/drivers/soc/qcom/smcinvoke_object.h b/drivers/soc/qcom/smcinvoke_object.h
new file mode 100644
index 0000000..670b425
--- /dev/null
+++ b/drivers/soc/qcom/smcinvoke_object.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __SMCINVOKE_OBJECT_H
+#define __SMCINVOKE_OBJECT_H
+
+#include <linux/types.h>
+
+#define object_op_METHOD_MASK ((uint32_t)0x0000FFFFu)
+#define object_op_RELEASE (object_op_METHOD_MASK - 0)
+#define object_op_RETAIN (object_op_METHOD_MASK - 1)
+
+#define object_counts_max_BI 0xF
+#define object_counts_max_BO 0xF
+#define object_counts_max_OI 0xF
+#define object_counts_max_OO 0xF
+
+/* unpack counts */
+
+#define object_counts_num_BI(k) ((size_t) (((k) >> 0) & object_counts_max_BI))
+#define object_counts_num_BO(k) ((size_t) (((k) >> 4) & object_counts_max_BO))
+#define object_counts_num_OI(k) ((size_t) (((k) >> 8) & object_counts_max_OI))
+#define object_counts_num_OO(k) ((size_t) (((k) >> 12) & object_counts_max_OO))
+#define object_counts_num_buffers(k) \
+ (object_counts_num_BI(k) + object_counts_num_BO(k))
+
+#define object_counts_num_objects(k) \
+ (object_counts_num_OI(k) + object_counts_num_OO(k))
+
+/* Indices into args[] */
+
+#define object_counts_index_BI(k) 0
+#define object_counts_index_BO(k) \
+ (object_counts_index_BI(k) + object_counts_num_BI(k))
+#define object_counts_index_OI(k) \
+ (object_counts_index_BO(k) + object_counts_num_BO(k))
+#define object_counts_index_OO(k) \
+ (object_counts_index_OI(k) + object_counts_num_OI(k))
+#define object_counts_total(k) \
+ (object_counts_index_OO(k) + object_counts_num_OO(k))
+
+
+#endif /* __SMCINVOKE_OBJECT_H */
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index c252040..31760ee 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -47,6 +47,7 @@
#define SMEM_IMAGE_VERSION_OEM_OFFSET 96
#define SMEM_IMAGE_VERSION_PARTITION_APPS 10
+static DECLARE_RWSEM(current_image_rwsem);
enum {
HW_PLATFORM_UNKNOWN = 0,
HW_PLATFORM_SURF = 1,
@@ -1047,7 +1048,9 @@
pr_err("Failed to get image version base address");
return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "Unknown");
}
+ down_read(¤t_image_rwsem);
string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(¤t_image_rwsem);
return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s\n",
string_address);
}
@@ -1060,14 +1063,19 @@
{
char *store_address;
- if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
+ down_read(¤t_image_rwsem);
+ if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) {
+ up_read(¤t_image_rwsem);
return count;
+ }
store_address = socinfo_get_image_version_base_address();
if (IS_ERR_OR_NULL(store_address)) {
pr_err("Failed to get image version base address");
+ up_read(¤t_image_rwsem);
return count;
}
store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(¤t_image_rwsem);
snprintf(store_address, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s", buf);
return count;
}
@@ -1085,7 +1093,9 @@
return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE,
"Unknown");
}
+ down_read(¤t_image_rwsem);
string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(¤t_image_rwsem);
string_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET;
return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s\n",
string_address);
@@ -1099,14 +1109,19 @@
{
char *store_address;
- if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
+ down_read(&current_image_rwsem);
+ if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) {
+ up_read(&current_image_rwsem);
return count;
+ }
store_address = socinfo_get_image_version_base_address();
if (IS_ERR_OR_NULL(store_address)) {
pr_err("Failed to get image version base address");
+ up_read(&current_image_rwsem);
return count;
}
store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(&current_image_rwsem);
store_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET;
snprintf(store_address, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s", buf);
return count;
@@ -1124,7 +1139,9 @@
pr_err("Failed to get image version base address");
return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "Unknown");
}
+ down_read(&current_image_rwsem);
string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(&current_image_rwsem);
string_address += SMEM_IMAGE_VERSION_OEM_OFFSET;
return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.32s\n",
string_address);
@@ -1138,14 +1155,19 @@
{
char *store_address;
- if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
+ down_read(&current_image_rwsem);
+ if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) {
+ up_read(&current_image_rwsem);
return count;
+ }
store_address = socinfo_get_image_version_base_address();
if (IS_ERR_OR_NULL(store_address)) {
pr_err("Failed to get image version base address");
+ up_read(&current_image_rwsem);
return count;
}
store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(&current_image_rwsem);
store_address += SMEM_IMAGE_VERSION_OEM_OFFSET;
snprintf(store_address, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.32s", buf);
return count;
@@ -1156,8 +1178,14 @@
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ int ret;
+
+ down_read(&current_image_rwsem);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
current_image);
+ up_read(&current_image_rwsem);
+ return ret;
+
}
static ssize_t
@@ -1169,10 +1197,12 @@
ret = kstrtoint(buf, 10, &digit);
if (ret)
return ret;
+ down_write(&current_image_rwsem);
if (digit >= 0 && digit < SMEM_IMAGE_VERSION_BLOCKS_COUNT)
current_image = digit;
else
current_image = 0;
+ up_write(&current_image_rwsem);
return count;
}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index ba92ed9..aa2d2d7 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -293,6 +293,7 @@
int ion_heap_is_system_secure_heap_type(enum ion_heap_type type);
int get_secure_vmid(unsigned long flags);
+int get_vmid(unsigned long flags);
bool is_secure_vmid_valid(int vmid);
unsigned int count_set_bits(unsigned long val);
int populate_vm_list(unsigned long flags, unsigned int *vm_list, int nelems);
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index 52926f0..c7b58ce 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -621,7 +621,8 @@
vmid == VMID_CP_APP ||
vmid == VMID_CP_CAMERA_PREVIEW ||
vmid == VMID_CP_SPSS_SP ||
- vmid == VMID_CP_SPSS_SP_SHARED);
+ vmid == VMID_CP_SPSS_SP_SHARED ||
+ vmid == VMID_CP_SPSS_HLOS_SHARED);
}
unsigned int count_set_bits(unsigned long val)
@@ -637,7 +638,7 @@
flags = flags & ION_FLAGS_CP_MASK;
for_each_set_bit(itr, &flags, BITS_PER_LONG) {
- vmid = get_secure_vmid(0x1UL << itr);
+ vmid = get_vmid(0x1UL << itr);
if (vmid < 0 || !nelems)
return -EINVAL;
@@ -669,8 +670,23 @@
return VMID_CP_SPSS_SP;
if (flags & ION_FLAG_CP_SPSS_SP_SHARED)
return VMID_CP_SPSS_SP_SHARED;
+ if (flags & ION_FLAG_CP_SPSS_HLOS_SHARED)
+ return VMID_CP_SPSS_HLOS_SHARED;
return -EINVAL;
}
+
+int get_vmid(unsigned long flags)
+{
+ int vmid;
+
+ vmid = get_secure_vmid(flags);
+ if (vmid < 0) {
+ if (flags & ION_FLAG_CP_HLOS)
+ vmid = VMID_HLOS;
+ }
+ return vmid;
+}
+
/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int msm_ion_ioctl_dir(unsigned int cmd)
{
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 9846c51..c0cda28 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -42,6 +42,21 @@
#include <linux/rcupdate.h>
#include <linux/profile.h>
#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/cpuset.h>
+#include <linux/vmpressure.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/almk.h>
+
+#ifdef CONFIG_HIGHMEM
+#define _ZONE ZONE_HIGHMEM
+#else
+#define _ZONE ZONE_NORMAL
+#endif
#define CREATE_TRACE_POINTS
#include "trace/lowmemorykiller.h"
@@ -63,6 +78,7 @@
};
static int lowmem_minfree_size = 4;
+static int lmk_fast_run = 1;
static unsigned long lowmem_deathpending_timeout;
@@ -81,6 +97,314 @@
global_node_page_state(NR_INACTIVE_FILE);
}
+static atomic_t shift_adj = ATOMIC_INIT(0);
+static short adj_max_shift = 353;
+module_param_named(adj_max_shift, adj_max_shift, short, 0644);
+
+/* User knob to enable/disable adaptive lmk feature */
+static int enable_adaptive_lmk;
+module_param_named(enable_adaptive_lmk, enable_adaptive_lmk, int, 0644);
+
+/*
+ * This parameter controls the behaviour of LMK when vmpressure is in
+ * the range of 90-94. Adaptive lmk triggers based on number of file
+ * pages wrt vmpressure_file_min, when vmpressure is in the range of
+ * 90-94. Usually this is a pseudo minfree value, higher than the
+ * highest configured value in minfree array.
+ */
+static int vmpressure_file_min;
+module_param_named(vmpressure_file_min, vmpressure_file_min, int, 0644);
+
+enum {
+ VMPRESSURE_NO_ADJUST = 0,
+ VMPRESSURE_ADJUST_ENCROACH,
+ VMPRESSURE_ADJUST_NORMAL,
+};
+
+static int adjust_minadj(short *min_score_adj)
+{
+ int ret = VMPRESSURE_NO_ADJUST;
+
+ if (!enable_adaptive_lmk)
+ return 0;
+
+ if (atomic_read(&shift_adj) &&
+ (*min_score_adj > adj_max_shift)) {
+ if (*min_score_adj == OOM_SCORE_ADJ_MAX + 1)
+ ret = VMPRESSURE_ADJUST_ENCROACH;
+ else
+ ret = VMPRESSURE_ADJUST_NORMAL;
+ *min_score_adj = adj_max_shift;
+ }
+ atomic_set(&shift_adj, 0);
+
+ return ret;
+}
+
+static int lmk_vmpressure_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int other_free, other_file;
+ unsigned long pressure = action;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+
+ if (!enable_adaptive_lmk)
+ return 0;
+
+ if (pressure >= 95) {
+ other_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ total_swapcache_pages();
+ other_free = global_page_state(NR_FREE_PAGES);
+
+ atomic_set(&shift_adj, 1);
+ trace_almk_vmpressure(pressure, other_free, other_file);
+ } else if (pressure >= 90) {
+ if (lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+ if (lowmem_minfree_size < array_size)
+ array_size = lowmem_minfree_size;
+
+ other_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ total_swapcache_pages();
+
+ other_free = global_page_state(NR_FREE_PAGES);
+
+ if ((other_free < lowmem_minfree[array_size - 1]) &&
+ (other_file < vmpressure_file_min)) {
+ atomic_set(&shift_adj, 1);
+ trace_almk_vmpressure(pressure, other_free, other_file);
+ }
+ } else if (atomic_read(&shift_adj)) {
+ other_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ total_swapcache_pages();
+
+ other_free = global_page_state(NR_FREE_PAGES);
+ /*
+ * shift_adj would have been set by a previous invocation
+ * of notifier, which is not followed by a lowmem_shrink yet.
+ * Since vmpressure has improved, reset shift_adj to avoid
+ * false adaptive LMK trigger.
+ */
+ trace_almk_vmpressure(pressure, other_free, other_file);
+ atomic_set(&shift_adj, 0);
+ }
+
+ return 0;
+}
+
+static struct notifier_block lmk_vmpr_nb = {
+ .notifier_call = lmk_vmpressure_notifier,
+};
+
+static int test_task_flag(struct task_struct *p, int flag)
+{
+ struct task_struct *t;
+
+ for_each_thread(p, t) {
+ task_lock(t);
+ if (test_tsk_thread_flag(t, flag)) {
+ task_unlock(t);
+ return 1;
+ }
+ task_unlock(t);
+ }
+
+ return 0;
+}
+
+static int test_task_lmk_waiting(struct task_struct *p)
+{
+ struct task_struct *t;
+
+ for_each_thread(p, t) {
+ task_lock(t);
+ if (task_lmk_waiting(t)) {
+ task_unlock(t);
+ return 1;
+ }
+ task_unlock(t);
+ }
+
+ return 0;
+}
+
+static DEFINE_MUTEX(scan_mutex);
+
+static int can_use_cma_pages(gfp_t gfp_mask)
+{
+ int can_use = 0;
+ int mtype = gfpflags_to_migratetype(gfp_mask);
+ int i = 0;
+ int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
+
+ if (is_migrate_cma(mtype)) {
+ can_use = 1;
+ } else {
+ for (i = 0;; i++) {
+ int fallbacktype = mtype_fallbacks[i];
+
+ if (is_migrate_cma(fallbacktype)) {
+ can_use = 1;
+ break;
+ }
+
+ if (fallbacktype == MIGRATE_TYPES)
+ break;
+ }
+ }
+ return can_use;
+}
+
+void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
+ int *other_free, int *other_file,
+ int use_cma_pages)
+{
+ struct zone *zone;
+ struct zoneref *zoneref;
+ int zone_idx;
+
+ for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
+ zone_idx = zonelist_zone_idx(zoneref);
+ if (zone_idx == ZONE_MOVABLE) {
+ if (!use_cma_pages && other_free)
+ *other_free -=
+ zone_page_state(zone, NR_FREE_CMA_PAGES);
+ continue;
+ }
+
+ if (zone_idx > classzone_idx) {
+ if (other_free != NULL)
+ *other_free -= zone_page_state(zone,
+ NR_FREE_PAGES);
+ if (other_file != NULL)
+ *other_file -= zone_page_state(zone,
+ NR_ZONE_INACTIVE_FILE) +
+ zone_page_state(zone,
+ NR_ZONE_ACTIVE_FILE);
+ } else if (zone_idx < classzone_idx) {
+ if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) &&
+ other_free) {
+ if (!use_cma_pages) {
+ *other_free -= min(
+ zone->lowmem_reserve[classzone_idx] +
+ zone_page_state(
+ zone, NR_FREE_CMA_PAGES),
+ zone_page_state(
+ zone, NR_FREE_PAGES));
+ } else {
+ *other_free -=
+ zone->lowmem_reserve[classzone_idx];
+ }
+ } else {
+ if (other_free)
+ *other_free -=
+ zone_page_state(zone, NR_FREE_PAGES);
+ }
+ }
+ }
+}
+
+#ifdef CONFIG_HIGHMEM
+static void adjust_gfp_mask(gfp_t *gfp_mask)
+{
+ struct zone *preferred_zone;
+ struct zoneref *zref;
+ struct zonelist *zonelist;
+ enum zone_type high_zoneidx;
+
+ if (current_is_kswapd()) {
+ zonelist = node_zonelist(0, *gfp_mask);
+ high_zoneidx = gfp_zone(*gfp_mask);
+ zref = first_zones_zonelist(zonelist, high_zoneidx, NULL);
+ preferred_zone = zref->zone;
+
+ if (high_zoneidx == ZONE_NORMAL) {
+ if (zone_watermark_ok_safe(
+ preferred_zone, 0,
+ high_wmark_pages(preferred_zone), 0))
+ *gfp_mask |= __GFP_HIGHMEM;
+ } else if (high_zoneidx == ZONE_HIGHMEM) {
+ *gfp_mask |= __GFP_HIGHMEM;
+ }
+ }
+}
+#else
+static void adjust_gfp_mask(gfp_t *unused)
+{
+}
+#endif
+
+void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
+{
+ gfp_t gfp_mask;
+ struct zone *preferred_zone;
+ struct zoneref *zref;
+ struct zonelist *zonelist;
+ enum zone_type high_zoneidx, classzone_idx;
+ unsigned long balance_gap;
+ int use_cma_pages;
+
+ gfp_mask = sc->gfp_mask;
+ adjust_gfp_mask(&gfp_mask);
+
+ zonelist = node_zonelist(0, gfp_mask);
+ high_zoneidx = gfp_zone(gfp_mask);
+ zref = first_zones_zonelist(zonelist, high_zoneidx, NULL);
+ preferred_zone = zref->zone;
+ classzone_idx = zone_idx(preferred_zone);
+ use_cma_pages = can_use_cma_pages(gfp_mask);
+
+ balance_gap = min(low_wmark_pages(preferred_zone),
+ (preferred_zone->present_pages +
+ 100-1) /
+ 100);
+
+ if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0,
+ high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX +
+ balance_gap, 0, 0))) {
+ if (lmk_fast_run)
+ tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+ other_file, use_cma_pages);
+ else
+ tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+ NULL, use_cma_pages);
+
+ if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
+ if (!use_cma_pages) {
+ *other_free -= min(
+ preferred_zone->lowmem_reserve[_ZONE]
+ + zone_page_state(
+ preferred_zone, NR_FREE_CMA_PAGES),
+ zone_page_state(
+ preferred_zone, NR_FREE_PAGES));
+ } else {
+ *other_free -=
+ preferred_zone->lowmem_reserve[_ZONE];
+ }
+ } else {
+ *other_free -= zone_page_state(preferred_zone,
+ NR_FREE_PAGES);
+ }
+
+ lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem "
+ "ofree %d, %d\n", *other_free, *other_file);
+ } else {
+ tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+ other_file, use_cma_pages);
+
+ if (!use_cma_pages) {
+ *other_free -=
+ zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
+ }
+
+ lowmem_print(4, "lowmem_shrink tunning for others ofree %d, "
+ "%d\n", *other_free, *other_file);
+ }
+}
+
static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *tsk;
@@ -88,16 +412,31 @@
unsigned long rem = 0;
int tasksize;
int i;
+ int ret = 0;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
int minfree = 0;
int selected_tasksize = 0;
short selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
- int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
- int other_file = global_node_page_state(NR_FILE_PAGES) -
- global_node_page_state(NR_SHMEM) -
- global_node_page_state(NR_UNEVICTABLE) -
- total_swapcache_pages();
+ int other_free;
+ int other_file;
+
+ if (mutex_lock_interruptible(&scan_mutex) < 0)
+ return 0;
+
+ other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
+
+ if (global_node_page_state(NR_SHMEM) + total_swapcache_pages() +
+ global_node_page_state(NR_UNEVICTABLE) <
+ global_node_page_state(NR_FILE_PAGES))
+ other_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ global_node_page_state(NR_UNEVICTABLE) -
+ total_swapcache_pages();
+ else
+ other_file = 0;
+
+ tune_lmk_param(&other_free, &other_file, sc);
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
@@ -111,13 +450,17 @@
}
}
+ ret = adjust_minadj(&min_score_adj);
+
lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
sc->nr_to_scan, sc->gfp_mask, other_free,
other_file, min_score_adj);
if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
+ trace_almk_shrink(0, ret, other_free, other_file, 0);
lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
sc->nr_to_scan, sc->gfp_mask);
+ mutex_unlock(&scan_mutex);
return 0;
}
@@ -131,16 +474,24 @@
if (tsk->flags & PF_KTHREAD)
continue;
+ /* if task no longer has any memory ignore it */
+ if (test_task_flag(tsk, TIF_MM_RELEASED))
+ continue;
+
+ if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
+ if (test_task_lmk_waiting(tsk)) {
+ rcu_read_unlock();
+ /* give the system time to free up the memory */
+ msleep_interruptible(20);
+ mutex_unlock(&scan_mutex);
+ return 0;
+ }
+ }
+
p = find_lock_task_mm(tsk);
if (!p)
continue;
- if (task_lmk_waiting(p) &&
- time_before_eq(jiffies, lowmem_deathpending_timeout)) {
- task_unlock(p);
- rcu_read_unlock();
- return 0;
- }
oom_score_adj = p->signal->oom_score_adj;
if (oom_score_adj < min_score_adj) {
task_unlock(p);
@@ -160,7 +511,7 @@
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
- lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
+ lowmem_print(3, "select '%s' (%d), adj %hd, size %d, to kill\n",
p->comm, p->pid, oom_score_adj, tasksize);
}
if (selected) {
@@ -175,23 +526,51 @@
task_unlock(selected);
trace_lowmemory_kill(selected, cache_size, cache_limit, free);
lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
- " to free %ldkB on behalf of '%s' (%d) because\n"
- " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n"
- " Free memory is %ldkB above reserved\n",
- selected->comm, selected->pid,
- selected_oom_score_adj,
- selected_tasksize * (long)(PAGE_SIZE / 1024),
- current->comm, current->pid,
- cache_size, cache_limit,
- min_score_adj,
- free);
+ "to free %ldkB on behalf of '%s' (%d) because\n"
+ "cache %ldkB is below limit %ldkB for oom score %hd\n"
+ "Free memory is %ldkB above reserved.\n"
+ "Free CMA is %ldkB\n"
+ "Total reserve is %ldkB\n"
+ "Total free pages is %ldkB\n"
+ "Total file cache is %ldkB\n"
+ "GFP mask is 0x%x\n",
+ selected->comm, selected->pid,
+ selected_oom_score_adj,
+ selected_tasksize * (long)(PAGE_SIZE / 1024),
+ current->comm, current->pid,
+ cache_size, cache_limit,
+ min_score_adj,
+ free,
+ global_page_state(NR_FREE_CMA_PAGES) *
+ (long)(PAGE_SIZE / 1024),
+ totalreserve_pages * (long)(PAGE_SIZE / 1024),
+ global_page_state(NR_FREE_PAGES) *
+ (long)(PAGE_SIZE / 1024),
+ global_node_page_state(NR_FILE_PAGES) *
+ (long)(PAGE_SIZE / 1024),
+ sc->gfp_mask);
+
+ if (lowmem_debug_level >= 2 && selected_oom_score_adj == 0) {
+ show_mem(SHOW_MEM_FILTER_NODES);
+ dump_tasks(NULL, NULL);
+ }
+
lowmem_deathpending_timeout = jiffies + HZ;
rem += selected_tasksize;
+ rcu_read_unlock();
+ /* give the system time to free up the memory */
+ msleep_interruptible(20);
+ trace_almk_shrink(selected_tasksize, ret,
+ other_free, other_file,
+ selected_oom_score_adj);
+ } else {
+ trace_almk_shrink(1, ret, other_free, other_file, 0);
+ rcu_read_unlock();
}
lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
sc->nr_to_scan, sc->gfp_mask, rem);
- rcu_read_unlock();
+ mutex_unlock(&scan_mutex);
return rem;
}
@@ -204,6 +583,7 @@
static int __init lowmem_init(void)
{
register_shrinker(&lowmem_shrinker);
+ vmpressure_notifier_register(&lmk_vmpr_nb);
return 0;
}
device_initcall(lowmem_init);
@@ -299,6 +679,7 @@
module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, 0644);
#endif
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
- 0644);
-module_param_named(debug_level, lowmem_debug_level, uint, 0644);
+ S_IRUGO | S_IWUSR);
+module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+module_param_named(lmk_fast_run, lmk_fast_run, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index 7381ee9..84598db 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -89,6 +89,7 @@
#define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25)
#define ION_FLAG_CP_APP ION_BIT(26)
#define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27)
+#define ION_FLAG_CP_SPSS_HLOS_SHARED ION_BIT(30)
/**
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 64b3966..a34fd5a 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2908,6 +2908,7 @@
dev = comedi_alloc_board_minor(NULL);
if (IS_ERR(dev)) {
comedi_cleanup_board_minors();
+ class_destroy(comedi_class);
cdev_del(&comedi_cdev);
unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
COMEDI_NUM_MINORS);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 0594828..b195537 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -522,6 +522,9 @@
goto free_all;
}
+ if (vnt_key_init_table(priv))
+ goto free_all;
+
priv->int_interval = 1; /* bInterval is set to 1 */
vnt_int_start_interrupt(priv);
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 1ab5b0c..fe0a7c7 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -79,6 +79,9 @@
{ .compatible = "qcom,sdm845-tsens",
.data = &data_tsens24xx,
},
+ { .compatible = "qcom,tsens24xx",
+ .data = &data_tsens24xx,
+ },
{}
};
MODULE_DEVICE_TABLE(of, tsens_table);
@@ -157,14 +160,18 @@
for (i = 0; i < TSENS_MAX_SENSORS; i++) {
tmdev->sensor[i].tmdev = tmdev;
tmdev->sensor[i].hw_id = i;
- tmdev->sensor[i].tzd =
- devm_thermal_zone_of_sensor_register(
- &tmdev->pdev->dev, i,
- &tmdev->sensor[i], &tsens_tm_thermal_zone_ops);
- if (IS_ERR(tmdev->sensor[i].tzd)) {
- pr_debug("Error registering sensor:%d\n", i);
- sensor_missing++;
- continue;
+ if (tmdev->ops->sensor_en(tmdev, i)) {
+ tmdev->sensor[i].tzd =
+ devm_thermal_zone_of_sensor_register(
+ &tmdev->pdev->dev, i,
+ &tmdev->sensor[i], &tsens_tm_thermal_zone_ops);
+ if (IS_ERR(tmdev->sensor[i].tzd)) {
+ pr_debug("Error registering sensor:%d\n", i);
+ sensor_missing++;
+ continue;
+ }
+ } else {
+ pr_debug("Sensor not enabled:%d\n", i);
}
}
diff --git a/drivers/thermal/qcom/lmh_dbg.c b/drivers/thermal/qcom/lmh_dbg.c
index 74ffeda..d027bd9 100644
--- a/drivers/thermal/qcom/lmh_dbg.c
+++ b/drivers/thermal/qcom/lmh_dbg.c
@@ -315,11 +315,12 @@
pr_err("No LMH device supported.\n");
return -ENODEV;
}
- if (!dest_buf)
+ if (!dest_buf) {
dest_buf = devm_kcalloc(lmh_data->dev, *size,
sizeof(*dest_buf), GFP_KERNEL);
if (!dest_buf)
return -ENOMEM;
+ }
for (idx = next;
idx < min((next + LMH_SCM_PAYLOAD_SIZE), *size);
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index 770b982..a695d57 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -91,6 +91,7 @@
int (*set_trips)(struct tsens_sensor *, int, int);
int (*interrupts_reg)(struct tsens_device *);
int (*dbg)(struct tsens_device *, u32, u32, int *);
+ int (*sensor_en)(struct tsens_device *, u32);
};
struct tsens_irqs {
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 55be2f9..de9f27f 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -61,6 +61,7 @@
#define TSENS_TM_WATCHDOG_LOG(n) ((n) + 0x13c)
#define TSENS_EN BIT(0)
+#define TSENS_CTRL_SENSOR_EN_MASK(n) ((n >> 3) & 0xffff)
static void msm_tsens_convert_temp(int last_temp, int *temp)
{
@@ -499,6 +500,21 @@
return IRQ_HANDLED;
}
+static int tsens2xxx_hw_sensor_en(struct tsens_device *tmdev,
+ u32 sensor_id)
+{
+ void __iomem *srot_addr;
+ unsigned int srot_val, sensor_en;
+
+ srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+ srot_val = readl_relaxed(srot_addr);
+ srot_val = TSENS_CTRL_SENSOR_EN_MASK(srot_val);
+
+ sensor_en = ((1 << sensor_id) & srot_val);
+
+ return sensor_en;
+}
+
static int tsens2xxx_hw_init(struct tsens_device *tmdev)
{
void __iomem *srot_addr;
@@ -602,6 +618,7 @@
.set_trips = tsens2xxx_set_trip_temp,
.interrupts_reg = tsens2xxx_register_interrupts,
.dbg = tsens2xxx_dbg,
+ .sensor_en = tsens2xxx_hw_sensor_en,
};
const struct tsens_data data_tsens2xxx = {
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 94ba2c3e..17cdac4 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -469,6 +469,9 @@
int done = 0;
unsigned int irq_clear = M_CMD_DONE_EN;
+ if (!uart_console(uport))
+ return;
+
done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_DONE_EN, true);
if (!done) {
@@ -686,17 +689,22 @@
{
unsigned int geni_m_irq_en;
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
-
- if (!msm_geni_serial_tx_empty(uport))
- return;
+ unsigned int geni_status;
if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
return;
}
+ geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+ if (geni_status & M_GENI_CMD_ACTIVE)
+ return;
+
+ if (!msm_geni_serial_tx_empty(uport))
+ return;
+
geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
- geni_m_irq_en |= M_TX_FIFO_WATERMARK_EN;
+ geni_m_irq_en |= (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN);
geni_write_reg_nolog(msm_port->tx_wm, uport->membase,
SE_GENI_TX_WATERMARK_REG);
@@ -868,6 +876,7 @@
unsigned int xmit_size;
unsigned int fifo_width_bytes =
(uart_console(uport) ? 1 : (msm_port->tx_fifo_width >> 3));
+ unsigned int geni_m_irq_en;
tx_fifo_status = geni_read_reg_nolog(uport->membase,
SE_GENI_TX_FIFO_STATUS);
@@ -876,6 +885,16 @@
goto exit_handle_tx;
}
+ if (!uart_console(uport)) {
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_EN);
+ geni_m_irq_en &= ~(M_TX_FIFO_WATERMARK_EN);
+ geni_write_reg_nolog(0, uport->membase,
+ SE_GENI_TX_WATERMARK_REG);
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase,
+ SE_GENI_M_IRQ_EN);
+ }
+
avail_fifo_bytes = (msm_port->tx_fifo_depth - msm_port->tx_wm) *
fifo_width_bytes;
xmit_size = uart_circ_chars_pending(xmit);
@@ -923,6 +942,7 @@
unsigned int s_irq_status;
struct uart_port *uport = dev;
unsigned long flags;
+ unsigned int m_irq_en;
spin_lock_irqsave(&uport->lock, flags);
if (uart_console(uport) && uport->suspended)
@@ -937,6 +957,7 @@
SE_GENI_M_IRQ_CLEAR);
geni_write_reg_nolog(s_irq_status, uport->membase,
SE_GENI_S_IRQ_CLEAR);
+ m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
if ((m_irq_status & M_ILLEGAL_CMD_EN)) {
WARN_ON(1);
@@ -948,7 +969,8 @@
msm_geni_serial_handle_rx(uport);
}
- if ((m_irq_status & M_TX_FIFO_WATERMARK_EN))
+ if ((m_irq_status & m_irq_en) &
+ (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
msm_geni_serial_handle_tx(uport);
exit_geni_serial_isr:
@@ -1069,6 +1091,14 @@
SE_GENI_TX_PACKING_CFG0);
geni_write_reg_nolog(cfg1, uport->membase,
SE_GENI_TX_PACKING_CFG1);
+ msm_port->handle_rx = handle_rx_hs;
+ msm_port->rx_fifo = devm_kzalloc(uport->dev,
+ sizeof(msm_port->rx_fifo_depth * sizeof(u32)),
+ GFP_KERNEL);
+ if (!msm_port->rx_fifo) {
+ ret = -ENOMEM;
+ goto exit_portsetup;
+ }
} else {
/*
* Make an unconditional cancel on the main sequencer to reset
@@ -1166,12 +1196,12 @@
goto exit_startup;
}
+ get_tx_fifo_size(msm_port);
if (!msm_port->port_setup) {
if (msm_geni_serial_port_setup(uport))
goto exit_startup;
}
- get_tx_fifo_size(msm_port);
msm_geni_serial_start_rx(uport);
/*
* Ensure that all the port configuration writes complete
@@ -1360,6 +1390,9 @@
tx_trans_cfg |= UART_CTS_MASK;
/* status bits to ignore */
+ if (likely(baud))
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
geni_serial_write_term_regs(uport, port->loopback, tx_trans_cfg,
tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
stop_bit_len, ser_clk_cfg);
@@ -1790,10 +1823,6 @@
dev_port->rx_fifo = devm_kzalloc(uport->dev, sizeof(u32),
GFP_KERNEL);
} else {
- dev_port->handle_rx = handle_rx_hs;
- dev_port->rx_fifo = devm_kzalloc(uport->dev,
- sizeof(dev_port->rx_fifo_depth * sizeof(u32)),
- GFP_KERNEL);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -1882,7 +1911,8 @@
struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
struct uart_port *uport = &port->uport;
- if (uart_console(uport)) {
+ if (uart_console(uport) &&
+ console_suspend_enabled && uport->suspended) {
se_geni_resources_on(&port->serial_rsc);
uart_resume_port((struct uart_driver *)uport->private_data,
uport);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 96b21b0..3116edf 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -223,6 +223,10 @@
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ /* Hauppauge HVR-950q */
+ { USB_DEVICE(0x2040, 0x7200), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 7272f9a..92e5d13 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -279,6 +279,8 @@
usb_destroy_configuration(udev);
usb_release_bos_descriptor(udev);
+ if (udev->parent)
+ of_node_put(dev->of_node);
usb_put_hcd(hcd);
kfree(udev->product);
kfree(udev->manufacturer);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 81f3384..0dc81d2 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1768,9 +1768,10 @@
/*
* Below sequence is used when controller is working without
- * having ssphy and only USB high speed is supported.
+ * having ssphy and only USB high/full speed is supported.
*/
- if (dwc->maximum_speed == USB_SPEED_HIGH) {
+ if (dwc->maximum_speed == USB_SPEED_HIGH ||
+ dwc->maximum_speed == USB_SPEED_FULL) {
dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
dwc3_msm_read_reg(mdwc->base,
QSCRATCH_GENERAL_CFG)
@@ -2911,6 +2912,7 @@
}
}
+ edev = NULL;
/* Use third phandle (optional) for EUD based detach/attach events */
if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
@@ -2920,7 +2922,7 @@
}
}
- if (!IS_ERR(edev)) {
+ if (!IS_ERR_OR_NULL(edev)) {
mdwc->extcon_eud = edev;
mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
ret = extcon_register_notifier(edev, EXTCON_USB,
@@ -3763,20 +3765,9 @@
platform_device_del(dwc->xhci);
usb_unregister_notify(&mdwc->host_nb);
- /*
- * Perform USB hardware RESET (both core reset and DBM reset)
- * when moving from host to peripheral. This is required for
- * peripheral mode to work.
- */
- dwc3_msm_block_reset(mdwc, true);
-
dwc3_usb3_phy_suspend(dwc, false);
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
-
mdwc->in_host_mode = false;
- /* re-init core and OTG registers as block reset clears these */
- dwc3_post_host_reset_core_init(dwc);
pm_runtime_mark_last_busy(mdwc->dev);
pm_runtime_put_sync_autosuspend(mdwc->dev);
dbg_event(0xFF, "StopHost psync",
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index aaaf256..4cf5381 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -231,7 +231,7 @@
dwc3_data->syscfg_reg_off = res->start;
- dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
+ dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
dwc3_data->rstc_pwrdn =
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7d8566f..edd000b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1211,9 +1211,9 @@
return -ESHUTDOWN;
}
- if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
+ if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
&req->request, req->dep->name)) {
- dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
+ dwc3_trace(trace_dwc3_gadget, "request %pK belongs to '%s'",
&req->request, req->dep->name);
return -EINVAL;
}
@@ -1405,7 +1405,7 @@
dwc3_stop_active_transfer(dwc, dep->number, true);
goto out1;
}
- dev_err(dwc->dev, "request %p was not queued to %s\n",
+ dev_err(dwc->dev, "request %pK was not queued to %s\n",
request, ep->name);
ret = -EINVAL;
goto out0;
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index e9e8f46..af3ce4f 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -19,6 +19,39 @@
#include "core.h"
+static int dwc3_host_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "host");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0)
+ goto out;
+
+ if (irq != -EPROBE_DEFER)
+ dev_err(dwc->dev, "missing host IRQ\n");
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
+
int dwc3_host_init(struct dwc3 *dwc)
{
struct property_entry props[3];
@@ -28,39 +61,18 @@
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int prop_idx = 0;
- irq = platform_get_irq_byname(dwc3_pdev, "host");
- if (irq == -EPROBE_DEFER)
+ irq = dwc3_host_get_irq(dwc);
+ if (irq < 0)
return irq;
- if (irq <= 0) {
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq(dwc3_pdev, 0);
- if (irq <= 0) {
- if (irq != -EPROBE_DEFER) {
- dev_err(dwc->dev,
- "missing host IRQ\n");
- }
- if (!irq)
- irq = -EINVAL;
- return irq;
- } else {
- res = platform_get_resource(dwc3_pdev,
- IORESOURCE_IRQ, 0);
- }
- } else {
- res = platform_get_resource_byname(dwc3_pdev,
- IORESOURCE_IRQ,
- "dwc_usb3");
- }
-
- } else {
+ res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host");
+ if (!res)
res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
- "host");
- }
+ "dwc_usb3");
+ if (!res)
+ res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ return -ENOMEM;
dwc->xhci_resources[1].start = irq;
dwc->xhci_resources[1].end = irq;
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 46df732..a7cb586 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -346,6 +346,7 @@
struct acc_dev *dev = ep->driver_data;
char *string_dest = NULL;
int length = req->actual;
+ unsigned long flags;
if (req->status != 0) {
pr_err("acc_complete_set_string, err %d\n", req->status);
@@ -371,22 +372,26 @@
case ACCESSORY_STRING_SERIAL:
string_dest = dev->serial;
break;
- }
- if (string_dest) {
- unsigned long flags;
-
- if (length >= ACC_STRING_SIZE)
- length = ACC_STRING_SIZE - 1;
-
- spin_lock_irqsave(&dev->lock, flags);
- memcpy(string_dest, req->buf, length);
- /* ensure zero termination */
- string_dest[length] = 0;
- spin_unlock_irqrestore(&dev->lock, flags);
- } else {
+ default:
pr_err("unknown accessory string index %d\n",
- dev->string_index);
+ dev->string_index);
+ return;
}
+
+ if (!length) {
+ pr_debug("zero length for accessory string index %d\n",
+ dev->string_index);
+ return;
+ }
+
+ if (length >= ACC_STRING_SIZE)
+ length = ACC_STRING_SIZE - 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memcpy(string_dest, req->buf, length);
+ /* ensure zero termination */
+ string_dest[length] = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index ea17164..102003d 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -1545,7 +1545,7 @@
}
seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
- min, max, sum / iteration);
+ min, max, (iteration ? (sum / iteration) : 0));
min = max = sum = iteration = 0;
seq_puts(s, "\n=======================\n");
seq_puts(s, "USB MTP IN related VFS read stats:\n");
@@ -1567,7 +1567,7 @@
}
seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
- min, max, sum / iteration);
+ min, max, (iteration ? (sum / iteration) : 0));
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 32aa45e..b0c4f12 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1535,6 +1535,9 @@
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
+ if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
+ (hcd->speed < HCD_USB3))
+ t2 &= ~PORT_WAKE_BITS;
} else
t2 &= ~PORT_WAKE_BITS;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 69864ba..672751e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,6 +54,11 @@
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
+
static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -135,6 +140,13 @@
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
+ ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
+ xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 86d578e..3a7fb29 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1668,6 +1668,7 @@
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
+#define XHCI_U2_DISABLE_WAKE (1 << 27)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 2682d29..141b916 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -182,7 +182,7 @@
#define PS_HARD_RESET_TIME 25
#define PS_SOURCE_ON 400
#define PS_SOURCE_OFF 750
-#define SWAP_SOURCE_START_TIME 20
+#define FIRST_SOURCE_CAP_TIME 200
#define VDM_BUSY_TIME 50
#define VCONN_ON_TIME 100
@@ -796,17 +796,27 @@
pd->pd_phy_opened = true;
}
- pd->current_state = PE_SRC_SEND_CAPABILITIES;
if (pd->in_pr_swap) {
- kick_sm(pd, SWAP_SOURCE_START_TIME);
pd->in_pr_swap = false;
val.intval = 0;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PR_SWAP, &val);
- break;
}
- /* fall-through */
+ /*
+ * A sink might remove its terminations (during some Type-C
+ * compliance tests or a sink attempting to do Try.SRC)
+ * at this point just after we enabled VBUS. Sending PD
+ * messages now would delay detecting the detach beyond the
+ * required timing. Instead, delay sending out the first
+ * source capabilities to allow for the other side to
+ * completely settle CC debounce and allow HW to detect detach
+ * sooner in the meantime. PD spec allows up to
+ * tFirstSourceCap (250ms).
+ */
+ pd->current_state = PE_SRC_SEND_CAPABILITIES;
+ kick_sm(pd, FIRST_SOURCE_CAP_TIME);
+ break;
case PE_SRC_SEND_CAPABILITIES:
kick_sm(pd, 0);
@@ -1422,6 +1432,7 @@
static void dr_swap(struct usbpd *pd)
{
reset_vdm_state(pd);
+ usbpd_dbg(&pd->dev, "dr_swap: current_dr(%d)\n", pd->current_dr);
if (pd->current_dr == DR_DFP) {
stop_usb_host(pd);
@@ -1429,9 +1440,9 @@
pd->current_dr = DR_UFP;
} else if (pd->current_dr == DR_UFP) {
stop_usb_peripheral(pd);
+ start_usb_host(pd, true);
pd->current_dr = DR_DFP;
- /* don't start USB host until after SVDM discovery */
usbpd_send_svdm(pd, USBPD_SID, USBPD_SVDM_DISCOVER_IDENTITY,
SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
}
@@ -1604,7 +1615,6 @@
else if (pd->current_dr == DR_DFP)
stop_usb_host(pd);
- pd->current_pr = PR_NONE;
pd->current_dr = DR_NONE;
if (pd->current_state == PE_ERROR_RECOVERY)
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 675e50e..37d904f 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -79,6 +79,7 @@
struct qusb_phy {
struct usb_phy phy;
+ struct mutex lock;
void __iomem *base;
void __iomem *efuse_reg;
@@ -103,11 +104,11 @@
int efuse_bit_pos;
int efuse_num_of_bits;
- bool power_enabled;
+ int power_enabled_ref;
bool clocks_enabled;
bool cable_connected;
bool suspended;
- bool rm_pulldown;
+ bool dpdm_enable;
struct regulator_desc dpdm_rdesc;
struct regulator_dev *dpdm_rdev;
@@ -171,35 +172,47 @@
return ret;
}
-static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
- bool toggle_vdd)
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
{
int ret = 0;
- dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n",
- __func__, on ? "on" : "off", qphy->power_enabled);
+ mutex_lock(&qphy->lock);
- if (toggle_vdd && qphy->power_enabled == on) {
- dev_dbg(qphy->phy.dev, "PHYs' regulators are already ON.\n");
- return 0;
+ dev_dbg(qphy->phy.dev,
+ "%s:req to turn %s regulators. power_enabled_ref:%d\n",
+ __func__, on ? "on" : "off", qphy->power_enabled_ref);
+
+ if (on && ++qphy->power_enabled_ref > 1) {
+ dev_dbg(qphy->phy.dev, "PHYs' regulators are already on\n");
+ goto done;
}
- if (!on)
- goto disable_vdda33;
-
- if (toggle_vdd) {
- ret = qusb_phy_config_vdd(qphy, true);
- if (ret) {
- dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
- ret);
- goto err_vdd;
+ if (!on) {
+ if (on == qphy->power_enabled_ref) {
+ dev_dbg(qphy->phy.dev,
+ "PHYs' regulators are already off\n");
+ goto done;
}
- ret = regulator_enable(qphy->vdd);
- if (ret) {
- dev_err(qphy->phy.dev, "Unable to enable VDD\n");
- goto unconfig_vdd;
- }
+ qphy->power_enabled_ref--;
+ if (!qphy->power_enabled_ref)
+ goto disable_vdda33;
+
+ dev_dbg(qphy->phy.dev, "Skip turning off PHYs' regulators\n");
+ goto done;
+ }
+
+ ret = qusb_phy_config_vdd(qphy, true);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+ ret);
+ goto err_vdd;
+ }
+
+ ret = regulator_enable(qphy->vdd);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+ goto unconfig_vdd;
}
ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
@@ -242,10 +255,9 @@
goto unset_vdd33;
}
- if (toggle_vdd)
- qphy->power_enabled = true;
-
pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+
+ mutex_unlock(&qphy->lock);
return ret;
disable_vdda33:
@@ -281,22 +293,24 @@
dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
disable_vdd:
- if (toggle_vdd) {
- ret = regulator_disable(qphy->vdd);
- if (ret)
- dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
- ret);
+ ret = regulator_disable(qphy->vdd);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+ ret);
unconfig_vdd:
- ret = qusb_phy_config_vdd(qphy, false);
- if (ret)
- dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
- ret);
- }
+ ret = qusb_phy_config_vdd(qphy, false);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
+ ret);
err_vdd:
- if (toggle_vdd)
- qphy->power_enabled = false;
dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+
+ /* in case of error in turning on regulators */
+ if (qphy->power_enabled_ref)
+ qphy->power_enabled_ref--;
+done:
+ mutex_unlock(&qphy->lock);
return ret;
}
@@ -394,7 +408,7 @@
dev_dbg(phy->dev, "%s\n", __func__);
- ret = qusb_phy_enable_power(qphy, true, true);
+ ret = qusb_phy_enable_power(qphy, true);
if (ret)
return ret;
@@ -576,7 +590,7 @@
qphy->base + qphy->phy_reg[INTR_CTRL]);
qusb_phy_reset(qphy);
qusb_phy_enable_clocks(qphy, false);
- qusb_phy_enable_power(qphy, false, true);
+ qusb_phy_enable_power(qphy, false);
}
qphy->suspended = true;
} else {
@@ -595,7 +609,6 @@
/* Makes sure that above write goes through */
wmb();
} else { /* Cable connect case */
- qusb_phy_enable_power(qphy, true, true);
qusb_phy_enable_clocks(qphy, true);
}
qphy->suspended = false;
@@ -636,15 +649,17 @@
int ret = 0;
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s\n", __func__);
+ dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+ __func__, qphy->dpdm_enable);
- if (qphy->rm_pulldown) {
- ret = qusb_phy_enable_power(qphy, true, false);
- if (ret >= 0) {
- qphy->rm_pulldown = true;
- dev_dbg(qphy->phy.dev, "dpdm_enable:rm_pulldown:%d\n",
- qphy->rm_pulldown);
+ if (!qphy->dpdm_enable) {
+ ret = qusb_phy_enable_power(qphy, true);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator enable failed:%d\n", ret);
+ return ret;
}
+ qphy->dpdm_enable = true;
}
return ret;
@@ -655,15 +670,17 @@
int ret = 0;
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s\n", __func__);
+ dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+ __func__, qphy->dpdm_enable);
- if (!qphy->rm_pulldown) {
- ret = qusb_phy_enable_power(qphy, false, false);
- if (ret >= 0) {
- qphy->rm_pulldown = false;
- dev_dbg(qphy->phy.dev, "dpdm_disable:rm_pulldown:%d\n",
- qphy->rm_pulldown);
+ if (qphy->dpdm_enable) {
+ ret = qusb_phy_enable_power(qphy, false);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator disable failed:%d\n", ret);
+ return ret;
}
+ qphy->dpdm_enable = false;
}
return ret;
@@ -673,9 +690,9 @@
{
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
- qphy->rm_pulldown);
- return qphy->rm_pulldown;
+ dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__,
+ qphy->dpdm_enable);
+ return qphy->dpdm_enable;
}
static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
@@ -964,6 +981,7 @@
return PTR_ERR(qphy->vdda18);
}
+ mutex_init(&qphy->lock);
platform_set_drvdata(pdev, qphy);
qphy->phy.label = "msm-qusb-phy-v2";
@@ -991,7 +1009,7 @@
usb_remove_phy(&qphy->phy);
qusb_phy_enable_clocks(qphy, false);
- qusb_phy_enable_power(qphy, false, true);
+ qusb_phy_enable_power(qphy, false);
return 0;
}
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 76b034e..e355e35 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -130,7 +130,7 @@
bool cable_connected;
bool suspended;
bool ulpi_mode;
- bool rm_pulldown;
+ bool dpdm_enable;
bool is_se_clk;
struct regulator_desc dpdm_rdesc;
@@ -673,15 +673,17 @@
int ret = 0;
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s\n", __func__);
+ dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+ __func__, qphy->dpdm_enable);
- if (qphy->rm_pulldown) {
+ if (!qphy->dpdm_enable) {
ret = qusb_phy_enable_power(qphy, true, false);
- if (ret >= 0) {
- qphy->rm_pulldown = true;
- dev_dbg(qphy->phy.dev, "dpdm_enable:rm_pulldown:%d\n",
- qphy->rm_pulldown);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator enable failed:%d\n", ret);
+ return ret;
}
+ qphy->dpdm_enable = true;
}
return ret;
@@ -692,15 +694,17 @@
int ret = 0;
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s\n", __func__);
+ dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+ __func__, qphy->dpdm_enable);
- if (!qphy->rm_pulldown) {
+ if (qphy->dpdm_enable) {
ret = qusb_phy_enable_power(qphy, false, false);
- if (ret >= 0) {
- qphy->rm_pulldown = false;
- dev_dbg(qphy->phy.dev, "dpdm_disable:rm_pulldown:%d\n",
- qphy->rm_pulldown);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator disable failed:%d\n", ret);
+ return ret;
}
+ qphy->dpdm_enable = false;
}
return ret;
@@ -710,9 +714,9 @@
{
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
- qphy->rm_pulldown);
- return qphy->rm_pulldown;
+ dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__,
+ qphy->dpdm_enable);
+ return qphy->dpdm_enable;
}
static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 8bb4875..84b444f 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -135,6 +135,7 @@
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3bf61ac..ebe51f11 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1877,6 +1877,10 @@
.driver_info = (kernel_ulong_t)&four_g_w100_blacklist
},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index fd509ed6c..652b433 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -158,6 +158,7 @@
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
+ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
{DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 44ab43f..af10f7b 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -262,7 +262,11 @@
kmem_cache_free(stub_priv_cache, priv);
kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+
kfree(urb->setup_packet);
+ urb->setup_packet = NULL;
+
usb_free_urb(urb);
}
}
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index 6b1e8c3..be50cef 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -28,7 +28,11 @@
struct urb *urb = priv->urb;
kfree(urb->setup_packet);
+ urb->setup_packet = NULL;
+
kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+
list_del(&priv->list);
kmem_cache_free(stub_priv_cache, priv);
usb_free_urb(urb);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 1afa111..aca0d88 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -315,7 +315,7 @@
struct ceph_mds_client *mdsc = fsc->mdsc;
int i;
int err;
- u32 ftype;
+ unsigned frag = -1;
struct ceph_mds_reply_info_parsed *rinfo;
dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
@@ -362,7 +362,6 @@
/* do we have the correct frag content buffered? */
if (need_send_readdir(fi, ctx->pos)) {
struct ceph_mds_request *req;
- unsigned frag;
int op = ceph_snap(inode) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
@@ -373,8 +372,11 @@
}
if (is_hash_order(ctx->pos)) {
- frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
- NULL, NULL);
+ /* fragtree isn't always accurate. choose frag
+ * based on previous reply when possible. */
+ if (frag == (unsigned)-1)
+ frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
+ NULL, NULL);
} else {
frag = fpos_frag(ctx->pos);
}
@@ -497,6 +499,7 @@
struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
struct ceph_vino vino;
ino_t ino;
+ u32 ftype;
BUG_ON(rde->offset < ctx->pos);
@@ -519,15 +522,17 @@
ctx->pos++;
}
+ ceph_mdsc_put_request(fi->last_readdir);
+ fi->last_readdir = NULL;
+
if (fi->next_offset > 2) {
- ceph_mdsc_put_request(fi->last_readdir);
- fi->last_readdir = NULL;
+ frag = fi->frag;
goto more;
}
/* more frags? */
if (!ceph_frag_is_rightmost(fi->frag)) {
- unsigned frag = ceph_frag_next(fi->frag);
+ frag = ceph_frag_next(fi->frag);
if (is_hash_order(ctx->pos)) {
loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
fi->next_offset, true);
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 42145be..5dc655e 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -100,7 +100,7 @@
int ret;
ret = kstrtoull(skip_spaces(buf), 0, &val);
- if (!ret || val >= clusters)
+ if (ret || val >= clusters)
return -EINVAL;
atomic64_set(&sbi->s_resv_clusters, val);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 350a2c8..1493ceb 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -741,16 +741,10 @@
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
- O_RDONLY | O_WRONLY | O_RDWR |
- O_CREAT | O_EXCL | O_NOCTTY |
- O_TRUNC | O_APPEND | /* O_NONBLOCK | */
- __O_SYNC | O_DSYNC | FASYNC |
- O_DIRECT | O_LARGEFILE | O_DIRECTORY |
- O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH | __O_TMPFILE |
- __FMODE_NONOTIFY
- ));
+ BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
+ HWEIGHT32(
+ (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
+ __FMODE_EXEC | __FMODE_NONOTIFY));
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 6528724..7bff6f4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -80,9 +80,9 @@
static struct rhashtable gl_hash_table;
-void gfs2_glock_free(struct gfs2_glock *gl)
+static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
if (gl->gl_ops->go_flags & GLOF_ASPACE) {
kmem_cache_free(gfs2_glock_aspace_cachep, gl);
@@ -90,6 +90,13 @@
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(gfs2_glock_cachep, gl);
}
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 51519c2..a04bf95 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -370,6 +370,7 @@
loff_t end;
} gl_vm;
};
+ struct rcu_head gl_rcu;
struct rhash_head gl_node;
};
diff --git a/fs/open.c b/fs/open.c
index 568749b..73b7d19 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -915,6 +915,12 @@
int lookup_flags = 0;
int acc_mode = ACC_MODE(flags);
+ /*
+ * Clear out all open flags we don't know about so that we don't report
+ * them in fcntl(F_GETFD) or similar interfaces.
+ */
+ flags &= VALID_OPEN_FLAGS;
+
if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 45f75c4..18f7612 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1632,7 +1632,7 @@
.release = single_release,
};
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
#ifdef CONFIG_SCHED_AUTOGROUP
/*
@@ -3090,6 +3090,9 @@
REG("mounts", S_IRUGO, proc_mounts_operations),
REG("mountinfo", S_IRUGO, proc_mountinfo_operations),
REG("mountstats", S_IRUSR, proc_mountstats_operations),
+#ifdef CONFIG_PROCESS_RECLAIM
+ REG("reclaim", S_IWUSR, proc_reclaim_operations),
+#endif
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
REG("smaps", S_IRUGO, proc_pid_smaps_operations),
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 5378441..6dfb414 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -209,6 +209,7 @@
extern const struct inode_operations proc_link_inode_operations;
extern const struct inode_operations proc_pid_link_inode_operations;
+extern const struct file_operations proc_reclaim_operations;
extern void proc_init_inodecache(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9182f84..c585e7e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -15,6 +15,8 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
+#include <linux/mm_inline.h>
+#include <linux/ctype.h>
#include <asm/elf.h>
#include <asm/uaccess.h>
@@ -1526,6 +1528,241 @@
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
+#ifdef CONFIG_PROCESS_RECLAIM
+static int reclaim_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct reclaim_param *rp = walk->private;
+ struct vm_area_struct *vma = rp->vma;
+ pte_t *pte, ptent;
+ spinlock_t *ptl;
+ struct page *page;
+ LIST_HEAD(page_list);
+ int isolated;
+ int reclaimed;
+
+ split_huge_pmd(vma, addr, pmd);
+ if (pmd_trans_unstable(pmd) || !rp->nr_to_reclaim)
+ return 0;
+cont:
+ isolated = 0;
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE) {
+ ptent = *pte;
+ if (!pte_present(ptent))
+ continue;
+
+ page = vm_normal_page(vma, addr, ptent);
+ if (!page)
+ continue;
+
+ if (isolate_lru_page(page))
+ continue;
+
+ list_add(&page->lru, &page_list);
+ inc_node_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ isolated++;
+ rp->nr_scanned++;
+ if ((isolated >= SWAP_CLUSTER_MAX) || !rp->nr_to_reclaim)
+ break;
+ }
+ pte_unmap_unlock(pte - 1, ptl);
+ reclaimed = reclaim_pages_from_list(&page_list, vma);
+ rp->nr_reclaimed += reclaimed;
+ rp->nr_to_reclaim -= reclaimed;
+ if (rp->nr_to_reclaim < 0)
+ rp->nr_to_reclaim = 0;
+
+ if (rp->nr_to_reclaim && (addr != end))
+ goto cont;
+
+ cond_resched();
+ return 0;
+}
+
+enum reclaim_type {
+ RECLAIM_FILE,
+ RECLAIM_ANON,
+ RECLAIM_ALL,
+ RECLAIM_RANGE,
+};
+
+struct reclaim_param reclaim_task_anon(struct task_struct *task,
+ int nr_to_reclaim)
+{
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ struct mm_walk reclaim_walk = {};
+ struct reclaim_param rp;
+
+ rp.nr_reclaimed = 0;
+ rp.nr_scanned = 0;
+ get_task_struct(task);
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out;
+
+ reclaim_walk.mm = mm;
+ reclaim_walk.pmd_entry = reclaim_pte_range;
+
+ rp.nr_to_reclaim = nr_to_reclaim;
+ reclaim_walk.private = &rp;
+
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (is_vm_hugetlb_page(vma))
+ continue;
+
+ if (vma->vm_file)
+ continue;
+
+ if (vma->vm_flags & VM_LOCKED)
+ continue;
+
+ if (!rp.nr_to_reclaim)
+ break;
+
+ rp.vma = vma;
+ walk_page_range(vma->vm_start, vma->vm_end,
+ &reclaim_walk);
+ }
+
+ flush_tlb_mm(mm);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+out:
+ put_task_struct(task);
+ return rp;
+}
+
+static ssize_t reclaim_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task;
+ char buffer[200];
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ enum reclaim_type type;
+ char *type_buf;
+ struct mm_walk reclaim_walk = {};
+ unsigned long start = 0;
+ unsigned long end = 0;
+ struct reclaim_param rp;
+
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+
+ if (copy_from_user(buffer, buf, count))
+ return -EFAULT;
+
+ type_buf = strstrip(buffer);
+ if (!strcmp(type_buf, "file"))
+ type = RECLAIM_FILE;
+ else if (!strcmp(type_buf, "anon"))
+ type = RECLAIM_ANON;
+ else if (!strcmp(type_buf, "all"))
+ type = RECLAIM_ALL;
+ else if (isdigit(*type_buf))
+ type = RECLAIM_RANGE;
+ else
+ goto out_err;
+
+ if (type == RECLAIM_RANGE) {
+ char *token;
+ unsigned long long len, len_in, tmp;
+ token = strsep(&type_buf, " ");
+ if (!token)
+ goto out_err;
+ tmp = memparse(token, &token);
+ if (tmp & ~PAGE_MASK || tmp > ULONG_MAX)
+ goto out_err;
+ start = tmp;
+
+ token = strsep(&type_buf, " ");
+ if (!token)
+ goto out_err;
+ len_in = memparse(token, &token);
+ len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+ if (len > ULONG_MAX)
+ goto out_err;
+ /*
+ * Check to see whether len was rounded up from small -ve
+ * to zero.
+ */
+ if (len_in && !len)
+ goto out_err;
+
+ end = start + len;
+ if (end < start)
+ goto out_err;
+ }
+
+ task = get_proc_task(file->f_path.dentry->d_inode);
+ if (!task)
+ return -ESRCH;
+
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out;
+
+ reclaim_walk.mm = mm;
+ reclaim_walk.pmd_entry = reclaim_pte_range;
+
+ rp.nr_to_reclaim = INT_MAX;
+ rp.nr_reclaimed = 0;
+ reclaim_walk.private = &rp;
+
+ down_read(&mm->mmap_sem);
+ if (type == RECLAIM_RANGE) {
+ vma = find_vma(mm, start);
+ while (vma) {
+ if (vma->vm_start > end)
+ break;
+ if (is_vm_hugetlb_page(vma))
+ continue;
+
+ rp.vma = vma;
+ walk_page_range(max(vma->vm_start, start),
+ min(vma->vm_end, end),
+ &reclaim_walk);
+ vma = vma->vm_next;
+ }
+ } else {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (is_vm_hugetlb_page(vma))
+ continue;
+
+ if (type == RECLAIM_ANON && vma->vm_file)
+ continue;
+
+ if (type == RECLAIM_FILE && !vma->vm_file)
+ continue;
+
+ rp.vma = vma;
+ walk_page_range(vma->vm_start, vma->vm_end,
+ &reclaim_walk);
+ }
+ }
+
+ flush_tlb_mm(mm);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+out:
+ put_task_struct(task);
+ return count;
+
+out_err:
+ return -EINVAL;
+}
+
+const struct file_operations proc_reclaim_operations = {
+ .write = reclaim_write,
+ .llseek = noop_llseek,
+};
+#endif
+
#ifdef CONFIG_NUMA
struct numa_maps {
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 3c5b51d..80825b2 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -364,41 +364,34 @@
return err;
}
-/* A feature which supports mount_nodev() with options */
-static struct dentry *mount_nodev_with_options(struct vfsmount *mnt,
- struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- int (*fill_super)(struct vfsmount *, struct super_block *,
- const char *, void *, int))
+struct sdcardfs_mount_private {
+ struct vfsmount *mnt;
+ const char *dev_name;
+ void *raw_data;
+};
+static int __sdcardfs_fill_super(
+ struct super_block *sb,
+ void *_priv, int silent)
{
- int error;
- struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
+ struct sdcardfs_mount_private *priv = _priv;
- if (IS_ERR(s))
- return ERR_CAST(s);
-
- s->s_flags = flags;
-
- error = fill_super(mnt, s, dev_name, data, flags & MS_SILENT ? 1 : 0);
- if (error) {
- deactivate_locked_super(s);
- return ERR_PTR(error);
- }
- s->s_flags |= MS_ACTIVE;
- return dget(s->s_root);
+ return sdcardfs_read_super(priv->mnt,
+ sb, priv->dev_name, priv->raw_data, silent);
}
static struct dentry *sdcardfs_mount(struct vfsmount *mnt,
struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
- /*
- * dev_name is a lower_path_name,
- * raw_data is a option string.
- */
- return mount_nodev_with_options(mnt, fs_type, flags, dev_name,
- raw_data, sdcardfs_read_super);
+ struct sdcardfs_mount_private priv = {
+ .mnt = mnt,
+ .dev_name = dev_name,
+ .raw_data = raw_data
+ };
+
+ return mount_nodev(fs_type, flags,
+ &priv, __sdcardfs_fill_super);
}
static struct dentry *sdcardfs_mount_wrn(struct file_system_type *fs_type,
@@ -423,7 +416,7 @@
list_del(&sbi->list);
mutex_unlock(&sdcardfs_super_list_lock);
}
- generic_shutdown_super(sb);
+ kill_anon_super(sb);
}
static struct file_system_type sdcardfs_fs_type = {
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
index 75ddcfa..8108c98 100644
--- a/include/dt-bindings/clock/mdss-10nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -17,23 +17,25 @@
/* DSI PLL clocks */
#define VCO_CLK_0 0
-#define BITCLK_SRC_0_CLK 1
-#define BYTECLK_SRC_0_CLK 2
-#define POST_BIT_DIV_0_CLK 3
-#define POST_VCO_DIV_0_CLK 4
-#define BYTECLK_MUX_0_CLK 5
-#define PCLK_SRC_MUX_0_CLK 6
-#define PCLK_SRC_0_CLK 7
-#define PCLK_MUX_0_CLK 8
-#define VCO_CLK_1 9
-#define BITCLK_SRC_1_CLK 10
-#define BYTECLK_SRC_1_CLK 11
-#define POST_BIT_DIV_1_CLK 12
-#define POST_VCO_DIV_1_CLK 13
-#define BYTECLK_MUX_1_CLK 14
-#define PCLK_SRC_MUX_1_CLK 15
-#define PCLK_SRC_1_CLK 16
-#define PCLK_MUX_1_CLK 17
+#define PLL_OUT_DIV_0_CLK 1
+#define BITCLK_SRC_0_CLK 2
+#define BYTECLK_SRC_0_CLK 3
+#define POST_BIT_DIV_0_CLK 4
+#define POST_VCO_DIV_0_CLK 5
+#define BYTECLK_MUX_0_CLK 6
+#define PCLK_SRC_MUX_0_CLK 7
+#define PCLK_SRC_0_CLK 8
+#define PCLK_MUX_0_CLK 9
+#define VCO_CLK_1 10
+#define PLL_OUT_DIV_1_CLK 11
+#define BITCLK_SRC_1_CLK 12
+#define BYTECLK_SRC_1_CLK 13
+#define POST_BIT_DIV_1_CLK 14
+#define POST_VCO_DIV_1_CLK 15
+#define BYTECLK_MUX_1_CLK 16
+#define PCLK_SRC_MUX_1_CLK 17
+#define PCLK_SRC_1_CLK 18
+#define PCLK_MUX_1_CLK 19
/* DP PLL clocks */
#define DP_VCO_CLK 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 678a885..339d470 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -201,7 +201,17 @@
#define GCC_SDCC1_ICE_CORE_CLK 183
#define GCC_SDCC1_APPS_CLK_SRC 184
#define GCC_SDCC1_ICE_CORE_CLK_SRC 185
-
+#define GCC_APC_VS_CLK 186
+#define GCC_GPU_VS_CLK 187
+#define GCC_MSS_VS_CLK 188
+#define GCC_VDDA_VS_CLK 189
+#define GCC_VDDCX_VS_CLK 190
+#define GCC_VDDMX_VS_CLK 191
+#define GCC_VS_CTRL_AHB_CLK 192
+#define GCC_VS_CTRL_CLK 193
+#define GCC_VS_CTRL_CLK_SRC 194
+#define GCC_VSENSOR_CLK_SRC 195
+#define GPLL4 196
/* GCC reset clocks */
#define GCC_MMSS_BCR 0
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fb910c6..0693c3e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -508,6 +508,7 @@
#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
+#define QUEUE_FLAG_FAST 27 /* fast block device (e.g. ram based) */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -598,6 +599,7 @@
#define blk_queue_secure_erase(q) \
(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
+#define blk_queue_fast(q) test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 0538291..10842bb 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -266,9 +266,13 @@
#ifdef CONFIG_OF
extern struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node);
+extern struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node);
#else
static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node) { return NULL; }
+static inline struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node) { return NULL; }
#endif
#ifdef CONFIG_PID_NS
diff --git a/include/linux/device.h b/include/linux/device.h
index d469121..f43db28 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -375,6 +375,7 @@
* @suspend: Used to put the device to sleep mode, usually to a low power
* state.
* @resume: Used to bring the device from the sleep mode.
+ * @shutdown: Called at shut-down time to quiesce the device.
* @ns_type: Callbacks so sysfs can detemine namespaces.
* @namespace: Namespace of the device belongs to this class.
* @pm: The default device power management operations of this class.
@@ -403,6 +404,7 @@
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+ int (*shutdown)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b871c0c..a9a16f2 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -202,6 +202,7 @@
/* Internal data. Please do not set. */
struct device dev;
struct raw_notifier_head *nh;
+ struct blocking_notifier_head *bnh;
struct list_head entry;
int max_supported;
spinlock_t lock; /* could be called by irq handler */
@@ -289,6 +290,10 @@
struct notifier_block *nb);
extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
+extern int extcon_register_blocking_notifier(struct extcon_dev *edev,
+ unsigned int id, struct notifier_block *nb);
+extern int extcon_unregister_blocking_notifier(struct extcon_dev *edev,
+ unsigned int id, struct notifier_block *nb);
extern int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
@@ -306,7 +311,8 @@
/* Following API to get information of extcon device */
extern const char *extcon_get_edev_name(struct extcon_dev *edev);
-
+extern int extcon_blocking_sync(struct extcon_dev *edev, unsigned int id,
+ bool val);
#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
@@ -413,6 +419,20 @@
return 0;
}
+static inline int extcon_register_blocking_notifier(struct extcon_dev *edev,
+ unsigned int id,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_unregister_blocking_notifier(struct extcon_dev *edev,
+ unsigned int id,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
static inline int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 76ce329..1b48d9c 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -3,6 +3,12 @@
#include <uapi/linux/fcntl.h>
+/* list of all valid flags for the open/openat flags argument: */
+#define VALID_OPEN_FLAGS \
+ (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
+ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
#ifndef force_o_largefile
#define force_o_largefile() (BITS_PER_LONG != 32)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f8041f9de..46cd745 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -41,6 +41,7 @@
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
+#define ___GFP_CMA 0x4000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -54,8 +55,9 @@
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
+#define __GFP_CMA ((__force gfp_t)___GFP_CMA)
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \
+ __GFP_CMA)
/*
* Page mobility and placement hints
*
@@ -274,7 +276,12 @@
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
+#ifndef CONFIG_CMA
return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+#else
+ return ((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT) |
+ ((gfp_flags & __GFP_CMA) != 0);
+#endif
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4c70716..61aff32 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -187,9 +187,24 @@
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
unsigned long vaddr)
{
+#ifndef CONFIG_CMA
return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+#else
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+#endif
}
+#ifdef CONFIG_CMA
+static inline struct page *
+alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+}
+#endif
+
static inline void clear_highpage(struct page *page)
{
void *kaddr = kmap_atomic(page);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7bdddb3..0b8aedf 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -141,6 +141,7 @@
DOMAIN_ATTR_EARLY_MAP,
DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+ DOMAIN_ATTR_CB_STALL_DISABLE,
DOMAIN_ATTR_MAX,
};
diff --git a/include/linux/ipa_uc_offload.h b/include/linux/ipa_uc_offload.h
index 0277e87..85d0ce9 100644
--- a/include/linux/ipa_uc_offload.h
+++ b/include/linux/ipa_uc_offload.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -163,6 +163,20 @@
u32 max_supported_bw_mbps;
};
+/**
+ * struct ipa_uc_ready_params - uC ready CB parameters
+ * @is_uC_ready: uC loaded or not
+ * @priv : callback cookie
+ * @notify: callback
+ * @proto: uC offload protocol type
+ */
+struct ipa_uc_ready_params {
+ bool is_uC_ready;
+ void *priv;
+ ipa_uc_ready_cb notify;
+ enum ipa_uc_offload_proto proto;
+};
+
#if defined CONFIG_IPA || defined CONFIG_IPA3
/**
@@ -223,6 +237,19 @@
*/
int ipa_set_perf_profile(struct ipa_perf_profile *profile);
+
+/*
+ * To register uC ready callback if uC not ready
+ * and also check uC readiness
+ * if uC not ready only, register callback
+ */
+int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param);
+
+/*
+ * To de-register uC ready callback
+ */
+void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto);
+
#else /* (CONFIG_IPA || CONFIG_IPA3) */
static inline int ipa_uc_offload_reg_intf(
@@ -254,6 +281,15 @@
return -EPERM;
}
+static inline int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param)
+{
+ return -EPERM;
+}
+
+static inline void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
+{
+}
+
#endif /* CONFIG_IPA3 */
#endif /* _IPA_UC_OFFLOAD_H_ */
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 78f01ea..86a2dc6 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -49,5 +49,6 @@
bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
bool mbox_controller_is_idle(struct mbox_chan *chan); /* atomic */
+void mbox_chan_debug(struct mbox_chan *chan);
#endif /* __MAILBOX_CLIENT_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 30a4ed2..7827c68 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -49,6 +49,8 @@
* Used only if txdone_poll:=true && txdone_irq:=false
* @peek_data: Atomic check for any received data. Return true if controller
* has some data to push to the client. False otherwise.
+ * @debug: Allow chan to be debugged when the client detects a channel is
+ * locked up.
*/
struct mbox_chan_ops {
int (*send_data)(struct mbox_chan *chan, void *data);
@@ -90,6 +92,7 @@
struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
const struct of_phandle_args *sp);
bool (*is_idle)(struct mbox_controller *mbox);
+ void (*debug)(struct mbox_chan *chan);
/* Internal to API */
struct hrtimer poll_hrt;
struct list_head node;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index d6ebc01..37e5178 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -415,6 +415,11 @@
for (idx = 0, rgn = &memblock_type->regions[0]; \
idx < memblock_type->cnt; \
idx++, rgn = &memblock_type->regions[idx])
+#define for_each_memblock_rev(memblock_type, region) \
+ for (region = memblock.memblock_type.regions + \
+ memblock.memblock_type.cnt - 1; \
+ region >= memblock.memblock_type.regions; \
+ region--)
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index b4c1be4..b994010 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -350,7 +350,7 @@
int (*post_reset)(struct wcd9xxx *wcd9xxx);
void *ssr_priv;
- bool dev_up;
+ unsigned long dev_up;
u32 num_of_supplies;
struct regulator_bulk_data *supplies;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6a14034..2b423f7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -478,16 +478,16 @@
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-static inline bool is_vmalloc_addr(const void *x)
-{
-#ifdef CONFIG_MMU
- unsigned long addr = (unsigned long)x;
- return addr >= VMALLOC_START && addr < VMALLOC_END;
+#ifdef CONFIG_MMU
+extern int is_vmalloc_addr(const void *x);
#else
- return false;
-#endif
+static inline int is_vmalloc_addr(const void *x)
+{
+ return 0;
}
+#endif
+
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
@@ -2407,7 +2407,6 @@
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
extern struct page_ext_operations debug_guardpage_ops;
-extern struct page_ext_operations page_poisoning_ops;
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
@@ -2448,5 +2447,19 @@
static inline void setup_nr_node_ids(void) {}
#endif
+#ifdef CONFIG_PROCESS_RECLAIM
+struct reclaim_param {
+ struct vm_area_struct *vma;
+ /* Number of pages scanned */
+ int nr_scanned;
+ /* max pages to reclaim */
+ int nr_to_reclaim;
+ /* pages reclaimed */
+ int nr_reclaimed;
+};
+extern struct reclaim_param reclaim_task_anon(struct task_struct *task,
+ int nr_to_reclaim);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6744eb4..ed0099c9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -39,8 +39,6 @@
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
- MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
- MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* MIGRATE_CMA migration type is designed to mimic the way
@@ -57,6 +55,8 @@
*/
MIGRATE_CMA,
#endif
+ MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+ MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
@@ -65,13 +65,22 @@
/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];
+/*
+ * Returns a list which contains the migrate types on to which
+ * an allocation falls back when the free list for the migrate
+ * type mtype is depleted.
+ * The end of the list is delimited by the type MIGRATE_TYPES.
+ */
+extern int *get_migratetype_fallbacks(int mtype);
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
+# define get_cma_migrate_type() MIGRATE_CMA
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
+# define get_cma_migrate_type() MIGRATE_MOVABLE
#endif
#define for_each_migratetype_order(order, type) \
@@ -368,6 +377,10 @@
struct pglist_data *zone_pgdat;
struct per_cpu_pageset __percpu *pageset;
+#ifdef CONFIG_CMA
+ bool cma_alloc;
+#endif
+
#ifndef CONFIG_SPARSEMEM
/*
* Flags for a pageblock_nr_pages block. See pageblock-flags.h.
diff --git a/include/linux/oom.h b/include/linux/oom.h
index b4e36e9..b986840 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -79,6 +79,9 @@
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
+extern void dump_tasks(struct mem_cgroup *memcg,
+ const nodemask_t *nodemask);
+
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 9788360..0ea3e1b 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -266,6 +266,9 @@
#define plist_next(pos) \
list_next_entry(pos, node_list)
+#define plist_next_entry(pos, type, member) \
+ container_of(plist_next(pos), type, member)
+
/**
* plist_prev - get the prev entry in list
* @pos: the type * to cursor
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 6c9ddcd..2938206 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -64,11 +64,14 @@
struct pinctrl *geni_pinctrl;
struct pinctrl_state *geni_gpio_active;
struct pinctrl_state *geni_gpio_sleep;
+ int clk_freq_out;
};
#define PINCTRL_DEFAULT "default"
#define PINCTRL_SLEEP "sleep"
+#define KHz(freq) (1000 * (freq))
+
/* Common SE registers */
#define GENI_INIT_CFG_REVISION (0x0)
#define GENI_S_INIT_CFG_REVISION (0x4)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b46bb56..71fd2b3 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -10,6 +10,11 @@
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
+extern int isolate_lru_page(struct page *page);
+extern void putback_lru_page(struct page *page);
+extern unsigned long reclaim_pages_from_list(struct list_head *page_list,
+ struct vm_area_struct *vma);
+
/*
* The anon_vma heads a list of private "related" vmas, to scan if
* an anonymous page pointing to this anon_vma needs to be unmapped:
@@ -186,7 +191,8 @@
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
-int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap(struct page *, enum ttu_flags flags,
+ struct vm_area_struct *vma);
/*
* Used by uprobes to replace a userspace page safely
@@ -263,6 +269,7 @@
*/
struct rmap_walk_control {
void *arg;
+ struct vm_area_struct *target_vma;
int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
@@ -287,7 +294,7 @@
return 0;
}
-#define try_to_unmap(page, refs) SWAP_FAIL
+#define try_to_unmap(page, refs, vma) SWAP_FAIL
static inline int page_mkclean(struct page *page)
{
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ff65b44..7627c76 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1503,9 +1503,6 @@
u32 sum_history[RAVG_HIST_SIZE_MAX];
u32 *curr_window_cpu, *prev_window_cpu;
u32 curr_window, prev_window;
-#ifdef CONFIG_SCHED_HMP
- u64 curr_burst, avg_burst, avg_sleep_time;
-#endif
u16 active_windows;
u32 pred_demand;
u8 busy_buckets[NUM_BUSY_BUCKETS];
@@ -2653,38 +2650,10 @@
#define MAX_NUM_CGROUP_COLOC_ID 20
-#ifdef CONFIG_SCHED_HMP
-extern int sched_set_window(u64 window_start, unsigned int window_size);
-extern unsigned long sched_get_busy(int cpu);
-extern void sched_get_cpus_busy(struct sched_load *busy,
- const struct cpumask *query_cpus);
-extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
-extern u32 sched_get_init_task_load(struct task_struct *p);
-extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
-extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
-extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
-extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
-extern int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle);
-extern unsigned int sched_get_cluster_wake_idle(int cpu);
-extern int sched_update_freq_max_load(const cpumask_t *cpumask);
-extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
- u32 fmin, u32 fmax);
-extern void sched_set_cpu_cstate(int cpu, int cstate,
- int wakeup_energy, int wakeup_latency);
-extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
- int wakeup_energy, int wakeup_latency);
-extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
-extern unsigned int sched_get_group_id(struct task_struct *p);
-
-#else /* CONFIG_SCHED_HMP */
static inline int sched_set_window(u64 window_start, unsigned int window_size)
{
return -EINVAL;
}
-static inline unsigned long sched_get_busy(int cpu)
-{
- return 0;
-}
static inline void sched_get_cpus_busy(struct sched_load *busy,
const struct cpumask *query_cpus) {};
@@ -2698,12 +2667,6 @@
{
}
-static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
- int dstate, int wakeup_energy, int wakeup_latency)
-{
-}
-#endif /* CONFIG_SCHED_HMP */
-
#ifdef CONFIG_SCHED_WALT
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
extern void sched_set_io_is_busy(int val);
@@ -2731,10 +2694,8 @@
#endif /* CONFIG_SCHED_WALT */
#ifndef CONFIG_SCHED_WALT
-#ifndef CONFIG_SCHED_HMP
static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
u32 fmin, u32 fmax) { }
-#endif /* CONFIG_SCHED_HMP */
#endif /* CONFIG_SCHED_WALT */
#ifdef CONFIG_NO_HZ_COMMON
@@ -2847,7 +2808,7 @@
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
-#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_HMP)
+#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
@@ -2982,7 +2943,6 @@
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
-extern int wake_up_process_no_notif(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -3217,7 +3177,7 @@
}
/* mmput gets rid of the mappings and all user-space */
-extern void mmput(struct mm_struct *);
+extern int mmput(struct mm_struct *mm);
#ifdef CONFIG_MMU
/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index f0ba8e6..8410c32 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -37,47 +37,13 @@
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
-#endif
-
-#ifdef CONFIG_SCHED_HMP
-
-enum freq_reporting_policy {
- FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK,
- FREQ_REPORT_CPU_LOAD,
- FREQ_REPORT_TOP_TASK,
- FREQ_REPORT_INVALID_POLICY
-};
-
-extern int sysctl_sched_freq_inc_notify;
-extern int sysctl_sched_freq_dec_notify;
-extern unsigned int sysctl_sched_freq_reporting_policy;
-extern unsigned int sysctl_sched_window_stats_policy;
-extern unsigned int sysctl_sched_ravg_hist_size;
-extern unsigned int sysctl_sched_spill_nr_run;
-extern unsigned int sysctl_sched_spill_load_pct;
-extern unsigned int sysctl_sched_upmigrate_pct;
-extern unsigned int sysctl_sched_downmigrate_pct;
-extern unsigned int sysctl_early_detection_duration;
-extern unsigned int sysctl_sched_small_wakee_task_load_pct;
-extern unsigned int sysctl_sched_big_waker_task_load_pct;
-extern unsigned int sysctl_sched_select_prev_cpu_us;
-extern unsigned int sysctl_sched_restrict_cluster_spill;
-extern unsigned int sysctl_sched_pred_alert_freq;
-extern unsigned int sysctl_sched_freq_aggregate;
-extern unsigned int sysctl_sched_enable_thread_grouping;
-extern unsigned int sysctl_sched_freq_aggregate_threshold_pct;
-extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;
-extern unsigned int sysctl_sched_short_burst;
-extern unsigned int sysctl_sched_short_sleep;
-
-#elif defined(CONFIG_SCHED_WALT)
extern int
walt_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
@@ -106,9 +72,6 @@
extern int sched_migrate_notify_proc_handler(struct ctl_table *table,
int write, void __user *buffer, size_t *lenp, loff_t *ppos);
-extern int sched_hmp_proc_update_handler(struct ctl_table *table,
- int write, void __user *buffer, size_t *lenp, loff_t *ppos);
-
extern int sched_boost_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index 1450caa..0320210 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -114,15 +114,16 @@
*
* @fps: panel te interval
* @vtotal: current vertical total (height + vbp + vfp)
- * @jitter: panel can set the jitter to wake up rsc/solver early
- * This value causes mdp core to exit certain mode
- * early. Default is 10% jitter
+ * @jitter_numer: panel jitter numerator value. This config causes rsc/solver
+ *                to wake up early, before the panel te. Default is 0.8% jitter.
+ * @jitter_denom: panel jitter denominator.
* @prefill_lines: max prefill lines based on panel
*/
struct sde_rsc_cmd_config {
u32 fps;
u32 vtotal;
- u32 jitter;
+ u32 jitter_numer;
+ u32 jitter_denom;
u32 prefill_lines;
};
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 55ff559..92d1fde 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -151,12 +151,14 @@
SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */
+ SWP_FAST = (1 << 11), /* blkdev access is fast and cheap */
/* add others here before... */
- SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */
+ SWP_SCANNING = (1 << 12), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
+#define SWAPFILE_CLUSTER 256
#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
@@ -241,6 +243,8 @@
*/
struct work_struct discard_work; /* discard worker */
struct swap_cluster_list discard_clusters; /* discard clusters list */
+ unsigned int write_pending;
+ unsigned int max_writes;
};
/* linux/mm/workingset.c */
@@ -328,6 +332,8 @@
unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
+extern int sysctl_swap_ratio;
+extern int sysctl_swap_ratio_enable;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;
@@ -389,10 +395,18 @@
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
+extern bool is_swap_fast(swp_entry_t entry);
/* Swap 50% full? Release swapcache more aggressively.. */
-static inline bool vm_swap_full(void)
+static inline bool vm_swap_full(struct swap_info_struct *si)
{
+ /*
+ * If the swap device is fast, return true
+ * not to delay swap free.
+ */
+ if (si->flags & SWP_FAST)
+ return true;
+
return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
@@ -428,7 +442,7 @@
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
-#define vm_swap_full() 0
+#define vm_swap_full(si) 0
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
@@ -579,7 +593,7 @@
static inline bool mem_cgroup_swap_full(struct page *page)
{
- return vm_swap_full();
+ return vm_swap_full(page_swap_info(page));
}
#endif
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 388293a..ed2a9c9 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -7,7 +7,12 @@
*/
extern spinlock_t swap_lock;
extern struct plist_head swap_active_head;
+extern spinlock_t swap_avail_lock;
+extern struct plist_head swap_avail_head;
extern struct swap_info_struct *swap_info[];
extern int try_to_unuse(unsigned int, bool, unsigned long);
+extern int swap_ratio(struct swap_info_struct **si);
+extern void setup_swap_ratio(struct swap_info_struct *p, int prio);
+extern bool is_swap_ratio_group(int prio);
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 56dde53..5f5107b 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -185,6 +185,9 @@
extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
+#ifdef CONFIG_SMP
+extern bool check_pending_deferrable_timers(int cpu);
+#endif
/*
* The jiffies value which is added to now, when there is no timer
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 5c0b3fa..b305b0e 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -582,9 +582,9 @@
((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
#define EndpointRequest \
- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
#define EndpointOutRequest \
- ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
/* class requests from the USB 2.0 hub spec, table 11-15 */
/* GetBusState and SetHubDescriptor are optional, omitted */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 4d6ec58..9cc195f 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -21,7 +21,7 @@
#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
-enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC),
FOR_ALL_ZONES(ALLOCSTALL),
FOR_ALL_ZONES(PGSCAN_SKIP),
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3d9d786..f113e0e 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -19,6 +19,8 @@
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
+#define VM_LOWMEM 0x00000100 /* Tracking of direct mapped lowmem */
+
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -82,6 +84,7 @@
const void *caller);
extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
@@ -159,6 +162,13 @@
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern __init int vm_area_check_early(struct vm_struct *vm);
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern void mark_vmalloc_reserved_area(void *addr, unsigned long size);
+#else
+static inline void mark_vmalloc_reserved_area(void *addr, unsigned long size)
+{ }
+#endif
#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
@@ -184,7 +194,12 @@
#endif
#ifdef CONFIG_MMU
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern unsigned long total_vmalloc_size;
+#define VMALLOC_TOTAL total_vmalloc_size
+#else
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+#endif
#else
#define VMALLOC_TOTAL 0UL
#endif
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
index 3347cc3..93000f5 100644
--- a/include/linux/vmpressure.h
+++ b/include/linux/vmpressure.h
@@ -15,6 +15,7 @@
unsigned long tree_scanned;
unsigned long tree_reclaimed;
+ unsigned long stall;
/* The lock is used to keep the scanned/reclaimed above in sync. */
struct spinlock sr_lock;
@@ -28,11 +29,13 @@
struct mem_cgroup;
-#ifdef CONFIG_MEMCG
+extern int vmpressure_notifier_register(struct notifier_block *nb);
+extern int vmpressure_notifier_unregister(struct notifier_block *nb);
extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
unsigned long scanned, unsigned long reclaimed);
extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
+#ifdef CONFIG_MEMCG
extern void vmpressure_init(struct vmpressure *vmpr);
extern void vmpressure_cleanup(struct vmpressure *vmpr);
extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
@@ -43,9 +46,9 @@
extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd);
#else
-static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
- unsigned long scanned, unsigned long reclaimed) {}
-static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
- int prio) {}
+static inline struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
+{
+ return NULL;
+}
#endif /* CONFIG_MEMCG */
#endif /* __LINUX_VMPRESSURE_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 6d27dae..8529be6 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -772,6 +772,30 @@
};
/**
+ * struct iface_combination_params - input parameters for interface combinations
+ *
+ * Used to pass interface combination parameters
+ *
+ * @num_different_channels: the number of different channels we want
+ * to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ * width where radar detection is needed, as in the definition of
+ * &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the number of interfaces of each interface
+ * type. The index is the interface type as specified in &enum
+ * nl80211_iftype.
+ * @new_beacon_int: set this to the beacon interval of a new interface
+ * that's not operating yet, if such is to be checked as part of
+ * the verification
+ */
+struct iface_combination_params {
+ int num_different_channels;
+ u8 radar_detect;
+ int iftype_num[NUM_NL80211_IFTYPES];
+ u32 new_beacon_int;
+};
+
+/**
* enum station_parameters_apply_mask - station parameter values to apply
* @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
* @STATION_PARAM_APPLY_CAPABILITY: apply new capability
@@ -3082,6 +3106,12 @@
* only in special cases.
* @radar_detect_widths: bitmap of channel widths supported for radar detection
* @radar_detect_regions: bitmap of regions supported for radar detection
+ * @beacon_int_min_gcd: This interface combination supports different
+ * beacon intervals.
+ * = 0 - all beacon intervals for different interface must be same.
+ * > 0 - any beacon interval for the interface part of this combination AND
+ * *GCD* of all beacon intervals from beaconing interfaces of this
+ * combination must be greater or equal to this value.
*
* With this structure the driver can describe which interface
* combinations it supports concurrently.
@@ -3147,6 +3177,7 @@
bool beacon_int_infra_match;
u8 radar_detect_widths;
u8 radar_detect_regions;
+ u32 beacon_int_min_gcd;
};
struct ieee80211_txrx_stypes {
@@ -5644,36 +5675,20 @@
* cfg80211_check_combinations - check interface combinations
*
* @wiphy: the wiphy
- * @num_different_channels: the number of different channels we want
- * to use for verification
- * @radar_detect: a bitmap where each bit corresponds to a channel
- * width where radar detection is needed, as in the definition of
- * &struct ieee80211_iface_combination.@radar_detect_widths
- * @iftype_num: array with the numbers of interfaces of each interface
- * type. The index is the interface type as specified in &enum
- * nl80211_iftype.
+ * @params: the interface combinations parameter
*
* This function can be called by the driver to check whether a
* combination of interfaces and their types are allowed according to
* the interface combinations.
*/
int cfg80211_check_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES]);
+ struct iface_combination_params *params);
/**
* cfg80211_iter_combinations - iterate over matching combinations
*
* @wiphy: the wiphy
- * @num_different_channels: the number of different channels we want
- * to use for verification
- * @radar_detect: a bitmap where each bit corresponds to a channel
- * width where radar detection is needed, as in the definition of
- * &struct ieee80211_iface_combination.@radar_detect_widths
- * @iftype_num: array with the numbers of interfaces of each interface
- * type. The index is the interface type as specified in &enum
- * nl80211_iftype.
+ * @params: the interface combinations parameter
* @iter: function to call for each matching combination
* @data: pointer to pass to iter function
*
@@ -5682,9 +5697,7 @@
* purposes.
*/
int cfg80211_iter_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES],
+ struct iface_combination_params *params,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 2700f92..7b93ffd 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -140,6 +140,9 @@
* most likely due to retrans in 3WHS.
*/
+/* Number of full MSS to receive before Acking RFC2581 */
+#define TCP_DELACK_SEG 1
+
#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
* for local resources.
*/
@@ -274,6 +277,11 @@
extern int sysctl_tcp_default_init_rwnd;
extern atomic_long_t tcp_memory_allocated;
+
+/* sysctl variables for controlling various tcp parameters */
+extern int sysctl_tcp_delack_seg;
+extern int sysctl_tcp_use_userconfig;
+
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
@@ -364,6 +372,13 @@
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+/* sysctl master controller */
+extern int tcp_use_userconfig_sysctl_handler(struct ctl_table *table,
+ int write, void __user *buffer, size_t *length,
+ loff_t *ppos);
+extern int tcp_proc_delayed_ack_control(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
diff --git a/include/soc/qcom/glink.h b/include/soc/qcom/glink.h
index 7b86481..4522b11 100644
--- a/include/soc/qcom/glink.h
+++ b/include/soc/qcom/glink.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -340,6 +340,22 @@
*/
unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size);
+/**
+ * glink_start_rx_rt() - Vote for RT thread priority on RX.
+ * @handle: Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_start_rx_rt(void *handle);
+
+/**
+ * glink_end_rx_rt() - Remove the vote for RT thread priority on RX.
+ * @handle: Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_end_rx_rt(void *handle);
+
#else /* CONFIG_MSM_GLINK */
static inline void *glink_open(const struct glink_open_config *cfg_ptr)
{
@@ -428,5 +444,16 @@
{
return 0;
}
+
+static inline int glink_start_rx_rt(void *handle)
+{
+ return -ENODEV;
+}
+
+static inline int glink_end_rx_rt(void *handle)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_MSM_GLINK */
#endif /* _SOC_QCOM_GLINK_H_ */
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
index 0efea04..a7d4190 100644
--- a/include/soc/qcom/qseecomi.h
+++ b/include/soc/qcom/qseecomi.h
@@ -336,7 +336,7 @@
__packed struct qseecom_continue_blocked_request_ireq {
uint32_t qsee_cmd_id;
- uint32_t app_id;
+ uint32_t app_or_session_id; /*legacy: app_id; smcinvoke: session_id*/
};
@@ -682,6 +682,9 @@
#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x04)
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x07)
+
#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index a08cfe1..665708d 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -40,6 +40,7 @@
VMID_CP_SPSS_SP = 0x1A,
VMID_CP_CAMERA_PREVIEW = 0x1D,
VMID_CP_SPSS_SP_SHARED = 0x22,
+ VMID_CP_SPSS_HLOS_SHARED = 0x24,
VMID_LAST,
VMID_INVAL = -1
};
@@ -55,7 +56,7 @@
u32 *source_vm_list, int source_nelems,
int *dest_vmids, int *dest_perms,
int dest_nelems);
-int hyp_assign_phys(phys_addr_t addr, u64 size,
+extern int hyp_assign_phys(phys_addr_t addr, u64 size,
u32 *source_vmlist, int source_nelems,
int *dest_vmids, int *dest_perms, int dest_nelems);
bool msm_secure_v2_is_supported(void);
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 128da7b..f0da77a 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -466,8 +466,6 @@
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
struct snd_kcontrol *kcontrol);
-struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
- const struct snd_kcontrol *kcontrol);
struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget(
struct snd_kcontrol *kcontrol);
diff --git a/include/trace/events/almk.h b/include/trace/events/almk.h
new file mode 100644
index 0000000..85d712d
--- /dev/null
+++ b/include/trace/events/almk.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM almk
+
+#if !defined(_TRACE_EVENT_ALMK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_ALMK_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+TRACE_EVENT(almk_vmpressure,
+
+ TP_PROTO(unsigned long pressure,
+ int other_free,
+ int other_file),
+
+ TP_ARGS(pressure, other_free, other_file),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pressure)
+ __field(int, other_free)
+ __field(int, other_file)
+ ),
+
+ TP_fast_assign(
+ __entry->pressure = pressure;
+ __entry->other_free = other_free;
+ __entry->other_file = other_file;
+ ),
+
+ TP_printk("%lu, %d, %d",
+ __entry->pressure, __entry->other_free,
+ __entry->other_file)
+);
+
+TRACE_EVENT(almk_shrink,
+
+ TP_PROTO(int tsize,
+ int vmp,
+ int other_free,
+ int other_file,
+ short adj),
+
+ TP_ARGS(tsize, vmp, other_free, other_file, adj),
+
+ TP_STRUCT__entry(
+ __field(int, tsize)
+ __field(int, vmp)
+ __field(int, other_free)
+ __field(int, other_file)
+ __field(short, adj)
+ ),
+
+ TP_fast_assign(
+ __entry->tsize = tsize;
+ __entry->vmp = vmp;
+ __entry->other_free = other_free;
+ __entry->other_file = other_file;
+ __entry->adj = adj;
+ ),
+
+ TP_printk("%d, %d, %d, %d, %d",
+ __entry->tsize,
+ __entry->vmp,
+ __entry->other_free,
+ __entry->other_file,
+ __entry->adj)
+);
+
+#endif
+
+#include <trace/define_trace.h>
+
diff --git a/include/trace/events/process_reclaim.h b/include/trace/events/process_reclaim.h
new file mode 100644
index 0000000..6fcede7
--- /dev/null
+++ b/include/trace/events/process_reclaim.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM process_reclaim
+
+#if !defined(_TRACE_EVENT_PROCESSRECLAIM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_PROCESSRECLAIM_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+
+TRACE_EVENT(process_reclaim,
+
+ TP_PROTO(int tasksize,
+ short oom_score_adj,
+ int nr_scanned, int nr_reclaimed,
+ int per_swap_size, int total_sz,
+ int nr_to_reclaim),
+
+ TP_ARGS(tasksize, oom_score_adj, nr_scanned,
+ nr_reclaimed, per_swap_size,
+ total_sz, nr_to_reclaim),
+
+ TP_STRUCT__entry(
+ __field(int, tasksize)
+ __field(short, oom_score_adj)
+ __field(int, nr_scanned)
+ __field(int, nr_reclaimed)
+ __field(int, per_swap_size)
+ __field(int, total_sz)
+ __field(int, nr_to_reclaim)
+ ),
+
+ TP_fast_assign(
+ __entry->tasksize = tasksize;
+ __entry->oom_score_adj = oom_score_adj;
+ __entry->nr_scanned = nr_scanned;
+ __entry->nr_reclaimed = nr_reclaimed;
+ __entry->per_swap_size = per_swap_size;
+ __entry->total_sz = total_sz;
+ __entry->nr_to_reclaim = nr_to_reclaim;
+ ),
+
+ TP_printk("%d, %hd, %d, %d, %d, %d, %d",
+ __entry->tasksize, __entry->oom_score_adj,
+ __entry->nr_scanned, __entry->nr_reclaimed,
+ __entry->per_swap_size, __entry->total_sz,
+ __entry->nr_to_reclaim)
+);
+
+TRACE_EVENT(process_reclaim_eff,
+
+ TP_PROTO(int efficiency, int reclaim_avg_efficiency),
+
+ TP_ARGS(efficiency, reclaim_avg_efficiency),
+
+ TP_STRUCT__entry(
+ __field(int, efficiency)
+ __field(int, reclaim_avg_efficiency)
+ ),
+
+ TP_fast_assign(
+ __entry->efficiency = efficiency;
+ __entry->reclaim_avg_efficiency = reclaim_avg_efficiency;
+ ),
+
+ TP_printk("%d, %d", __entry->efficiency,
+ __entry->reclaim_avg_efficiency)
+);
+
+#endif
+
+#include <trace/define_trace.h>
+
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e94a82b..cf3f5e3 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -71,10 +71,8 @@
__field(unsigned long, cpu_load )
__field(unsigned int, rt_nr_running )
__field(unsigned int, cpus_allowed )
-#ifdef CONFIG_SCHED_HMP
__field(unsigned int, demand )
__field(unsigned int, pred_demand )
-#endif
),
TP_fast_assign(
@@ -87,24 +85,17 @@
__entry->cpu_load = task_rq(p)->cpu_load[0];
__entry->rt_nr_running = task_rq(p)->rt.rt_nr_running;
__entry->cpus_allowed = cpus_allowed;
-#ifdef CONFIG_SCHED_HMP
- __entry->demand = p->ravg.demand;
- __entry->pred_demand = p->ravg.pred_demand;
-#endif
+ __entry->demand = task_load(p);
+ __entry->pred_demand = task_pl(p);
),
- TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
-#ifdef CONFIG_SCHED_HMP
- " demand=%u pred_demand=%u"
-#endif
- , __entry->cpu,
+ TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x demand=%u pred_demand=%u",
+ __entry->cpu,
__entry->enqueue ? "enqueue" : "dequeue",
__entry->comm, __entry->pid,
__entry->prio, __entry->nr_running,
__entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
-#ifdef CONFIG_SCHED_HMP
, __entry->demand, __entry->pred_demand
-#endif
)
);
@@ -254,7 +245,7 @@
__entry->pred_demand = p->ravg.pred_demand;
memcpy(__entry->hist, p->ravg.sum_history,
RAVG_HIST_SIZE_MAX * sizeof(u32));
- __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
+ __entry->nr_big_tasks = rq->walt_stats.nr_big_tasks;
__entry->cpu = rq->cpu;
),
@@ -549,9 +540,9 @@
#ifdef CONFIG_SCHED_WALT
DECLARE_EVENT_CLASS(sched_cpu_load,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
- TP_ARGS(rq, idle, irqload, power_cost, temp),
+ TP_ARGS(rq, idle, irqload, power_cost),
TP_STRUCT__entry(
__field(unsigned int, cpu )
@@ -566,197 +557,79 @@
__field(unsigned int, power_cost )
__field( int, cstate )
__field( int, dstate )
- __field( int, temp )
),
TP_fast_assign(
__entry->cpu = rq->cpu;
__entry->idle = idle;
__entry->nr_running = rq->nr_running;
- __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
+ __entry->nr_big_tasks = rq->walt_stats.nr_big_tasks;
__entry->load_scale_factor = cpu_load_scale_factor(rq->cpu);
__entry->capacity = cpu_capacity(rq->cpu);
- __entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
+ __entry->cumulative_runnable_avg = rq->walt_stats.cumulative_runnable_avg;
__entry->irqload = irqload;
__entry->max_freq = cpu_max_freq(rq->cpu);
__entry->power_cost = power_cost;
__entry->cstate = rq->cstate;
__entry->dstate = rq->cluster->dstate;
- __entry->temp = temp;
),
- TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d",
+ TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d",
__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
__entry->load_scale_factor, __entry->capacity,
__entry->cumulative_runnable_avg, __entry->irqload,
__entry->max_freq, __entry->power_cost, __entry->cstate,
- __entry->dstate, __entry->temp)
+ __entry->dstate)
);
DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
- TP_ARGS(rq, idle, irqload, power_cost, temp)
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
+ TP_ARGS(rq, idle, irqload, power_cost)
+);
+
+TRACE_EVENT(sched_load_to_gov,
+
+ TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load, u64 freq_aggr_thresh, u64 load, int policy),
+ TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr_thresh, load, policy),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( int, policy )
+ __field( int, ed_task_pid )
+ __field( u64, aggr_grp_load )
+ __field( u64, freq_aggr_thresh )
+ __field( u64, tt_load )
+ __field( u64, rq_ps )
+ __field( u64, grp_rq_ps )
+ __field( u64, nt_ps )
+ __field( u64, grp_nt_ps )
+ __field( u64, pl )
+ __field( u64, load )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu_of(rq);
+ __entry->policy = policy;
+ __entry->ed_task_pid = rq->ed_task ? rq->ed_task->pid : -1;
+ __entry->aggr_grp_load = aggr_grp_load;
+ __entry->freq_aggr_thresh = freq_aggr_thresh;
+ __entry->tt_load = tt_load;
+ __entry->rq_ps = rq->prev_runnable_sum;
+ __entry->grp_rq_ps = rq->grp_time.prev_runnable_sum;
+ __entry->nt_ps = rq->nt_prev_runnable_sum;
+ __entry->grp_nt_ps = rq->grp_time.nt_prev_runnable_sum;
+ __entry->pl = rq->walt_stats.pred_demands_sum;
+ __entry->load = load;
+ ),
+
+ TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr_thresh=%llu tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu",
+ __entry->cpu, __entry->policy, __entry->ed_task_pid,
+ __entry->aggr_grp_load, __entry->freq_aggr_thresh,
+ __entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
+ __entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load)
);
#endif
-#ifdef CONFIG_SCHED_HMP
-
-TRACE_EVENT(sched_task_load,
-
- TP_PROTO(struct task_struct *p, bool boost, int reason,
- bool sync, bool need_idle, u32 flags, int best_cpu),
-
- TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field(unsigned int, demand )
- __field( bool, boost )
- __field( int, reason )
- __field( bool, sync )
- __field( bool, need_idle )
- __field( u32, flags )
- __field( int, best_cpu )
- __field( u64, latency )
- __field( int, grp_id )
- __field( u64, avg_burst )
- __field( u64, avg_sleep )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->demand = p->ravg.demand;
- __entry->boost = boost;
- __entry->reason = reason;
- __entry->sync = sync;
- __entry->need_idle = need_idle;
- __entry->flags = flags;
- __entry->best_cpu = best_cpu;
- __entry->latency = p->state == TASK_WAKING ?
- sched_ktime_clock() -
- p->ravg.mark_start : 0;
- __entry->grp_id = p->grp ? p->grp->id : 0;
- __entry->avg_burst = p->ravg.avg_burst;
- __entry->avg_sleep = p->ravg.avg_sleep_time;
- ),
-
- TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu avg_sleep=%llu",
- __entry->pid, __entry->comm, __entry->demand,
- __entry->boost, __entry->reason, __entry->sync,
- __entry->need_idle, __entry->flags, __entry->grp_id,
- __entry->best_cpu, __entry->latency, __entry->avg_burst,
- __entry->avg_sleep)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
- TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
- TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-TRACE_EVENT(sched_reset_all_window_stats,
-
- TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
- int reason, unsigned int old_val, unsigned int new_val),
-
- TP_ARGS(window_start, window_size, time_taken,
- reason, old_val, new_val),
-
- TP_STRUCT__entry(
- __field( u64, window_start )
- __field( u64, window_size )
- __field( u64, time_taken )
- __field( int, reason )
- __field(unsigned int, old_val )
- __field(unsigned int, new_val )
- ),
-
- TP_fast_assign(
- __entry->window_start = window_start;
- __entry->window_size = window_size;
- __entry->time_taken = time_taken;
- __entry->reason = reason;
- __entry->old_val = old_val;
- __entry->new_val = new_val;
- ),
-
- TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
- __entry->time_taken, __entry->window_start,
- __entry->window_size,
- sched_window_reset_reasons[__entry->reason],
- __entry->old_val, __entry->new_val)
-);
-
-TRACE_EVENT(sched_get_busy,
-
- TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early),
-
- TP_ARGS(cpu, load, nload, pload, early),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( u64, load )
- __field( u64, nload )
- __field( u64, pload )
- __field( int, early )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->load = load;
- __entry->nload = nload;
- __entry->pload = pload;
- __entry->early = early;
- ),
-
- TP_printk("cpu %d load %lld new_task_load %lld predicted_load %lld early %d",
- __entry->cpu, __entry->load, __entry->nload,
- __entry->pload, __entry->early)
-);
-
-TRACE_EVENT(sched_freq_alert,
-
- TP_PROTO(int cpu, int pd_notif, int check_groups, struct rq *rq,
- u64 new_load),
-
- TP_ARGS(cpu, pd_notif, check_groups, rq, new_load),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( int, pd_notif )
- __field( int, check_groups )
- __field( u64, old_busy_time )
- __field( u64, ps )
- __field( u64, new_load )
- __field( u64, old_pred )
- __field( u64, new_pred )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->pd_notif = pd_notif;
- __entry->check_groups = check_groups;
- __entry->old_busy_time = rq->old_busy_time;
- __entry->ps = rq->prev_runnable_sum;
- __entry->new_load = new_load;
- __entry->old_pred = rq->old_estimated_time;
- __entry->new_pred = rq->hmp_stats.pred_demands_sum;
- ),
-
- TP_printk("cpu %d pd_notif=%d check_groups %d old_busy_time=%llu prev_sum=%lld new_load=%llu old_pred=%llu new_pred=%llu",
- __entry->cpu, __entry->pd_notif, __entry->check_groups,
- __entry->old_busy_time, __entry->ps, __entry->new_load,
- __entry->old_pred, __entry->new_pred)
-);
-
-#endif /* CONFIG_SCHED_HMP */
-
#ifdef CONFIG_SMP
TRACE_EVENT(sched_cpu_util,
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index eb7e0c6..71c2c9e 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -322,6 +322,7 @@
#define DRM_EVENT_AD_BACKLIGHT 0x80000001
#define DRM_EVENT_CRTC_POWER 0x80000002
#define DRM_EVENT_SYS_BACKLIGHT 0x80000003
+#define DRM_EVENT_SDE_POWER 0x80000004
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index 7945af0..c7f2308 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -28,17 +28,35 @@
/**
* struct drm_msm_pcc - pcc feature structure
- * flags: for customizing operations
- * r: red coefficients.
- * g: green coefficients.
- * b: blue coefficients.
+ * @flags: for customizing operations
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ * @r_rr: second order coefficients
+ * @r_gg: second order coefficients
+ * @r_bb: second order coefficients
+ * @g_rr: second order coefficients
+ * @g_gg: second order coefficients
+ * @g_bb: second order coefficients
+ * @b_rr: second order coefficients
+ * @b_gg: second order coefficients
+ * @b_bb: second order coefficients
*/
-
+#define DRM_MSM_PCC3
struct drm_msm_pcc {
__u64 flags;
struct drm_msm_pcc_coeff r;
struct drm_msm_pcc_coeff g;
struct drm_msm_pcc_coeff b;
+ __u32 r_rr;
+ __u32 r_gg;
+ __u32 r_bb;
+ __u32 g_rr;
+ __u32 g_gg;
+ __u32 g_bb;
+ __u32 b_rr;
+ __u32 b_gg;
+ __u32 b_bb;
};
/* struct drm_msm_pa_vlut - picture adjustment vLUT structure
@@ -135,6 +153,26 @@
__u32 c2[PGC_TBL_LEN];
};
+#define IGC_TBL_LEN 256
+#define IGC_DITHER_ENABLE (1 << 0)
+/**
+ * struct drm_msm_igc_lut - igc lut feature structure
+ * @flags: flags for the feature customization, values can be:
+ * - IGC_DITHER_ENABLE: Enable dither functionality
+ * @c0: color0 component lut
+ * @c1: color1 component lut
+ * @c2: color2 component lut
+ * @strength: dither strength, considered valid when IGC_DITHER_ENABLE
+ * is set in flags. Strength value based on source bit width.
+ */
+struct drm_msm_igc_lut {
+ __u64 flags;
+ __u32 c0[IGC_TBL_LEN];
+ __u32 c1[IGC_TBL_LEN];
+ __u32 c2[IGC_TBL_LEN];
+ __u32 strength;
+};
+
#define AD4_LUT_GRP0_SIZE 33
#define AD4_LUT_GRP1_SIZE 32
/*
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index d3cbe48..7d1e3b2 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -4285,6 +4285,9 @@
* of supported channel widths for radar detection.
* @NL80211_IFACE_COMB_RADAR_DETECT_REGIONS: u32 attribute containing the bitmap
* of supported regulatory regions for radar detection.
+ * @NL80211_IFACE_COMB_BI_MIN_GCD: u32 attribute specifying the minimum GCD of
+ * different beacon intervals supported by all the interface combinations
+ * in this group (if not present, all beacon intervals must be identical).
* @NUM_NL80211_IFACE_COMB: number of attributes
* @MAX_NL80211_IFACE_COMB: highest attribute number
*
@@ -4292,8 +4295,8 @@
* limits = [ #{STA} <= 1, #{AP} <= 1 ], matching BI, channels = 1, max = 2
* => allows an AP and a STA that must match BIs
*
- * numbers = [ #{AP, P2P-GO} <= 8 ], channels = 1, max = 8
- * => allows 8 of AP/GO
+ * numbers = [ #{AP, P2P-GO} <= 8 ], BI min gcd, channels = 1, max = 8,
+ * => allows 8 of AP/GO that can have BI gcd >= min gcd
*
* numbers = [ #{STA} <= 2 ], channels = 2, max = 2
* => allows two STAs on different channels
@@ -4319,6 +4322,7 @@
NL80211_IFACE_COMB_NUM_CHANNELS,
NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
+ NL80211_IFACE_COMB_BI_MIN_GCD,
/* keep last */
NUM_NL80211_IFACE_COMB,
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index e5c4ddf..731b2f0 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1074,6 +1074,11 @@
V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED,
};
+#define V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 109)
+#define V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_10BIT \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 110)
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index e6c1a45..23a8ccf 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -22,6 +22,7 @@
#define CAM_ACTUATOR_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 9)
#define CAM_CCI_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 10)
#define CAM_FLASH_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 11)
+#define CAM_EEPROM_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 12)
/* cam_req_mgr hdl info */
#define CAM_REQ_MGR_HDL_IDX_POS 8
@@ -43,6 +44,10 @@
#define V4L_EVENT_CAM_REQ_MGR_ERROR 1
#define V4L_EVENT_CAM_REQ_MGR_MAX 2
+/* SOF Event status */
+#define CAM_REQ_MGR_SOF_EVENT_SUCCESS 0
+#define CAM_REQ_MGR_SOF_EVENT_ERROR 1
+
/**
* Request Manager : flush_type
* @CAM_REQ_MGR_FLUSH_TYPE_ALL: Req mgr will remove all the pending
@@ -353,14 +358,18 @@
/**
* struct cam_req_mgr_frame_msg
- * @request_id: request id of frame
- * @frame_count: running count of frames
- * @timestamp: timestamp of frame
+ * @request_id: request id of the frame
+ * @frame_id: frame id of the frame
+ * @timestamp: timestamp of the frame
+ * @link_hdl: link handle associated with this message
+ * @sof_status: sof status success or fail
*/
struct cam_req_mgr_frame_msg {
uint64_t request_id;
- uint64_t frame_count;
+ uint64_t frame_id;
uint64_t timestamp;
+ int32_t link_hdl;
+ uint32_t sof_status;
};
/**
diff --git a/include/uapi/media/cam_sensor.h b/include/uapi/media/cam_sensor.h
index 83f1a02..ac370ba 100644
--- a/include/uapi/media/cam_sensor.h
+++ b/include/uapi/media/cam_sensor.h
@@ -6,7 +6,7 @@
#include <media/cam_defs.h>
#define CAM_SENSOR_PROBE_CMD (CAM_COMMON_OPCODE_MAX + 1)
-#define CAM_SENSOR_MAX_LED_TRIGGERS 3
+#define CAM_FLASH_MAX_LED_TRIGGERS 3
/**
* struct cam_sensor_query_cap - capabilities info for sensor
*
@@ -63,6 +63,18 @@
} __attribute__((packed));
/**
+ * struct cam_eeprom_query_cap_t - capabilities info for eeprom
+ *
+ * @slot_info : Indicates about the slotId or cell Index
+ * @eeprom_kernel_probe : Indicates about the kernel or userspace probe
+ */
+struct cam_eeprom_query_cap_t {
+ uint32_t slot_info;
+ uint16_t eeprom_kernel_probe;
+ uint16_t reserved;
+} __attribute__((packed));
+
+/**
* struct cam_cmd_i2c_info - Contains slave I2C related info
*
* @slave_addr : Slave address
@@ -360,7 +372,7 @@
uint16_t reserved;
uint32_t led_on_delay_ms;
uint32_t led_off_delay_ms;
- uint32_t led_current_ma[CAM_SENSOR_MAX_LED_TRIGGERS];
+ uint32_t led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
} __attribute__((packed));
/**
@@ -379,7 +391,7 @@
uint16_t count;
uint8_t opcode;
uint8_t cmd_type;
- uint32_t led_current_ma[CAM_SENSOR_MAX_LED_TRIGGERS];
+ uint32_t led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
} __attribute__((packed));
/**
@@ -409,9 +421,9 @@
*/
struct cam_flash_query_cap_info {
uint32_t slot_info;
- uint32_t max_current_flash[CAM_SENSOR_MAX_LED_TRIGGERS];
- uint32_t max_duration_flash[CAM_SENSOR_MAX_LED_TRIGGERS];
- uint32_t max_current_torch[CAM_SENSOR_MAX_LED_TRIGGERS];
+ uint32_t max_current_flash[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t max_duration_flash[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t max_current_torch[CAM_FLASH_MAX_LED_TRIGGERS];
} __attribute__ ((packed));
#endif
diff --git a/include/uapi/sound/devdep_params.h b/include/uapi/sound/devdep_params.h
index 5061ec0..9e3133b 100644
--- a/include/uapi/sound/devdep_params.h
+++ b/include/uapi/sound/devdep_params.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -66,4 +66,14 @@
uint32_t device;
} __packed;
+#define HWDEP_FE_BASE 3000 /*unique base for FE hw dep nodes*/
+struct snd_pcm_mmap_fd {
+ int32_t dir;
+ int32_t fd;
+ int32_t size;
+ int32_t actual_size;
+};
+
+#define SNDRV_PCM_IOCTL_MMAP_DATA_FD _IOWR('U', 0xd2, struct snd_pcm_mmap_fd)
+
#endif
diff --git a/init/Kconfig b/init/Kconfig
index 954de19..af000c7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1276,16 +1276,6 @@
endif # CGROUPS
-config SCHED_HMP
- bool "Scheduler support for heterogenous multi-processor systems"
- select SCHED_WALT
- depends on SMP && FAIR_GROUP_SCHED
- help
- This feature will let the scheduler optimize task placement on
- systems made of heterogeneous cpus i.e cpus that differ either
- in their instructions per-cycle capability or the maximum
- frequency they can attain.
-
config SCHED_WALT
bool "WALT"
depends on SMP && FAIR_GROUP_SCHED
@@ -1293,14 +1283,6 @@
Use Window-Assisted Load Tracking (WALT) as an alternative or
additional load tracking scheme in lieu of or along with PELT.
-config SCHED_HMP_CSTATE_AWARE
- bool "CPU C-state aware scheduler"
- depends on SCHED_HMP
- help
- This feature will let the HMP scheduler optimize task placement
- with CPUs C-state. If this is enabled, scheduler places tasks
- onto the shallowest C-state CPU among the most power efficient CPUs.
-
config SCHED_CORE_CTL
bool "QTI Core Control"
depends on SMP
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a37a10b..02fb438 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1249,8 +1249,10 @@
timeo = MAX_SCHEDULE_TIMEOUT;
ret = netlink_attachskb(sock, nc, &timeo, NULL);
- if (ret == 1)
+ if (ret == 1) {
+ sock = NULL;
goto retry;
+ }
if (ret) {
sock = NULL;
nc = NULL;
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 301e1a6..80df048 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -17,7 +17,6 @@
CONFIG_BLK_DEV_INITRD=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_BPF=y
diff --git a/kernel/exit.c b/kernel/exit.c
index 83e8afa..35ff283 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -465,6 +465,7 @@
{
struct mm_struct *mm = tsk->mm;
struct core_state *core_state;
+ int mm_released;
mm_release(tsk, mm);
if (!mm)
@@ -511,9 +512,12 @@
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mm_update_next_owner(mm);
- mmput(mm);
+
+ mm_released = mmput(mm);
if (test_thread_flag(TIF_MEMDIE))
exit_oom_victim();
+ if (mm_released)
+ set_tsk_thread_flag(tsk, TIF_MM_RELEASED);
}
static struct task_struct *find_alive_thread(struct task_struct *p)
diff --git a/kernel/fork.c b/kernel/fork.c
index 33663b0..f90327b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -877,12 +877,17 @@
/*
* Decrement the use count and release all resources for an mm.
*/
-void mmput(struct mm_struct *mm)
+int mmput(struct mm_struct *mm)
{
+ int mm_freed = 0;
might_sleep();
- if (atomic_dec_and_test(&mm->mm_users))
+ if (atomic_dec_and_test(&mm->mm_users)) {
__mmput(mm);
+ mm_freed = 1;
+ }
+
+ return mm_freed;
}
EXPORT_SYMBOL_GPL(mmput);
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 2bef4ab..a608f7a 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -233,8 +233,8 @@
out_nolock:
list_del(&waiter.list);
- if (!list_empty(&sem->wait_list))
- __rwsem_do_wake(sem, 1);
+ if (!list_empty(&sem->wait_list) && sem->count >= 0)
+ __rwsem_do_wake(sem, 0);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return -EINTR;
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 12fe782..009f788 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -588,7 +588,12 @@
if (irq_can_set_affinity(req->irq)) {
int ret = 0;
struct irq_desc *desc = irq_to_desc(req->irq);
- struct cpumask *mask = desc->irq_data.common->affinity;
+ struct cpumask *mask;
+
+ if (!desc)
+ break;
+
+ mask = desc->irq_data.common->affinity;
/* Get the current affinity */
cpumask_copy(&req->cpus_affine, mask);
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 3d12ce8..f6cce95 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -18,7 +18,6 @@
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
obj-y += wait.o swait.o completion.o idle.o
-obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o sched_avg.o
obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c
index 1a3309b..1ccd19d 100644
--- a/kernel/sched/boost.c
+++ b/kernel/sched/boost.c
@@ -32,7 +32,7 @@
{
struct rq *rq = cpu_rq(cpu);
- if (!test_and_set_bit(BOOST_KICK, &rq->hmp_flags))
+ if (!test_and_set_bit(BOOST_KICK, &rq->walt_flags))
smp_send_reschedule(cpu);
}
@@ -57,14 +57,14 @@
int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu);
- return test_bit(BOOST_KICK, &rq->hmp_flags);
+ return test_bit(BOOST_KICK, &rq->walt_flags);
}
void clear_boost_kick(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- clear_bit(BOOST_KICK, &rq->hmp_flags);
+ clear_bit(BOOST_KICK, &rq->walt_flags);
}
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c2433b3..297c38a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1074,7 +1074,6 @@
struct migration_arg *arg = data;
struct task_struct *p = arg->task;
struct rq *rq = this_rq();
- int src_cpu = cpu_of(rq);
bool moved = false;
/*
@@ -1109,9 +1108,6 @@
local_irq_enable();
- if (moved)
- notify_migration(src_cpu, arg->dest_cpu, false, p);
-
return 0;
}
@@ -1287,8 +1283,6 @@
#endif
#endif
- trace_sched_migrate_task(p, new_cpu, pct_task_load(p));
-
if (task_cpu(p) != new_cpu) {
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p);
@@ -1685,7 +1679,7 @@
return cpu;
}
-void update_avg(u64 *avg, u64 sample)
+static void update_avg(u64 *avg, u64 sample)
{
s64 diff = sample - *avg;
*avg += diff >> 3;
@@ -2091,12 +2085,9 @@
struct related_thread_group *grp = NULL;
int src_cpu;
bool notif_required = false;
- bool freq_notif_allowed = !(wake_flags & WF_NO_NOTIFIER);
bool check_group = false;
#endif
- wake_flags &= ~WF_NO_NOTIFIER;
-
/*
* If we are going to wake up a thread waiting for CONDITION we
* need to ensure that CONDITION=1 done by the caller can not be
@@ -2196,9 +2187,6 @@
notif_required = true;
}
- if (!__task_in_cum_window_demand(cpu_rq(cpu), p))
- inc_cum_window_demand(cpu_rq(cpu), p, task_load(p));
-
note_task_waking(p, wallclock);
#endif /* CONFIG_SMP */
@@ -2208,19 +2196,6 @@
out:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-#ifdef CONFIG_SMP
- if (freq_notif_allowed) {
- if (notif_required && !same_freq_domain(src_cpu, cpu)) {
- check_for_freq_change(cpu_rq(cpu),
- false, check_group);
- check_for_freq_change(cpu_rq(src_cpu),
- false, check_group);
- } else if (success) {
- check_for_freq_change(cpu_rq(cpu), true, false);
- }
- }
-#endif
-
return success;
}
@@ -2271,8 +2246,6 @@
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
- if (!__task_in_cum_window_demand(rq, p))
- inc_cum_window_demand(rq, p, task_load(p));
cpufreq_update_util(rq, 0);
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
note_task_waking(p, wallclock);
@@ -2302,26 +2275,6 @@
}
EXPORT_SYMBOL(wake_up_process);
-/**
- * wake_up_process_no_notif - Wake up a specific process without notifying
- * governor
- * @p: The process to be woken up.
- *
- * Attempt to wake up the nominated process and move it to the set of runnable
- * processes.
- *
- * Return: 1 if the process was woken up, 0 if it was already running.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-int wake_up_process_no_notif(struct task_struct *p)
-{
- WARN_ON(task_is_stopped_or_traced(p));
- return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER);
-}
-EXPORT_SYMBOL(wake_up_process_no_notif);
-
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
@@ -3165,7 +3118,7 @@
*load = rq->load.weight;
}
-#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_HMP)
+#ifdef CONFIG_SMP
/*
* sched_exec - execve() is a valuable balancing opportunity, because at
@@ -3692,15 +3645,10 @@
update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
cpufreq_update_util(rq, 0);
- if (!is_idle_task(prev) && !prev->on_rq)
- update_avg_burst(prev);
-
rq->nr_switches++;
rq->curr = next;
++*switch_count;
- set_task_last_switch_out(prev, wallclock);
-
trace_sched_switch(preempt, prev, next);
rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
} else {
@@ -5953,7 +5901,6 @@
if (rq != dead_rq) {
raw_spin_unlock(&rq->lock);
raw_spin_unlock(&next->pi_lock);
- notify_migration(dead_rq->cpu, dest_cpu, true, next);
rq = dead_rq;
raw_spin_lock(&next->pi_lock);
raw_spin_lock(&rq->lock);
@@ -6006,7 +5953,7 @@
*/
nohz_balance_clear_nohz_mask(cpu);
- clear_hmp_request(cpu);
+ clear_walt_request(cpu);
local_irq_enable();
return 0;
}
@@ -8116,7 +8063,7 @@
BUG_ON(rq->nr_running != 1);
raw_spin_unlock_irqrestore(&rq->lock, flags);
- clear_hmp_request(cpu);
+ clear_walt_request(cpu);
calc_load_migrate(rq);
update_max_interval();
@@ -8236,9 +8183,6 @@
for (i = 0; i < WAIT_TABLE_SIZE; i++)
init_waitqueue_head(bit_wait_table + i);
-#ifdef CONFIG_SCHED_HMP
- pr_info("HMP scheduling enabled.\n");
-#endif
sched_boost_parse_dt();
init_clusters();
@@ -8359,56 +8303,8 @@
rq->avg_idle = 2*sysctl_sched_migration_cost;
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
rq->push_task = NULL;
-#ifdef CONFIG_SCHED_WALT
- cpumask_set_cpu(i, &rq->freq_domain_cpumask);
- init_irq_work(&rq->irq_work, walt_irq_work);
- rq->hmp_stats.cumulative_runnable_avg = 0;
- rq->window_start = 0;
- rq->cum_window_start = 0;
- rq->hmp_stats.nr_big_tasks = 0;
- rq->hmp_flags = 0;
- rq->cur_irqload = 0;
- rq->avg_irqload = 0;
- rq->irqload_ts = 0;
- rq->static_cpu_pwr_cost = 0;
- rq->cc.cycles = 1;
- rq->cc.time = 1;
- rq->cstate = 0;
- rq->wakeup_latency = 0;
- rq->wakeup_energy = 0;
+ walt_sched_init(rq);
- /*
- * All cpus part of same cluster by default. This avoids the
- * need to check for rq->cluster being non-NULL in hot-paths
- * like select_best_cpu()
- */
- rq->cluster = &init_cluster;
- rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
- rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
- memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
- rq->old_busy_time = 0;
- rq->old_estimated_time = 0;
- rq->old_busy_time_group = 0;
- rq->hmp_stats.pred_demands_sum = 0;
- rq->ed_task = NULL;
- rq->curr_table = 0;
- rq->prev_top = 0;
- rq->curr_top = 0;
-
- for (j = 0; j < NUM_TRACKED_WINDOWS; j++) {
- memset(&rq->load_subs[j], 0,
- sizeof(struct load_subtractions));
-
- rq->top_tasks[j] = kcalloc(NUM_LOAD_INDICES,
- sizeof(u8), GFP_NOWAIT);
-
- /* No other choice */
- BUG_ON(!rq->top_tasks[j]);
-
- clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]);
- }
- rq->cum_window_demand = 0;
-#endif
INIT_LIST_HEAD(&rq->cfs_tasks);
rq_attach_root(rq, &def_root_domain);
@@ -8427,8 +8323,6 @@
i = alloc_related_thread_groups();
BUG_ON(i);
- set_hmp_defaults();
-
set_load_weight(&init_task);
/*
@@ -9552,13 +9446,6 @@
#endif /* CONFIG_RT_GROUP_SCHED */
static struct cftype cpu_files[] = {
-#ifdef CONFIG_SCHED_HMP
- {
- .name = "upmigrate_discourage",
- .read_u64 = cpu_upmigrate_discourage_read_u64,
- .write_u64 = cpu_upmigrate_discourage_write_u64,
- },
-#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "shares",
@@ -9685,8 +9572,13 @@
wallclock = sched_ktime_clock();
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
dequeue_task(rq, p, 0);
+ /*
+ * task's contribution is already removed from the
+ * cumulative window demand in dequeue. As the
+ * task's stats are reset, the next enqueue does
+ * not change the cumulative window demand.
+ */
reset_task_stats(p);
- dec_cum_window_demand(rq, p);
p->ravg.mark_start = wallclock;
p->ravg.sum_history[0] = EXITING_TASK_MARKER;
free_task_load_ptrs(p);
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index b140e55..4c3bf526 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -612,8 +612,7 @@
spin_lock_irqsave(&cluster->pending_lock, flags);
cluster->pending = true;
spin_unlock_irqrestore(&cluster->pending_lock, flags);
-
- wake_up_process_no_notif(cluster->core_ctl_thread);
+ wake_up_process(cluster->core_ctl_thread);
}
static u64 core_ctl_check_timestamp;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index b6a639b..dce76d1 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -404,12 +404,13 @@
static void sugov_work(struct kthread_work *work)
{
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
+ unsigned long flags;
mutex_lock(&sg_policy->work_lock);
- raw_spin_lock(&sg_policy->update_lock);
+ raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
sugov_track_cycles(sg_policy, sg_policy->policy->cur,
sched_ktime_clock());
- raw_spin_unlock(&sg_policy->update_lock);
+ raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
CPUFREQ_RELATION_L);
mutex_unlock(&sg_policy->work_lock);
@@ -488,18 +489,19 @@
unsigned int val;
struct sugov_policy *sg_policy;
unsigned long hs_util;
+ unsigned long flags;
if (kstrtouint(buf, 10, &val))
return -EINVAL;
tunables->hispeed_freq = val;
list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
- raw_spin_lock(&sg_policy->update_lock);
+ raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
hs_util = freq_to_util(sg_policy,
sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
- raw_spin_unlock(&sg_policy->update_lock);
+ raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
}
return count;
@@ -784,13 +786,14 @@
static void sugov_limits(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;
+ unsigned long flags;
if (!policy->fast_switch_enabled) {
mutex_lock(&sg_policy->work_lock);
- raw_spin_lock(&sg_policy->update_lock);
+ raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
sugov_track_cycles(sg_policy, sg_policy->policy->cur,
sched_ktime_clock());
- raw_spin_unlock(&sg_policy->update_lock);
+ raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
cpufreq_policy_apply_limits(policy);
mutex_unlock(&sg_policy->work_lock);
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 10a807c..08d4511 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -19,41 +19,6 @@
#include <linux/slab.h>
-#ifdef CONFIG_SCHED_WALT
-
-static void
-inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
-{
- inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
-{
- dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
- pred_demand_delta);
-}
-
-#else /* CONFIG_SCHED_WALT */
-
-static inline void
-inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
-
-#endif /* CONFIG_SCHED_WALT */
-
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
@@ -865,7 +830,7 @@
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
add_nr_running(rq_of_dl_rq(dl_rq), 1);
- inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
+ walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
inc_dl_deadline(dl_rq, deadline);
inc_dl_migration(dl_se, dl_rq);
@@ -880,7 +845,7 @@
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
sub_nr_running(rq_of_dl_rq(dl_rq), 1);
- dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
+ walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
dec_dl_deadline(dl_rq, dl_se->deadline);
dec_dl_migration(dl_se, dl_rq);
@@ -1845,7 +1810,7 @@
.update_curr = update_curr_dl,
#ifdef CONFIG_SCHED_WALT
- .fixup_hmp_sched_stats = fixup_hmp_sched_stats_dl,
+ .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
};
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index ed9f6db..0f8c0b2 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -600,12 +600,6 @@
cfs_rq->throttle_count);
SEQ_printf(m, " .%-30s: %d\n", "runtime_enabled",
cfs_rq->runtime_enabled);
-#ifdef CONFIG_SCHED_WALT
- SEQ_printf(m, " .%-30s: %d\n", "nr_big_tasks",
- cfs_rq->hmp_stats.nr_big_tasks);
- SEQ_printf(m, " .%-30s: %llu\n", "cumulative_runnable_avg",
- cfs_rq->hmp_stats.cumulative_runnable_avg);
-#endif
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -698,10 +692,6 @@
P(cpu_capacity);
#endif
#ifdef CONFIG_SCHED_WALT
-#ifdef CONFIG_SCHED_HMP
- P(static_cpu_pwr_cost);
- P(cluster->static_cluster_pwr_cost);
-#endif
P(cluster->load_scale_factor);
P(cluster->capacity);
P(cluster->max_possible_capacity);
@@ -710,10 +700,10 @@
P(cluster->max_freq);
P(cluster->exec_scale_factor);
#ifdef CONFIG_SCHED_WALT
- P(hmp_stats.nr_big_tasks);
+ P(walt_stats.nr_big_tasks);
#endif
- SEQ_printf(m, " .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
- rq->hmp_stats.cumulative_runnable_avg);
+ SEQ_printf(m, " .%-30s: %llu\n", "walt_stats.cumulative_runnable_avg",
+ rq->walt_stats.cumulative_runnable_avg);
#endif
#undef P
#undef PN
@@ -794,10 +784,6 @@
P(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
#ifdef CONFIG_SCHED_WALT
-#ifdef CONFIG_SCHED_HMP
- P(sched_upmigrate);
- P(sched_downmigrate);
-#endif
P(sched_init_task_load_windows);
P(min_capacity);
P(max_capacity);
@@ -965,9 +951,6 @@
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
unsigned long nr_switches;
- unsigned int load_avg;
-
- load_avg = pct_task_load(p);
SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
get_nr_threads(p));
@@ -1025,12 +1008,9 @@
P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
- __P(load_avg);
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
P(ravg.demand);
#endif
-#endif
avg_atom = p->se.sum_exec_runtime;
if (nr_switches)
avg_atom = div64_ul(avg_atom, nr_switches);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d06ac7d..6b54c26 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,94 +37,6 @@
#include "walt.h"
#include <trace/events/sched.h>
-/* QHMP/Zone forward declarations */
-
-struct lb_env;
-struct sd_lb_stats;
-struct sg_lb_stats;
-
-#ifdef CONFIG_SCHED_WALT
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand);
-#endif
-
-#ifdef CONFIG_SCHED_HMP
-#ifdef CONFIG_CFS_BANDWIDTH
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra);
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra);
-
-static inline void dec_throttled_cfs_rq_hmp_stats(
- struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq);
-
-static inline void inc_throttled_cfs_rq_hmp_stats(
- struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq);
-
-static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq);
-
-#else /* CONFIG_CFS_BANDWIDTH */
-static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra) { }
-static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra) { }
-#endif /* CONFIG_CFS_BANDWIDTH */
-
-#ifdef CONFIG_SMP
-
-static struct rq *find_busiest_queue_hmp(struct lb_env *env,
- struct sched_group *group);
-static int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds);
-
-static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
- struct sd_lb_stats *sds,
- struct sched_group *sg,
- struct sg_lb_stats *sgs);
-
-static int select_best_cpu(struct task_struct *p, int target, int reason,
- int sync);
-
-#ifdef CONFIG_NO_HZ_COMMON
-static int find_new_hmp_ilb(int type);
-static int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type);
-#endif /* CONFIG_NO_HZ_COMMON */
-#endif /* CONFIG_SMP */
-#else /* CONFIG_SCHED_HMP */
-
-static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra) { }
-static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra) { }
-static inline void dec_throttled_cfs_rq_hmp_stats(
- struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq) { }
-static inline void inc_throttled_cfs_rq_hmp_stats(
- struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq) { }
-static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
-
-#ifdef CONFIG_SMP
-
-static inline int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
-{
- return 0;
-}
-
-static inline bool update_sd_pick_busiest_active_balance(struct lb_env *env,
- struct sd_lb_stats *sds,
- struct sched_group *sg,
- struct sg_lb_stats *sgs)
-{
- return false;
-}
-#endif /* CONFIG_SMP */
-
-#endif /* CONFIG_SCHED_HMP */
-
#ifdef CONFIG_SCHED_WALT
static inline bool task_fits_max(struct task_struct *p, int cpu);
#endif
@@ -4095,16 +4007,13 @@
if (dequeue)
dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
qcfs_rq->h_nr_running -= task_delta;
- dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq);
if (qcfs_rq->load.weight)
dequeue = 0;
}
- if (!se) {
+ if (!se)
sub_nr_running(rq, task_delta);
- dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq);
- }
cfs_rq->throttled = 1;
cfs_rq->throttled_clock = rq_clock(rq);
@@ -4125,12 +4034,6 @@
start_cfs_bandwidth(cfs_b);
raw_spin_unlock(&cfs_b->lock);
-
- /* Log effect on hmp stats after throttling */
- trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
- sched_irqload(cpu_of(rq)),
- power_cost(cpu_of(rq), 0),
- cpu_temp(cpu_of(rq)));
}
void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
@@ -4168,26 +4071,17 @@
if (enqueue)
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
cfs_rq->h_nr_running += task_delta;
- inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq);
if (cfs_rq_throttled(cfs_rq))
break;
}
- if (!se) {
+ if (!se)
add_nr_running(rq, task_delta);
- inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq);
- }
/* determine whether we need to wake up potentially idle cpu */
if (rq->curr == rq->idle && rq->cfs.nr_running)
resched_curr(rq);
-
- /* Log effect on hmp stats after un-throttling */
- trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
- sched_irqload(cpu_of(rq)),
- power_cost(cpu_of(rq), 0),
- cpu_temp(cpu_of(rq)));
}
static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -4525,7 +4419,6 @@
{
cfs_rq->runtime_enabled = 0;
INIT_LIST_HEAD(&cfs_rq->throttled_list);
- init_cfs_rq_hmp_stats(cfs_rq);
}
void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -4743,7 +4636,6 @@
if (cfs_rq_throttled(cfs_rq))
break;
cfs_rq->h_nr_running++;
- inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
flags = ENQUEUE_WAKEUP;
}
@@ -4751,7 +4643,6 @@
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running++;
- inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
if (cfs_rq_throttled(cfs_rq))
break;
@@ -4765,7 +4656,7 @@
#ifdef CONFIG_SCHED_WALT
p->misfit = !task_fits_max(p, rq->cpu);
#endif
- inc_rq_hmp_stats(rq, p, 1);
+ inc_rq_walt_stats(rq, p);
}
#ifdef CONFIG_SMP
@@ -4821,7 +4712,6 @@
if (cfs_rq_throttled(cfs_rq))
break;
cfs_rq->h_nr_running--;
- dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
@@ -4841,7 +4731,6 @@
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running--;
- dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
if (cfs_rq_throttled(cfs_rq))
break;
@@ -4852,7 +4741,7 @@
if (!se) {
sub_nr_running(rq, 1);
- dec_rq_hmp_stats(rq, p, 1);
+ dec_rq_walt_stats(rq, p);
}
#ifdef CONFIG_SMP
@@ -5407,10 +5296,13 @@
}
static inline bool
-bias_to_waker_cpu(struct task_struct *p, int cpu)
+bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
{
+ int rtg_target_cpu = rtg_target ? cpumask_first(rtg_target) : cpu;
+
return cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
cpu_active(cpu) && !cpu_isolated(cpu) &&
+ capacity_orig_of(cpu) >= capacity_orig_of(rtg_target_cpu) &&
task_fits_max(p, cpu);
}
@@ -6730,6 +6622,7 @@
bool need_idle;
enum sched_boost_policy placement_boost = task_sched_boost(p) ?
sched_boost_policy() : SCHED_BOOST_NONE;
+ struct related_thread_group *grp;
sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));
@@ -6745,22 +6638,17 @@
need_idle = wake_to_idle(p);
- if (sync && bias_to_waker_cpu(p, cpu)) {
+ grp = task_related_thread_group(p);
+ if (grp && grp->preferred_cluster)
+ rtg_target = &grp->preferred_cluster->cpus;
+
+ if (sync && bias_to_waker_cpu(p, cpu, rtg_target)) {
trace_sched_task_util_bias_to_waker(p, task_cpu(p),
task_util(p), cpu, cpu, 0, need_idle);
return cpu;
}
if (sysctl_sched_is_big_little) {
- struct related_thread_group *grp;
-
- rcu_read_lock();
- grp = task_related_thread_group(p);
- rcu_read_unlock();
-
- if (grp && grp->preferred_cluster)
- rtg_target = &grp->preferred_cluster->cpus;
-
task_util_boosted = boosted_task_util(p);
/*
@@ -7076,12 +6964,12 @@
int want_affine = 0;
int sync = wake_flags & WF_SYNC;
-#ifdef CONFIG_SCHED_HMP
- return select_best_cpu(p, prev_cpu, 0, sync);
-#endif
-
- if (energy_aware())
- return energy_aware_wake_cpu(p, prev_cpu, sync);
+ if (energy_aware()) {
+ rcu_read_lock();
+ new_cpu = energy_aware_wake_cpu(p, prev_cpu, sync);
+ rcu_read_unlock();
+ return new_cpu;
+ }
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
@@ -7748,9 +7636,6 @@
enum fbq_type fbq_type;
enum group_type busiest_group_type;
struct list_head tasks;
-#ifdef CONFIG_SCHED_HMP
- enum sched_boost_policy boost_policy;
-#endif
};
/*
@@ -7848,9 +7733,6 @@
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
int tsk_cache_hot;
-#ifdef CONFIG_SCHED_HMP
- int twf, group_cpus;
-#endif
lockdep_assert_held(&env->src_rq->lock);
@@ -7921,37 +7803,6 @@
return 0;
#endif
-#ifdef CONFIG_SCHED_HMP
- if (cpu_capacity(env->dst_cpu) > cpu_capacity(env->src_cpu)) {
- if (nr_big_tasks(env->src_rq) && !is_big_task(p))
- return 0;
-
- if (env->boost_policy == SCHED_BOOST_ON_BIG &&
- !task_sched_boost(p))
- return 0;
- }
-
- twf = task_will_fit(p, env->dst_cpu);
-
- /*
- * Attempt to not pull tasks that don't fit. We may get lucky and find
- * one that actually fits.
- */
- if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
- return 0;
-
- /*
- * Group imbalance can sometimes cause work to be pulled across groups
- * even though the group could have managed the imbalance on its own.
- * Prevent inter-cluster migrations for big tasks when the number of
- * tasks is lower than the capacity of the group.
- */
- group_cpus = DIV_ROUND_UP(env->busiest_grp_capacity,
- SCHED_CAPACITY_SCALE);
- if (!twf && env->busiest_nr_running <= group_cpus)
- return 0;
-#endif
-
if (task_running(env->src_rq, p)) {
schedstat_inc(p->se.statistics.nr_failed_migrations_running);
return 0;
@@ -8292,10 +8143,6 @@
unsigned long group_capacity;
unsigned long group_util; /* Total utilization of the group */
unsigned int sum_nr_running; /* Nr tasks running in the group */
-#ifdef CONFIG_SCHED_HMP
- unsigned long sum_nr_big_tasks;
- u64 group_cpu_load; /* Scaled load of all CPUs of the group */
-#endif
unsigned int idle_cpus;
unsigned int group_weight;
enum group_type group_type;
@@ -8339,10 +8186,6 @@
.avg_load = 0UL,
.sum_nr_running = 0,
.group_type = group_other,
-#ifdef CONFIG_SCHED_HMP
- .sum_nr_big_tasks = 0UL,
- .group_cpu_load = 0ULL,
-#endif
},
};
}
@@ -8673,8 +8516,7 @@
trace_sched_cpu_load_lb(cpu_rq(i), idle_cpu(i),
sched_irqload(i),
- power_cost(i, 0),
- cpu_temp(i));
+ power_cost(i, 0));
if (cpu_isolated(i))
continue;
@@ -8693,11 +8535,6 @@
if (nr_running > 1)
*overload = true;
-#ifdef CONFIG_SCHED_HMP
- sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks;
- sgs->group_cpu_load += cpu_load(i);
-#endif
-
#ifdef CONFIG_NUMA_BALANCING
sgs->nr_numa_running += rq->nr_numa_running;
sgs->nr_preferred_running += rq->nr_preferred_running;
@@ -8759,9 +8596,6 @@
{
struct sg_lb_stats *busiest = &sds->busiest_stat;
- if (update_sd_pick_busiest_active_balance(env, sds, sg, sgs))
- return true;
-
if (sgs->group_type > busiest->group_type)
return true;
@@ -9225,9 +9059,6 @@
if (env->flags & LBF_BIG_TASK_ACTIVE_BALANCE)
goto force_balance;
- if (bail_inter_cluster_balance(env, &sds))
- goto out_balanced;
-
sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
/ sds.total_capacity;
@@ -9306,10 +9137,6 @@
unsigned long busiest_load = 0, busiest_capacity = 1;
int i;
-#ifdef CONFIG_SCHED_HMP
- return find_busiest_queue_hmp(env, group);
-#endif
-
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
unsigned long capacity, wl;
enum fbq_type rt;
@@ -9515,9 +9342,6 @@
.loop = 0,
.busiest_nr_running = 0,
.busiest_grp_capacity = 0,
-#ifdef CONFIG_SCHED_HMP
- .boost_policy = sched_boost_policy(),
-#endif
};
/*
@@ -9724,21 +9548,9 @@
sd->nr_balance_failed = sd->cache_nice_tries +
NEED_ACTIVE_BALANCE_THRESHOLD - 1;
}
- } else {
+ } else
sd->nr_balance_failed = 0;
- /* Assumes one 'busiest' cpu that we pulled tasks from */
- if (!same_freq_domain(this_cpu, cpu_of(busiest))) {
- int check_groups = !!(env.flags &
- LBF_MOVED_RELATED_THREAD_GROUP_TASK);
-
- check_for_freq_change(this_rq, false, check_groups);
- check_for_freq_change(busiest, false, check_groups);
- } else {
- check_for_freq_change(this_rq, true, false);
- }
- }
-
if (likely(!active_balance)) {
/* We were unbalanced, so reset the balancing interval */
sd->balance_interval = sd->min_interval;
@@ -9973,9 +9785,6 @@
.busiest_grp_capacity = 0,
.flags = 0,
.loop = 0,
-#ifdef CONFIG_SCHED_HMP
- .boost_policy = sched_boost_policy(),
-#endif
};
bool moved = false;
@@ -10060,15 +9869,6 @@
local_irq_enable();
- if (moved && !same_freq_domain(busiest_cpu, target_cpu)) {
- int check_groups = !!(env.flags &
- LBF_MOVED_RELATED_THREAD_GROUP_TASK);
- check_for_freq_change(busiest_rq, false, check_groups);
- check_for_freq_change(target_rq, false, check_groups);
- } else if (moved) {
- check_for_freq_change(target_rq, true, false);
- }
-
return 0;
}
@@ -10098,10 +9898,6 @@
struct rq *rq = cpu_rq(cpu);
cpumask_t cpumask;
-#ifdef CONFIG_SCHED_HMP
- return find_new_hmp_ilb(type);
-#endif
-
rcu_read_lock();
sd = rcu_dereference_check_sched_domain(rq->sd);
if (sd) {
@@ -10437,11 +10233,9 @@
static inline bool nohz_kick_needed(struct rq *rq, int *type)
{
unsigned long now = jiffies;
-#ifndef CONFIG_SCHED_HMP
struct sched_domain_shared *sds;
struct sched_domain *sd;
int nr_busy;
-#endif
int cpu = rq->cpu;
bool kick = false;
@@ -10462,17 +10256,12 @@
if (likely(!atomic_read(&nohz.nr_cpus)))
return false;
-#ifdef CONFIG_SCHED_HMP
- return _nohz_kick_needed_hmp(rq, cpu, type);
-#endif
-
if (time_before(now, nohz.next_balance))
return false;
if (energy_aware())
return rq->nr_running >= 2 && cpu_overutilized(cpu);
-#ifndef CONFIG_SCHED_HMP
rcu_read_lock();
sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
if (sds && !energy_aware()) {
@@ -10506,7 +10295,6 @@
unlock:
rcu_read_unlock();
-#endif
return kick;
}
#else
@@ -10603,7 +10391,7 @@
rq->misfit_task = misfit;
if (old_misfit != misfit) {
- adjust_nr_big_tasks(&rq->hmp_stats, 1, misfit);
+ walt_adjust_nr_big_tasks(rq, 1, misfit);
curr->misfit = misfit;
}
#endif
@@ -11068,7 +10856,7 @@
.task_change_group = task_change_group_fair,
#endif
#ifdef CONFIG_SCHED_WALT
- .fixup_hmp_sched_stats = fixup_hmp_sched_stats_fair,
+ .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
};
@@ -11118,123 +10906,7 @@
}
/* WALT sched implementation begins here */
-
-#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
-static inline struct task_group *next_task_group(struct task_group *tg)
-{
- tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
-
- return (&tg->list == &task_groups) ? NULL : tg;
-}
-
-/* Iterate over all cfs_rq in a cpu */
-#define for_each_cfs_rq(cfs_rq, tg, cpu) \
- for (tg = container_of(&task_groups, struct task_group, list); \
- ((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
-
-void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
-{
- struct task_group *tg;
- struct cfs_rq *cfs_rq;
-
- rcu_read_lock();
-
- for_each_cfs_rq(cfs_rq, tg, cpu)
- reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
-
- rcu_read_unlock();
-}
-
-static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra);
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra);
-
-/* Add task's contribution to a cpu' HMP statistics */
-void inc_hmp_sched_stats_fair(struct rq *rq,
- struct task_struct *p, int change_cra)
-{
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
-
- /*
- * Although below check is not strictly required (as
- * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
- * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
- * efficiency by short-circuiting for_each_sched_entity() loop when
- * sched_disable_window_stats
- */
- if (sched_disable_window_stats)
- return;
-
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
- if (cfs_rq_throttled(cfs_rq))
- break;
- }
-
- /* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
- if (!se)
- inc_rq_hmp_stats(rq, p, change_cra);
-}
-
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
-
- fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
- task_load_delta,
- pred_demand_delta);
- fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
- if (cfs_rq_throttled(cfs_rq))
- break;
- }
-
- /* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
- if (!se) {
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
- task_load_delta,
- pred_demand_delta);
- fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
- }
-}
-
-#elif defined(CONFIG_SCHED_WALT)
-
-inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
-
-static void
-fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
- pred_demand_delta);
- fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
-}
-
-static inline int task_will_be_throttled(struct task_struct *p)
-{
- return 0;
-}
-
-void inc_hmp_sched_stats_fair(struct rq *rq,
- struct task_struct *p, int change_cra)
-{
- inc_nr_big_task(&rq->hmp_stats, p);
-}
+#ifdef CONFIG_SCHED_WALT
static inline int
kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
@@ -11256,1035 +10928,7 @@
return rc;
}
-#else
-
-static inline int task_will_be_throttled(struct task_struct *p)
-{
- return 0;
-}
-
-#endif
-
-/* QHMP/Zone sched implementation begins here */
-
-#ifdef CONFIG_SCHED_HMP
-#ifdef CONFIG_SMP
-
-/* CPU selection flag */
-#define SBC_FLAG_PREV_CPU 0x1
-#define SBC_FLAG_BEST_CAP_CPU 0x2
-#define SBC_FLAG_CPU_COST 0x4
-#define SBC_FLAG_MIN_COST 0x8
-#define SBC_FLAG_IDLE_LEAST_LOADED 0x10
-#define SBC_FLAG_IDLE_CSTATE 0x20
-#define SBC_FLAG_COST_CSTATE_TIE_BREAKER 0x40
-#define SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER 0x80
-#define SBC_FLAG_CSTATE_LOAD 0x100
-#define SBC_FLAG_BEST_SIBLING 0x200
-#define SBC_FLAG_WAKER_CPU 0x400
-#define SBC_FLAG_PACK_TASK 0x800
-
-/* Cluster selection flag */
-#define SBC_FLAG_COLOC_CLUSTER 0x10000
-#define SBC_FLAG_WAKER_CLUSTER 0x20000
-#define SBC_FLAG_BACKUP_CLUSTER 0x40000
-#define SBC_FLAG_BOOST_CLUSTER 0x80000
-
-struct cpu_select_env {
- struct task_struct *p;
- struct related_thread_group *rtg;
- u8 reason;
- u8 need_idle:1;
- u8 need_waker_cluster:1;
- u8 sync:1;
- enum sched_boost_policy boost_policy;
- u8 pack_task:1;
- int prev_cpu;
- DECLARE_BITMAP(candidate_list, NR_CPUS);
- DECLARE_BITMAP(backup_list, NR_CPUS);
- u64 task_load;
- u64 cpu_load;
- u32 sbc_best_flag;
- u32 sbc_best_cluster_flag;
- struct cpumask search_cpus;
-};
-
-struct cluster_cpu_stats {
- int best_idle_cpu, least_loaded_cpu;
- int best_capacity_cpu, best_cpu, best_sibling_cpu;
- int min_cost, best_sibling_cpu_cost;
- int best_cpu_wakeup_latency;
- u64 min_load, best_load, best_sibling_cpu_load;
- s64 highest_spare_capacity;
-};
-
-static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
-{
- u64 total_load;
-
- total_load = env->task_load + env->cpu_load;
-
- if (total_load > sched_spill_load ||
- (rq->nr_running + 1) > sysctl_sched_spill_nr_run)
- return 1;
-
- return 0;
-}
-
-static int skip_cpu(int cpu, struct cpu_select_env *env)
-{
- int tcpu = task_cpu(env->p);
- int skip = 0;
-
- if (!env->reason)
- return 0;
-
- if (is_reserved(cpu))
- return 1;
-
- switch (env->reason) {
- case UP_MIGRATION:
- skip = !idle_cpu(cpu);
- break;
- case IRQLOAD_MIGRATION:
- /* Purposely fall through */
- default:
- skip = (cpu == tcpu);
- break;
- }
-
- return skip;
-}
-
-static inline int
-acceptable_capacity(struct sched_cluster *cluster, struct cpu_select_env *env)
-{
- int tcpu;
-
- if (!env->reason)
- return 1;
-
- tcpu = task_cpu(env->p);
- switch (env->reason) {
- case UP_MIGRATION:
- return cluster->capacity > cpu_capacity(tcpu);
-
- case DOWN_MIGRATION:
- return cluster->capacity < cpu_capacity(tcpu);
-
- default:
- break;
- }
-
- return 1;
-}
-
-static int
-skip_cluster(struct sched_cluster *cluster, struct cpu_select_env *env)
-{
- if (!test_bit(cluster->id, env->candidate_list))
- return 1;
-
- if (!acceptable_capacity(cluster, env)) {
- __clear_bit(cluster->id, env->candidate_list);
- return 1;
- }
-
- return 0;
-}
-
-static struct sched_cluster *
-select_least_power_cluster(struct cpu_select_env *env)
-{
- struct sched_cluster *cluster;
-
- if (env->rtg) {
- int cpu = cluster_first_cpu(env->rtg->preferred_cluster);
-
- env->task_load = scale_load_to_cpu(task_load(env->p), cpu);
-
- if (task_load_will_fit(env->p, env->task_load,
- cpu, env->boost_policy)) {
- env->sbc_best_cluster_flag |= SBC_FLAG_COLOC_CLUSTER;
-
- if (env->boost_policy == SCHED_BOOST_NONE)
- return env->rtg->preferred_cluster;
-
- for_each_sched_cluster(cluster) {
- if (cluster != env->rtg->preferred_cluster) {
- __set_bit(cluster->id,
- env->backup_list);
- __clear_bit(cluster->id,
- env->candidate_list);
- }
- }
-
- return env->rtg->preferred_cluster;
- }
-
- /*
- * Since the task load does not fit on the preferred
- * cluster anymore, pretend that the task does not
- * have any preferred cluster. This allows the waking
- * task to get the appropriate CPU it needs as per the
- * non co-location placement policy without having to
- * wait until the preferred cluster is updated.
- */
- env->rtg = NULL;
- }
-
- for_each_sched_cluster(cluster) {
- if (!skip_cluster(cluster, env)) {
- int cpu = cluster_first_cpu(cluster);
-
- env->task_load = scale_load_to_cpu(task_load(env->p),
- cpu);
- if (task_load_will_fit(env->p, env->task_load, cpu,
- env->boost_policy))
- return cluster;
-
- __set_bit(cluster->id, env->backup_list);
- __clear_bit(cluster->id, env->candidate_list);
- }
- }
-
- return NULL;
-}
-
-static struct sched_cluster *
-next_candidate(const unsigned long *list, int start, int end)
-{
- int cluster_id;
-
- cluster_id = find_next_bit(list, end, start - 1 + 1);
- if (cluster_id >= end)
- return NULL;
-
- return sched_cluster[cluster_id];
-}
-
-static void
-update_spare_capacity(struct cluster_cpu_stats *stats,
- struct cpu_select_env *env, int cpu, int capacity,
- u64 cpu_load)
-{
- s64 spare_capacity = sched_ravg_window - cpu_load;
-
- if (spare_capacity > 0 &&
- (spare_capacity > stats->highest_spare_capacity ||
- (spare_capacity == stats->highest_spare_capacity &&
- ((!env->need_waker_cluster &&
- capacity > cpu_capacity(stats->best_capacity_cpu)) ||
- (env->need_waker_cluster &&
- cpu_rq(cpu)->nr_running <
- cpu_rq(stats->best_capacity_cpu)->nr_running))))) {
- /*
- * If sync waker is the only runnable of CPU, cr_avg of the
- * CPU is 0 so we have high chance to place the wakee on the
- * waker's CPU which likely causes preemtion of the waker.
- * This can lead migration of preempted waker. Place the
- * wakee on the real idle CPU when it's possible by checking
- * nr_running to avoid such preemption.
- */
- stats->highest_spare_capacity = spare_capacity;
- stats->best_capacity_cpu = cpu;
- }
-}
-
-static inline void find_backup_cluster(
-struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
- struct sched_cluster *next = NULL;
- int i;
- struct cpumask search_cpus;
-
- while (!bitmap_empty(env->backup_list, num_clusters)) {
- next = next_candidate(env->backup_list, 0, num_clusters);
- __clear_bit(next->id, env->backup_list);
-
- cpumask_and(&search_cpus, &env->search_cpus, &next->cpus);
- for_each_cpu(i, &search_cpus) {
- trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
- sched_irqload(i), power_cost(i, task_load(env->p) +
- cpu_cravg_sync(i, env->sync)), 0);
-
- update_spare_capacity(stats, env, i, next->capacity,
- cpu_load_sync(i, env->sync));
- }
- env->sbc_best_cluster_flag = SBC_FLAG_BACKUP_CLUSTER;
- }
-}
-
-struct sched_cluster *
-next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
- struct cluster_cpu_stats *stats)
-{
- struct sched_cluster *next = NULL;
-
- __clear_bit(cluster->id, env->candidate_list);
-
- if (env->rtg && preferred_cluster(cluster, env->p))
- return NULL;
-
- do {
- if (bitmap_empty(env->candidate_list, num_clusters))
- return NULL;
-
- next = next_candidate(env->candidate_list, 0, num_clusters);
- if (next) {
- if (next->min_power_cost > stats->min_cost) {
- clear_bit(next->id, env->candidate_list);
- next = NULL;
- continue;
- }
-
- if (skip_cluster(next, env))
- next = NULL;
- }
- } while (!next);
-
- env->task_load = scale_load_to_cpu(task_load(env->p),
- cluster_first_cpu(next));
- return next;
-}
-
-#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE
-static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
- struct cpu_select_env *env, int cpu_cost)
-{
- int wakeup_latency;
- int prev_cpu = env->prev_cpu;
-
- wakeup_latency = cpu_rq(cpu)->wakeup_latency;
-
- if (env->need_idle) {
- stats->min_cost = cpu_cost;
- if (idle_cpu(cpu)) {
- if (wakeup_latency < stats->best_cpu_wakeup_latency ||
- (wakeup_latency == stats->best_cpu_wakeup_latency &&
- cpu == prev_cpu)) {
- stats->best_idle_cpu = cpu;
- stats->best_cpu_wakeup_latency = wakeup_latency;
- }
- } else {
- if (env->cpu_load < stats->min_load ||
- (env->cpu_load == stats->min_load &&
- cpu == prev_cpu)) {
- stats->least_loaded_cpu = cpu;
- stats->min_load = env->cpu_load;
- }
- }
-
- return;
- }
-
- if (cpu_cost < stats->min_cost) {
- stats->min_cost = cpu_cost;
- stats->best_cpu_wakeup_latency = wakeup_latency;
- stats->best_load = env->cpu_load;
- stats->best_cpu = cpu;
- env->sbc_best_flag = SBC_FLAG_CPU_COST;
- return;
- }
-
- /* CPU cost is the same. Start breaking the tie by C-state */
-
- if (wakeup_latency > stats->best_cpu_wakeup_latency)
- return;
-
- if (wakeup_latency < stats->best_cpu_wakeup_latency) {
- stats->best_cpu_wakeup_latency = wakeup_latency;
- stats->best_load = env->cpu_load;
- stats->best_cpu = cpu;
- env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
- return;
- }
-
- /* C-state is the same. Use prev CPU to break the tie */
- if (cpu == prev_cpu) {
- stats->best_cpu = cpu;
- env->sbc_best_flag = SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER;
- return;
- }
-
- if (stats->best_cpu != prev_cpu &&
- ((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
- (wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
- stats->best_load = env->cpu_load;
- stats->best_cpu = cpu;
- env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
- }
-}
-#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */
-static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
- struct cpu_select_env *env, int cpu_cost)
-{
- int prev_cpu = env->prev_cpu;
-
- if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
- if (stats->best_sibling_cpu_cost > cpu_cost ||
- (stats->best_sibling_cpu_cost == cpu_cost &&
- stats->best_sibling_cpu_load > env->cpu_load)) {
- stats->best_sibling_cpu_cost = cpu_cost;
- stats->best_sibling_cpu_load = env->cpu_load;
- stats->best_sibling_cpu = cpu;
- }
- }
-
- if ((cpu_cost < stats->min_cost) ||
- ((stats->best_cpu != prev_cpu &&
- stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
- if (env->need_idle) {
- if (idle_cpu(cpu)) {
- stats->min_cost = cpu_cost;
- stats->best_idle_cpu = cpu;
- }
- } else {
- stats->min_cost = cpu_cost;
- stats->min_load = env->cpu_load;
- stats->best_cpu = cpu;
- env->sbc_best_flag = SBC_FLAG_MIN_COST;
- }
- }
-}
-#endif /* CONFIG_SCHED_HMP_CSTATE_AWARE */
-
-static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
- struct cpu_select_env *env)
-{
- int cpu_cost;
-
- /*
- * We try to find the least loaded *busy* CPU irrespective
- * of the power cost.
- */
- if (env->pack_task)
- cpu_cost = cpu_min_power_cost(cpu);
-
- else
- cpu_cost = power_cost(cpu, task_load(env->p) +
- cpu_cravg_sync(cpu, env->sync));
-
- if (cpu_cost <= stats->min_cost)
- __update_cluster_stats(cpu, stats, env, cpu_cost);
-}
-
-static void find_best_cpu_in_cluster(struct sched_cluster *c,
- struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
- int i;
- struct cpumask search_cpus;
-
- cpumask_and(&search_cpus, &env->search_cpus, &c->cpus);
-
- env->need_idle = wake_to_idle(env->p) || c->wake_up_idle;
-
- for_each_cpu(i, &search_cpus) {
- env->cpu_load = cpu_load_sync(i, env->sync);
-
- trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
- sched_irqload(i),
- power_cost(i, task_load(env->p) +
- cpu_cravg_sync(i, env->sync)), 0);
-
- if (skip_cpu(i, env))
- continue;
-
- update_spare_capacity(stats, env, i, c->capacity,
- env->cpu_load);
-
- /*
- * need_idle takes precedence over sched boost but when both
- * are set, idlest CPU with in all the clusters is selected
- * when boost_policy = BOOST_ON_ALL whereas idlest CPU in the
- * big cluster is selected within boost_policy = BOOST_ON_BIG.
- */
- if ((!env->need_idle &&
- env->boost_policy != SCHED_BOOST_NONE) ||
- env->need_waker_cluster ||
- sched_cpu_high_irqload(i) ||
- spill_threshold_crossed(env, cpu_rq(i)))
- continue;
-
- update_cluster_stats(i, stats, env);
- }
-}
-
-static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
-{
- stats->best_cpu = stats->best_idle_cpu = -1;
- stats->best_capacity_cpu = stats->best_sibling_cpu = -1;
- stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
- stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
- stats->highest_spare_capacity = 0;
- stats->least_loaded_cpu = -1;
- stats->best_cpu_wakeup_latency = INT_MAX;
- /* No need to initialize stats->best_load */
-}
-
-static inline bool env_has_special_flags(struct cpu_select_env *env)
-{
- if (env->need_idle || env->boost_policy != SCHED_BOOST_NONE ||
- env->reason)
- return true;
-
- return false;
-}
-
-static inline bool
-bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
- int prev_cpu;
- struct task_struct *task = env->p;
- struct sched_cluster *cluster;
-
- if (!task->ravg.mark_start || !sched_short_sleep_task_threshold)
- return false;
-
- prev_cpu = env->prev_cpu;
- if (!cpumask_test_cpu(prev_cpu, &env->search_cpus))
- return false;
-
- if (task->ravg.mark_start - task->last_cpu_selected_ts >=
- sched_long_cpu_selection_threshold)
- return false;
-
- /*
- * This function should be used by task wake up path only as it's
- * assuming p->last_switch_out_ts as last sleep time.
- * p->last_switch_out_ts can denote last preemption time as well as
- * last sleep time.
- */
- if (task->ravg.mark_start - task->last_switch_out_ts >=
- sched_short_sleep_task_threshold)
- return false;
-
- env->task_load = scale_load_to_cpu(task_load(task), prev_cpu);
- cluster = cpu_rq(prev_cpu)->cluster;
-
- if (!task_load_will_fit(task, env->task_load, prev_cpu,
- sched_boost_policy())) {
-
- __set_bit(cluster->id, env->backup_list);
- __clear_bit(cluster->id, env->candidate_list);
- return false;
- }
-
- env->cpu_load = cpu_load_sync(prev_cpu, env->sync);
- if (sched_cpu_high_irqload(prev_cpu) ||
- spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
- update_spare_capacity(stats, env, prev_cpu,
- cluster->capacity, env->cpu_load);
- cpumask_clear_cpu(prev_cpu, &env->search_cpus);
- return false;
- }
-
- return true;
-}
-
-static inline bool
-wake_to_waker_cluster(struct cpu_select_env *env)
-{
- return env->sync &&
- task_load(current) > sched_big_waker_task_load &&
- task_load(env->p) < sched_small_wakee_task_load;
-}
-
-static inline bool
-bias_to_waker_cpu(struct cpu_select_env *env, int cpu)
-{
- return sysctl_sched_prefer_sync_wakee_to_waker &&
- cpu_rq(cpu)->nr_running == 1 &&
- cpumask_test_cpu(cpu, &env->search_cpus);
-}
-
-static inline int
-cluster_allowed(struct cpu_select_env *env, struct sched_cluster *cluster)
-{
- return cpumask_intersects(&env->search_cpus, &cluster->cpus);
-}
-
-/* return cheapest cpu that can fit this task */
-static int select_best_cpu(struct task_struct *p, int target, int reason,
- int sync)
-{
- struct sched_cluster *cluster, *pref_cluster = NULL;
- struct cluster_cpu_stats stats;
- struct related_thread_group *grp;
- unsigned int sbc_flag = 0;
- int cpu = raw_smp_processor_id();
- bool special;
-
- struct cpu_select_env env = {
- .p = p,
- .reason = reason,
- .need_idle = wake_to_idle(p),
- .need_waker_cluster = 0,
- .sync = sync,
- .prev_cpu = target,
- .rtg = NULL,
- .sbc_best_flag = 0,
- .sbc_best_cluster_flag = 0,
- .pack_task = false,
- };
-
- env.boost_policy = task_sched_boost(p) ?
- sched_boost_policy() : SCHED_BOOST_NONE;
-
- bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
- bitmap_zero(env.backup_list, NR_CPUS);
-
- cpumask_and(&env.search_cpus, tsk_cpus_allowed(p), cpu_active_mask);
- cpumask_andnot(&env.search_cpus, &env.search_cpus, cpu_isolated_mask);
-
- init_cluster_cpu_stats(&stats);
- special = env_has_special_flags(&env);
-
- rcu_read_lock();
-
- grp = task_related_thread_group(p);
-
- if (grp && grp->preferred_cluster) {
- pref_cluster = grp->preferred_cluster;
- if (!cluster_allowed(&env, pref_cluster))
- clear_bit(pref_cluster->id, env.candidate_list);
- else
- env.rtg = grp;
- } else if (!special) {
- cluster = cpu_rq(cpu)->cluster;
- if (wake_to_waker_cluster(&env)) {
- if (bias_to_waker_cpu(&env, cpu)) {
- target = cpu;
- sbc_flag = SBC_FLAG_WAKER_CLUSTER |
- SBC_FLAG_WAKER_CPU;
- goto out;
- } else if (cluster_allowed(&env, cluster)) {
- env.need_waker_cluster = 1;
- bitmap_zero(env.candidate_list, NR_CPUS);
- __set_bit(cluster->id, env.candidate_list);
- env.sbc_best_cluster_flag =
- SBC_FLAG_WAKER_CLUSTER;
- }
- } else if (bias_to_prev_cpu(&env, &stats)) {
- sbc_flag = SBC_FLAG_PREV_CPU;
- goto out;
- }
- }
-
- if (!special && is_short_burst_task(p)) {
- env.pack_task = true;
- sbc_flag = SBC_FLAG_PACK_TASK;
- }
-retry:
- cluster = select_least_power_cluster(&env);
-
- if (!cluster)
- goto out;
-
- /*
- * 'cluster' now points to the minimum power cluster which can satisfy
- * task's perf goals. Walk down the cluster list starting with that
- * cluster. For non-small tasks, skip clusters that don't have
- * mostly_idle/idle cpus
- */
-
- do {
- find_best_cpu_in_cluster(cluster, &env, &stats);
-
- } while ((cluster = next_best_cluster(cluster, &env, &stats)));
-
- if (env.need_idle) {
- if (stats.best_idle_cpu >= 0) {
- target = stats.best_idle_cpu;
- sbc_flag |= SBC_FLAG_IDLE_CSTATE;
- } else if (stats.least_loaded_cpu >= 0) {
- target = stats.least_loaded_cpu;
- sbc_flag |= SBC_FLAG_IDLE_LEAST_LOADED;
- }
- } else if (stats.best_cpu >= 0) {
- if (stats.best_cpu != task_cpu(p) &&
- stats.min_cost == stats.best_sibling_cpu_cost) {
- stats.best_cpu = stats.best_sibling_cpu;
- sbc_flag |= SBC_FLAG_BEST_SIBLING;
- }
- sbc_flag |= env.sbc_best_flag;
- target = stats.best_cpu;
- } else {
- if (env.rtg && env.boost_policy == SCHED_BOOST_NONE) {
- env.rtg = NULL;
- goto retry;
- }
-
- /*
- * With boost_policy == SCHED_BOOST_ON_BIG, we reach here with
- * backup_list = little cluster, candidate_list = none and
- * stats->best_capacity_cpu points the best spare capacity
- * CPU among the CPUs in the big cluster.
- */
- if (env.boost_policy == SCHED_BOOST_ON_BIG &&
- stats.best_capacity_cpu >= 0)
- sbc_flag |= SBC_FLAG_BOOST_CLUSTER;
- else
- find_backup_cluster(&env, &stats);
-
- if (stats.best_capacity_cpu >= 0) {
- target = stats.best_capacity_cpu;
- sbc_flag |= SBC_FLAG_BEST_CAP_CPU;
- }
- }
- p->last_cpu_selected_ts = sched_ktime_clock();
-out:
- sbc_flag |= env.sbc_best_cluster_flag;
- rcu_read_unlock();
- trace_sched_task_load(p, sched_boost_policy() && task_sched_boost(p),
- env.reason, env.sync, env.need_idle, sbc_flag, target);
- return target;
-}
-
-/*
- * Reset balance_interval at all sched_domain levels of given cpu, so that it
- * honors kick.
- */
-static inline void reset_balance_interval(int cpu)
-{
- struct sched_domain *sd;
-
- if (cpu >= nr_cpu_ids)
- return;
-
- rcu_read_lock();
- for_each_domain(cpu, sd)
- sd->balance_interval = 0;
- rcu_read_unlock();
-}
-
-/*
- * Check if a task is on the "wrong" cpu (i.e its current cpu is not the ideal
- * cpu as per its demand or priority)
- *
- * Returns reason why task needs to be migrated
- */
-static inline int migration_needed(struct task_struct *p, int cpu)
-{
- int nice;
- struct related_thread_group *grp;
-
- if (p->state != TASK_RUNNING || p->nr_cpus_allowed == 1)
- return 0;
-
- /* No need to migrate task that is about to be throttled */
- if (task_will_be_throttled(p))
- return 0;
-
- if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
- cpu_capacity(cpu) != max_capacity && task_sched_boost(p))
- return UP_MIGRATION;
-
- if (sched_cpu_high_irqload(cpu))
- return IRQLOAD_MIGRATION;
-
- nice = task_nice(p);
- rcu_read_lock();
- grp = task_related_thread_group(p);
- /*
- * Don't assume higher capacity means higher power. If the task
- * is running on the power efficient CPU, avoid migrating it
- * to a lower capacity cluster.
- */
- if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
- upmigrate_discouraged(p)) &&
- cpu_capacity(cpu) > min_capacity &&
- cpu_max_power_cost(cpu) == max_power_cost) {
- rcu_read_unlock();
- return DOWN_MIGRATION;
- }
-
- if (!task_will_fit(p, cpu)) {
- rcu_read_unlock();
- return UP_MIGRATION;
- }
- rcu_read_unlock();
-
- return 0;
-}
-
static DEFINE_RAW_SPINLOCK(migration_lock);
-
-/*
- * Check if currently running task should be migrated to a better cpu.
- *
- * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
- */
-void check_for_migration(struct rq *rq, struct task_struct *p)
-{
- int cpu = cpu_of(rq), new_cpu;
- int active_balance = 0, reason;
-
- reason = migration_needed(p, cpu);
- if (!reason)
- return;
-
- raw_spin_lock(&migration_lock);
- new_cpu = select_best_cpu(p, cpu, reason, 0);
-
- if (new_cpu != cpu) {
- active_balance = kick_active_balance(rq, p, new_cpu);
- if (active_balance)
- mark_reserved(new_cpu);
- }
-
- raw_spin_unlock(&migration_lock);
-
- if (active_balance)
- stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
- &rq->active_balance_work);
-}
-
-#ifdef CONFIG_CFS_BANDWIDTH
-static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
-{
- cfs_rq->hmp_stats.nr_big_tasks = 0;
- cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
- cfs_rq->hmp_stats.pred_demands_sum = 0;
-}
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra)
-{
- inc_nr_big_task(&cfs_rq->hmp_stats, p);
- if (change_cra)
- inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
-}
-
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra)
-{
- dec_nr_big_task(&cfs_rq->hmp_stats, p);
- if (change_cra)
- dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
-}
-
-static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq)
-{
- stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
- stats->cumulative_runnable_avg +=
- cfs_rq->hmp_stats.cumulative_runnable_avg;
- stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
-}
-
-static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq)
-{
- stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
- stats->cumulative_runnable_avg -=
- cfs_rq->hmp_stats.cumulative_runnable_avg;
- stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;
-
- BUG_ON(stats->nr_big_tasks < 0 ||
- (s64)stats->cumulative_runnable_avg < 0);
- BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-#endif /* CONFIG_CFS_BANDWIDTH */
-
-static int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
-{
- int local_cpu, busiest_cpu;
- int local_capacity, busiest_capacity;
- int local_pwr_cost, busiest_pwr_cost;
- int nr_cpus;
- int boost = sched_boost();
-
- if (!sysctl_sched_restrict_cluster_spill ||
- boost == FULL_THROTTLE_BOOST || boost == CONSERVATIVE_BOOST)
- return 0;
-
- local_cpu = group_first_cpu(sds->local);
- busiest_cpu = group_first_cpu(sds->busiest);
-
- local_capacity = cpu_max_possible_capacity(local_cpu);
- busiest_capacity = cpu_max_possible_capacity(busiest_cpu);
-
- local_pwr_cost = cpu_max_power_cost(local_cpu);
- busiest_pwr_cost = cpu_max_power_cost(busiest_cpu);
-
- if (local_pwr_cost <= busiest_pwr_cost)
- return 0;
-
- if (local_capacity > busiest_capacity &&
- sds->busiest_stat.sum_nr_big_tasks)
- return 0;
-
- nr_cpus = cpumask_weight(sched_group_cpus(sds->busiest));
- if ((sds->busiest_stat.group_cpu_load < nr_cpus * sched_spill_load) &&
- (sds->busiest_stat.sum_nr_running <
- nr_cpus * sysctl_sched_spill_nr_run))
- return 1;
-
- return 0;
-}
-
-static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
- struct sd_lb_stats *sds,
- struct sched_group *sg,
- struct sg_lb_stats *sgs)
-{
- if (env->idle != CPU_NOT_IDLE &&
- cpu_capacity(env->dst_cpu) > group_rq_capacity(sg)) {
- if (sgs->sum_nr_big_tasks >
- sds->busiest_stat.sum_nr_big_tasks) {
- env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE;
- return true;
- }
- }
-
- return false;
-}
-
-static struct rq *find_busiest_queue_hmp(struct lb_env *env,
- struct sched_group *group)
-{
- struct rq *busiest = NULL, *busiest_big = NULL;
- u64 max_runnable_avg = 0, max_runnable_avg_big = 0;
- int max_nr_big = 0, nr_big;
- bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
- int i;
- cpumask_t cpus;
-
- cpumask_andnot(&cpus, sched_group_cpus(group), cpu_isolated_mask);
-
- for_each_cpu(i, &cpus) {
- struct rq *rq = cpu_rq(i);
- u64 cumulative_runnable_avg =
- rq->hmp_stats.cumulative_runnable_avg;
-
- if (!cpumask_test_cpu(i, env->cpus))
- continue;
-
-
- if (find_big) {
- nr_big = nr_big_tasks(rq);
- if (nr_big > max_nr_big ||
- (nr_big > 0 && nr_big == max_nr_big &&
- cumulative_runnable_avg > max_runnable_avg_big)) {
- max_runnable_avg_big = cumulative_runnable_avg;
- busiest_big = rq;
- max_nr_big = nr_big;
- continue;
- }
- }
-
- if (cumulative_runnable_avg > max_runnable_avg) {
- max_runnable_avg = cumulative_runnable_avg;
- busiest = rq;
- }
- }
-
- if (busiest_big)
- return busiest_big;
-
- env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE;
- return busiest;
-}
-
-#ifdef CONFIG_NO_HZ_COMMON
-static inline int find_new_hmp_ilb(int type)
-{
- int call_cpu = raw_smp_processor_id();
- struct sched_domain *sd;
- int ilb;
-
- rcu_read_lock();
-
- /* Pick an idle cpu "closest" to call_cpu */
- for_each_domain(call_cpu, sd) {
- for_each_cpu_and(ilb, nohz.idle_cpus_mask,
- sched_domain_span(sd)) {
- if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT ||
- cpu_max_power_cost(ilb) <=
- cpu_max_power_cost(call_cpu))) {
- rcu_read_unlock();
- reset_balance_interval(ilb);
- return ilb;
- }
- }
- }
-
- rcu_read_unlock();
- return nr_cpu_ids;
-}
-
-static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
-{
- struct sched_domain *sd;
- int i;
-
- if (rq->nr_running < 2)
- return 0;
-
- if (!sysctl_sched_restrict_cluster_spill ||
- sched_boost_policy() == SCHED_BOOST_ON_ALL)
- return 1;
-
- if (cpu_max_power_cost(cpu) == max_power_cost)
- return 1;
-
- rcu_read_lock();
- sd = rcu_dereference_check_sched_domain(rq->sd);
- if (!sd) {
- rcu_read_unlock();
- return 0;
- }
-
- for_each_cpu(i, sched_domain_span(sd)) {
- if (cpu_load(i) < sched_spill_load &&
- cpu_rq(i)->nr_running <
- sysctl_sched_spill_nr_run) {
- /* Change the kick type to limit to CPUs that
- * are of equal or lower capacity.
- */
- *type = NOHZ_KICK_RESTRICT;
- break;
- }
- }
- rcu_read_unlock();
- return 1;
-}
-#endif /* CONFIG_NO_HZ_COMMON */
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_CFS_BANDWIDTH
-/*
- * Check if task is part of a hierarchy where some cfs_rq does not have any
- * runtime left.
- *
- * We can't rely on throttled_hierarchy() to do this test, as
- * cfs_rq->throttle_count will not be updated yet when this function is called
- * from scheduler_tick()
- */
-static int task_will_be_throttled(struct task_struct *p)
-{
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq;
-
- if (!cfs_bandwidth_used())
- return 0;
-
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- if (!cfs_rq->runtime_enabled)
- continue;
- if (cfs_rq->runtime_remaining <= 0)
- return 1;
- }
-
- return 0;
-}
-#endif /* CONFIG_CFS_BANDWIDTH */
-
-#elif defined(CONFIG_SCHED_WALT)
-
void check_for_migration(struct rq *rq, struct task_struct *p)
{
int new_cpu;
@@ -12296,17 +10940,23 @@
rq->curr->nr_cpus_allowed == 1)
return;
+ raw_spin_lock(&migration_lock);
+ rcu_read_lock();
new_cpu = energy_aware_wake_cpu(p, cpu, 0);
+ rcu_read_unlock();
if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
active_balance = kick_active_balance(rq, p, new_cpu);
if (active_balance) {
mark_reserved(new_cpu);
+ raw_spin_unlock(&migration_lock);
stop_one_cpu_nowait(cpu,
active_load_balance_cpu_stop, rq,
&rq->active_balance_work);
+ return;
}
}
+ raw_spin_unlock(&migration_lock);
}
}
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
deleted file mode 100644
index 24b60d7..0000000
--- a/kernel/sched/hmp.c
+++ /dev/null
@@ -1,1639 +0,0 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Implementation credits: Srivatsa Vaddagiri, Steve Muckle
- * Syed Rameez Mustafa, Olav haugan, Joonwoo Park, Pavan Kumar Kondeti
- * and Vikram Mulukutla
- */
-
-#include <linux/cpufreq.h>
-#include <linux/list_sort.h>
-#include <linux/syscore_ops.h>
-
-#include "sched.h"
-#include "walt.h"
-
-#include <trace/events/sched.h>
-
-#define CSTATE_LATENCY_GRANULARITY_SHIFT (6)
-
-inline void clear_ed_task(struct task_struct *p, struct rq *rq)
-{
- if (p == rq->ed_task)
- rq->ed_task = NULL;
-}
-
-inline void set_task_last_switch_out(struct task_struct *p, u64 wallclock)
-{
- p->last_switch_out_ts = wallclock;
-}
-
-/*
- * Note C-state for (idle) cpus.
- *
- * @cstate = cstate index, 0 -> active state
- * @wakeup_energy = energy spent in waking up cpu
- * @wakeup_latency = latency to wakeup from cstate
- *
- */
-void
-sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
-{
- struct rq *rq = cpu_rq(cpu);
-
- rq->cstate = cstate; /* C1, C2 etc */
- rq->wakeup_energy = wakeup_energy;
- /* disregard small latency delta (64 us). */
- rq->wakeup_latency = ((wakeup_latency >>
- CSTATE_LATENCY_GRANULARITY_SHIFT) <<
- CSTATE_LATENCY_GRANULARITY_SHIFT);
-}
-
-/*
- * Note D-state for (idle) cluster.
- *
- * @dstate = dstate index, 0 -> active state
- * @wakeup_energy = energy spent in waking up cluster
- * @wakeup_latency = latency to wakeup from cluster
- *
- */
-void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
- int wakeup_energy, int wakeup_latency)
-{
- struct sched_cluster *cluster =
- cpu_rq(cpumask_first(cluster_cpus))->cluster;
- cluster->dstate = dstate;
- cluster->dstate_wakeup_energy = wakeup_energy;
- cluster->dstate_wakeup_latency = wakeup_latency;
-}
-
-u32 __weak get_freq_max_load(int cpu, u32 freq)
-{
- /* 100% by default */
- return 100;
-}
-
-struct freq_max_load_entry {
- /* The maximum load which has accounted governor's headroom. */
- u64 hdemand;
-};
-
-struct freq_max_load {
- struct rcu_head rcu;
- int length;
- struct freq_max_load_entry freqs[0];
-};
-
-static DEFINE_PER_CPU(struct freq_max_load *, freq_max_load);
-static DEFINE_SPINLOCK(freq_max_load_lock);
-
-struct cpu_pwr_stats __weak *get_cpu_pwr_stats(void)
-{
- return NULL;
-}
-
-int sched_update_freq_max_load(const cpumask_t *cpumask)
-{
- int i, cpu, ret;
- unsigned int freq;
- struct cpu_pstate_pwr *costs;
- struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
- struct freq_max_load *max_load, *old_max_load;
- struct freq_max_load_entry *entry;
- u64 max_demand_capacity, max_demand;
- unsigned long flags;
- u32 hfreq;
- int hpct;
-
- if (!per_cpu_info)
- return 0;
-
- spin_lock_irqsave(&freq_max_load_lock, flags);
- max_demand_capacity = div64_u64(max_task_load(), max_possible_capacity);
- for_each_cpu(cpu, cpumask) {
- if (!per_cpu_info[cpu].ptable) {
- ret = -EINVAL;
- goto fail;
- }
-
- old_max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
-
- /*
- * allocate len + 1 and leave the last power cost as 0 for
- * power_cost() can stop iterating index when
- * per_cpu_info[cpu].len > len of max_load due to race between
- * cpu power stats update and get_cpu_pwr_stats().
- */
- max_load = kzalloc(sizeof(struct freq_max_load) +
- sizeof(struct freq_max_load_entry) *
- (per_cpu_info[cpu].len + 1), GFP_ATOMIC);
- if (unlikely(!max_load)) {
- ret = -ENOMEM;
- goto fail;
- }
-
- max_load->length = per_cpu_info[cpu].len;
-
- max_demand = max_demand_capacity *
- cpu_max_possible_capacity(cpu);
-
- i = 0;
- costs = per_cpu_info[cpu].ptable;
- while (costs[i].freq) {
- entry = &max_load->freqs[i];
- freq = costs[i].freq;
- hpct = get_freq_max_load(cpu, freq);
- if (hpct <= 0 || hpct > 100)
- hpct = 100;
- hfreq = div64_u64((u64)freq * hpct, 100);
- entry->hdemand =
- div64_u64(max_demand * hfreq,
- cpu_max_possible_freq(cpu));
- i++;
- }
-
- rcu_assign_pointer(per_cpu(freq_max_load, cpu), max_load);
- if (old_max_load)
- kfree_rcu(old_max_load, rcu);
- }
-
- spin_unlock_irqrestore(&freq_max_load_lock, flags);
- return 0;
-
-fail:
- for_each_cpu(cpu, cpumask) {
- max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
- if (max_load) {
- rcu_assign_pointer(per_cpu(freq_max_load, cpu), NULL);
- kfree_rcu(max_load, rcu);
- }
- }
-
- spin_unlock_irqrestore(&freq_max_load_lock, flags);
- return ret;
-}
-
-unsigned long __weak arch_get_cpu_efficiency(int cpu)
-{
- return SCHED_CAPACITY_SCALE;
-}
-
-int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
-{
- struct rq *rq = cpu_rq(cpu);
-
- rq->static_cpu_pwr_cost = cost;
- return 0;
-}
-
-unsigned int sched_get_static_cpu_pwr_cost(int cpu)
-{
- return cpu_rq(cpu)->static_cpu_pwr_cost;
-}
-
-int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost)
-{
- struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
-
- cluster->static_cluster_pwr_cost = cost;
- return 0;
-}
-
-unsigned int sched_get_static_cluster_pwr_cost(int cpu)
-{
- return cpu_rq(cpu)->cluster->static_cluster_pwr_cost;
-}
-
-int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle)
-{
- struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
-
- cluster->wake_up_idle = !!wake_idle;
- return 0;
-}
-
-unsigned int sched_get_cluster_wake_idle(int cpu)
-{
- return cpu_rq(cpu)->cluster->wake_up_idle;
-}
-
-/*
- * Tasks that are runnable continuously for a period greather than
- * EARLY_DETECTION_DURATION can be flagged early as potential
- * high load tasks.
- */
-#define EARLY_DETECTION_DURATION 9500000
-
-/*
- * For increase, send notification if
- * freq_required - cur_freq > sysctl_sched_freq_inc_notify
- */
-__read_mostly int sysctl_sched_freq_inc_notify = 10 * 1024 * 1024; /* + 10GHz */
-
-/*
- * For decrease, send notification if
- * cur_freq - freq_required > sysctl_sched_freq_dec_notify
- */
-__read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */
-__read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024;
-
-/* Maximum allowed threshold before freq aggregation must be enabled */
-#define MAX_FREQ_AGGR_THRESH 1000
-
-#define for_each_related_thread_group(grp) \
- list_for_each_entry(grp, &active_related_thread_groups, list)
-
-/* Size of bitmaps maintained to track top tasks */
-static const unsigned int top_tasks_bitmap_size =
- BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long);
-
-__read_mostly unsigned int sysctl_sched_freq_aggregate = 1;
-
-/* A cpu can no longer accommodate more tasks if:
- *
- * rq->nr_running > sysctl_sched_spill_nr_run ||
- * rq->hmp_stats.cumulative_runnable_avg > sched_spill_load
- */
-unsigned int __read_mostly sysctl_sched_spill_nr_run = 10;
-
-/*
- * Place sync wakee tasks those have less than configured demand to the waker's
- * cluster.
- */
-unsigned int __read_mostly sched_small_wakee_task_load;
-unsigned int __read_mostly sysctl_sched_small_wakee_task_load_pct = 10;
-
-unsigned int __read_mostly sched_big_waker_task_load;
-unsigned int __read_mostly sysctl_sched_big_waker_task_load_pct = 25;
-
-/*
- * CPUs with load greater than the sched_spill_load_threshold are not
- * eligible for task placement. When all CPUs in a cluster achieve a
- * load higher than this level, tasks becomes eligible for inter
- * cluster migration.
- */
-unsigned int __read_mostly sched_spill_load;
-unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;
-
-/*
- * Prefer the waker CPU for sync wakee task, if the CPU has only 1 runnable
- * task. This eliminates the LPM exit latency associated with the idle
- * CPUs in the waker cluster.
- */
-unsigned int __read_mostly sysctl_sched_prefer_sync_wakee_to_waker;
-
-/*
- * Tasks whose bandwidth consumption on a cpu is more than
- * sched_upmigrate are considered "big" tasks. Big tasks will be
- * considered for "up" migration, i.e migrating to a cpu with better
- * capacity.
- */
-unsigned int __read_mostly sched_upmigrate;
-unsigned int __read_mostly sysctl_sched_upmigrate_pct = 80;
-
-/*
- * Big tasks, once migrated, will need to drop their bandwidth
- * consumption to less than sched_downmigrate before they are "down"
- * migrated.
- */
-unsigned int __read_mostly sched_downmigrate;
-unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60;
-
-/*
- * The load scale factor of a CPU gets boosted when its max frequency
- * is restricted due to which the tasks are migrating to higher capacity
- * CPUs early. The sched_upmigrate threshold is auto-upgraded by
- * rq->max_possible_freq/rq->max_freq of a lower capacity CPU.
- */
-unsigned int up_down_migrate_scale_factor = 1024;
-
-/*
- * Scheduler selects and places task to its previous CPU if sleep time is
- * less than sysctl_sched_select_prev_cpu_us.
- */
-unsigned int __read_mostly
-sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC;
-
-unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000;
-
-unsigned int __read_mostly
-sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC;
-
-unsigned int __read_mostly sysctl_sched_restrict_cluster_spill;
-
-/*
- * Scheduler tries to avoid waking up idle CPUs for tasks running
- * in short bursts. If the task average burst is less than
- * sysctl_sched_short_burst nanoseconds and it sleeps on an average
- * for more than sysctl_sched_short_sleep nanoseconds, then the
- * task is eligible for packing.
- */
-unsigned int __read_mostly sysctl_sched_short_burst;
-unsigned int __read_mostly sysctl_sched_short_sleep = 1 * NSEC_PER_MSEC;
-
-static void _update_up_down_migrate(unsigned int *up_migrate,
- unsigned int *down_migrate, bool is_group)
-{
- unsigned int delta;
-
- if (up_down_migrate_scale_factor == 1024)
- return;
-
- delta = *up_migrate - *down_migrate;
-
- *up_migrate /= NSEC_PER_USEC;
- *up_migrate *= up_down_migrate_scale_factor;
- *up_migrate >>= 10;
- *up_migrate *= NSEC_PER_USEC;
-
- if (!is_group)
- *up_migrate = min(*up_migrate, sched_ravg_window);
-
- *down_migrate /= NSEC_PER_USEC;
- *down_migrate *= up_down_migrate_scale_factor;
- *down_migrate >>= 10;
- *down_migrate *= NSEC_PER_USEC;
-
- *down_migrate = min(*down_migrate, *up_migrate - delta);
-}
-
-static void update_up_down_migrate(void)
-{
- unsigned int up_migrate = pct_to_real(sysctl_sched_upmigrate_pct);
- unsigned int down_migrate = pct_to_real(sysctl_sched_downmigrate_pct);
-
- _update_up_down_migrate(&up_migrate, &down_migrate, false);
- sched_upmigrate = up_migrate;
- sched_downmigrate = down_migrate;
-
- up_migrate = pct_to_real(sysctl_sched_group_upmigrate_pct);
- down_migrate = pct_to_real(sysctl_sched_group_downmigrate_pct);
-
- _update_up_down_migrate(&up_migrate, &down_migrate, true);
- sched_group_upmigrate = up_migrate;
- sched_group_downmigrate = down_migrate;
-}
-
-void set_hmp_defaults(void)
-{
- sched_spill_load =
- pct_to_real(sysctl_sched_spill_load_pct);
-
- update_up_down_migrate();
-
- sched_init_task_load_windows =
- div64_u64((u64)sysctl_sched_init_task_load_pct *
- (u64)sched_ravg_window, 100);
-
- sched_short_sleep_task_threshold = sysctl_sched_select_prev_cpu_us *
- NSEC_PER_USEC;
-
- sched_small_wakee_task_load =
- div64_u64((u64)sysctl_sched_small_wakee_task_load_pct *
- (u64)sched_ravg_window, 100);
-
- sched_big_waker_task_load =
- div64_u64((u64)sysctl_sched_big_waker_task_load_pct *
- (u64)sched_ravg_window, 100);
-
- sched_freq_aggregate_threshold =
- pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
-}
-
-#ifdef CONFIG_CGROUP_SCHED
-
-int upmigrate_discouraged(struct task_struct *p)
-{
- return task_group(p)->upmigrate_discouraged;
-}
-
-#else
-
-static inline int upmigrate_discouraged(struct task_struct *p)
-{
- return 0;
-}
-
-#endif
-
-/* Is a task "big" on its current cpu */
-static inline int __is_big_task(struct task_struct *p, u64 scaled_load)
-{
- int nice = task_nice(p);
-
- if (nice > SCHED_UPMIGRATE_MIN_NICE || upmigrate_discouraged(p))
- return 0;
-
- return scaled_load > sched_upmigrate;
-}
-
-int is_big_task(struct task_struct *p)
-{
- return __is_big_task(p, scale_load_to_cpu(task_load(p), task_cpu(p)));
-}
-
-u64 cpu_load(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
-
- return scale_load_to_cpu(rq->hmp_stats.cumulative_runnable_avg, cpu);
-}
-
-u64 cpu_load_sync(int cpu, int sync)
-{
- return scale_load_to_cpu(cpu_cravg_sync(cpu, sync), cpu);
-}
-
-/*
- * Task will fit on a cpu if it's bandwidth consumption on that cpu
- * will be less than sched_upmigrate. A big task that was previously
- * "up" migrated will be considered fitting on "little" cpu if its
- * bandwidth consumption on "little" cpu will be less than
- * sched_downmigrate. This will help avoid frequenty migrations for
- * tasks with load close to the upmigrate threshold
- */
-int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
- enum sched_boost_policy boost_policy)
-{
- int upmigrate = sched_upmigrate;
-
- if (cpu_capacity(cpu) == max_capacity)
- return 1;
-
- if (cpu_capacity(task_cpu(p)) > cpu_capacity(cpu))
- upmigrate = sched_downmigrate;
-
- if (boost_policy != SCHED_BOOST_ON_BIG) {
- if (task_nice(p) > SCHED_UPMIGRATE_MIN_NICE ||
- upmigrate_discouraged(p))
- return 1;
-
- if (task_load < upmigrate)
- return 1;
- } else {
- if (task_sched_boost(p) || task_load >= upmigrate)
- return 0;
-
- return 1;
- }
-
- return 0;
-}
-
-int task_will_fit(struct task_struct *p, int cpu)
-{
- u64 tload = scale_load_to_cpu(task_load(p), cpu);
-
- return task_load_will_fit(p, tload, cpu, sched_boost_policy());
-}
-
-/*
- * Return the cost of running task p on CPU cpu. This function
- * currently assumes that task p is the only task which will run on
- * the CPU.
- */
-unsigned int power_cost(int cpu, u64 demand)
-{
- int first, mid, last;
- struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
- struct cpu_pstate_pwr *costs;
- struct freq_max_load *max_load;
- int total_static_pwr_cost = 0;
- struct rq *rq = cpu_rq(cpu);
- unsigned int pc;
-
- if (!per_cpu_info || !per_cpu_info[cpu].ptable)
- /*
- * When power aware scheduling is not in use, or CPU
- * power data is not available, just use the CPU
- * capacity as a rough stand-in for real CPU power
- * numbers, assuming bigger CPUs are more power
- * hungry.
- */
- return cpu_max_possible_capacity(cpu);
-
- rcu_read_lock();
- max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
- if (!max_load) {
- pc = cpu_max_possible_capacity(cpu);
- goto unlock;
- }
-
- costs = per_cpu_info[cpu].ptable;
-
- if (demand <= max_load->freqs[0].hdemand) {
- pc = costs[0].power;
- goto unlock;
- } else if (demand > max_load->freqs[max_load->length - 1].hdemand) {
- pc = costs[max_load->length - 1].power;
- goto unlock;
- }
-
- first = 0;
- last = max_load->length - 1;
- mid = (last - first) >> 1;
- while (1) {
- if (demand <= max_load->freqs[mid].hdemand)
- last = mid;
- else
- first = mid;
-
- if (last - first == 1)
- break;
- mid = first + ((last - first) >> 1);
- }
-
- pc = costs[last].power;
-
-unlock:
- rcu_read_unlock();
-
- if (idle_cpu(cpu) && rq->cstate) {
- total_static_pwr_cost += rq->static_cpu_pwr_cost;
- if (rq->cluster->dstate)
- total_static_pwr_cost +=
- rq->cluster->static_cluster_pwr_cost;
- }
-
- return pc + total_static_pwr_cost;
-
-}
-
-struct sched_cluster *rq_cluster(struct rq *rq)
-{
- return rq->cluster;
-}
-
-/*
- * reset_cpu_hmp_stats - reset HMP stats for a cpu
- * nr_big_tasks
- * cumulative_runnable_avg (iff reset_cra is true)
- */
-void reset_cpu_hmp_stats(int cpu, int reset_cra)
-{
- reset_cfs_rq_hmp_stats(cpu, reset_cra);
- reset_hmp_stats(&cpu_rq(cpu)->hmp_stats, reset_cra);
-}
-
-void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
- struct task_struct *p, s64 delta)
-{
- u64 new_task_load;
- u64 old_task_load;
-
- if (sched_disable_window_stats)
- return;
-
- old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p));
- new_task_load = scale_load_to_cpu(delta + task_load(p), task_cpu(p));
-
- if (__is_big_task(p, old_task_load) && !__is_big_task(p, new_task_load))
- stats->nr_big_tasks--;
- else if (!__is_big_task(p, old_task_load) &&
- __is_big_task(p, new_task_load))
- stats->nr_big_tasks++;
-
- BUG_ON(stats->nr_big_tasks < 0);
-}
-
-/*
- * Walk runqueue of cpu and re-initialize 'nr_big_tasks' counters.
- */
-static void update_nr_big_tasks(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- struct task_struct *p;
-
- /* Do not reset cumulative_runnable_avg */
- reset_cpu_hmp_stats(cpu, 0);
-
- list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
- inc_hmp_sched_stats_fair(rq, p, 0);
-}
-
-/* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
-void pre_big_task_count_change(const struct cpumask *cpus)
-{
- int i;
-
- local_irq_disable();
-
- for_each_cpu(i, cpus)
- raw_spin_lock(&cpu_rq(i)->lock);
-}
-
-/*
- * Reinitialize 'nr_big_tasks' counters on all affected cpus
- */
-void post_big_task_count_change(const struct cpumask *cpus)
-{
- int i;
-
- /* Assumes local_irq_disable() keeps online cpumap stable */
- for_each_cpu(i, cpus)
- update_nr_big_tasks(i);
-
- for_each_cpu(i, cpus)
- raw_spin_unlock(&cpu_rq(i)->lock);
-
- local_irq_enable();
-}
-
-static inline int invalid_value_freq_input(unsigned int *data)
-{
- if (data == &sysctl_sched_freq_aggregate)
- return !(*data == 0 || *data == 1);
-
- return 0;
-}
-
-static inline int invalid_value(unsigned int *data)
-{
- unsigned int val = *data;
-
- if (data == &sysctl_sched_ravg_hist_size)
- return (val < 2 || val > RAVG_HIST_SIZE_MAX);
-
- if (data == &sysctl_sched_window_stats_policy)
- return val >= WINDOW_STATS_INVALID_POLICY;
-
- return invalid_value_freq_input(data);
-}
-
-/*
- * Handle "atomic" update of sysctl_sched_window_stats_policy,
- * sysctl_sched_ravg_hist_size variables.
- */
-int sched_window_update_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- int ret;
- unsigned int *data = (unsigned int *)table->data;
- unsigned int old_val;
-
- mutex_lock(&policy_mutex);
-
- old_val = *data;
-
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- if (ret || !write || (write && (old_val == *data)))
- goto done;
-
- if (invalid_value(data)) {
- *data = old_val;
- ret = -EINVAL;
- goto done;
- }
-
- reset_all_window_stats(0, 0);
-
-done:
- mutex_unlock(&policy_mutex);
-
- return ret;
-}
-
-/*
- * Convert percentage value into absolute form. This will avoid div() operation
- * in fast path, to convert task load in percentage scale.
- */
-int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- int ret;
- unsigned int old_val;
- unsigned int *data = (unsigned int *)table->data;
- int update_task_count = 0;
-
- /*
- * The policy mutex is acquired with cpu_hotplug.lock
- * held from cpu_up()->cpufreq_governor_interactive()->
- * sched_set_window(). So enforce the same order here.
- */
- if (write && (data == &sysctl_sched_upmigrate_pct)) {
- update_task_count = 1;
- get_online_cpus();
- }
-
- mutex_lock(&policy_mutex);
-
- old_val = *data;
-
- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-
- if (ret || !write)
- goto done;
-
- if (write && (old_val == *data))
- goto done;
-
- if (sysctl_sched_downmigrate_pct > sysctl_sched_upmigrate_pct ||
- sysctl_sched_group_downmigrate_pct >
- sysctl_sched_group_upmigrate_pct) {
- *data = old_val;
- ret = -EINVAL;
- goto done;
- }
-
- /*
- * Big task tunable change will need to re-classify tasks on
- * runqueue as big and set their counters appropriately.
- * sysctl interface affects secondary variables (*_pct), which is then
- * "atomically" carried over to the primary variables. Atomic change
- * includes taking runqueue lock of all online cpus and re-initiatizing
- * their big counter values based on changed criteria.
- */
- if (update_task_count)
- pre_big_task_count_change(cpu_online_mask);
-
- set_hmp_defaults();
-
- if (update_task_count)
- post_big_task_count_change(cpu_online_mask);
-
-done:
- mutex_unlock(&policy_mutex);
- if (update_task_count)
- put_online_cpus();
- return ret;
-}
-
-inline int nr_big_tasks(struct rq *rq)
-{
- return rq->hmp_stats.nr_big_tasks;
-}
-
-unsigned int cpu_temp(int cpu)
-{
- struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
-
- if (per_cpu_info)
- return per_cpu_info[cpu].temp;
- else
- return 0;
-}
-
-/* Return task demand in percentage scale */
-unsigned int pct_task_load(struct task_struct *p)
-{
- unsigned int load;
-
- load = div64_u64((u64)task_load(p) * 100, (u64)max_task_load());
-
- return load;
-}
-
-static int __init set_sched_ravg_window(char *str)
-{
- unsigned int window_size;
-
- get_option(&str, &window_size);
-
- if (window_size < MIN_SCHED_RAVG_WINDOW ||
- window_size > MAX_SCHED_RAVG_WINDOW) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- sched_ravg_window = window_size;
- return 0;
-}
-
-early_param("sched_ravg_window", set_sched_ravg_window);
-
-#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
-
-static inline u64 scale_exec_time(u64 delta, struct rq *rq)
-{
- u32 freq;
-
- freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
- delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq);
- delta *= rq->cluster->exec_scale_factor;
- delta >>= 10;
-
- return delta;
-}
-
-/* Does freq_required sufficiently exceed or fall behind cur_freq? */
-static inline int
-nearly_same_freq(unsigned int cur_freq, unsigned int freq_required)
-{
- int delta = freq_required - cur_freq;
-
- if (freq_required > cur_freq)
- return delta < sysctl_sched_freq_inc_notify;
-
- delta = -delta;
-
- return delta < sysctl_sched_freq_dec_notify;
-}
-
-/* Convert busy time to frequency equivalent */
-static inline unsigned int load_to_freq(struct rq *rq, u64 load)
-{
- unsigned int freq;
-
- load = scale_load_to_cpu(load, cpu_of(rq));
- load *= 128;
- load = div64_u64(load, max_task_load());
-
- freq = load * cpu_max_possible_freq(cpu_of(rq));
- freq /= 128;
-
- return freq;
-}
-
-/*
- * Return load from all related groups in given frequency domain.
- */
-static void group_load_in_freq_domain(struct cpumask *cpus,
- u64 *grp_load, u64 *new_grp_load)
-{
- int j;
-
- for_each_cpu(j, cpus) {
- struct rq *rq = cpu_rq(j);
-
- *grp_load += rq->grp_time.prev_runnable_sum;
- *new_grp_load += rq->grp_time.nt_prev_runnable_sum;
- }
-}
-
-/*
- * Should scheduler alert governor for changing frequency?
- *
- * @check_pred - evaluate frequency based on the predictive demand
- * @check_groups - add load from all related groups on given cpu
- *
- * check_groups is set to 1 if a "related" task movement/wakeup is triggering
- * the notification check. To avoid "re-aggregation" of demand in such cases,
- * we check whether the migrated/woken tasks demand (along with demand from
- * existing tasks on the cpu) can be met on target cpu
- *
- */
-
-static int send_notification(struct rq *rq, int check_pred, int check_groups)
-{
- unsigned int cur_freq, freq_required;
- unsigned long flags;
- int rc = 0;
- u64 group_load = 0, new_load = 0;
-
- if (check_pred) {
- u64 prev = rq->old_busy_time;
- u64 predicted = rq->hmp_stats.pred_demands_sum;
-
- if (rq->cluster->cur_freq == cpu_max_freq(cpu_of(rq)))
- return 0;
-
- prev = max(prev, rq->old_estimated_time);
- if (prev > predicted)
- return 0;
-
- cur_freq = load_to_freq(rq, prev);
- freq_required = load_to_freq(rq, predicted);
-
- if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
- return 0;
- } else {
- /*
- * Protect from concurrent update of rq->prev_runnable_sum and
- * group cpu load
- */
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (check_groups)
- group_load = rq->grp_time.prev_runnable_sum;
-
- new_load = rq->prev_runnable_sum + group_load;
- new_load = freq_policy_load(rq, new_load);
-
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-
- cur_freq = load_to_freq(rq, rq->old_busy_time);
- freq_required = load_to_freq(rq, new_load);
-
- if (nearly_same_freq(cur_freq, freq_required))
- return 0;
- }
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (!rq->cluster->notifier_sent) {
- rq->cluster->notifier_sent = 1;
- rc = 1;
- trace_sched_freq_alert(cpu_of(rq), check_pred, check_groups, rq,
- new_load);
- }
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-
- return rc;
-}
-
-/* Alert governor if there is a need to change frequency */
-void check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups)
-{
- int cpu = cpu_of(rq);
-
- if (!send_notification(rq, check_pred, check_groups))
- return;
-
- atomic_notifier_call_chain(
- &load_alert_notifier_head, 0,
- (void *)(long)cpu);
-}
-
-void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
- struct task_struct *p)
-{
- bool check_groups;
-
- rcu_read_lock();
- check_groups = task_in_related_thread_group(p);
- rcu_read_unlock();
-
- if (!same_freq_domain(src_cpu, dest_cpu)) {
- if (!src_cpu_dead)
- check_for_freq_change(cpu_rq(src_cpu), false,
- check_groups);
- check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
- } else {
- check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
- }
-}
-
-#define INC_STEP 8
-#define DEC_STEP 2
-#define CONSISTENT_THRES 16
-#define INC_STEP_BIG 16
-/*
- * bucket_increase - update the count of all buckets
- *
- * @buckets: array of buckets tracking busy time of a task
- * @idx: the index of bucket to be incremented
- *
- * Each time a complete window finishes, count of bucket that runtime
- * falls in (@idx) is incremented. Counts of all other buckets are
- * decayed. The rate of increase and decay could be different based
- * on current count in the bucket.
- */
-static inline void bucket_increase(u8 *buckets, int idx)
-{
- int i, step;
-
- for (i = 0; i < NUM_BUSY_BUCKETS; i++) {
- if (idx != i) {
- if (buckets[i] > DEC_STEP)
- buckets[i] -= DEC_STEP;
- else
- buckets[i] = 0;
- } else {
- step = buckets[i] >= CONSISTENT_THRES ?
- INC_STEP_BIG : INC_STEP;
- if (buckets[i] > U8_MAX - step)
- buckets[i] = U8_MAX;
- else
- buckets[i] += step;
- }
- }
-}
-
-static inline int busy_to_bucket(u32 normalized_rt)
-{
- int bidx;
-
- bidx = mult_frac(normalized_rt, NUM_BUSY_BUCKETS, max_task_load());
- bidx = min(bidx, NUM_BUSY_BUCKETS - 1);
-
- /*
- * Combine lowest two buckets. The lowest frequency falls into
- * 2nd bucket and thus keep predicting lowest bucket is not
- * useful.
- */
- if (!bidx)
- bidx++;
-
- return bidx;
-}
-
-/*
- * get_pred_busy - calculate predicted demand for a task on runqueue
- *
- * @rq: runqueue of task p
- * @p: task whose prediction is being updated
- * @start: starting bucket. returned prediction should not be lower than
- * this bucket.
- * @runtime: runtime of the task. returned prediction should not be lower
- * than this runtime.
- * Note: @start can be derived from @runtime. It's passed in only to
- * avoid duplicated calculation in some cases.
- *
- * A new predicted busy time is returned for task @p based on @runtime
- * passed in. The function searches through buckets that represent busy
- * time equal to or bigger than @runtime and attempts to find the bucket to
- * to use for prediction. Once found, it searches through historical busy
- * time and returns the latest that falls into the bucket. If no such busy
- * time exists, it returns the medium of that bucket.
- */
-static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
- int start, u32 runtime)
-{
- int i;
- u8 *buckets = p->ravg.busy_buckets;
- u32 *hist = p->ravg.sum_history;
- u32 dmin, dmax;
- u64 cur_freq_runtime = 0;
- int first = NUM_BUSY_BUCKETS, final;
- u32 ret = runtime;
-
- /* skip prediction for new tasks due to lack of history */
- if (unlikely(is_new_task(p)))
- goto out;
-
- /* find minimal bucket index to pick */
- for (i = start; i < NUM_BUSY_BUCKETS; i++) {
- if (buckets[i]) {
- first = i;
- break;
- }
- }
- /* if no higher buckets are filled, predict runtime */
- if (first >= NUM_BUSY_BUCKETS)
- goto out;
-
- /* compute the bucket for prediction */
- final = first;
-
- /* determine demand range for the predicted bucket */
- if (final < 2) {
- /* lowest two buckets are combined */
- dmin = 0;
- final = 1;
- } else {
- dmin = mult_frac(final, max_task_load(), NUM_BUSY_BUCKETS);
- }
- dmax = mult_frac(final + 1, max_task_load(), NUM_BUSY_BUCKETS);
-
- /*
- * search through runtime history and return first runtime that falls
- * into the range of predicted bucket.
- */
- for (i = 0; i < sched_ravg_hist_size; i++) {
- if (hist[i] >= dmin && hist[i] < dmax) {
- ret = hist[i];
- break;
- }
- }
- /* no historical runtime within bucket found, use average of the bin */
- if (ret < dmin)
- ret = (dmin + dmax) / 2;
- /*
- * when updating in middle of a window, runtime could be higher
- * than all recorded history. Always predict at least runtime.
- */
- ret = max(runtime, ret);
-out:
- trace_sched_update_pred_demand(rq, p, runtime,
- mult_frac((unsigned int)cur_freq_runtime, 100,
- sched_ravg_window), ret);
- return ret;
-}
-
-static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p)
-{
- if (p->ravg.pred_demand >= p->ravg.curr_window)
- return p->ravg.pred_demand;
-
- return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window),
- p->ravg.curr_window);
-}
-
-static void reset_all_task_stats(void)
-{
- struct task_struct *g, *p;
-
- do_each_thread(g, p) {
- reset_task_stats(p);
- } while_each_thread(g, p);
-}
-
-enum reset_reason_code {
- WINDOW_CHANGE,
- POLICY_CHANGE,
- HIST_SIZE_CHANGE,
- FREQ_AGGREGATE_CHANGE,
-};
-
-const char *sched_window_reset_reasons[] = {
- "WINDOW_CHANGE",
- "POLICY_CHANGE",
- "HIST_SIZE_CHANGE",
-};
-
-/* Called with IRQs enabled */
-void reset_all_window_stats(u64 window_start, unsigned int window_size)
-{
- int cpu, i;
- unsigned long flags;
- u64 start_ts = sched_ktime_clock();
- int reason = WINDOW_CHANGE;
- unsigned int old = 0, new = 0;
-
- local_irq_save(flags);
-
- read_lock(&tasklist_lock);
-
- read_lock(&related_thread_group_lock);
-
- /* Taking all runqueue locks prevents race with sched_exit(). */
- for_each_possible_cpu(cpu)
- raw_spin_lock(&cpu_rq(cpu)->lock);
-
- sched_disable_window_stats = 1;
-
- reset_all_task_stats();
-
- read_unlock(&tasklist_lock);
-
- if (window_size) {
- sched_ravg_window = window_size * TICK_NSEC;
- set_hmp_defaults();
- sched_load_granule = sched_ravg_window / NUM_LOAD_INDICES;
- }
-
- sched_disable_window_stats = 0;
-
- for_each_possible_cpu(cpu) {
- struct rq *rq = cpu_rq(cpu);
-
- if (window_start)
- rq->window_start = window_start;
- rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
- rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
- memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
- for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
- memset(&rq->load_subs[i], 0,
- sizeof(struct load_subtractions));
- clear_top_tasks_table(rq->top_tasks[i]);
- clear_top_tasks_bitmap(rq->top_tasks_bitmap[i]);
- }
-
- rq->curr_table = 0;
- rq->curr_top = 0;
- rq->prev_top = 0;
- reset_cpu_hmp_stats(cpu, 1);
- }
-
- if (sched_window_stats_policy != sysctl_sched_window_stats_policy) {
- reason = POLICY_CHANGE;
- old = sched_window_stats_policy;
- new = sysctl_sched_window_stats_policy;
- sched_window_stats_policy = sysctl_sched_window_stats_policy;
- } else if (sched_ravg_hist_size != sysctl_sched_ravg_hist_size) {
- reason = HIST_SIZE_CHANGE;
- old = sched_ravg_hist_size;
- new = sysctl_sched_ravg_hist_size;
- sched_ravg_hist_size = sysctl_sched_ravg_hist_size;
- } else if (sched_freq_aggregate !=
- sysctl_sched_freq_aggregate) {
- reason = FREQ_AGGREGATE_CHANGE;
- old = sched_freq_aggregate;
- new = sysctl_sched_freq_aggregate;
- sched_freq_aggregate = sysctl_sched_freq_aggregate;
- }
-
- for_each_possible_cpu(cpu)
- raw_spin_unlock(&cpu_rq(cpu)->lock);
-
- read_unlock(&related_thread_group_lock);
-
- local_irq_restore(flags);
-
- trace_sched_reset_all_window_stats(window_start, window_size,
- sched_ktime_clock() - start_ts, reason, old, new);
-}
-
-void sched_get_cpus_busy(struct sched_load *busy,
- const struct cpumask *query_cpus)
-{
- unsigned long flags;
- struct rq *rq;
- const int cpus = cpumask_weight(query_cpus);
- u64 load[cpus], group_load[cpus];
- u64 nload[cpus], ngload[cpus];
- u64 pload[cpus];
- unsigned int max_freq[cpus];
- int notifier_sent = 0;
- int early_detection[cpus];
- int cpu, i = 0;
- unsigned int window_size;
- u64 max_prev_sum = 0;
- int max_busy_cpu = cpumask_first(query_cpus);
- u64 total_group_load = 0, total_ngload = 0;
- bool aggregate_load = false;
- struct sched_cluster *cluster = cpu_cluster(cpumask_first(query_cpus));
-
- if (unlikely(cpus == 0))
- return;
-
- local_irq_save(flags);
-
- /*
- * This function could be called in timer context, and the
- * current task may have been executing for a long time. Ensure
- * that the window stats are current by doing an update.
- */
-
- for_each_cpu(cpu, query_cpus)
- raw_spin_lock(&cpu_rq(cpu)->lock);
-
- window_size = sched_ravg_window;
-
- /*
- * We don't really need the cluster lock for this entire for loop
- * block. However, there is no advantage in optimizing this as rq
- * locks are held regardless and would prevent migration anyways
- */
- raw_spin_lock(&cluster->load_lock);
-
- for_each_cpu(cpu, query_cpus) {
- rq = cpu_rq(cpu);
-
- update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
- 0);
-
- /*
- * Ensure that we don't report load for 'cpu' again via the
- * cpufreq_update_util path in the window that started at
- * rq->window_start
- */
- rq->load_reported_window = rq->window_start;
-
- account_load_subtractions(rq);
- load[i] = rq->prev_runnable_sum;
- nload[i] = rq->nt_prev_runnable_sum;
- pload[i] = rq->hmp_stats.pred_demands_sum;
- rq->old_estimated_time = pload[i];
-
- if (load[i] > max_prev_sum) {
- max_prev_sum = load[i];
- max_busy_cpu = cpu;
- }
-
- /*
- * sched_get_cpus_busy() is called for all CPUs in a
- * frequency domain. So the notifier_sent flag per
- * cluster works even when a frequency domain spans
- * more than 1 cluster.
- */
- if (rq->cluster->notifier_sent) {
- notifier_sent = 1;
- rq->cluster->notifier_sent = 0;
- }
- early_detection[i] = (rq->ed_task != NULL);
- max_freq[i] = cpu_max_freq(cpu);
- i++;
- }
-
- raw_spin_unlock(&cluster->load_lock);
-
- group_load_in_freq_domain(
- &cpu_rq(max_busy_cpu)->freq_domain_cpumask,
- &total_group_load, &total_ngload);
- aggregate_load = !!(total_group_load > sched_freq_aggregate_threshold);
-
- i = 0;
- for_each_cpu(cpu, query_cpus) {
- group_load[i] = 0;
- ngload[i] = 0;
-
- if (early_detection[i])
- goto skip_early;
-
- rq = cpu_rq(cpu);
- if (aggregate_load) {
- if (cpu == max_busy_cpu) {
- group_load[i] = total_group_load;
- ngload[i] = total_ngload;
- }
- } else {
- group_load[i] = rq->grp_time.prev_runnable_sum;
- ngload[i] = rq->grp_time.nt_prev_runnable_sum;
- }
-
- load[i] += group_load[i];
- nload[i] += ngload[i];
-
- load[i] = freq_policy_load(rq, load[i]);
- rq->old_busy_time = load[i];
-
- /*
- * Scale load in reference to cluster max_possible_freq.
- *
- * Note that scale_load_to_cpu() scales load in reference to
- * the cluster max_freq.
- */
- load[i] = scale_load_to_cpu(load[i], cpu);
- nload[i] = scale_load_to_cpu(nload[i], cpu);
- pload[i] = scale_load_to_cpu(pload[i], cpu);
-skip_early:
- i++;
- }
-
- for_each_cpu(cpu, query_cpus)
- raw_spin_unlock(&(cpu_rq(cpu))->lock);
-
- local_irq_restore(flags);
-
- i = 0;
- for_each_cpu(cpu, query_cpus) {
- rq = cpu_rq(cpu);
-
- if (early_detection[i]) {
- busy[i].prev_load = div64_u64(sched_ravg_window,
- NSEC_PER_USEC);
- busy[i].new_task_load = 0;
- busy[i].predicted_load = 0;
- goto exit_early;
- }
-
- load[i] = scale_load_to_freq(load[i], max_freq[i],
- cpu_max_possible_freq(cpu));
- nload[i] = scale_load_to_freq(nload[i], max_freq[i],
- cpu_max_possible_freq(cpu));
-
- pload[i] = scale_load_to_freq(pload[i], max_freq[i],
- rq->cluster->max_possible_freq);
-
- busy[i].prev_load = div64_u64(load[i], NSEC_PER_USEC);
- busy[i].new_task_load = div64_u64(nload[i], NSEC_PER_USEC);
- busy[i].predicted_load = div64_u64(pload[i], NSEC_PER_USEC);
-
-exit_early:
- trace_sched_get_busy(cpu, busy[i].prev_load,
- busy[i].new_task_load,
- busy[i].predicted_load,
- early_detection[i]);
- i++;
- }
-}
-
-int sched_set_window(u64 window_start, unsigned int window_size)
-{
- u64 now, cur_jiffies, jiffy_ktime_ns;
- s64 ws;
- unsigned long flags;
-
- if (window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW)
- return -EINVAL;
-
- mutex_lock(&policy_mutex);
-
- /*
- * Get a consistent view of ktime, jiffies, and the time
- * since the last jiffy (based on last_jiffies_update).
- */
- local_irq_save(flags);
- cur_jiffies = jiffy_to_ktime_ns(&now, &jiffy_ktime_ns);
- local_irq_restore(flags);
-
- /* translate window_start from jiffies to nanoseconds */
- ws = (window_start - cur_jiffies); /* jiffy difference */
- ws *= TICK_NSEC;
- ws += jiffy_ktime_ns;
-
- /*
- * Roll back calculated window start so that it is in
- * the past (window stats must have a current window).
- */
- while (ws > now)
- ws -= (window_size * TICK_NSEC);
-
- BUG_ON(sched_ktime_clock() < ws);
-
- reset_all_window_stats(ws, window_size);
-
- sched_update_freq_max_load(cpu_possible_mask);
-
- mutex_unlock(&policy_mutex);
-
- return 0;
-}
-
-static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index)
-{
- rq->load_subs[index].window_start = ws;
- rq->load_subs[index].subs = 0;
- rq->load_subs[index].new_subs = 0;
-}
-
-#define sched_up_down_migrate_auto_update 1
-static void check_for_up_down_migrate_update(const struct cpumask *cpus)
-{
- int i = cpumask_first(cpus);
-
- if (!sched_up_down_migrate_auto_update)
- return;
-
- if (cpu_max_possible_capacity(i) == max_possible_capacity)
- return;
-
- if (cpu_max_possible_freq(i) == cpu_max_freq(i))
- up_down_migrate_scale_factor = 1024;
- else
- up_down_migrate_scale_factor = (1024 *
- cpu_max_possible_freq(i)) / cpu_max_freq(i);
-
- update_up_down_migrate();
-}
-
-void update_cpu_cluster_capacity(const cpumask_t *cpus)
-{
- int i;
- struct sched_cluster *cluster;
- struct cpumask cpumask;
-
- cpumask_copy(&cpumask, cpus);
- pre_big_task_count_change(cpu_possible_mask);
-
- for_each_cpu(i, &cpumask) {
- cluster = cpu_rq(i)->cluster;
- cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
-
- cluster->capacity = compute_capacity(cluster);
- cluster->load_scale_factor = compute_load_scale_factor(cluster);
-
- /* 'cpus' can contain cpumask more than one cluster */
- check_for_up_down_migrate_update(&cluster->cpus);
- }
-
- __update_min_max_capacity();
-
- post_big_task_count_change(cpu_possible_mask);
-}
-
-static DEFINE_SPINLOCK(cpu_freq_min_max_lock);
-void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
-{
- struct cpumask cpumask;
- struct sched_cluster *cluster;
- int i, update_capacity = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&cpu_freq_min_max_lock, flags);
- cpumask_copy(&cpumask, cpus);
- for_each_cpu(i, &cpumask) {
- cluster = cpu_rq(i)->cluster;
- cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
-
- update_capacity += (cluster->max_mitigated_freq != fmax);
- cluster->max_mitigated_freq = fmax;
- }
- spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags);
-
- if (update_capacity)
- update_cpu_cluster_capacity(cpus);
-}
-
-static int cpufreq_notifier_trans(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
- unsigned int cpu = freq->cpu, new_freq = freq->new;
- unsigned long flags;
- struct sched_cluster *cluster;
- struct cpumask policy_cpus = cpu_rq(cpu)->freq_domain_cpumask;
- int i, j;
-
- if (val != CPUFREQ_POSTCHANGE)
- return 0;
-
- BUG_ON(!new_freq);
-
- if (cpu_cur_freq(cpu) == new_freq)
- return 0;
-
- for_each_cpu(i, &policy_cpus) {
- cluster = cpu_rq(i)->cluster;
-
- for_each_cpu(j, &cluster->cpus) {
- struct rq *rq = cpu_rq(j);
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- update_task_ravg(rq->curr, rq, TASK_UPDATE,
- sched_ktime_clock(), 0);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
-
- cluster->cur_freq = new_freq;
- cpumask_andnot(&policy_cpus, &policy_cpus, &cluster->cpus);
- }
-
- return 0;
-}
-
-static int pwr_stats_ready_notifier(struct notifier_block *nb,
- unsigned long cpu, void *data)
-{
- cpumask_t mask = CPU_MASK_NONE;
-
- cpumask_set_cpu(cpu, &mask);
- sched_update_freq_max_load(&mask);
-
- mutex_lock(&cluster_lock);
- sort_clusters();
- mutex_unlock(&cluster_lock);
-
- return 0;
-}
-
-static struct notifier_block notifier_trans_block = {
- .notifier_call = cpufreq_notifier_trans
-};
-
-static struct notifier_block notifier_pwr_stats_ready = {
- .notifier_call = pwr_stats_ready_notifier
-};
-
-int __weak register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb)
-{
- return -EINVAL;
-}
-
-static int register_sched_callback(void)
-{
- cpufreq_register_notifier(¬ifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- register_cpu_pwr_stats_ready_notifier(¬ifier_pwr_stats_ready);
-
- return 0;
-}
-
-/*
- * cpufreq callbacks can be registered at core_initcall or later time.
- * Any registration done prior to that is "forgotten" by cpufreq. See
- * initialization of variable init_cpufreq_transition_notifier_list_called
- * for further information.
- */
-core_initcall(register_sched_callback);
-
-void update_avg_burst(struct task_struct *p)
-{
- update_avg(&p->ravg.avg_burst, p->ravg.curr_burst);
- p->ravg.curr_burst = 0;
-}
-
-void note_task_waking(struct task_struct *p, u64 wallclock)
-{
- u64 sleep_time = wallclock - p->last_switch_out_ts;
-
- p->last_wake_ts = wallclock;
- update_avg(&p->ravg.avg_sleep_time, sleep_time);
-}
-
-#ifdef CONFIG_CGROUP_SCHED
-u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
- struct cftype *cft)
-{
- struct task_group *tg = css_tg(css);
-
- return tg->upmigrate_discouraged;
-}
-
-int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
- struct cftype *cft, u64 upmigrate_discourage)
-{
- struct task_group *tg = css_tg(css);
- int discourage = upmigrate_discourage > 0;
-
- if (tg->upmigrate_discouraged == discourage)
- return 0;
-
- /*
- * Revisit big-task classification for tasks of this cgroup. It would
- * have been efficient to walk tasks of just this cgroup in running
- * state, but we don't have easy means to do that. Walk all tasks in
- * running state on all cpus instead and re-visit their big task
- * classification.
- */
- get_online_cpus();
- pre_big_task_count_change(cpu_online_mask);
-
- tg->upmigrate_discouraged = discourage;
-
- post_big_task_count_change(cpu_online_mask);
- put_online_cpus();
-
- return 0;
-}
-#endif /* CONFIG_CGROUP_SCHED */
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index b852cbe..5405d3f 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -78,14 +78,6 @@
{
}
-#ifdef CONFIG_SCHED_WALT
-static void
-fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
-}
-#endif
-
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
@@ -114,7 +106,4 @@
.prio_changed = prio_changed_idle,
.switched_to = switched_to_idle,
.update_curr = update_curr_idle,
-#ifdef CONFIG_SCHED_WALT
- .fixup_hmp_sched_stats = fixup_hmp_sched_stats_idle,
-#endif
};
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 65b34b4..96f5654 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -11,60 +11,6 @@
#include <linux/irq_work.h>
#include <trace/events/sched.h>
-#ifdef CONFIG_SCHED_WALT
-
-static void
-inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
-{
- inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
-{
- dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
- pred_demand_delta);
-}
-
-#ifdef CONFIG_SMP
-static int find_lowest_rq(struct task_struct *task);
-
-#ifdef CONFIG_SCHED_HMP
-static int
-select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
-{
- int target;
-
- rcu_read_lock();
- target = find_lowest_rq(p);
- if (target != -1)
- cpu = target;
- rcu_read_unlock();
-
- return cpu;
-}
-#endif /* CONFIG_SCHED_HMP */
-#endif /* CONFIG_SMP */
-#else /* CONFIG_SCHED_WALT */
-
-static inline void
-inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
-
-#endif /* CONFIG_SCHED_HMP */
-
#include "walt.h"
int sched_rr_timeslice = RR_TIMESLICE;
@@ -1436,7 +1382,7 @@
rt_se->timeout = 0;
enqueue_rt_entity(rt_se, flags);
- inc_hmp_sched_stats_rt(rq, p);
+ walt_inc_cumulative_runnable_avg(rq, p);
if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
enqueue_pushable_task(rq, p);
@@ -1448,7 +1394,7 @@
update_curr_rt(rq);
dequeue_rt_entity(rt_se, flags);
- dec_hmp_sched_stats_rt(rq, p);
+ walt_dec_cumulative_runnable_avg(rq, p);
dequeue_pushable_task(rq, p);
}
@@ -1515,10 +1461,6 @@
struct rq *rq;
bool may_not_preempt;
-#ifdef CONFIG_SCHED_HMP
- return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
-#endif
-
/* For anything but wake ups, just return the task_cpu */
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
goto out;
@@ -1771,93 +1713,6 @@
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
-#ifdef CONFIG_SCHED_HMP
-static int find_lowest_rq_hmp(struct task_struct *task)
-{
- struct cpumask *lowest_mask = *this_cpu_ptr(&local_cpu_mask);
- struct cpumask candidate_mask = CPU_MASK_NONE;
- struct sched_cluster *cluster;
- int best_cpu = -1;
- int prev_cpu = task_cpu(task);
- u64 cpu_load, min_load = ULLONG_MAX;
- int i;
- int restrict_cluster;
- int boost_on_big;
- int pack_task, wakeup_latency, least_wakeup_latency = INT_MAX;
-
- boost_on_big = sched_boost() == FULL_THROTTLE_BOOST &&
- sched_boost_policy() == SCHED_BOOST_ON_BIG;
-
- restrict_cluster = sysctl_sched_restrict_cluster_spill;
-
- /* Make sure the mask is initialized first */
- if (unlikely(!lowest_mask))
- return best_cpu;
-
- if (task->nr_cpus_allowed == 1)
- return best_cpu; /* No other targets possible */
-
- if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
- return best_cpu; /* No targets found */
-
- pack_task = is_short_burst_task(task);
-
- /*
- * At this point we have built a mask of cpus representing the
- * lowest priority tasks in the system. Now we want to elect
- * the best one based on our affinity and topology.
- */
-
- for_each_sched_cluster(cluster) {
- if (boost_on_big && cluster->capacity != max_possible_capacity)
- continue;
-
- cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask);
- cpumask_andnot(&candidate_mask, &candidate_mask,
- cpu_isolated_mask);
-
- if (cpumask_empty(&candidate_mask))
- continue;
-
- for_each_cpu(i, &candidate_mask) {
- if (sched_cpu_high_irqload(i))
- continue;
-
- cpu_load = cpu_rq(i)->hmp_stats.cumulative_runnable_avg;
- if (!restrict_cluster)
- cpu_load = scale_load_to_cpu(cpu_load, i);
-
- if (pack_task) {
- wakeup_latency = cpu_rq(i)->wakeup_latency;
-
- if (wakeup_latency > least_wakeup_latency)
- continue;
-
- if (wakeup_latency < least_wakeup_latency) {
- least_wakeup_latency = wakeup_latency;
- min_load = cpu_load;
- best_cpu = i;
- continue;
- }
- }
-
- if (cpu_load < min_load ||
- (cpu_load == min_load &&
- (i == prev_cpu || (best_cpu != prev_cpu &&
- cpus_share_cache(prev_cpu, i))))) {
- min_load = cpu_load;
- best_cpu = i;
- }
- }
-
- if (restrict_cluster && best_cpu != -1)
- break;
- }
-
- return best_cpu;
-}
-#endif /* CONFIG_SCHED_HMP */
-
static inline unsigned long task_util(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
@@ -1888,10 +1743,6 @@
long max_spare_cap = -LONG_MAX;
bool placement_boost;
-#ifdef CONFIG_SCHED_HMP
- return find_lowest_rq_hmp(task);
-#endif
-
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
return -1;
@@ -2733,7 +2584,7 @@
.update_curr = update_curr_rt,
#ifdef CONFIG_SCHED_WALT
- .fixup_hmp_sched_stats = fixup_hmp_sched_stats_rt,
+ .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
};
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2524954..83f3b84 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -28,7 +28,7 @@
#ifdef CONFIG_SCHED_WALT
extern unsigned int sched_ravg_window;
-struct hmp_sched_stats {
+struct walt_sched_stats {
int nr_big_tasks;
u64 cumulative_runnable_avg;
u64 pred_demands_sum;
@@ -318,10 +318,6 @@
struct task_group {
struct cgroup_subsys_state css;
-#ifdef CONFIG_SCHED_HMP
- bool upmigrate_discouraged;
-#endif
-
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */
struct sched_entity **se;
@@ -507,15 +503,7 @@
struct list_head leaf_cfs_rq_list;
struct task_group *tg; /* group that "owns" this runqueue */
-#ifdef CONFIG_SCHED_WALT
- u64 cumulative_runnable_avg;
-#endif
-
#ifdef CONFIG_CFS_BANDWIDTH
-#ifdef CONFIG_SCHED_WALT
- struct hmp_sched_stats hmp_stats;
-#endif
-
int runtime_enabled;
u64 runtime_expires;
s64 runtime_remaining;
@@ -772,13 +760,13 @@
#ifdef CONFIG_SCHED_WALT
struct sched_cluster *cluster;
struct cpumask freq_domain_cpumask;
- struct hmp_sched_stats hmp_stats;
+ struct walt_sched_stats walt_stats;
int cstate, wakeup_latency, wakeup_energy;
u64 window_start;
s64 cum_window_start;
u64 load_reported_window;
- unsigned long hmp_flags;
+ unsigned long walt_flags;
u64 cur_irqload;
u64 avg_irqload;
@@ -801,7 +789,6 @@
u8 curr_table;
int prev_top;
int curr_top;
- struct irq_work irq_work;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -1312,7 +1299,6 @@
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
-#define WF_NO_NOTIFIER 0x08 /* do not notify governor */
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
@@ -1430,7 +1416,7 @@
void (*task_change_group) (struct task_struct *p, int type);
#endif
#ifdef CONFIG_SCHED_WALT
- void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p,
+ void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p,
u32 new_task_load, u32 new_pred_demand);
#endif
};
@@ -1758,7 +1744,7 @@
#ifdef CONFIG_SCHED_WALT
if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
- util = cpu_rq(cpu)->hmp_stats.cumulative_runnable_avg;
+ util = cpu_rq(cpu)->walt_stats.cumulative_runnable_avg;
util = div64_u64(util,
sched_ravg_window >> SCHED_CAPACITY_SHIFT);
}
@@ -2211,11 +2197,13 @@
#ifdef CONFIG_SCHED_WALT
u64 sched_ktime_clock(void);
+void note_task_waking(struct task_struct *p, u64 wallclock);
#else /* CONFIG_SCHED_WALT */
static inline u64 sched_ktime_clock(void)
{
return 0;
}
+static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
#endif /* CONFIG_SCHED_WALT */
#ifdef CONFIG_CPU_FREQ
@@ -2350,13 +2338,11 @@
extern unsigned int __read_mostly sched_load_granule;
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
-extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
extern int update_preferred_cluster(struct related_thread_group *grp,
struct task_struct *p, u32 old_load);
extern void set_preferred_cluster(struct related_thread_group *grp);
extern void add_new_task_to_grp(struct task_struct *new);
extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
-extern void update_avg(u64 *avg, u64 sample);
#define NO_BOOST 0
#define FULL_THROTTLE_BOOST 1
@@ -2538,6 +2524,11 @@
return p->ravg.demand;
}
+static inline unsigned int task_pl(struct task_struct *p)
+{
+ return p->ravg.pred_demand;
+}
+
#define pct_to_real(tunable) \
(div64_u64((u64)tunable * (u64)max_task_load(), 100))
@@ -2571,54 +2562,11 @@
#define BOOST_KICK 0
#define CPU_RESERVED 1
-static inline u64 cpu_cravg_sync(int cpu, int sync)
-{
- struct rq *rq = cpu_rq(cpu);
- u64 load;
-
- load = rq->hmp_stats.cumulative_runnable_avg;
-
- /*
- * If load is being checked in a sync wakeup environment,
- * we may want to discount the load of the currently running
- * task.
- */
- if (sync && cpu == smp_processor_id()) {
- if (load > rq->curr->ravg.demand)
- load -= rq->curr->ravg.demand;
- else
- load = 0;
- }
-
- return load;
-}
-
-extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
-extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
extern int sched_boost(void);
-extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
- enum sched_boost_policy boost_policy);
-extern int task_will_fit(struct task_struct *p, int cpu);
-extern u64 cpu_load(int cpu);
-extern u64 cpu_load_sync(int cpu, int sync);
extern int preferred_cluster(struct sched_cluster *cluster,
struct task_struct *p);
-extern void inc_rq_hmp_stats(struct rq *rq,
- struct task_struct *p, int change_cra);
-extern void dec_rq_hmp_stats(struct rq *rq,
- struct task_struct *p, int change_cra);
-extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
-extern int upmigrate_discouraged(struct task_struct *p);
extern struct sched_cluster *rq_cluster(struct rq *rq);
-extern int nr_big_tasks(struct rq *rq);
extern void reset_task_stats(struct task_struct *p);
-extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
-extern void inc_hmp_sched_stats_fair(struct rq *rq,
- struct task_struct *p, int change_cra);
-extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
- struct cftype *cft);
-extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
- struct cftype *cft, u64 upmigrate_discourage);
extern void clear_top_tasks_bitmap(unsigned long *bitmap);
#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
@@ -2654,53 +2602,42 @@
{
struct rq *rq = cpu_rq(cpu);
- return test_bit(CPU_RESERVED, &rq->hmp_flags);
+ return test_bit(CPU_RESERVED, &rq->walt_flags);
}
static inline int mark_reserved(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+ return test_and_set_bit(CPU_RESERVED, &rq->walt_flags);
}
static inline void clear_reserved(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- clear_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
-static inline bool
-__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
-{
- return (p->on_rq || p->last_sleep_ts >= rq->window_start);
+ clear_bit(CPU_RESERVED, &rq->walt_flags);
}
static inline bool
task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
{
- return cpu_of(rq) == task_cpu(p) && __task_in_cum_window_demand(rq, p);
+ return cpu_of(rq) == task_cpu(p) && (p->on_rq || p->last_sleep_ts >=
+ rq->window_start);
}
-static inline void
-dec_cum_window_demand(struct rq *rq, struct task_struct *p)
-{
- rq->cum_window_demand -= p->ravg.demand;
- WARN_ON_ONCE(rq->cum_window_demand < 0);
-}
-
-static inline void
-inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta)
+static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta)
{
rq->cum_window_demand += delta;
+ if (unlikely((s64)rq->cum_window_demand < 0))
+ rq->cum_window_demand = 0;
}
extern void update_cpu_cluster_capacity(const cpumask_t *cpus);
extern unsigned long thermal_cap(int cpu);
-extern void clear_hmp_request(int cpu);
+extern void clear_walt_request(int cpu);
extern int got_boost_kick(void);
extern void clear_boost_kick(int cpu);
@@ -2709,18 +2646,14 @@
extern void clear_ed_task(struct task_struct *p, struct rq *rq);
extern bool early_detection_notify(struct rq *rq, u64 wallclock);
-#ifdef CONFIG_SCHED_HMP
-extern unsigned int power_cost(int cpu, u64 demand);
-#else
static inline unsigned int power_cost(int cpu, u64 demand)
{
return cpu_max_possible_capacity(cpu);
}
-#endif
#else /* CONFIG_SCHED_WALT */
-struct hmp_sched_stats;
+struct walt_sched_stats;
struct related_thread_group;
struct sched_cluster;
@@ -2731,44 +2664,13 @@
static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
-static inline int task_will_fit(struct task_struct *p, int cpu)
-{
- return 1;
-}
-
static inline int sched_boost(void)
{
return 0;
}
-static inline int is_big_task(struct task_struct *p)
-{
- return 0;
-}
-
-static inline int nr_big_tasks(struct rq *rq)
-{
- return 0;
-}
-
-static inline int is_cpu_throttling_imminent(int cpu)
-{
- return 0;
-}
-
-static inline int is_task_migration_throttled(struct task_struct *p)
-{
- return 0;
-}
-
static inline bool is_max_capacity_cpu(int cpu) { return true; }
-static inline void
-inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
-
-static inline void
-dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
-
static inline int
preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
{
@@ -2804,6 +2706,7 @@
}
static inline u32 task_load(struct task_struct *p) { return 0; }
+static inline u32 task_pl(struct task_struct *p) { return 0; }
static inline int update_preferred_cluster(struct related_thread_group *grp,
struct task_struct *p, u32 old_load)
@@ -2828,17 +2731,7 @@
#define trace_sched_cpu_load_cgroup(...)
#define trace_sched_cpu_load_wakeup(...)
-static inline bool
-__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
-{
- return 0;
-}
-
-static inline void
-dec_cum_window_demand(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta) { }
+static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta) { }
static inline void update_cpu_cluster_capacity(const cpumask_t *cpus) { }
@@ -2849,7 +2742,7 @@
}
#endif
-static inline void clear_hmp_request(int cpu) { }
+static inline void clear_walt_request(int cpu) { }
static inline int got_boost_kick(void)
{
@@ -2879,88 +2772,7 @@
#endif /* CONFIG_SCHED_WALT */
-#ifdef CONFIG_SCHED_HMP
-#define energy_aware() false
-
-extern int is_big_task(struct task_struct *p);
-extern unsigned int pct_task_load(struct task_struct *p);
-extern void notify_migration(int src_cpu, int dest_cpu,
- bool src_cpu_dead, struct task_struct *p);
-extern void note_task_waking(struct task_struct *p, u64 wallclock);
-extern void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
-extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
- struct task_struct *p, s64 delta);
-extern unsigned int cpu_temp(int cpu);
-extern void pre_big_task_count_change(const struct cpumask *cpus);
-extern void post_big_task_count_change(const struct cpumask *cpus);
-extern void set_hmp_defaults(void);
-extern void update_avg_burst(struct task_struct *p);
-extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
-
-extern unsigned int nr_eligible_big_tasks(int cpu);
-
-static inline void
-inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
-{
- if (sched_disable_window_stats)
- return;
-
- if (is_big_task(p))
- stats->nr_big_tasks++;
-}
-
-static inline void
-dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
-{
- if (sched_disable_window_stats)
- return;
-
- if (is_big_task(p))
- stats->nr_big_tasks--;
-
- BUG_ON(stats->nr_big_tasks < 0);
-}
-
-static inline bool is_short_burst_task(struct task_struct *p)
-{
- return p->ravg.avg_burst < sysctl_sched_short_burst &&
- p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
-}
-
-#else
static inline bool energy_aware(void)
{
return sched_feat(ENERGY_AWARE);
}
-
-static inline int pct_task_load(struct task_struct *p) { return 0; }
-
-static inline void notify_migration(int src_cpu, int dest_cpu,
- bool src_cpu_dead, struct task_struct *p) { }
-
-static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
-
-static inline void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
-
-static inline void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
- struct task_struct *p, s64 delta) { }
-
-static inline unsigned int cpu_temp(int cpu)
-{
- return 0;
-}
-
-static inline void pre_big_task_count_change(const struct cpumask *cpus) { }
-
-static inline void post_big_task_count_change(const struct cpumask *cpus) { }
-
-static inline void set_hmp_defaults(void) { }
-
-static inline void update_avg_burst(struct task_struct *p) { }
-
-static inline void set_task_last_switch_out(struct task_struct *p,
- u64 wallclock) { }
-
-#endif /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index dcc4a36..11a1888 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -18,41 +18,6 @@
}
#endif /* CONFIG_SMP */
-#ifdef CONFIG_SCHED_WALT
-
-static void
-inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
-{
- inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
-{
- dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
- pred_demand_delta);
-}
-
-#else /* CONFIG_SCHED_WALT */
-
-static inline void
-inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
-
-#endif /* CONFIG_SCHED_WALT */
-
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
@@ -78,14 +43,14 @@
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
add_nr_running(rq, 1);
- inc_hmp_sched_stats_stop(rq, p);
+ walt_inc_cumulative_runnable_avg(rq, p);
}
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
sub_nr_running(rq, 1);
- dec_hmp_sched_stats_stop(rq, p);
+ walt_dec_cumulative_runnable_avg(rq, p);
}
static void yield_task_stop(struct rq *rq)
@@ -173,6 +138,6 @@
.switched_to = switched_to_stop,
.update_curr = update_curr_stop,
#ifdef CONFIG_SCHED_WALT
- .fixup_hmp_sched_stats = fixup_hmp_sched_stats_stop,
+ .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
};
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 69bbce2..985668b 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -49,6 +49,9 @@
DEFINE_MUTEX(cluster_lock);
static atomic64_t walt_irq_work_lastq_ws;
+static struct irq_work walt_cpufreq_irq_work;
+static struct irq_work walt_migration_irq_work;
+
u64 sched_ktime_clock(void)
{
if (unlikely(sched_ktime_suspended))
@@ -204,27 +207,28 @@
}
early_param("sched_predl", set_sched_predl);
-void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+void inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
- inc_nr_big_task(&rq->hmp_stats, p);
- if (change_cra)
- inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+ inc_nr_big_task(&rq->walt_stats, p);
+ walt_inc_cumulative_runnable_avg(rq, p);
}
-void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+void dec_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
- dec_nr_big_task(&rq->hmp_stats, p);
- if (change_cra)
- dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+ dec_nr_big_task(&rq->walt_stats, p);
+ walt_dec_cumulative_runnable_avg(rq, p);
}
-void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
{
- stats->nr_big_tasks = 0; /* never happens on EAS */
- if (reset_cra) {
- stats->cumulative_runnable_avg = 0;
- stats->pred_demands_sum = 0;
- }
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+ fixup_cumulative_runnable_avg(&rq->walt_stats, task_load_delta,
+ pred_demand_delta);
+
+ walt_fixup_cum_window_demand(rq, task_load_delta);
}
/*
@@ -292,9 +296,7 @@
nr_windows = div64_u64(delta, sched_ravg_window);
rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
- rq->cum_window_demand = rq->hmp_stats.cumulative_runnable_avg;
- if (event == PUT_PREV_TASK)
- rq->cum_window_demand += rq->curr->ravg.demand;
+ rq->cum_window_demand = rq->walt_stats.cumulative_runnable_avg;
return old_window_start;
}
@@ -376,12 +378,12 @@
struct rq *rq = cpu_rq(cpu);
if (!is_max_capacity_cpu(cpu))
- return rq->hmp_stats.nr_big_tasks;
+ return rq->walt_stats.nr_big_tasks;
return rq->nr_running;
}
-void clear_hmp_request(int cpu)
+void clear_walt_request(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
@@ -471,24 +473,28 @@
u64 freq_policy_load(struct rq *rq)
{
unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
+ int freq_aggr_thresh = sched_freq_aggregate_threshold;
struct sched_cluster *cluster = rq->cluster;
u64 aggr_grp_load = cluster->aggr_grp_load;
- u64 load;
+ u64 load, tt_load = 0;
- if (rq->ed_task != NULL)
- return sched_ravg_window;
+ if (rq->ed_task != NULL) {
+ load = sched_ravg_window;
+ goto done;
+ }
- if (aggr_grp_load > sched_freq_aggregate_threshold)
+ if (aggr_grp_load > freq_aggr_thresh)
load = rq->prev_runnable_sum + aggr_grp_load;
else
load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
+ tt_load = top_task_load(rq);
switch (reporting_policy) {
case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
- load = max_t(u64, load, top_task_load(rq));
+ load = max_t(u64, load, tt_load);
break;
case FREQ_REPORT_TOP_TASK:
- load = top_task_load(rq);
+ load = tt_load;
break;
case FREQ_REPORT_CPU_LOAD:
break;
@@ -496,6 +502,9 @@
break;
}
+done:
+ trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, freq_aggr_thresh,
+ load, reporting_policy);
return load;
}
@@ -612,55 +621,6 @@
raw_spin_unlock(&cluster->load_lock);
}
-#ifdef CONFIG_SCHED_HMP
-static inline void
-init_new_task_load_hmp(struct task_struct *p, bool idle_task)
-{
- p->ravg.curr_burst = 0;
- /*
- * Initialize the avg_burst to twice the threshold, so that
- * a task would not be classified as short burst right away
- * after fork. It takes at least 6 sleep-wakeup cycles for
- * the avg_burst to go below the threshold.
- */
- p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
- p->ravg.avg_sleep_time = 0;
-}
-
-static inline void
-update_task_burst(struct task_struct *p, struct rq *rq, int event, u64 runtime)
-{
- /*
- * update_task_demand() has checks for idle task and
- * exit task. The runtime may include the wait time,
- * so update the burst only for the cases where the
- * task is running.
- */
- if (event == PUT_PREV_TASK || (event == TASK_UPDATE &&
- rq->curr == p))
- p->ravg.curr_burst += runtime;
-}
-
-static void reset_task_stats_hmp(struct task_struct *p)
-{
- p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
-}
-#else
-static inline void
-init_new_task_load_hmp(struct task_struct *p, bool idle_task)
-{
-}
-
-static inline void
-update_task_burst(struct task_struct *p, struct rq *rq, int event, int runtime)
-{
-}
-
-static void reset_task_stats_hmp(struct task_struct *p)
-{
-}
-#endif
-
static inline void inter_cluster_migration_fixup
(struct task_struct *p, int new_cpu, int task_cpu, bool new_task)
{
@@ -813,9 +773,15 @@
update_task_cpu_cycles(p, new_cpu);
- if (__task_in_cum_window_demand(src_rq, p)) {
- dec_cum_window_demand(src_rq, p);
- inc_cum_window_demand(dest_rq, p, p->ravg.demand);
+ /*
+ * When a task is migrating during the wakeup, adjust
+ * the task's contribution towards cumulative window
+ * demand.
+ */
+ if (p->state == TASK_WAKING && p->last_sleep_ts >=
+ src_rq->window_start) {
+ walt_fixup_cum_window_demand(src_rq, -(s64)p->ravg.demand);
+ walt_fixup_cum_window_demand(dest_rq, p->ravg.demand);
}
new_task = is_new_task(p);
@@ -870,12 +836,8 @@
migrate_top_tasks(p, src_rq, dest_rq);
- if (!same_freq_domain(new_cpu, task_cpu(p))) {
- cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG |
- SCHED_CPUFREQ_WALT);
- cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG |
- SCHED_CPUFREQ_WALT);
- }
+ if (!same_freq_domain(new_cpu, task_cpu(p)))
+ irq_work_queue(&walt_migration_irq_work);
if (p == src_rq->ed_task) {
src_rq->ed_task = NULL;
@@ -1100,7 +1062,7 @@
if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
!p->dl.dl_throttled))
- p->sched_class->fixup_hmp_sched_stats(rq, p,
+ p->sched_class->fixup_walt_sched_stats(rq, p,
p->ravg.demand,
new);
@@ -1695,21 +1657,28 @@
/*
* A throttled deadline sched class task gets dequeued without
- * changing p->on_rq. Since the dequeue decrements hmp stats
+ * changing p->on_rq. Since the dequeue decrements walt stats
* avoid decrementing it here again.
+ *
+ * When window is rolled over, the cumulative window demand
+ * is reset to the cumulative runnable average (contribution from
+ * the tasks on the runqueue). If the current task is dequeued
+ * already, it's demand is not included in the cumulative runnable
+ * average. So add the task demand separately to cumulative window
+ * demand.
*/
- if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
- !p->dl.dl_throttled))
- p->sched_class->fixup_hmp_sched_stats(rq, p, demand,
- pred_demand);
+ if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
+ if (task_on_rq_queued(p))
+ p->sched_class->fixup_walt_sched_stats(rq, p, demand,
+ pred_demand);
+ else if (rq->curr == p)
+ walt_fixup_cum_window_demand(rq, demand);
+ }
p->ravg.demand = demand;
p->ravg.coloc_demand = div64_u64(sum, sched_ravg_hist_size);
p->ravg.pred_demand = pred_demand;
- if (__task_in_cum_window_demand(rq, p))
- inc_cum_window_demand(rq, p, p->ravg.demand - prev_demand);
-
done:
trace_sched_update_history(rq, p, runtime, samples, event);
}
@@ -1901,14 +1870,14 @@
result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
rq->window_start);
if (result == old_window_start)
- irq_work_queue(&rq->irq_work);
+ irq_work_queue(&walt_cpufreq_irq_work);
}
/* Reflect task activity on its demand and cpu's busy time statistics */
void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
u64 wallclock, u64 irqtime)
{
- u64 runtime, old_window_start;
+ u64 old_window_start;
if (!rq->window_start || sched_disable_window_stats ||
p->ravg.mark_start == wallclock)
@@ -1924,9 +1893,7 @@
}
update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
- runtime = update_task_demand(p, rq, event, wallclock);
- if (runtime)
- update_task_burst(p, rq, event, runtime);
+ update_task_demand(p, rq, event, wallclock);
update_cpu_busy_time(p, rq, event, wallclock, irqtime);
update_task_pred_demand(rq, p, event);
done:
@@ -1967,8 +1934,6 @@
memset(&p->ravg, 0, sizeof(struct ravg));
p->cpu_cycles = 0;
- init_new_task_load_hmp(p, idle_task);
-
p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
@@ -2024,8 +1989,6 @@
p->ravg.curr_window_cpu = curr_window_ptr;
p->ravg.prev_window_cpu = prev_window_ptr;
- reset_task_stats_hmp(p);
-
/* Retain EXITING_TASK marker */
p->ravg.sum_history[0] = sum;
}
@@ -2896,6 +2859,11 @@
update_cpu_cluster_capacity(cpus);
}
+void note_task_waking(struct task_struct *p, u64 wallclock)
+{
+ p->last_wake_ts = wallclock;
+}
+
/*
* Task's cpu usage is accounted in:
* rq->curr/prev_runnable_sum, when its ->grp is NULL
@@ -3016,6 +2984,11 @@
struct rq *rq;
int cpu;
u64 wc;
+ int flag = SCHED_CPUFREQ_WALT;
+
+ /* Am I the window rollover work or the migration work? */
+ if (irq_work == &walt_migration_irq_work)
+ flag |= SCHED_CPUFREQ_INTERCLUSTER_MIG;
for_each_cpu(cpu, cpu_possible_mask)
raw_spin_lock(&cpu_rq(cpu)->lock);
@@ -3044,15 +3017,15 @@
for_each_sched_cluster(cluster)
for_each_cpu(cpu, &cluster->cpus)
- cpufreq_update_util(cpu_rq(cpu), SCHED_CPUFREQ_WALT);
+ cpufreq_update_util(cpu_rq(cpu), flag);
for_each_cpu(cpu, cpu_possible_mask)
raw_spin_unlock(&cpu_rq(cpu)->lock);
- core_ctl_check(this_rq()->window_start);
+ if (irq_work != &walt_migration_irq_work)
+ core_ctl_check(this_rq()->window_start);
}
-#ifndef CONFIG_SCHED_HMP
int walt_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
@@ -3080,4 +3053,54 @@
return ret;
}
-#endif
+
+void walt_sched_init(struct rq *rq)
+{
+ int j;
+
+ cpumask_set_cpu(cpu_of(rq), &rq->freq_domain_cpumask);
+ init_irq_work(&walt_migration_irq_work, walt_irq_work);
+ init_irq_work(&walt_cpufreq_irq_work, walt_irq_work);
+ rq->walt_stats.cumulative_runnable_avg = 0;
+ rq->window_start = 0;
+ rq->cum_window_start = 0;
+ rq->walt_stats.nr_big_tasks = 0;
+ rq->walt_flags = 0;
+ rq->cur_irqload = 0;
+ rq->avg_irqload = 0;
+ rq->irqload_ts = 0;
+ rq->static_cpu_pwr_cost = 0;
+ rq->cc.cycles = 1;
+ rq->cc.time = 1;
+ rq->cstate = 0;
+ rq->wakeup_latency = 0;
+ rq->wakeup_energy = 0;
+
+ /*
+ * All cpus part of same cluster by default. This avoids the
+ * need to check for rq->cluster being non-NULL in hot-paths
+ * like select_best_cpu()
+ */
+ rq->cluster = &init_cluster;
+ rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
+ rq->old_busy_time = 0;
+ rq->old_estimated_time = 0;
+ rq->old_busy_time_group = 0;
+ rq->walt_stats.pred_demands_sum = 0;
+ rq->ed_task = NULL;
+ rq->curr_table = 0;
+ rq->prev_top = 0;
+ rq->curr_top = 0;
+ for (j = 0; j < NUM_TRACKED_WINDOWS; j++) {
+ memset(&rq->load_subs[j], 0,
+ sizeof(struct load_subtractions));
+ rq->top_tasks[j] = kcalloc(NUM_LOAD_INDICES,
+ sizeof(u8), GFP_NOWAIT);
+ /* No other choice */
+ BUG_ON(!rq->top_tasks[j]);
+ clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]);
+ }
+ rq->cum_window_demand = 0;
+}
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 887933f..d669626 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -65,9 +65,8 @@
extern unsigned int nr_eligible_big_tasks(int cpu);
-#ifndef CONFIG_SCHED_HMP
static inline void
-inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+inc_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
{
if (sched_disable_window_stats)
return;
@@ -77,7 +76,7 @@
}
static inline void
-dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+dec_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
{
if (sched_disable_window_stats)
return;
@@ -87,60 +86,22 @@
BUG_ON(stats->nr_big_tasks < 0);
}
-#endif
static inline void
-adjust_nr_big_tasks(struct hmp_sched_stats *stats, int delta, bool inc)
+walt_adjust_nr_big_tasks(struct rq *rq, int delta, bool inc)
{
- struct rq *rq = container_of(stats, struct rq, hmp_stats);
-
if (sched_disable_window_stats)
return;
sched_update_nr_prod(cpu_of(rq), 0, true);
- stats->nr_big_tasks += inc ? delta : -delta;
+ rq->walt_stats.nr_big_tasks += inc ? delta : -delta;
- BUG_ON(stats->nr_big_tasks < 0);
+ BUG_ON(rq->walt_stats.nr_big_tasks < 0);
}
static inline void
-inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p)
-{
- u32 task_load;
-
- if (sched_disable_window_stats)
- return;
-
- task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
- stats->cumulative_runnable_avg += task_load;
- stats->pred_demands_sum += p->ravg.pred_demand;
-}
-
-static inline void
-dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p)
-{
- u32 task_load;
-
- if (sched_disable_window_stats)
- return;
-
- task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
- stats->cumulative_runnable_avg -= task_load;
-
- BUG_ON((s64)stats->cumulative_runnable_avg < 0);
-
- stats->pred_demands_sum -= p->ravg.pred_demand;
- BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-
-static inline void
-fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p, s64 task_load_delta,
- s64 pred_demand_delta)
+fixup_cumulative_runnable_avg(struct walt_sched_stats *stats,
+ s64 task_load_delta, s64 pred_demand_delta)
{
if (sched_disable_window_stats)
return;
@@ -152,11 +113,49 @@
BUG_ON((s64)stats->pred_demands_sum < 0);
}
-extern void inc_rq_hmp_stats(struct rq *rq,
- struct task_struct *p, int change_cra);
-extern void dec_rq_hmp_stats(struct rq *rq,
- struct task_struct *p, int change_cra);
-extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
+static inline void
+walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+{
+ if (sched_disable_window_stats)
+ return;
+
+ fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand,
+ p->ravg.pred_demand);
+
+ /*
+ * Add a task's contribution to the cumulative window demand when
+ *
+	 * (1) task is enqueued with on_rq = 1 i.e. migration,
+ * prio/cgroup/class change.
+ * (2) task is waking for the first time in this window.
+ */
+ if (p->on_rq || (p->last_sleep_ts < rq->window_start))
+ walt_fixup_cum_window_demand(rq, p->ravg.demand);
+}
+
+static inline void
+walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+{
+ if (sched_disable_window_stats)
+ return;
+
+ fixup_cumulative_runnable_avg(&rq->walt_stats, -(s64)p->ravg.demand,
+ -(s64)p->ravg.pred_demand);
+
+ /*
+ * on_rq will be 1 for sleeping tasks. So check if the task
+ * is migrating or dequeuing in RUNNING state to change the
+ * prio/cgroup/class.
+ */
+ if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
+ walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
+}
+
+extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
+ u32 new_task_load,
+ u32 new_pred_demand);
+extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p);
+extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void fixup_busy_time(struct task_struct *p, int new_cpu);
extern void init_new_task_load(struct task_struct *p, bool idle_task);
extern void mark_task_starting(struct task_struct *p);
@@ -291,12 +290,16 @@
void walt_irq_work(struct irq_work *irq_work);
+void walt_sched_init(struct rq *rq);
+
#else /* CONFIG_SCHED_WALT */
+static inline void walt_sched_init(struct rq *rq) { }
+
static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
int event, u64 wallclock, u64 irqtime) { }
-static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p)
+static inline void walt_inc_cumulative_runnable_avg(struct rq *rq,
+ struct task_struct *p)
{
}
@@ -305,21 +308,21 @@
return 0;
}
-static inline void adjust_nr_big_tasks(struct hmp_sched_stats *stats,
+static inline void walt_adjust_nr_big_tasks(struct rq *rq,
int delta, bool inc)
{
}
-static inline void inc_nr_big_task(struct hmp_sched_stats *stats,
+static inline void inc_nr_big_task(struct walt_sched_stats *stats,
struct task_struct *p)
{
}
-static inline void dec_nr_big_task(struct hmp_sched_stats *stats,
+static inline void dec_nr_big_task(struct walt_sched_stats *stats,
struct task_struct *p)
{
}
-static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+static inline void walt_dec_cumulative_runnable_avg(struct rq *rq,
struct task_struct *p)
{
}
@@ -347,6 +350,18 @@
static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
+static inline void
+inc_rq_walt_stats(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_rq_walt_stats(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+}
+
#endif /* CONFIG_SCHED_WALT */
#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b076cba..1d894fc 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -128,9 +128,6 @@
static unsigned long one_ul = 1;
static int one_hundred = 100;
static int one_thousand = 1000;
-#ifdef CONFIG_SCHED_HMP
-static int max_freq_reporting_policy = FREQ_REPORT_INVALID_POLICY - 1;
-#endif
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -305,11 +302,7 @@
.data = &sysctl_sched_group_upmigrate_pct,
.maxlen = sizeof(unsigned int),
.mode = 0644,
-#ifdef CONFIG_SCHED_HMP
- .proc_handler = sched_hmp_proc_update_handler,
-#else
.proc_handler = walt_proc_update_handler,
-#endif
.extra1 = &sysctl_sched_group_downmigrate_pct,
},
{
@@ -317,194 +310,11 @@
.data = &sysctl_sched_group_downmigrate_pct,
.maxlen = sizeof(unsigned int),
.mode = 0644,
-#ifdef CONFIG_SCHED_HMP
- .proc_handler = sched_hmp_proc_update_handler,
-#else
.proc_handler = walt_proc_update_handler,
-#endif
.extra1 = &zero,
.extra2 = &sysctl_sched_group_upmigrate_pct,
},
#endif
-#ifdef CONFIG_SCHED_HMP
- {
- .procname = "sched_freq_reporting_policy",
- .data = &sysctl_sched_freq_reporting_policy,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &max_freq_reporting_policy,
- },
- {
- .procname = "sched_freq_inc_notify",
- .data = &sysctl_sched_freq_inc_notify,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- },
- {
- .procname = "sched_freq_dec_notify",
- .data = &sysctl_sched_freq_dec_notify,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- },
- {
- .procname = "sched_ravg_hist_size",
- .data = &sysctl_sched_ravg_hist_size,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_window_update_handler,
- },
- {
- .procname = "sched_window_stats_policy",
- .data = &sysctl_sched_window_stats_policy,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_window_update_handler,
- },
- {
- .procname = "sched_spill_load",
- .data = &sysctl_sched_spill_load_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_spill_nr_run",
- .data = &sysctl_sched_spill_nr_run,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- },
- {
- .procname = "sched_upmigrate",
- .data = &sysctl_sched_upmigrate_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_downmigrate",
- .data = &sysctl_sched_downmigrate_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_init_task_load",
- .data = &sysctl_sched_init_task_load_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_select_prev_cpu_us",
- .data = &sysctl_sched_select_prev_cpu_us,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- },
- {
- .procname = "sched_restrict_cluster_spill",
- .data = &sysctl_sched_restrict_cluster_spill,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
- },
- {
- .procname = "sched_small_wakee_task_load",
- .data = &sysctl_sched_small_wakee_task_load_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_big_waker_task_load",
- .data = &sysctl_sched_big_waker_task_load_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_prefer_sync_wakee_to_waker",
- .data = &sysctl_sched_prefer_sync_wakee_to_waker,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
- },
- {
- .procname = "sched_enable_thread_grouping",
- .data = &sysctl_sched_enable_thread_grouping,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "sched_pred_alert_freq",
- .data = &sysctl_sched_pred_alert_freq,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- },
- {
- .procname = "sched_freq_aggregate",
- .data = &sysctl_sched_freq_aggregate,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_window_update_handler,
- },
- {
- .procname = "sched_freq_aggregate_threshold",
- .data = &sysctl_sched_freq_aggregate_threshold_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- /*
- * Special handling for sched_freq_aggregate_threshold_pct
- * which can be greater than 100. Use 1000 as an upper bound
- * value which works for all practical use cases.
- */
- .extra2 = &one_thousand,
- },
- {
- .procname = "sched_short_burst_ns",
- .data = &sysctl_sched_short_burst,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "sched_short_sleep_ns",
- .data = &sysctl_sched_short_sleep,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif /* CONFIG_SCHED_HMP */
#ifdef CONFIG_SCHED_DEBUG
{
.procname = "sched_min_granularity_ns",
@@ -1965,6 +1775,22 @@
.extra2 = (void *)&mmap_rnd_compat_bits_max,
},
#endif
+#ifdef CONFIG_SWAP
+ {
+ .procname = "swap_ratio",
+ .data = &sysctl_swap_ratio,
+ .maxlen = sizeof(sysctl_swap_ratio),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ },
+ {
+ .procname = "swap_ratio_enable",
+ .data = &sysctl_swap_ratio_enable,
+ .maxlen = sizeof(sysctl_swap_ratio_enable),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ },
+#endif
{ }
};
@@ -2463,9 +2289,12 @@
if (write) {
if (*negp)
return -EINVAL;
+ if (*lvalp > UINT_MAX)
+ return -EINVAL;
*valp = *lvalp;
} else {
unsigned int val = *valp;
+ *negp = false;
*lvalp = (unsigned long)val;
}
return 0;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9055429..44cc350 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
+#include <linux/timer.h>
#include <linux/context_tracking.h>
#include <linux/rq_stats.h>
@@ -933,6 +934,11 @@
now = tick_nohz_start_idle(ts);
+#ifdef CONFIG_SMP
+ if (check_pending_deferrable_timers(cpu))
+ raise_softirq_irqoff(TIMER_SOFTIRQ);
+#endif
+
if (can_stop_idle_tick(cpu, ts)) {
int was_stopped = ts->tick_stopped;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index adede73..3a2dd86 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -207,6 +207,7 @@
static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
struct timer_base timer_base_deferrable;
+static atomic_t deferrable_pending;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;
@@ -1460,6 +1461,31 @@
return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}
+
+#ifdef CONFIG_SMP
+/*
+ * check_pending_deferrable_timers - Check for unbound deferrable timer expiry
+ * @cpu - Current CPU
+ *
+ * The function checks whether any global deferrable pending timers
+ * have expired or not. This function does not check cpu bound
+ * deferrable pending timers expiry.
+ *
+ * The function returns true when a cpu unbound deferrable timer has expired.
+ */
+bool check_pending_deferrable_timers(int cpu)
+{
+ if (cpu == tick_do_timer_cpu ||
+ tick_do_timer_cpu == TICK_DO_TIMER_NONE) {
+ if (time_after_eq(jiffies, timer_base_deferrable.clk)
+ && !atomic_cmpxchg(&deferrable_pending, 0, 1)) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
/**
* get_next_timer_interrupt - return the time (clock mono) of the next timer
* @basej: base time jiffies
@@ -1619,10 +1645,13 @@
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
__run_timers(base);
- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
- __run_timers(&timer_base_deferrable);
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
- }
+
+ if ((atomic_cmpxchg(&deferrable_pending, 1, 0) &&
+ tick_do_timer_cpu == TICK_DO_TIMER_NONE) ||
+ tick_do_timer_cpu == smp_processor_id())
+ __run_timers(&timer_base_deferrable);
}
/*
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
index 31e6a8e..ec9bde7 100644
--- a/kernel/trace/ipc_logging.c
+++ b/kernel/trace/ipc_logging.c
@@ -549,6 +549,7 @@
struct decode_context *dctxt);
struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
unsigned long flags;
+ int ret;
if (size < MAX_MSG_DECODED_SIZE)
return -EINVAL;
@@ -558,6 +559,11 @@
dctxt.size = size;
read_lock_irqsave(&context_list_lock_lha1, flags);
spin_lock(&ilctxt->context_lock_lhb1);
+ if (ilctxt->destroyed) {
+ ret = -EIO;
+ goto done;
+ }
+
while (dctxt.size >= MAX_MSG_DECODED_SIZE &&
!is_nd_read_empty(ilctxt)) {
msg_read(ilctxt, &ectxt);
@@ -573,11 +579,17 @@
read_lock_irqsave(&context_list_lock_lha1, flags);
spin_lock(&ilctxt->context_lock_lhb1);
}
- if ((size - dctxt.size) == 0)
- reinit_completion(&ilctxt->read_avail);
+ ret = size - dctxt.size;
+ if (ret == 0) {
+ if (!ilctxt->destroyed)
+ reinit_completion(&ilctxt->read_avail);
+ else
+ ret = -EIO;
+ }
+done:
spin_unlock(&ilctxt->context_lock_lhb1);
read_unlock_irqrestore(&context_list_lock_lha1, flags);
- return size - dctxt.size;
+ return ret;
}
EXPORT_SYMBOL(ipc_log_extract);
@@ -835,6 +847,8 @@
ctxt->nd_read_page = ctxt->first_page;
ctxt->write_avail = max_num_pages * LOG_PAGE_DATA_SIZE;
ctxt->header_size = sizeof(struct ipc_log_page_header);
+ kref_init(&ctxt->refcount);
+ ctxt->destroyed = false;
create_ctx_debugfs(ctxt, mod_name);
/* set magic last to signal context init is complete */
@@ -857,6 +871,21 @@
}
EXPORT_SYMBOL(ipc_log_context_create);
+void ipc_log_context_free(struct kref *kref)
+{
+ struct ipc_log_context *ilctxt = container_of(kref,
+ struct ipc_log_context, refcount);
+ struct ipc_log_page *pg = NULL;
+
+ while (!list_empty(&ilctxt->page_list)) {
+ pg = get_first_page(ilctxt);
+ list_del(&pg->hdr.list);
+ kfree(pg);
+ }
+
+ kfree(ilctxt);
+}
+
/*
* Destroy debug log context
*
@@ -865,25 +894,24 @@
int ipc_log_context_destroy(void *ctxt)
{
struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
- struct ipc_log_page *pg = NULL;
unsigned long flags;
if (!ilctxt)
return 0;
- while (!list_empty(&ilctxt->page_list)) {
- pg = get_first_page(ctxt);
- list_del(&pg->hdr.list);
- kfree(pg);
- }
+ debugfs_remove_recursive(ilctxt->dent);
+
+ spin_lock(&ilctxt->context_lock_lhb1);
+ ilctxt->destroyed = true;
+ complete_all(&ilctxt->read_avail);
+ spin_unlock(&ilctxt->context_lock_lhb1);
write_lock_irqsave(&context_list_lock_lha1, flags);
list_del(&ilctxt->list);
write_unlock_irqrestore(&context_list_lock_lha1, flags);
- debugfs_remove_recursive(ilctxt->dent);
+ ipc_log_context_put(ilctxt);
- kfree(ilctxt);
return 0;
}
EXPORT_SYMBOL(ipc_log_context_destroy);
diff --git a/kernel/trace/ipc_logging_debug.c b/kernel/trace/ipc_logging_debug.c
index a545387..d733724 100644
--- a/kernel/trace/ipc_logging_debug.c
+++ b/kernel/trace/ipc_logging_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -74,23 +74,42 @@
static ssize_t debug_read_helper(struct file *file, char __user *buff,
size_t count, loff_t *ppos, int cont)
{
- struct ipc_log_context *ilctxt = file->private_data;
+ struct ipc_log_context *ilctxt;
+ struct dentry *d = file->f_path.dentry;
char *buffer;
int bsize;
+ int srcu_idx;
+ int r;
+
+ r = debugfs_use_file_start(d, &srcu_idx);
+ if (!r) {
+ ilctxt = file->private_data;
+ r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
+ }
+ debugfs_use_file_finish(srcu_idx);
+ if (r)
+ return r;
buffer = kmalloc(count, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
+ if (!buffer) {
+ bsize = -ENOMEM;
+ goto done;
+ }
bsize = debug_log(ilctxt, buffer, count, cont);
+
if (bsize > 0) {
if (copy_to_user(buff, buffer, bsize)) {
+ bsize = -EFAULT;
kfree(buffer);
- return -EFAULT;
+ goto done;
}
*ppos += bsize;
}
kfree(buffer);
+
+done:
+ ipc_log_context_put(ilctxt);
return bsize;
}
@@ -127,7 +146,7 @@
struct ipc_log_context *ilctxt,
const struct file_operations *fops)
{
- debugfs_create_file(name, mode, dent, ilctxt, fops);
+ debugfs_create_file_unsafe(name, mode, dent, ilctxt, fops);
}
static void dfunc_string(struct encode_context *ectxt,
diff --git a/kernel/trace/ipc_logging_private.h b/kernel/trace/ipc_logging_private.h
index 594027a..47c41e9 100644
--- a/kernel/trace/ipc_logging_private.h
+++ b/kernel/trace/ipc_logging_private.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -119,6 +119,8 @@
struct list_head dfunc_info_list;
spinlock_t context_lock_lhb1;
struct completion read_avail;
+ struct kref refcount;
+ bool destroyed;
};
struct dfunc_info {
@@ -147,6 +149,13 @@
((x) < TSV_TYPE_MSG_END))
#define MAX_MSG_DECODED_SIZE (MAX_MSG_SIZE*4)
+void ipc_log_context_free(struct kref *kref);
+
+static inline void ipc_log_context_put(struct ipc_log_context *ilctxt)
+{
+ kref_put(&ilctxt->refcount, ipc_log_context_free);
+}
+
#if (defined(CONFIG_DEBUG_FS))
void check_and_create_debugfs(void);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8d2b4d8..5ff45ca 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -667,30 +667,25 @@
pr_info("Probe point is not specified.\n");
return -EINVAL;
}
- if (isdigit(argv[1][0])) {
- if (is_return) {
- pr_info("Return probe point must be a symbol.\n");
- return -EINVAL;
- }
- /* an address specified */
- ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
- if (ret) {
- pr_info("Failed to parse address.\n");
- return ret;
- }
- } else {
+
+ /* try to parse an address. if that fails, try to read the
+ * input as a symbol. */
+ if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
/* a symbol specified */
symbol = argv[1];
/* TODO: support .init module functions */
ret = traceprobe_split_symbol_offset(symbol, &offset);
if (ret) {
- pr_info("Failed to parse symbol.\n");
+ pr_info("Failed to parse either an address or a symbol.\n");
return ret;
}
if (offset && is_return) {
pr_info("Return probe must be used without offset.\n");
return -EINVAL;
}
+ } else if (is_return) {
+ pr_info("Return probe point must be a symbol.\n");
+ return -EINVAL;
}
argc -= 2; argv += 2;
diff --git a/mm/Kconfig b/mm/Kconfig
index eb10c90..3363a70 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -730,3 +730,19 @@
always using ZONE_DMA memory.
If unsure, say "n".
+
+config PROCESS_RECLAIM
+ bool "Enable process reclaim"
+ depends on PROC_FS
+ default n
+ help
+	  It allows reclaiming pages of a process via /proc/pid/reclaim.
+
+ (echo file > /proc/PID/reclaim) reclaims file-backed pages only.
+ (echo anon > /proc/PID/reclaim) reclaims anonymous pages only.
+ (echo all > /proc/PID/reclaim) reclaims all pages.
+
+ (echo addr size-byte > /proc/PID/reclaim) reclaims pages in
+ (addr, addr + size-bytes) of the process.
+
+	  Any other value is ignored.
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index b9019d4..78e68f9 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -65,6 +65,16 @@
If unsure, say N
+config PAGE_POISONING_ENABLE_DEFAULT
+ bool "Enable page poisoning by default?"
+ default n
+ depends on PAGE_POISONING
+ ---help---
+ Enable page poisoning of free pages by default? This value
+ can be overridden by page_poison=off|on. This can be used
+ to avoid passing the kernel parameter and let page poisoning
+ feature enabled by default.
+
config PAGE_POISONING_NO_SANITY
depends on PAGE_POISONING
bool "Only poison, don't sanity check"
diff --git a/mm/Makefile b/mm/Makefile
index a7e9b6a..7a9642f 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -37,7 +37,7 @@
mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
- debug.o $(mmu-y) showmem.o
+ debug.o $(mmu-y) showmem.o vmpressure.o
obj-y += init-mm.o
@@ -53,7 +53,7 @@
endif
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
-obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_ratio.o
obj-$(CONFIG_FRONTSWAP) += frontswap.o
obj-$(CONFIG_ZSWAP) += zswap.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
@@ -76,7 +76,7 @@
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
-obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
+obj-$(CONFIG_MEMCG) += memcontrol.o
obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o
obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o
obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
@@ -100,3 +100,4 @@
obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
+obj-$(CONFIG_PROCESS_RECLAIM) += process_reclaim.o
diff --git a/mm/cma.c b/mm/cma.c
index 0306bab..3322b30 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -35,6 +35,7 @@
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include <trace/events/cma.h>
#include "cma.h"
@@ -374,6 +375,7 @@
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
struct page *page = NULL;
int ret;
+ int retry_after_sleep = 0;
if (!cma || !cma->count)
return NULL;
@@ -400,8 +402,24 @@
bitmap_maxno, start, bitmap_count, mask,
offset);
if (bitmap_no >= bitmap_maxno) {
- mutex_unlock(&cma->lock);
- break;
+ if (retry_after_sleep < 2) {
+ start = 0;
+ /*
+ * Page may be momentarily pinned by some other
+ * process which has been scheduled out, eg.
+ * in exit path, during unmap call, or process
+ * fork and so cannot be freed there. Sleep
+ * for 100ms and retry twice to see if it has
+ * been freed later.
+ */
+ mutex_unlock(&cma->lock);
+ msleep(100);
+ retry_after_sleep++;
+ continue;
+ } else {
+ mutex_unlock(&cma->lock);
+ break;
+ }
}
bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
/*
diff --git a/mm/compaction.c b/mm/compaction.c
index 70e6bec..f002a7f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -635,20 +635,52 @@
}
/* Similar to reclaim, but different enough that they don't share logic */
-static bool too_many_isolated(struct zone *zone)
+static bool __too_many_isolated(struct zone *zone, int safe)
{
unsigned long active, inactive, isolated;
- inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
+ if (safe) {
+ inactive = node_page_state_snapshot(zone->zone_pgdat,
+ NR_INACTIVE_FILE) +
+ node_page_state_snapshot(zone->zone_pgdat,
+ NR_INACTIVE_ANON);
+ active = node_page_state_snapshot(zone->zone_pgdat,
+ NR_ACTIVE_FILE) +
+ node_page_state_snapshot(zone->zone_pgdat,
+ NR_ACTIVE_ANON);
+ isolated = node_page_state_snapshot(zone->zone_pgdat,
+ NR_ISOLATED_FILE) +
+ node_page_state_snapshot(zone->zone_pgdat,
+ NR_ISOLATED_ANON);
+ } else {
+ inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
- active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
+ active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
- isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
+ isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
+ }
return isolated > (inactive + active) / 2;
}
+/* Similar to reclaim, but different enough that they don't share logic */
+static bool too_many_isolated(struct compact_control *cc)
+{
+ /*
+ * __too_many_isolated(safe=0) is fast but inaccurate, because it
+ * doesn't account for the vm_stat_diff[] counters. So if it looks
+ * like too_many_isolated() is about to return true, fall back to the
+	 * slower, more accurate node_page_state_snapshot().
+ */
+ if (unlikely(__too_many_isolated(cc->zone, 0))) {
+ if (cc->mode != MIGRATE_ASYNC)
+ return __too_many_isolated(cc->zone, 1);
+ }
+
+ return false;
+}
+
/**
* isolate_migratepages_block() - isolate all migrate-able pages within
* a single pageblock
@@ -686,7 +718,7 @@
* list by either parallel reclaimers or compaction. If there are,
* delay for some time until fewer pages are isolated
*/
- while (unlikely(too_many_isolated(zone))) {
+ while (unlikely(too_many_isolated(cc))) {
/* async migration should just abort */
if (cc->mode == MIGRATE_ASYNC)
return 0;
diff --git a/mm/filemap.c b/mm/filemap.c
index edfb90e..b4c09ec 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -251,10 +251,12 @@
* invalidate any existing cleancache entries. We can't leave
* stale data around in the cleancache once our page is gone
*/
- if (PageUptodate(page) && PageMappedToDisk(page))
+ if (PageUptodate(page) && PageMappedToDisk(page)) {
+ count_vm_event(PGPGOUTCLEAN);
cleancache_put_page(page);
- else
+ } else {
cleancache_invalidate_page(mapping, page);
+ }
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(page_mapped(page), page);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 4df20e1..5cbd2de 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -366,6 +366,7 @@
info.access_size = size;
info.is_write = is_write;
info.ip = ip;
+ info.first_bad_addr = NULL;
kasan_report_error(&info);
}
diff --git a/mm/ksm.c b/mm/ksm.c
index 5f1855b..927aa34 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1964,6 +1964,7 @@
stable_node = page_stable_node(page);
if (!stable_node)
return ret;
+
again:
hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
struct anon_vma *anon_vma = rmap_item->anon_vma;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fdc790a..3b38b73 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5996,7 +5996,7 @@
VM_BUG_ON_PAGE(!PageLocked(page), page);
- if (vm_swap_full())
+ if (vm_swap_full(page_swap_info(page)))
return true;
if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
return false;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ce7d416..b335423 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -972,7 +972,7 @@
if (kill)
collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
- ret = try_to_unmap(hpage, ttu);
+ ret = try_to_unmap(hpage, ttu, NULL);
if (ret != SWAP_SUCCESS)
pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(hpage));
diff --git a/mm/migrate.c b/mm/migrate.c
index 435f674..f0b786d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1020,7 +1020,7 @@
VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
page);
try_to_unmap(page,
- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
page_was_mapped = 1;
}
@@ -1238,7 +1238,7 @@
if (page_mapped(hpage)) {
try_to_unmap(hpage,
- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
page_was_mapped = 1;
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ec9f11d..2efa9c9 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -368,7 +368,7 @@
* State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
* swapents, oom_score_adj value, and name.
*/
-static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
+void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 27ddaae..bdd2bea 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -233,10 +233,10 @@
"Unmovable",
"Movable",
"Reclaimable",
- "HighAtomic",
#ifdef CONFIG_CMA
"CMA",
#endif
+ "HighAtomic",
#ifdef CONFIG_MEMORY_ISOLATION
"Isolate",
#endif
@@ -1706,10 +1706,10 @@
return 1;
}
-static inline bool free_pages_prezeroed(bool poisoned)
+static inline bool free_pages_prezeroed(void)
{
return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
- page_poisoning_enabled() && poisoned;
+ page_poisoning_enabled();
}
#ifdef CONFIG_DEBUG_VM
@@ -1763,17 +1763,10 @@
unsigned int alloc_flags)
{
int i;
- bool poisoned = true;
-
- for (i = 0; i < (1 << order); i++) {
- struct page *p = page + i;
- if (poisoned)
- poisoned &= page_is_poisoned(p);
- }
post_alloc_hook(page, order, gfp_flags);
- if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
+ if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
for (i = 0; i < (1 << order); i++)
clear_highpage(page + i);
@@ -1839,6 +1832,11 @@
#endif
};
+int *get_migratetype_fallbacks(int mtype)
+{
+ return fallbacks[mtype];
+}
+
#ifdef CONFIG_CMA
static struct page *__rmqueue_cma_fallback(struct zone *zone,
unsigned int order)
@@ -2209,17 +2207,30 @@
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page)) {
- if (migratetype == MIGRATE_MOVABLE)
- page = __rmqueue_cma_fallback(zone, order);
-
- if (!page)
- page = __rmqueue_fallback(zone, order, migratetype);
+ page = __rmqueue_fallback(zone, order, migratetype);
}
trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}
+#ifdef CONFIG_CMA
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
+{
+ struct page *page = 0;
+ if (IS_ENABLED(CONFIG_CMA))
+ if (!zone->cma_alloc)
+ page = __rmqueue_cma_fallback(zone, order);
+ trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
+ return page;
+}
+#else
+static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
+{
+ return NULL;
+}
+#endif
+
/*
* Obtain a specified number of elements from the buddy allocator, all under
* a single hold of the lock, for efficiency. Add them to the supplied list.
@@ -2233,7 +2244,17 @@
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
- struct page *page = __rmqueue(zone, order, migratetype);
+ struct page *page;
+
+ /*
+ * If migrate type CMA is being requested only try to
+ * satisfy the request with CMA pages to try and increase
+		 * CMA utilization.
+ */
+ if (is_migrate_cma(migratetype))
+ page = __rmqueue_cma(zone, order);
+ else
+ page = __rmqueue(zone, order, migratetype);
if (unlikely(page == NULL))
break;
@@ -2271,6 +2292,28 @@
return alloced;
}
+/*
+ * Return the pcp list that corresponds to the migrate type if that list isn't
+ * empty.
+ * If the list is empty return NULL.
+ */
+static struct list_head *get_populated_pcp_list(struct zone *zone,
+ unsigned int order, struct per_cpu_pages *pcp,
+ int migratetype, int cold)
+{
+ struct list_head *list = &pcp->lists[migratetype];
+
+ if (list_empty(list)) {
+ pcp->count += rmqueue_bulk(zone, order,
+ pcp->batch, list,
+ migratetype, cold);
+
+ if (list_empty(list))
+ list = NULL;
+ }
+ return list;
+}
+
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -2631,22 +2674,33 @@
int migratetype)
{
unsigned long flags;
- struct page *page;
+ struct page *page = NULL;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
- struct list_head *list;
+ struct list_head *list = NULL;
local_irq_save(flags);
do {
pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
- pcp->count += rmqueue_bulk(zone, 0,
- pcp->batch, list,
- migratetype, cold);
- if (unlikely(list_empty(list)))
+
+ /* First try to get CMA pages */
+ if (migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA) {
+ list = get_populated_pcp_list(zone, 0, pcp,
+ get_cma_migrate_type(), cold);
+ }
+
+ if (list == NULL) {
+ /*
+ * Either CMA is not suitable or there are no
+ * free CMA pages.
+ */
+ list = get_populated_pcp_list(zone, 0, pcp,
+ migratetype, cold);
+ if (unlikely(list == NULL) ||
+ unlikely(list_empty(list)))
goto failed;
}
@@ -2674,9 +2728,14 @@
if (page)
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
+ if (!page && migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA)
+ page = __rmqueue_cma(zone, order);
+
if (!page)
page = __rmqueue(zone, order, migratetype);
} while (page && check_new_pages(page, order));
+
spin_unlock(&zone->lock);
if (!page)
goto failed;
@@ -2834,6 +2893,14 @@
return true;
for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
+#ifdef CONFIG_CMA
+ /*
+ * Note that this check is needed only
+ * when MIGRATE_CMA < MIGRATE_PCPTYPES.
+ */
+ if (mt == MIGRATE_CMA)
+ continue;
+#endif
if (!list_empty(&area->free_list[mt]))
return true;
}
@@ -7311,6 +7378,7 @@
if (ret)
return ret;
+ cc.zone->cma_alloc = 1;
/*
* In case of -EBUSY, we'd like to know which page causes problem.
* So, just fall through. We will check it in test_pages_isolated().
@@ -7386,6 +7454,7 @@
done:
undo_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
+ cc.zone->cma_alloc = 0;
return ret;
}
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 121dcff..fc3e7ff 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -59,9 +59,6 @@
static struct page_ext_operations *page_ext_ops[] = {
&debug_guardpage_ops,
-#ifdef CONFIG_PAGE_POISONING
- &page_poisoning_ops,
-#endif
#ifdef CONFIG_PAGE_OWNER
&page_owner_ops,
#endif
diff --git a/mm/page_owner.c b/mm/page_owner.c
index d2db436..65e24fb 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -285,7 +285,11 @@
continue;
if (PageBuddy(page)) {
- pfn += (1UL << page_order(page)) - 1;
+ unsigned long freepage_order;
+
+ freepage_order = page_order_unsafe(page);
+ if (freepage_order < MAX_ORDER)
+ pfn += (1UL << freepage_order) - 1;
continue;
}
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 0abd75e..a2f6a4e 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -6,8 +6,8 @@
#include <linux/poison.h>
#include <linux/ratelimit.h>
-static bool __page_poisoning_enabled __read_mostly;
-static bool want_page_poisoning __read_mostly;
+static bool want_page_poisoning __read_mostly
+ = IS_ENABLED(CONFIG_PAGE_POISONING_ENABLE_DEFAULT);
static int early_page_poison_param(char *buf)
{
@@ -19,74 +19,21 @@
bool page_poisoning_enabled(void)
{
- return __page_poisoning_enabled;
-}
-
-static bool need_page_poisoning(void)
-{
- return want_page_poisoning;
-}
-
-static void init_page_poisoning(void)
-{
/*
- * page poisoning is debug page alloc for some arches. If either
- * of those options are enabled, enable poisoning
+ * Assumes that debug_pagealloc_enabled is set before
+ * free_all_bootmem.
+ * Page poisoning is debug page alloc for some arches. If
+ * either of those options are enabled, enable poisoning.
*/
- if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
- if (!want_page_poisoning && !debug_pagealloc_enabled())
- return;
- } else {
- if (!want_page_poisoning)
- return;
- }
-
- __page_poisoning_enabled = true;
-}
-
-struct page_ext_operations page_poisoning_ops = {
- .need = need_page_poisoning,
- .init = init_page_poisoning,
-};
-
-static inline void set_page_poison(struct page *page)
-{
- struct page_ext *page_ext;
-
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return;
-
- __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-static inline void clear_page_poison(struct page *page)
-{
- struct page_ext *page_ext;
-
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return;
-
- __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-bool page_is_poisoned(struct page *page)
-{
- struct page_ext *page_ext;
-
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return false;
-
- return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+ return (want_page_poisoning ||
+ (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+ debug_pagealloc_enabled()));
}
static void poison_page(struct page *page)
{
void *addr = kmap_atomic(page);
- set_page_poison(page);
memset(addr, PAGE_POISON, PAGE_SIZE);
kunmap_atomic(addr);
}
@@ -144,12 +91,13 @@
{
void *addr;
- if (!page_is_poisoned(page))
- return;
-
addr = kmap_atomic(page);
+ /*
+ * Page poisoning when enabled poisons each and every page
+ * that is freed to buddy. Thus no extra check is done to
+	 * see if a page was poisoned.
+ */
check_poison_mem(page, addr, PAGE_SIZE);
- clear_page_poison(page);
kunmap_atomic(addr);
}
diff --git a/mm/process_reclaim.c b/mm/process_reclaim.c
new file mode 100644
index 0000000..36516eb
--- /dev/null
+++ b/mm/process_reclaim.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/sort.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+#include <linux/notifier.h>
+#include <linux/vmpressure.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/process_reclaim.h>
+
+#define MAX_SWAP_TASKS SWAP_CLUSTER_MAX
+
+static void swap_fn(struct work_struct *work);
+DECLARE_WORK(swap_work, swap_fn);
+
+/* User knob to enable/disable process reclaim feature */
+static int enable_process_reclaim;
+module_param_named(enable_process_reclaim, enable_process_reclaim, int, 0644);
+
+/* The max number of pages tried to be reclaimed in a single run */
+int per_swap_size = SWAP_CLUSTER_MAX * 32;
+module_param_named(per_swap_size, per_swap_size, int, 0644);
+
+int reclaim_avg_efficiency;
+module_param_named(reclaim_avg_efficiency, reclaim_avg_efficiency, int, 0444);
+
+/* The vmpressure region where process reclaim operates */
+static unsigned long pressure_min = 50;
+static unsigned long pressure_max = 90;
+module_param_named(pressure_min, pressure_min, ulong, 0644);
+module_param_named(pressure_max, pressure_max, ulong, 0644);
+
+static short min_score_adj = 360;
+module_param_named(min_score_adj, min_score_adj, short, 0644);
+
+/*
+ * Scheduling process reclaim workqueue unnecessarily
+ * when the reclaim efficiency is low does not make
+ * sense. We try to detect a drop in efficiency and
+ * disable reclaim for a time period. This period and the
+ * period for which we monitor a drop in efficiency are
+ * defined by swap_eff_win. swap_opt_eff is the optimal
+ * efficiency used as the threshold for this.
+ */
+static int swap_eff_win = 2;
+module_param_named(swap_eff_win, swap_eff_win, int, 0644);
+
+static int swap_opt_eff = 50;
+module_param_named(swap_opt_eff, swap_opt_eff, int, 0644);
+
+static atomic_t skip_reclaim = ATOMIC_INIT(0);
+/* Not atomic since only a single instance of swap_fn run at a time */
+static int monitor_eff;
+
+struct selected_task {
+ struct task_struct *p;
+ int tasksize;
+ short oom_score_adj;
+};
+
+int selected_cmp(const void *a, const void *b)
+{
+ const struct selected_task *x = a;
+ const struct selected_task *y = b;
+ int ret;
+
+ ret = x->tasksize < y->tasksize ? -1 : 1;
+
+ return ret;
+}
+
+static int test_task_flag(struct task_struct *p, int flag)
+{
+ struct task_struct *t = p;
+
+ rcu_read_lock();
+ for_each_thread(p, t) {
+ task_lock(t);
+ if (test_tsk_thread_flag(t, flag)) {
+ task_unlock(t);
+ rcu_read_unlock();
+ return 1;
+ }
+ task_unlock(t);
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static void swap_fn(struct work_struct *work)
+{
+ struct task_struct *tsk;
+ struct reclaim_param rp;
+
+ /* Pick the best MAX_SWAP_TASKS tasks in terms of anon size */
+ struct selected_task selected[MAX_SWAP_TASKS] = {{0, 0, 0},};
+ int si = 0;
+ int i;
+ int tasksize;
+ int total_sz = 0;
+ int total_scan = 0;
+ int total_reclaimed = 0;
+ int nr_to_reclaim;
+ int efficiency;
+
+ rcu_read_lock();
+ for_each_process(tsk) {
+ struct task_struct *p;
+ short oom_score_adj;
+
+ if (tsk->flags & PF_KTHREAD)
+ continue;
+
+ if (test_task_flag(tsk, TIF_MEMDIE))
+ continue;
+
+ p = find_lock_task_mm(tsk);
+ if (!p)
+ continue;
+
+ oom_score_adj = p->signal->oom_score_adj;
+ if (oom_score_adj < min_score_adj) {
+ task_unlock(p);
+ continue;
+ }
+
+ tasksize = get_mm_counter(p->mm, MM_ANONPAGES);
+ task_unlock(p);
+
+ if (tasksize <= 0)
+ continue;
+
+ if (si == MAX_SWAP_TASKS) {
+ sort(&selected[0], MAX_SWAP_TASKS,
+ sizeof(struct selected_task),
+ &selected_cmp, NULL);
+ if (tasksize < selected[0].tasksize)
+ continue;
+ selected[0].p = p;
+ selected[0].oom_score_adj = oom_score_adj;
+ selected[0].tasksize = tasksize;
+ } else {
+ selected[si].p = p;
+ selected[si].oom_score_adj = oom_score_adj;
+ selected[si].tasksize = tasksize;
+ si++;
+ }
+ }
+
+ for (i = 0; i < si; i++)
+ total_sz += selected[i].tasksize;
+
+	/* Skip reclaim if total size is too small */
+ if (total_sz < SWAP_CLUSTER_MAX) {
+ rcu_read_unlock();
+ return;
+ }
+
+ for (i = 0; i < si; i++)
+ get_task_struct(selected[i].p);
+
+ rcu_read_unlock();
+
+ while (si--) {
+ nr_to_reclaim =
+ (selected[si].tasksize * per_swap_size) / total_sz;
+		/* scan at least a page */
+ if (!nr_to_reclaim)
+ nr_to_reclaim = 1;
+
+ rp = reclaim_task_anon(selected[si].p, nr_to_reclaim);
+
+ trace_process_reclaim(selected[si].tasksize,
+ selected[si].oom_score_adj, rp.nr_scanned,
+ rp.nr_reclaimed, per_swap_size, total_sz,
+ nr_to_reclaim);
+ total_scan += rp.nr_scanned;
+ total_reclaimed += rp.nr_reclaimed;
+ put_task_struct(selected[si].p);
+ }
+
+ if (total_scan) {
+ efficiency = (total_reclaimed * 100) / total_scan;
+
+ if (efficiency < swap_opt_eff) {
+ if (++monitor_eff == swap_eff_win) {
+ atomic_set(&skip_reclaim, swap_eff_win);
+ monitor_eff = 0;
+ }
+ } else {
+ monitor_eff = 0;
+ }
+
+ reclaim_avg_efficiency =
+ (efficiency + reclaim_avg_efficiency) / 2;
+ trace_process_reclaim_eff(efficiency, reclaim_avg_efficiency);
+ }
+}
+
+static int vmpressure_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ unsigned long pressure = action;
+
+ if (!enable_process_reclaim)
+ return 0;
+
+ if (!current_is_kswapd())
+ return 0;
+
+ if (atomic_dec_if_positive(&skip_reclaim) >= 0)
+ return 0;
+
+ if ((pressure >= pressure_min) && (pressure < pressure_max))
+ if (!work_pending(&swap_work))
+ queue_work(system_unbound_wq, &swap_work);
+ return 0;
+}
+
+static struct notifier_block vmpr_nb = {
+ .notifier_call = vmpressure_notifier,
+};
+
+static int __init process_reclaim_init(void)
+{
+ vmpressure_notifier_register(&vmpr_nb);
+ return 0;
+}
+
+static void __exit process_reclaim_exit(void)
+{
+ vmpressure_notifier_unregister(&vmpr_nb);
+}
+
+module_init(process_reclaim_init);
+module_exit(process_reclaim_exit);
diff --git a/mm/rmap.c b/mm/rmap.c
index cd37c1c..dfb19f0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1616,9 +1616,12 @@
* try_to_unmap - try to remove all page table mappings to a page
* @page: the page to get unmapped
* @flags: action and flags
+ * @vma : target vma for reclaim
*
* Tries to remove all the page table entries which are mapping this
* page, used in the pageout path. Caller must hold the page lock.
+ * If @vma is not NULL, this function try to remove @page from only @vma
+ * without peeking all mapped vma for @page.
* Return values are:
*
* SWAP_SUCCESS - we succeeded in removing all mappings
@@ -1626,7 +1629,8 @@
* SWAP_FAIL - the page is unswappable
* SWAP_MLOCK - page is mlocked.
*/
-int try_to_unmap(struct page *page, enum ttu_flags flags)
+int try_to_unmap(struct page *page, enum ttu_flags flags,
+ struct vm_area_struct *vma)
{
int ret;
struct rmap_private rp = {
@@ -1639,6 +1643,7 @@
.arg = &rp,
.done = page_mapcount_is_zero,
.anon_lock = page_lock_anon_vma_read,
+ .target_vma = vma,
};
/*
@@ -1698,6 +1703,7 @@
.arg = &rp,
.done = page_not_mapped,
.anon_lock = page_lock_anon_vma_read,
+ .target_vma = NULL,
};
@@ -1760,6 +1766,11 @@
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
+ if (rwc->target_vma) {
+ unsigned long address = vma_address(page, rwc->target_vma);
+ return rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
+ }
+
if (locked) {
anon_vma = page_anon_vma(page);
/* anon_vma disappear under us? */
@@ -1767,6 +1778,7 @@
} else {
anon_vma = rmap_walk_anon_lock(page, rwc);
}
+
if (!anon_vma)
return ret;
@@ -1811,6 +1823,7 @@
struct address_space *mapping = page_mapping(page);
pgoff_t pgoff;
struct vm_area_struct *vma;
+ unsigned long address;
int ret = SWAP_AGAIN;
/*
@@ -1827,6 +1840,13 @@
pgoff = page_to_pgoff(page);
if (!locked)
i_mmap_lock_read(mapping);
+
+ if (rwc->target_vma) {
+ address = vma_address(page, rwc->target_vma);
+ ret = rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
+ goto done;
+ }
+
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
diff --git a/mm/swap_ratio.c b/mm/swap_ratio.c
new file mode 100644
index 0000000..4ca5783
--- /dev/null
+++ b/mm/swap_ratio.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm_types.h>
+#include <linux/swapfile.h>
+#include <linux/swap.h>
+
+#define SWAP_RATIO_GROUP_START (SWAP_FLAG_PRIO_MASK - 9) /* 32758 */
+#define SWAP_RATIO_GROUP_END (SWAP_FLAG_PRIO_MASK) /* 32767 */
+#define SWAP_FAST_WRITES (SWAPFILE_CLUSTER * (SWAP_CLUSTER_MAX / 8))
+#define SWAP_SLOW_WRITES SWAPFILE_CLUSTER
+
+/*
+ * The fast/slow swap write ratio.
+ * 100 indicates that all writes should
+ * go to fast swap device.
+ */
+int sysctl_swap_ratio = 100;
+
+/* Enable the swap ratio feature */
+int sysctl_swap_ratio_enable;
+
+static bool is_same_group(struct swap_info_struct *a,
+ struct swap_info_struct *b)
+{
+ if (!sysctl_swap_ratio_enable)
+ return false;
+
+ if (!is_swap_ratio_group(a->prio))
+ return false;
+
+ if (a->prio == b->prio)
+ return true;
+
+ return false;
+}
+
+/* Caller must hold swap_avail_lock */
+static int calculate_write_pending(struct swap_info_struct *si,
+ struct swap_info_struct *n)
+{
+ int ratio = sysctl_swap_ratio;
+
+ if ((ratio < 0) || (ratio > 100))
+ return -EINVAL;
+
+ if (WARN_ON(!(si->flags & SWP_FAST)))
+ return -ENODEV;
+
+ if ((n->flags & SWP_FAST) || !is_same_group(si, n))
+ return -ENODEV;
+
+ si->max_writes = ratio ? SWAP_FAST_WRITES : 0;
+ n->max_writes = ratio ? (SWAP_FAST_WRITES * 100) /
+ ratio - SWAP_FAST_WRITES : SWAP_SLOW_WRITES;
+
+ si->write_pending = si->max_writes;
+ n->write_pending = n->max_writes;
+
+ return 0;
+}
+
+static int swap_ratio_slow(struct swap_info_struct **si)
+{
+ struct swap_info_struct *n = NULL;
+ int ret = 0;
+
+ spin_lock(&(*si)->lock);
+ spin_lock(&swap_avail_lock);
+ if (&(*si)->avail_list == plist_last(&swap_avail_head)) {
+ /* just to make skip work */
+ n = *si;
+ ret = -ENODEV;
+ goto skip;
+ }
+ n = plist_next_entry(&(*si)->avail_list,
+ struct swap_info_struct,
+ avail_list);
+ if (n == *si) {
+ /* No other swap device */
+ ret = -ENODEV;
+ goto skip;
+ }
+
+ spin_unlock(&swap_avail_lock);
+ spin_lock(&n->lock);
+ spin_lock(&swap_avail_lock);
+
+ if ((*si)->flags & SWP_FAST) {
+ if ((*si)->write_pending) {
+ (*si)->write_pending--;
+ goto exit;
+ } else {
+ if ((n->flags & SWP_FAST) || !is_same_group(*si, n)) {
+ /* Should never happen */
+ ret = -ENODEV;
+ } else if (n->write_pending) {
+ /*
+ * Requeue fast device, since there are pending
+ * writes for slow device.
+ */
+ plist_requeue(&(*si)->avail_list,
+ &swap_avail_head);
+ n->write_pending--;
+ spin_unlock(&(*si)->lock);
+ *si = n;
+ goto skip;
+ } else {
+ if (calculate_write_pending(*si, n) < 0) {
+ ret = -ENODEV;
+ goto exit;
+ }
+ /* Restart from fast device */
+ (*si)->write_pending--;
+ }
+ }
+ } else {
+ if (!(n->flags & SWP_FAST) || !is_same_group(*si, n)) {
+ /* Should never happen */
+ ret = -ENODEV;
+ } else if (n->write_pending) {
+ /*
+ * Pending writes for fast device.
+ * We reach here when slow device is swapped on first,
+ * before fast device.
+ */
+ /* requeue slow device to the end */
+ plist_requeue(&(*si)->avail_list, &swap_avail_head);
+ n->write_pending--;
+ spin_unlock(&(*si)->lock);
+ *si = n;
+ goto skip;
+ } else {
+ if ((*si)->write_pending) {
+ (*si)->write_pending--;
+ } else {
+ if (calculate_write_pending(n, *si) < 0) {
+ ret = -ENODEV;
+ goto exit;
+ }
+ n->write_pending--;
+ plist_requeue(&(*si)->avail_list,
+ &swap_avail_head);
+ spin_unlock(&(*si)->lock);
+ *si = n;
+ goto skip;
+ }
+ }
+ }
+exit:
+ spin_unlock(&(*si)->lock);
+skip:
+ spin_unlock(&swap_avail_lock);
+	/* n and si may have been swapped above */
+ spin_unlock(&n->lock);
+ return ret;
+}
+
+bool is_swap_ratio_group(int prio)
+{
+ return ((prio >= SWAP_RATIO_GROUP_START) &&
+ (prio <= SWAP_RATIO_GROUP_END)) ? true : false;
+}
+
+void setup_swap_ratio(struct swap_info_struct *p, int prio)
+{
+ /* Used only if sysctl_swap_ratio_enable is set */
+ if (is_swap_ratio_group(prio)) {
+ if (p->flags & SWP_FAST)
+ p->write_pending = SWAP_FAST_WRITES;
+ else
+ p->write_pending = SWAP_SLOW_WRITES;
+ p->max_writes = p->write_pending;
+ }
+}
+
+int swap_ratio(struct swap_info_struct **si)
+{
+ if (!sysctl_swap_ratio_enable)
+ return -ENODEV;
+
+ if (is_swap_ratio_group((*si)->prio))
+ return swap_ratio_slow(si);
+ else
+ return -ENODEV;
+}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 35d7e0e..5ac5846 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -478,7 +478,7 @@
unsigned long mask;
struct blk_plug plug;
- mask = swapin_nr_pages(offset) - 1;
+ mask = is_swap_fast(entry) ? 0 : swapin_nr_pages(offset) - 1;
if (!mask)
goto skip;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d76b2a1..9cf2595 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -81,8 +81,8 @@
* is held and the locking order requires swap_lock to be taken
* before any swap_info_struct->lock.
*/
-static PLIST_HEAD(swap_avail_head);
-static DEFINE_SPINLOCK(swap_avail_lock);
+PLIST_HEAD(swap_avail_head);
+DEFINE_SPINLOCK(swap_avail_lock);
struct swap_info_struct *swap_info[MAX_SWAPFILES];
@@ -97,6 +97,26 @@
return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
}
+bool is_swap_fast(swp_entry_t entry)
+{
+ struct swap_info_struct *p;
+ unsigned long type;
+
+ if (non_swap_entry(entry))
+ return false;
+
+ type = swp_type(entry);
+ if (type >= nr_swapfiles)
+ return false;
+
+ p = swap_info[type];
+
+ if (p->flags & SWP_FAST)
+ return true;
+
+ return false;
+}
+
/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
@@ -196,7 +216,6 @@
}
}
-#define SWAPFILE_CLUSTER 256
#define LATENCY_LIMIT 256
static inline void cluster_set_flag(struct swap_cluster_info *info,
@@ -573,7 +592,7 @@
scan_base = offset = si->lowest_bit;
/* reuse swap entry of cache-only swap if not busy. */
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
spin_unlock(&si->lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -613,7 +632,8 @@
spin_lock(&si->lock);
goto checks;
}
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) &&
+ si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&si->lock);
goto checks;
}
@@ -628,7 +648,8 @@
spin_lock(&si->lock);
goto checks;
}
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) &&
+ si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&si->lock);
goto checks;
}
@@ -649,18 +670,39 @@
{
struct swap_info_struct *si, *next;
pgoff_t offset;
+ int swap_ratio_off = 0;
if (atomic_long_read(&nr_swap_pages) <= 0)
goto noswap;
atomic_long_dec(&nr_swap_pages);
+lock_and_start:
spin_lock(&swap_avail_lock);
start_over:
plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
+
+ if (sysctl_swap_ratio && !swap_ratio_off) {
+ int ret;
+
+ spin_unlock(&swap_avail_lock);
+ ret = swap_ratio(&si);
+ if (ret < 0) {
+ /*
+ * Error. Start again with swap
+ * ratio disabled.
+ */
+ swap_ratio_off = 1;
+ goto lock_and_start;
+ } else {
+ goto start;
+ }
+ }
+
/* requeue si to after same-priority siblings */
plist_requeue(&si->avail_list, &swap_avail_head);
spin_unlock(&swap_avail_lock);
+start:
spin_lock(&si->lock);
if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
spin_lock(&swap_avail_lock);
@@ -2539,11 +2581,16 @@
}
}
+ if (p->bdev && blk_queue_fast(bdev_get_queue(p->bdev)))
+ p->flags |= SWP_FAST;
+
mutex_lock(&swapon_mutex);
prio = -1;
- if (swap_flags & SWAP_FLAG_PREFER)
+ if (swap_flags & SWAP_FLAG_PREFER) {
prio =
(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
+ setup_swap_ratio(p, prio);
+ }
enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 195de42..ed89128 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -301,6 +301,57 @@
static unsigned long vmap_area_pcpu_hole;
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+#define POSSIBLE_VMALLOC_START PAGE_OFFSET
+
+#define VMALLOC_BITMAP_SIZE ((VMALLOC_END - PAGE_OFFSET) >> \
+ PAGE_SHIFT)
+#define VMALLOC_TO_BIT(addr) ((addr - PAGE_OFFSET) >> PAGE_SHIFT)
+#define BIT_TO_VMALLOC(i) (PAGE_OFFSET + i * PAGE_SIZE)
+
+unsigned long total_vmalloc_size;
+unsigned long vmalloc_reserved;
+
+DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE);
+
+void mark_vmalloc_reserved_area(void *x, unsigned long size)
+{
+ unsigned long addr = (unsigned long)x;
+
+ bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT);
+ vmalloc_reserved += size;
+}
+
+int is_vmalloc_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END)
+ return 0;
+
+ if (test_bit(VMALLOC_TO_BIT(addr), possible_areas))
+ return 0;
+
+ return 1;
+}
+
+static void calc_total_vmalloc_size(void)
+{
+ total_vmalloc_size = VMALLOC_END - POSSIBLE_VMALLOC_START -
+ vmalloc_reserved;
+}
+#else
+int is_vmalloc_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ return addr >= VMALLOC_START && addr < VMALLOC_END;
+}
+
+static void calc_total_vmalloc_size(void) { }
+#endif
+EXPORT_SYMBOL(is_vmalloc_addr);
+
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
struct rb_node *n = vmap_area_root.rb_node;
@@ -375,7 +426,7 @@
BUG_ON(offset_in_page(size));
BUG_ON(!is_power_of_2(align));
- might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+ might_sleep();
va = kmalloc_node(sizeof(struct vmap_area),
gfp_mask & GFP_RECLAIM_MASK, node);
@@ -611,6 +662,13 @@
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
+/*
+ * Serialize vmap purging. There is no actual critical section protected
+ * by this lock, but we want to avoid concurrent calls for performance
+ * reasons and to make the pcpu_get_vm_areas more deterministic.
+ */
+static DEFINE_MUTEX(vmap_purge_lock);
+
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
@@ -625,59 +683,40 @@
/*
* Purges all lazily-freed vmap areas.
- *
- * If sync is 0 then don't purge if there is already a purge in progress.
- * If force_flush is 1, then flush kernel TLBs between *start and *end even
- * if we found no lazy vmap areas to unmap (callers can use this to optimise
- * their own TLB flushing).
- * Returns with *start = min(*start, lowest purged address)
- * *end = max(*end, highest purged address)
*/
-static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
- int sync, int force_flush)
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
- static DEFINE_SPINLOCK(purge_lock);
struct llist_node *valist;
struct vmap_area *va;
struct vmap_area *n_va;
- int nr = 0;
+ bool do_free = false;
- /*
- * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
- * should not expect such behaviour. This just simplifies locking for
- * the case that isn't actually used at the moment anyway.
- */
- if (!sync && !force_flush) {
- if (!spin_trylock(&purge_lock))
- return;
- } else
- spin_lock(&purge_lock);
-
- if (sync)
- purge_fragmented_blocks_allcpus();
+ lockdep_assert_held(&vmap_purge_lock);
valist = llist_del_all(&vmap_purge_list);
llist_for_each_entry(va, valist, purge_list) {
- if (va->va_start < *start)
- *start = va->va_start;
- if (va->va_end > *end)
- *end = va->va_end;
- nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
+ if (va->va_start < start)
+ start = va->va_start;
+ if (va->va_end > end)
+ end = va->va_end;
+ do_free = true;
}
- if (nr)
+ if (!do_free)
+ return false;
+
+ flush_tlb_kernel_range(start, end);
+
+ spin_lock(&vmap_area_lock);
+ llist_for_each_entry_safe(va, n_va, valist, purge_list) {
+ int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+
+ __free_vmap_area(va);
atomic_sub(nr, &vmap_lazy_nr);
-
- if (nr || force_flush)
- flush_tlb_kernel_range(*start, *end);
-
- if (nr) {
- spin_lock(&vmap_area_lock);
- llist_for_each_entry_safe(va, n_va, valist, purge_list)
- __free_vmap_area(va);
- spin_unlock(&vmap_area_lock);
+ cond_resched_lock(&vmap_area_lock);
}
- spin_unlock(&purge_lock);
+ spin_unlock(&vmap_area_lock);
+ return true;
}
/*
@@ -686,9 +725,10 @@
*/
static void try_purge_vmap_area_lazy(void)
{
- unsigned long start = ULONG_MAX, end = 0;
-
- __purge_vmap_area_lazy(&start, &end, 0, 0);
+ if (mutex_trylock(&vmap_purge_lock)) {
+ __purge_vmap_area_lazy(ULONG_MAX, 0);
+ mutex_unlock(&vmap_purge_lock);
+ }
}
/*
@@ -696,9 +736,10 @@
*/
static void purge_vmap_area_lazy(void)
{
- unsigned long start = ULONG_MAX, end = 0;
-
- __purge_vmap_area_lazy(&start, &end, 1, 0);
+ mutex_lock(&vmap_purge_lock);
+ purge_fragmented_blocks_allcpus();
+ __purge_vmap_area_lazy(ULONG_MAX, 0);
+ mutex_unlock(&vmap_purge_lock);
}
/*
@@ -721,22 +762,13 @@
}
/*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
- */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
-{
- unmap_vmap_area(va);
- free_vmap_area_noflush(va);
-}
-
-/*
* Free and unmap a vmap area
*/
static void free_unmap_vmap_area(struct vmap_area *va)
{
flush_cache_vunmap(va->va_start, va->va_end);
- free_unmap_vmap_area_noflush(va);
+ unmap_vmap_area(va);
+ free_vmap_area_noflush(va);
}
static struct vmap_area *find_vmap_area(unsigned long addr)
@@ -750,16 +782,6 @@
return va;
}
-static void free_unmap_vmap_area_addr(unsigned long addr)
-{
- struct vmap_area *va;
-
- va = find_vmap_area(addr);
- BUG_ON(!va);
- free_unmap_vmap_area(va);
-}
-
-
/*** Per cpu kva allocator ***/
/*
@@ -1080,6 +1102,8 @@
if (unlikely(!vmap_initialized))
return;
+ might_sleep();
+
for_each_possible_cpu(cpu) {
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
struct vmap_block *vb;
@@ -1104,7 +1128,11 @@
rcu_read_unlock();
}
- __purge_vmap_area_lazy(&start, &end, 1, flush);
+ mutex_lock(&vmap_purge_lock);
+ purge_fragmented_blocks_allcpus();
+ if (!__purge_vmap_area_lazy(start, end) && flush)
+ flush_tlb_kernel_range(start, end);
+ mutex_unlock(&vmap_purge_lock);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
@@ -1117,7 +1145,9 @@
{
unsigned long size = (unsigned long)count << PAGE_SHIFT;
unsigned long addr = (unsigned long)mem;
+ struct vmap_area *va;
+ might_sleep();
BUG_ON(!addr);
BUG_ON(addr < VMALLOC_START);
BUG_ON(addr > VMALLOC_END);
@@ -1126,10 +1156,14 @@
debug_check_no_locks_freed(mem, size);
vmap_debug_free_range(addr, addr+size);
- if (likely(count <= VMAP_MAX_ALLOC))
+ if (likely(count <= VMAP_MAX_ALLOC)) {
vb_free(mem, size);
- else
- free_unmap_vmap_area_addr(addr);
+ return;
+ }
+
+ va = find_vmap_area(addr);
+ BUG_ON(!va);
+ free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
@@ -1178,6 +1212,33 @@
EXPORT_SYMBOL(vm_map_ram);
static struct vm_struct *vmlist __initdata;
+
+/**
+ * vm_area_check_early - check if vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function is used to check if the vmap area has been
+ * mapped already. @vm->addr, @vm->size and @vm->flags should
+ * contain proper values.
+ *
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+ struct vm_struct *tmp, **p;
+
+ BUG_ON(vmap_initialized);
+ for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+ if (tmp->addr >= vm->addr) {
+ if (tmp->addr < vm->addr + vm->size)
+ return 1;
+ } else {
+ if (tmp->addr + tmp->size > vm->addr)
+ return 1;
+ }
+ }
+ return 0;
+}
+
/**
* vm_area_add_early - add vmap area early during boot
* @vm: vm_struct to add
@@ -1258,7 +1319,7 @@
}
vmap_area_pcpu_hole = VMALLOC_END;
-
+ calc_total_vmalloc_size();
vmap_initialized = true;
}
@@ -1422,16 +1483,27 @@
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+ NUMA_NO_NODE, GFP_KERNEL,
+ __builtin_return_address(0));
+#else
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL,
__builtin_return_address(0));
+#endif
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
const void *caller)
{
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+ NUMA_NO_NODE, GFP_KERNEL, caller);
+#else
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL, caller);
+#endif
}
/**
@@ -1465,6 +1537,8 @@
{
struct vmap_area *va;
+ might_sleep();
+
va = find_vmap_area((unsigned long)addr);
if (va && va->flags & VM_VM_AREA) {
struct vm_struct *vm = va->vm;
@@ -1520,7 +1594,39 @@
kfree(area);
return;
}
-
+
+static inline void __vfree_deferred(const void *addr)
+{
+ /*
+ * Use raw_cpu_ptr() because this can be called from preemptible
+ * context. Preemption is absolutely fine here, because the llist_add()
+ * implementation is lockless, so it works even if we are adding to
+ * another cpu's list. schedule_work() should be fine with this too.
+ */
+ struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+ if (llist_add((struct llist_node *)addr, &p->list))
+ schedule_work(&p->wq);
+}
+
+/**
+ * vfree_atomic - release memory allocated by vmalloc()
+ * @addr: memory base address
+ *
+ * This one is just like vfree() but can be called in any atomic context
+ * except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+ BUG_ON(in_nmi());
+
+ kmemleak_free(addr);
+
+ if (!addr)
+ return;
+ __vfree_deferred(addr);
+}
+
/**
* vfree - release memory allocated by vmalloc()
* @addr: memory base address
@@ -1543,11 +1649,9 @@
if (!addr)
return;
- if (unlikely(in_interrupt())) {
- struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
- if (llist_add((struct llist_node *)addr, &p->list))
- schedule_work(&p->wq);
- } else
+ if (unlikely(in_interrupt()))
+ __vfree_deferred(addr);
+ else
__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
@@ -2683,6 +2787,9 @@
if (is_vmalloc_addr(v->pages))
seq_puts(m, " vpages");
+ if (v->flags & VM_LOWMEM)
+ seq_puts(m, " lowmem");
+
show_numa_info(m, v);
seq_putc(m, '\n');
return 0;
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 6063581..1306f32 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -22,6 +22,9 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/vmpressure.h>
/*
@@ -38,7 +41,7 @@
* TODO: Make the window size depend on machine size, as we do for vmstat
* thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
*/
-static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
+static unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
/*
* These thresholds are used when we account memory pressure through
@@ -49,6 +52,33 @@
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;
+static unsigned long vmpressure_scale_max = 100;
+module_param_named(vmpressure_scale_max, vmpressure_scale_max,
+ ulong, 0644);
+
+/* vmpressure values >= this will be scaled based on allocstalls */
+static unsigned long allocstall_threshold = 70;
+module_param_named(allocstall_threshold, allocstall_threshold,
+ ulong, 0644);
+
+static struct vmpressure global_vmpressure;
+static BLOCKING_NOTIFIER_HEAD(vmpressure_notifier);
+
+int vmpressure_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&vmpressure_notifier, nb);
+}
+
+int vmpressure_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&vmpressure_notifier, nb);
+}
+
+static void vmpressure_notify(unsigned long pressure)
+{
+ blocking_notifier_call_chain(&vmpressure_notifier, pressure, NULL);
+}
+
/*
* When there are too little pages left to scan, vmpressure() may miss the
* critical pressure as number of pages will be less than "window size".
@@ -75,6 +105,7 @@
return container_of(work, struct vmpressure, work);
}
+#ifdef CONFIG_MEMCG
static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
@@ -85,6 +116,12 @@
return NULL;
return memcg_to_vmpressure(memcg);
}
+#else
+static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
+{
+ return NULL;
+}
+#endif
enum vmpressure_levels {
VMPRESSURE_LOW = 0,
@@ -108,7 +145,7 @@
return VMPRESSURE_LOW;
}
-static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
+static unsigned long vmpressure_calc_pressure(unsigned long scanned,
unsigned long reclaimed)
{
unsigned long scale = scanned + reclaimed;
@@ -135,7 +172,20 @@
pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
scanned, reclaimed);
- return vmpressure_level(pressure);
+ return pressure;
+}
+
+static unsigned long vmpressure_account_stall(unsigned long pressure,
+ unsigned long stall, unsigned long scanned)
+{
+ unsigned long scale;
+
+ if (pressure < allocstall_threshold)
+ return pressure;
+
+ scale = ((vmpressure_scale_max - pressure) * stall) / scanned;
+
+ return pressure + scale;
}
struct vmpressure_event {
@@ -169,6 +219,7 @@
struct vmpressure *vmpr = work_to_vmpressure(work);
unsigned long scanned;
unsigned long reclaimed;
+ unsigned long pressure;
enum vmpressure_levels level;
spin_lock(&vmpr->sr_lock);
@@ -191,7 +242,8 @@
vmpr->tree_reclaimed = 0;
spin_unlock(&vmpr->sr_lock);
- level = vmpressure_calc_level(scanned, reclaimed);
+ pressure = vmpressure_calc_pressure(scanned, reclaimed);
+ level = vmpressure_level(pressure);
do {
if (vmpressure_event(vmpr, level))
@@ -203,28 +255,8 @@
} while ((vmpr = vmpressure_parent(vmpr)));
}
-/**
- * vmpressure() - Account memory pressure through scanned/reclaimed ratio
- * @gfp: reclaimer's gfp mask
- * @memcg: cgroup memory controller handle
- * @tree: legacy subtree mode
- * @scanned: number of pages scanned
- * @reclaimed: number of pages reclaimed
- *
- * This function should be called from the vmscan reclaim path to account
- * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
- * pressure index is then further refined and averaged over time.
- *
- * If @tree is set, vmpressure is in traditional userspace reporting
- * mode: @memcg is considered the pressure root and userspace is
- * notified of the entire subtree's reclaim efficiency.
- *
- * If @tree is not set, reclaim efficiency is recorded for @memcg, and
- * only in-kernel users are notified.
- *
- * This function does not return any value.
- */
-void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
+#ifdef CONFIG_MEMCG
+static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
unsigned long scanned, unsigned long reclaimed)
{
struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
@@ -265,6 +297,7 @@
schedule_work(&vmpr->work);
} else {
enum vmpressure_levels level;
+ unsigned long pressure;
/* For now, no users for root-level efficiency */
if (!memcg || memcg == root_mem_cgroup)
@@ -280,7 +313,8 @@
vmpr->scanned = vmpr->reclaimed = 0;
spin_unlock(&vmpr->sr_lock);
- level = vmpressure_calc_level(scanned, reclaimed);
+ pressure = vmpressure_calc_pressure(scanned, reclaimed);
+ level = vmpressure_level(pressure);
if (level > VMPRESSURE_LOW) {
/*
@@ -295,6 +329,106 @@
}
}
}
+#else
+static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
+ unsigned long scanned, unsigned long reclaimed) { }
+#endif
+
+static void calculate_vmpressure_win(void)
+{
+ long x;
+
+ x = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ total_swapcache_pages() +
+ global_page_state(NR_FREE_PAGES);
+ if (x < 1)
+ x = 1;
+ /*
+ * For low (free + cached), vmpressure window should be
+ * small, and high for higher values of (free + cached).
+ * But it should not be linear as well. This ensures
+ * timely vmpressure notifications when system is under
+ * memory pressure, and optimal number of events when
+ * cached is high. The square root function is empirically
+ * found to serve the purpose.
+ */
+ x = int_sqrt(x);
+ vmpressure_win = x;
+}
+
+static void vmpressure_global(gfp_t gfp, unsigned long scanned,
+ unsigned long reclaimed)
+{
+ struct vmpressure *vmpr = &global_vmpressure;
+ unsigned long pressure;
+ unsigned long stall;
+
+ if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
+ return;
+
+ if (!scanned)
+ return;
+
+ spin_lock(&vmpr->sr_lock);
+ if (!vmpr->scanned)
+ calculate_vmpressure_win();
+
+ vmpr->scanned += scanned;
+ vmpr->reclaimed += reclaimed;
+
+ if (!current_is_kswapd())
+ vmpr->stall += scanned;
+
+ stall = vmpr->stall;
+ scanned = vmpr->scanned;
+ reclaimed = vmpr->reclaimed;
+ spin_unlock(&vmpr->sr_lock);
+
+ if (scanned < vmpressure_win)
+ return;
+
+ spin_lock(&vmpr->sr_lock);
+ vmpr->scanned = 0;
+ vmpr->reclaimed = 0;
+ vmpr->stall = 0;
+ spin_unlock(&vmpr->sr_lock);
+
+ pressure = vmpressure_calc_pressure(scanned, reclaimed);
+ pressure = vmpressure_account_stall(pressure, stall, scanned);
+ vmpressure_notify(pressure);
+}
+
+/**
+ * vmpressure() - Account memory pressure through scanned/reclaimed ratio
+ * @gfp: reclaimer's gfp mask
+ * @memcg: cgroup memory controller handle
+ * @tree: legacy subtree mode
+ * @scanned: number of pages scanned
+ * @reclaimed: number of pages reclaimed
+ *
+ * This function should be called from the vmscan reclaim path to account
+ * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
+ * pressure index is then further refined and averaged over time.
+ *
+ * If @tree is set, vmpressure is in traditional userspace reporting
+ * mode: @memcg is considered the pressure root and userspace is
+ * notified of the entire subtree's reclaim efficiency.
+ *
+ * If @tree is not set, reclaim efficiency is recorded for @memcg, and
+ * only in-kernel users are notified.
+ *
+ * This function does not return any value.
+ */
+void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
+ unsigned long scanned, unsigned long reclaimed)
+{
+ if (!memcg)
+ vmpressure_global(gfp, scanned, reclaimed);
+
+ if (IS_ENABLED(CONFIG_MEMCG))
+ vmpressure_memcg(gfp, memcg, tree, scanned, reclaimed);
+}
/**
* vmpressure_prio() - Account memory pressure through reclaimer priority level
@@ -427,3 +561,10 @@
*/
flush_work(&vmpr->work);
}
+
+static int vmpressure_global_init(void)
+{
+ vmpressure_init(&global_vmpressure);
+ return 0;
+}
+late_initcall(vmpressure_global_init);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b4d398b..7b5848cf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -108,6 +108,13 @@
/* Number of pages freed so far during a call to shrink_zones() */
unsigned long nr_reclaimed;
+
+ /*
+ * Reclaim pages from a vma. If the page is shared by other tasks
+ * it is zapped from a vma without reclaim so it ends up remaining
+ * on memory until last task zap it.
+ */
+ struct vm_area_struct *target_vma;
};
#ifdef ARCH_HAS_PREFETCH
@@ -963,7 +970,7 @@
struct address_space *mapping;
struct page *page;
int may_enter_fs;
- enum page_references references = PAGEREF_RECLAIM_CLEAN;
+ enum page_references references = PAGEREF_RECLAIM;
bool dirty, writeback;
bool lazyfree = false;
int ret = SWAP_SUCCESS;
@@ -977,6 +984,8 @@
goto keep;
VM_BUG_ON_PAGE(PageActive(page), page);
+ if (pgdat)
+ VM_BUG_ON_PAGE(page_pgdat(page) != pgdat, page);
sc->nr_scanned++;
@@ -1055,7 +1064,7 @@
/* Case 1 above */
if (current_is_kswapd() &&
PageReclaim(page) &&
- test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
+ (pgdat && test_bit(PGDAT_WRITEBACK, &pgdat->flags))) {
nr_immediate++;
goto keep_locked;
@@ -1129,7 +1138,8 @@
if (page_mapped(page) && mapping) {
switch (ret = try_to_unmap(page, lazyfree ?
(ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
- (ttu_flags | TTU_BATCH_FLUSH))) {
+ (ttu_flags | TTU_BATCH_FLUSH),
+ sc->target_vma)) {
case SWAP_FAIL:
goto activate_locked;
case SWAP_AGAIN:
@@ -1151,7 +1161,8 @@
*/
if (page_is_file_cache(page) &&
(!current_is_kswapd() ||
- !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
+ (pgdat &&
+ !test_bit(PGDAT_DIRTY, &pgdat->flags)))) {
/*
* Immediately reclaim when written back.
* Similar in principal to deactivate_page()
@@ -1267,6 +1278,13 @@
* appear not as the counts should be low
*/
list_add(&page->lru, &free_pages);
+ /*
+ * If pagelist are from multiple zones, we should decrease
+ * NR_ISOLATED_ANON + x on freed pages in here.
+ */
+ if (!pgdat)
+ dec_node_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
continue;
cull_mlocked:
@@ -1312,6 +1330,8 @@
.gfp_mask = GFP_KERNEL,
.priority = DEF_PRIORITY,
.may_unmap = 1,
+ /* Doesn't allow to write out dirty page */
+ .may_writepage = 0,
};
unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
struct page *page, *next;
@@ -1333,6 +1353,42 @@
return ret;
}
+#ifdef CONFIG_PROCESS_RECLAIM
+unsigned long reclaim_pages_from_list(struct list_head *page_list,
+ struct vm_area_struct *vma)
+{
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .priority = DEF_PRIORITY,
+ .may_writepage = 1,
+ .may_unmap = 1,
+ .may_swap = 1,
+ .target_vma = vma,
+ };
+
+ unsigned long nr_reclaimed;
+ struct page *page;
+ unsigned long dummy1, dummy2, dummy3, dummy4, dummy5;
+
+ list_for_each_entry(page, page_list, lru)
+ ClearPageActive(page);
+
+ nr_reclaimed = shrink_page_list(page_list, NULL, &sc,
+ TTU_UNMAP|TTU_IGNORE_ACCESS,
+ &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+
+ while (!list_empty(page_list)) {
+ page = lru_to_page(page_list);
+ list_del(&page->lru);
+ dec_node_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ putback_lru_page(page);
+ }
+
+ return nr_reclaimed;
+}
+#endif
+
/*
* Attempt to remove the specified page from its LRU. Only take this page
* if it is of the appropriate PageActive status. Pages which are being
@@ -1585,30 +1641,31 @@
return ret;
}
-/*
- * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
- * then get resheduled. When there are massive number of tasks doing page
- * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
- * the LRU list will go small and be scanned faster than necessary, leading to
- * unnecessary swapping, thrashing and OOM.
- */
-static int too_many_isolated(struct pglist_data *pgdat, int file,
- struct scan_control *sc)
+static int __too_many_isolated(struct pglist_data *pgdat, int file,
+ struct scan_control *sc, int safe)
{
unsigned long inactive, isolated;
- if (current_is_kswapd())
- return 0;
-
- if (!sane_reclaim(sc))
- return 0;
-
if (file) {
- inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
- isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
+ if (safe) {
+ inactive = node_page_state_snapshot(pgdat,
+ NR_INACTIVE_FILE);
+ isolated = node_page_state_snapshot(pgdat,
+ NR_ISOLATED_FILE);
+ } else {
+ inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
+ isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
+ }
} else {
- inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
- isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
+ if (safe) {
+ inactive = node_page_state_snapshot(pgdat,
+ NR_INACTIVE_ANON);
+ isolated = node_page_state_snapshot(pgdat,
+ NR_ISOLATED_ANON);
+ } else {
+ inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
+ isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
+ }
}
/*
@@ -1622,6 +1679,32 @@
return isolated > inactive;
}
+/*
+ * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
+ * then get rescheduled. When there is a massive number of tasks doing page
+ * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
+ * the LRU list will go small and be scanned faster than necessary, leading to
+ * unnecessary swapping, thrashing and OOM.
+ */
+static int too_many_isolated(struct pglist_data *pgdat, int file,
+ struct scan_control *sc, int safe)
+{
+ if (current_is_kswapd())
+ return 0;
+
+ if (!sane_reclaim(sc))
+ return 0;
+
+ if (unlikely(__too_many_isolated(pgdat, file, sc, 0))) {
+ if (safe)
+ return __too_many_isolated(pgdat, file, sc, safe);
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
static noinline_for_stack void
putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
@@ -1733,18 +1816,21 @@
unsigned long nr_immediate = 0;
isolate_mode_t isolate_mode = 0;
int file = is_file_lru(lru);
+ int safe = 0;
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
if (!inactive_reclaimable_pages(lruvec, sc, lru))
return 0;
- while (unlikely(too_many_isolated(pgdat, file, sc))) {
+ while (unlikely(too_many_isolated(pgdat, file, sc, safe))) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
/* We are about to die and free our memory. Return now. */
if (fatal_signal_pending(current))
return SWAP_CLUSTER_MAX;
+
+ safe = 1;
}
lru_add_drain();
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 25a1f39..513c37a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -982,6 +982,7 @@
/* enum vm_event_item counters */
"pgpgin",
"pgpgout",
+ "pgpgoutclean",
"pswpin",
"pswpout",
@@ -1121,6 +1122,7 @@
/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
+ bool nolock,
void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
struct zone *zone;
@@ -1131,9 +1133,11 @@
if (!populated_zone(zone))
continue;
- spin_lock_irqsave(&zone->lock, flags);
+ if (!nolock)
+ spin_lock_irqsave(&zone->lock, flags);
print(m, pgdat, zone);
- spin_unlock_irqrestore(&zone->lock, flags);
+ if (!nolock)
+ spin_unlock_irqrestore(&zone->lock, flags);
}
}
#endif
@@ -1156,7 +1160,7 @@
static int frag_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
- walk_zones_in_node(m, pgdat, frag_show_print);
+ walk_zones_in_node(m, pgdat, false, frag_show_print);
return 0;
}
@@ -1197,7 +1201,7 @@
seq_printf(m, "%6d ", order);
seq_putc(m, '\n');
- walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
+ walk_zones_in_node(m, pgdat, false, pagetypeinfo_showfree_print);
return 0;
}
@@ -1249,7 +1253,8 @@
for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
seq_printf(m, "%12s ", migratetype_names[mtype]);
seq_putc(m, '\n');
- walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
+ walk_zones_in_node(m, pgdat, false,
+ pagetypeinfo_showblockcount_print);
return 0;
}
@@ -1275,7 +1280,8 @@
seq_printf(m, "%12s ", migratetype_names[mtype]);
seq_putc(m, '\n');
- walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
+ walk_zones_in_node(m, pgdat, true,
+ pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}
@@ -1432,7 +1438,7 @@
static int zoneinfo_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
- walk_zones_in_node(m, pgdat, zoneinfo_show_print);
+ walk_zones_in_node(m, pgdat, false, zoneinfo_show_print);
return 0;
}
@@ -1861,7 +1867,7 @@
if (!node_state(pgdat->node_id, N_MEMORY))
return 0;
- walk_zones_in_node(m, pgdat, unusable_show_print);
+ walk_zones_in_node(m, pgdat, false, unusable_show_print);
return 0;
}
@@ -1913,7 +1919,7 @@
{
pg_data_t *pgdat = (pg_data_t *)arg;
- walk_zones_in_node(m, pgdat, extfrag_show_print);
+ walk_zones_in_node(m, pgdat, false, extfrag_show_print);
return 0;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f45f619..227c249 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -687,7 +687,7 @@
NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
if (!neigh->dead) {
- pr_warn("Destroying alive neighbour %p\n", neigh);
+ pr_warn("Destroying alive neighbour %pK\n", neigh);
dump_stack();
return;
}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 08605a4..51ac77e 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,6 +41,10 @@
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int tcp_delack_seg_min = TCP_DELACK_MIN;
+static int tcp_delack_seg_max = 60;
+static int tcp_use_userconfig_min;
+static int tcp_use_userconfig_max = 1;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
@@ -684,6 +688,25 @@
.proc_handler = proc_dointvec_minmax,
.extra1 = &one
},
+ {
+ .procname = "tcp_delack_seg",
+ .data = &sysctl_tcp_delack_seg,
+ .maxlen = sizeof(sysctl_tcp_delack_seg),
+ .mode = 0644,
+ .proc_handler = tcp_proc_delayed_ack_control,
+ .extra1 = &tcp_delack_seg_min,
+ .extra2 = &tcp_delack_seg_max,
+ },
+ {
+ .procname = "tcp_use_userconfig",
+ .data = &sysctl_tcp_use_userconfig,
+ .maxlen = sizeof(sysctl_tcp_use_userconfig),
+ .mode = 0644,
+ .proc_handler = tcp_use_userconfig_sysctl_handler,
+ .extra1 = &tcp_use_userconfig_min,
+ .extra2 = &tcp_use_userconfig_max,
+ },
+
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 86fbf0f..c27382f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -300,6 +300,12 @@
atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
+int sysctl_tcp_delack_seg __read_mostly = TCP_DELACK_SEG;
+EXPORT_SYMBOL(sysctl_tcp_delack_seg);
+
+int sysctl_tcp_use_userconfig __read_mostly;
+EXPORT_SYMBOL(sysctl_tcp_use_userconfig);
+
/*
* Current number of TCP sockets.
*/
@@ -1438,8 +1444,11 @@
/* Delayed ACKs frequently hit locked sockets during bulk
* receive. */
if (icsk->icsk_ack.blocked ||
- /* Once-per-two-segments ACK was not sent by tcp_input.c */
- tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
+ /* Once-per-sysctl_tcp_delack_seg segments
+ * ACK was not sent by tcp_input.c
+ */
+ tp->rcv_nxt - tp->rcv_wup > (icsk->icsk_ack.rcv_mss) *
+ sysctl_tcp_delack_seg ||
/*
* If this read emptied read buffer, we send ACK, if
* connection is not bidirectional, user drained
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a03f1e8..3d980d6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5104,7 +5104,8 @@
struct tcp_sock *tp = tcp_sk(sk);
/* More than one full frame received... */
- if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
+ if (((tp->rcv_nxt - tp->rcv_wup) > (inet_csk(sk)->icsk_ack.rcv_mss) *
+ sysctl_tcp_delack_seg &&
/* ... and right edge of window advances far enough.
* (tcp_recvmsg() will send ACK otherwise). Or...
*/
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b1e65b3..732060d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -24,13 +24,45 @@
int sysctl_tcp_thin_linear_timeouts __read_mostly;
+static void set_tcp_default(void)
+{
+ sysctl_tcp_delack_seg = TCP_DELACK_SEG;
+}
+
+/* sysctl handler for tcp_ack related master control */
+int tcp_proc_delayed_ack_control(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ /* The ret value will be 0 if the input validation is successful
+ * and the values are written to sysctl table. If not, the stack
+ * will continue to work with currently configured values
+ */
+ return ret;
+}
+
+/* sysctl handler for tcp_ack related master control */
+int tcp_use_userconfig_sysctl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ if (write && ret == 0) {
+ if (!sysctl_tcp_use_userconfig)
+ set_tcp_default();
+ }
+ return ret;
+}
+
/**
* tcp_write_err() - close socket and save error info
* @sk: The socket the error has appeared on.
*
* Returns: Nothing (void)
*/
-
static void tcp_write_err(struct sock *sk)
{
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 545c79a..031273a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3308,10 +3308,11 @@
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *sdata_iter;
enum nl80211_iftype iftype = sdata->wdev.iftype;
- int num[NUM_NL80211_IFTYPES];
struct ieee80211_chanctx *ctx;
- int num_different_channels = 0;
int total = 1;
+ struct iface_combination_params params = {
+ .radar_detect = radar_detect,
+ };
lockdep_assert_held(&local->chanctx_mtx);
@@ -3322,9 +3323,6 @@
!chandef->chan))
return -EINVAL;
- if (chandef)
- num_different_channels = 1;
-
if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
return -EINVAL;
@@ -3335,24 +3333,26 @@
return 0;
}
- memset(num, 0, sizeof(num));
+ if (chandef)
+ params.num_different_channels = 1;
if (iftype != NL80211_IFTYPE_UNSPECIFIED)
- num[iftype] = 1;
+ params.iftype_num[iftype] = 1;
list_for_each_entry(ctx, &local->chanctx_list, list) {
if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
continue;
- radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
+ params.radar_detect |=
+ ieee80211_chanctx_radar_detect(local, ctx);
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
- num_different_channels++;
+ params.num_different_channels++;
continue;
}
if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
cfg80211_chandef_compatible(chandef,
&ctx->conf.def))
continue;
- num_different_channels++;
+ params.num_different_channels++;
}
list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
@@ -3365,16 +3365,14 @@
local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
continue;
- num[wdev_iter->iftype]++;
+ params.iftype_num[wdev_iter->iftype]++;
total++;
}
- if (total == 1 && !radar_detect)
+ if (total == 1 && !params.radar_detect)
return 0;
- return cfg80211_check_combinations(local->hw.wiphy,
- num_different_channels,
- radar_detect, num);
+ return cfg80211_check_combinations(local->hw.wiphy, ¶ms);
}
static void
@@ -3390,12 +3388,10 @@
int ieee80211_max_num_channels(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- int num[NUM_NL80211_IFTYPES] = {};
struct ieee80211_chanctx *ctx;
- int num_different_channels = 0;
- u8 radar_detect = 0;
u32 max_num_different_channels = 1;
int err;
+ struct iface_combination_params params = {0};
lockdep_assert_held(&local->chanctx_mtx);
@@ -3403,17 +3399,17 @@
if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
continue;
- num_different_channels++;
+ params.num_different_channels++;
- radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
+ params.radar_detect |=
+ ieee80211_chanctx_radar_detect(local, ctx);
}
list_for_each_entry_rcu(sdata, &local->interfaces, list)
- num[sdata->wdev.iftype]++;
+ params.iftype_num[sdata->wdev.iftype]++;
- err = cfg80211_iter_combinations(local->hw.wiphy,
- num_different_channels, radar_detect,
- num, ieee80211_iter_max_chans,
+ err = cfg80211_iter_combinations(local->hw.wiphy, ¶ms,
+ ieee80211_iter_max_chans,
&max_num_different_channels);
if (err < 0)
return err;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 6bd1508..19b89b1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1022,7 +1022,7 @@
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
- INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
+ INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
gc_work->next_gc_run = HZ;
gc_work->exiting = false;
}
diff --git a/net/netfilter/xt_HARDIDLETIMER.c b/net/netfilter/xt_HARDIDLETIMER.c
index fc0b83f..c6f70da9 100644
--- a/net/netfilter/xt_HARDIDLETIMER.c
+++ b/net/netfilter/xt_HARDIDLETIMER.c
@@ -72,7 +72,7 @@
{
char iface_msg[NLMSG_MAX_SIZE];
char state_msg[NLMSG_MAX_SIZE];
- static const char * const envp[] = { iface_msg, state_msg, NULL };
+ char *envp[] = { iface_msg, state_msg, NULL };
int res;
res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c
index 2a30d55..50d9b51 100644
--- a/net/rmnet_data/rmnet_data_config.c
+++ b/net/rmnet_data/rmnet_data_config.c
@@ -1173,6 +1173,7 @@
{
int i, j;
struct net_device *vndev;
+ struct rmnet_phys_ep_config *config;
struct rmnet_logical_ep_conf_s *cfg;
struct rmnet_free_vnd_work *vnd_work;
@@ -1228,6 +1229,16 @@
kfree(vnd_work);
}
+ config = _rmnet_get_phys_ep_config(dev);
+
+ if (config) {
+ cfg = &config->local_ep;
+
+ if (cfg && cfg->refcount)
+ rmnet_unset_logical_endpoint_config
+ (cfg->egress_dev, RMNET_LOCAL_LOGICAL_ENDPOINT);
+ }
+
/* Clear the mappings on the phys ep */
trace_rmnet_unregister_cb_clear_lepcs(dev);
rmnet_unset_logical_endpoint_config(dev, RMNET_LOCAL_LOGICAL_ENDPOINT);
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 46fdf5a..35be79e 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -552,12 +552,9 @@
LOGD("headroom of %d bytes", required_headroom);
if (skb_headroom(skb) < required_headroom) {
- if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) {
- LOGD("Failed to add headroom of %d bytes",
- required_headroom);
- kfree_skb(skb);
- return 1;
- }
+ LOGE("Not enough headroom for %d bytes", required_headroom);
+ kfree_skb(skb);
+ return 1;
}
if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5f5867f..80890c0 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -477,7 +477,7 @@
u32 *mask);
int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
- u32 beacon_int);
+ enum nl80211_iftype iftype, u32 beacon_int);
void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
enum nl80211_iftype iftype, int num);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 9ed6b0f..4ba0d590 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1060,6 +1060,10 @@
nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
c->radar_detect_regions)))
goto nla_put_failure;
+ if (c->beacon_int_min_gcd &&
+ nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD,
+ c->beacon_int_min_gcd))
+ goto nla_put_failure;
nla_nest_end(msg, nl_combi);
}
@@ -3790,7 +3794,8 @@
params.dtim_period =
nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
- err = cfg80211_validate_beacon_int(rdev, params.beacon_interval);
+ err = cfg80211_validate_beacon_int(rdev, dev->ieee80211_ptr->iftype,
+ params.beacon_interval);
if (err)
return err;
@@ -8163,7 +8168,8 @@
ibss.beacon_interval =
nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
- err = cfg80211_validate_beacon_int(rdev, ibss.beacon_interval);
+ err = cfg80211_validate_beacon_int(rdev, NL80211_IFTYPE_ADHOC,
+ ibss.beacon_interval);
if (err)
return err;
@@ -9428,7 +9434,9 @@
setup.beacon_interval =
nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
- err = cfg80211_validate_beacon_int(rdev, setup.beacon_interval);
+ err = cfg80211_validate_beacon_int(rdev,
+ NL80211_IFTYPE_MESH_POINT,
+ setup.beacon_interval);
if (err)
return err;
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 7c8b406..3da17e3 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -15,6 +15,7 @@
#include <linux/mpls.h>
#include <net/ndisc.h>
#include <linux/if_arp.h>
+#include <linux/gcd.h>
#include "core.h"
#include "rdev-ops.h"
@@ -1559,31 +1560,57 @@
}
EXPORT_SYMBOL(ieee80211_chandef_to_operating_class);
-int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
- u32 beacon_int)
+static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
+ u32 *beacon_int_gcd,
+ bool *beacon_int_different)
{
struct wireless_dev *wdev;
- int res = 0;
+
+ *beacon_int_gcd = 0;
+ *beacon_int_different = false;
+
+ list_for_each_entry(wdev, &wiphy->wdev_list, list) {
+ if (!wdev->beacon_interval)
+ continue;
+
+ if (!*beacon_int_gcd) {
+ *beacon_int_gcd = wdev->beacon_interval;
+ continue;
+ }
+
+ if (wdev->beacon_interval == *beacon_int_gcd)
+ continue;
+
+ *beacon_int_different = true;
+ *beacon_int_gcd = gcd(*beacon_int_gcd, wdev->beacon_interval);
+ }
+
+ if (new_beacon_int && *beacon_int_gcd != new_beacon_int) {
+ if (*beacon_int_gcd)
+ *beacon_int_different = true;
+ *beacon_int_gcd = gcd(*beacon_int_gcd, new_beacon_int);
+ }
+}
+
+int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
+ enum nl80211_iftype iftype, u32 beacon_int)
+{
+ /*
+ * This is just a basic pre-condition check; if interface combinations
+ * are possible the driver must already be checking those with a call
+ * to cfg80211_check_combinations(), in which case we'll validate more
+ * through the cfg80211_calculate_bi_data() call and code in
+ * cfg80211_iter_combinations().
+ */
if (beacon_int < 10 || beacon_int > 10000)
return -EINVAL;
- list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
- if (!wdev->beacon_interval)
- continue;
- if (wdev->beacon_interval != beacon_int) {
- res = -EINVAL;
- break;
- }
- }
-
- return res;
+ return 0;
}
int cfg80211_iter_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES],
+ struct iface_combination_params *params,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data)
@@ -1593,8 +1620,23 @@
int i, j, iftype;
int num_interfaces = 0;
u32 used_iftypes = 0;
+ u32 beacon_int_gcd;
+ bool beacon_int_different;
- if (radar_detect) {
+ /*
+ * This is a bit strange, since the iteration used to rely only on
+ * the data given by the driver, but here it now relies on context,
+ * in form of the currently operating interfaces.
+ * This is OK for all current users, and saves us from having to
+ * push the GCD calculations into all the drivers.
+ * In the future, this should probably rely more on data that's in
+ * cfg80211 already - the only thing not would appear to be any new
+ * interfaces (while being brought up) and channel/radar data.
+ */
+ cfg80211_calculate_bi_data(wiphy, params->new_beacon_int,
+ &beacon_int_gcd, &beacon_int_different);
+
+ if (params->radar_detect) {
rcu_read_lock();
regdom = rcu_dereference(cfg80211_regdomain);
if (regdom)
@@ -1603,8 +1645,8 @@
}
for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
- num_interfaces += iftype_num[iftype];
- if (iftype_num[iftype] > 0 &&
+ num_interfaces += params->iftype_num[iftype];
+ if (params->iftype_num[iftype] > 0 &&
!(wiphy->software_iftypes & BIT(iftype)))
used_iftypes |= BIT(iftype);
}
@@ -1618,7 +1660,7 @@
if (num_interfaces > c->max_interfaces)
continue;
- if (num_different_channels > c->num_different_channels)
+ if (params->num_different_channels > c->num_different_channels)
continue;
limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
@@ -1633,16 +1675,17 @@
all_iftypes |= limits[j].types;
if (!(limits[j].types & BIT(iftype)))
continue;
- if (limits[j].max < iftype_num[iftype])
+ if (limits[j].max < params->iftype_num[iftype])
goto cont;
- limits[j].max -= iftype_num[iftype];
+ limits[j].max -= params->iftype_num[iftype];
}
}
- if (radar_detect != (c->radar_detect_widths & radar_detect))
+ if (params->radar_detect !=
+ (c->radar_detect_widths & params->radar_detect))
goto cont;
- if (radar_detect && c->radar_detect_regions &&
+ if (params->radar_detect && c->radar_detect_regions &&
!(c->radar_detect_regions & BIT(region)))
goto cont;
@@ -1654,6 +1697,14 @@
if ((all_iftypes & used_iftypes) != used_iftypes)
goto cont;
+ if (beacon_int_gcd) {
+ if (c->beacon_int_min_gcd &&
+ beacon_int_gcd < c->beacon_int_min_gcd)
+ goto cont;
+ if (!c->beacon_int_min_gcd && beacon_int_different)
+ goto cont;
+ }
+
/* This combination covered all interface types and
* supported the requested numbers, so we're good.
*/
@@ -1676,14 +1727,11 @@
}
int cfg80211_check_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES])
+ struct iface_combination_params *params)
{
int err, num = 0;
- err = cfg80211_iter_combinations(wiphy, num_different_channels,
- radar_detect, iftype_num,
+ err = cfg80211_iter_combinations(wiphy, params,
cfg80211_iter_sum_ifcombs, &num);
if (err)
return err;
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
old mode 100755
new mode 100644
index f742c65..1b9e67b
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -145,6 +145,25 @@
${CC} ${aflags} -c -o ${2} ${afile}
}
+# Generates ${2} .o file with RTIC MPs from the ${1} object file (vmlinux)
+# ${3} the file name where the sizes of the RTIC MP structure are stored
+# just in case, save copy of the RTIC mp to ${4}
+# Note: RTIC_MPGEN has to be set if MPGen is available
+rtic_mp()
+{
+ # assume that RTIC_MP_O generation may fail
+ RTIC_MP_O=
+
+ ${RTIC_MPGEN} --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
+ --binpath='' --vmlinux=${1} --config=${KCONFIG_CONFIG} && \
+ cat rtic_mp.c | ${CC} -c -o ${2} -x c - && \
+ cp rtic_mp.c ${4} && \
+ ${NM} --print-size --size-sort ${2} > ${3} && \
+ RTIC_MP_O=${2}
+ # NM - save generated variable sizes for verification
+ # RTIC_MP_O is our retval - great success if set to generated .o file
+}
+
# Create map file with all symbols from ${1}
# See mksymap for additional details
mksysmap()
@@ -169,6 +188,8 @@
rm -f System.map
rm -f vmlinux
rm -f vmlinux.o
+ rm -f .tmp_rtic_mp_sz*
+ rm -f rtic_mp.*
}
on_exit()
@@ -231,6 +252,15 @@
# final build of init/
${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}"
+# Generate RTIC MP placeholder compile unit of the correct size
+# and add it to the list of link objects
+# this needs to be done before generating kallsyms
+if [ ! -z ${RTIC_MPGEN+x} ]; then
+ rtic_mp vmlinux.o rtic_mp.o .tmp_rtic_mp_sz1 .tmp_rtic_mp1.c
+ KBUILD_VMLINUX_MAIN+=" "
+ KBUILD_VMLINUX_MAIN+=$RTIC_MP_O
+fi
+
kallsymso=""
kallsyms_vmlinux=""
if [ -n "${CONFIG_KALLSYMS}" ]; then
@@ -276,6 +306,18 @@
fi
fi
+# Update RTIC MP object by replacing the place holder
+# with actual MP data of the same size
+# Also double check that object size did not change
+if [ ! -z ${RTIC_MPGEN+x} ]; then
+ rtic_mp "${kallsyms_vmlinux}" rtic_mp.o .tmp_rtic_mp_sz2 \
+ .tmp_rtic_mp2.c
+ if ! cmp -s .tmp_rtic_mp_sz1 .tmp_rtic_mp_sz2; then
+ echo >&2 'ERROR: RTIC MP object files size mismatch'
+ exit 1
+ fi
+fi
+
info LD vmlinux
vmlinux_link "${kallsymso}" vmlinux
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 56c458d..8d9330a 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -437,7 +437,7 @@
static struct key *request_master_key(struct encrypted_key_payload *epayload,
const u8 **master_key, size_t *master_keylen)
{
- struct key *mkey = NULL;
+ struct key *mkey = ERR_PTR(-EINVAL);
if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
KEY_TRUSTED_PREFIX_LEN)) {
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index f800858..a472bf2 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -882,7 +882,6 @@
tristate
depends on WCD9XXX_CODEC_CORE
select SND_SOC_WCD9XXX
- select SND_SOC_WCD_MBHC
select SND_SOC_WCD_MBHC_LEGACY
select SND_SOC_WCD_CPE
@@ -900,7 +899,6 @@
config SND_SOC_WCD934X_MBHC
tristate
depends on SND_SOC_WCD934X
- select SND_SOC_WCD_MBHC
select SND_SOC_WCD_MBHC_ADC
config REGMAP_SWR
@@ -912,10 +910,6 @@
depends on REGMAP_SWR
select MSM_CDC_PINCTRL
-config SND_SOC_WSA881X_ANALOG
- tristate
- select REGMAP_I2C
-
config SND_SOC_WCD9XXX
tristate
default y if SND_SOC_WCD9335=y || SND_SOC_WCD934X=y
@@ -933,9 +927,11 @@
config SND_SOC_WCD_MBHC_LEGACY
tristate
+ select SND_SOC_WCD_MBHC
config SND_SOC_WCD_MBHC_ADC
tristate
+ select SND_SOC_WCD_MBHC
config SND_SOC_WCD_DSP_MGR
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index d5e4ab2..daf05d8 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -162,23 +162,16 @@
snd-soc-wcd934x-objs := wcd934x.o
snd-soc-wcd9xxx-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o \
wcdcal-hwdep.o wcd-dsp-mgr.o wcd-dsp-utils.o \
- wcd9xxx-soc-init.o
-ifeq ($(CONFIG_COMMON_CLK_MSM), y)
- snd-soc-wcd9xxx-objs += audio-ext-clk.o
-endif
-
-ifeq ($(CONFIG_COMMON_CLK_QCOM), y)
- snd-soc-wcd9xxx-objs += audio-ext-clk-up.o
-endif
+ wcd9xxx-soc-init.o audio-ext-clk-up.o
snd-soc-wcd-cpe-objs := wcd_cpe_services.o wcd_cpe_core.o
snd-soc-wsa881x-objs := wsa881x.o wsa881x-tables.o wsa881x-regmap.o wsa881x-temp-sensor.o
+snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o
ifneq (,$(filter $(CONFIG_SND_SOC_WCD_MBHC_LEGACY),y m))
- snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-legacy.o
-else ifneq (,$(filter $(CONFIG_SND_SOC_WCD_MBHC_ADC),y m))
- snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-adc.o
+ snd-soc-wcd-mbhc-objs += wcd-mbhc-legacy.o
endif
-snd-soc-wsa881x-analog-objs := wsa881x-analog.o wsa881x-tables-analog.o
-snd-soc-wsa881x-analog-objs += wsa881x-regmap-analog.o wsa881x-irq.o
+ifneq (,$(filter $(CONFIG_SND_SOC_WCD_MBHC_ADC),y m))
+ snd-soc-wcd-mbhc-objs += wcd-mbhc-adc.o
+endif
snd-soc-wcd-spi-objs := wcd-spi.o
snd-soc-wl1273-objs := wl1273.o
snd-soc-wm-adsp-objs := wm_adsp.o
@@ -410,7 +403,6 @@
obj-$(CONFIG_SND_SOC_WCD_CPE) += snd-soc-wcd-cpe.o
obj-$(CONFIG_SND_SOC_WCD_MBHC) += snd-soc-wcd-mbhc.o
obj-$(CONFIG_SND_SOC_WSA881X) += snd-soc-wsa881x.o
-obj-$(CONFIG_SND_SOC_WSA881X_ANALOG) += snd-soc-wsa881x-analog.o
obj-$(CONFIG_SND_SOC_WL1273) += snd-soc-wl1273.o
obj-$(CONFIG_SND_SOC_WCD_SPI) += snd-soc-wcd-spi.o
obj-$(CONFIG_SND_SOC_WM0010) += snd-soc-wm0010.o
diff --git a/sound/soc/codecs/audio-ext-clk.c b/sound/soc/codecs/audio-ext-clk.c
deleted file mode 100644
index 72f16f5..0000000
--- a/sound/soc/codecs/audio-ext-clk.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <dt-bindings/clock/audio-ext-clk.h>
-#include <sound/q6afe-v2.h>
-#include "audio-ext-clk-up.h"
-
-struct pinctrl_info {
- struct pinctrl *pinctrl;
- struct pinctrl_state *sleep;
- struct pinctrl_state *active;
-};
-
-struct audio_ext_ap_clk {
- bool enabled;
- int gpio;
- struct clk c;
-};
-
-struct audio_ext_pmi_clk {
- int gpio;
- struct clk c;
-};
-
-struct audio_ext_ap_clk2 {
- bool enabled;
- struct pinctrl_info pnctrl_info;
- struct clk c;
-};
-
-static struct afe_clk_set clk2_config = {
- Q6AFE_LPASS_CLK_CONFIG_API_VERSION,
- Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_OSR,
- Q6AFE_LPASS_IBIT_CLK_11_P2896_MHZ,
- Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
- Q6AFE_LPASS_CLK_ROOT_DEFAULT,
- 0,
-};
-
-static inline struct audio_ext_ap_clk *to_audio_ap_clk(struct clk *clk)
-{
- return container_of(clk, struct audio_ext_ap_clk, c);
-}
-
-static int audio_ext_clk_prepare(struct clk *clk)
-{
- struct audio_ext_ap_clk *audio_clk = to_audio_ap_clk(clk);
-
- pr_debug("%s: gpio: %d\n", __func__, audio_clk->gpio);
- if (gpio_is_valid(audio_clk->gpio))
- return gpio_direction_output(audio_clk->gpio, 1);
- return 0;
-}
-
-static void audio_ext_clk_unprepare(struct clk *clk)
-{
- struct audio_ext_ap_clk *audio_clk = to_audio_ap_clk(clk);
-
- pr_debug("%s: gpio: %d\n", __func__, audio_clk->gpio);
- if (gpio_is_valid(audio_clk->gpio))
- gpio_direction_output(audio_clk->gpio, 0);
-}
-
-static inline struct audio_ext_ap_clk2 *to_audio_ap_clk2(struct clk *clk)
-{
- return container_of(clk, struct audio_ext_ap_clk2, c);
-}
-
-static int audio_ext_clk2_prepare(struct clk *clk)
-{
- struct audio_ext_ap_clk2 *audio_clk2 = to_audio_ap_clk2(clk);
- struct pinctrl_info *pnctrl_info = &audio_clk2->pnctrl_info;
- int ret;
-
-
- if (!pnctrl_info->pinctrl || !pnctrl_info->active)
- return 0;
-
- ret = pinctrl_select_state(pnctrl_info->pinctrl,
- pnctrl_info->active);
- if (ret) {
- pr_err("%s: active state select failed with %d\n",
- __func__, ret);
- return -EIO;
- }
-
- clk2_config.enable = 1;
- ret = afe_set_lpass_clk_cfg(IDX_RSVD_3, &clk2_config);
- if (ret < 0) {
- pr_err("%s: failed to set clock, ret = %d\n", __func__, ret);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void audio_ext_clk2_unprepare(struct clk *clk)
-{
- struct audio_ext_ap_clk2 *audio_clk2 = to_audio_ap_clk2(clk);
- struct pinctrl_info *pnctrl_info = &audio_clk2->pnctrl_info;
- int ret;
-
- if (!pnctrl_info->pinctrl || !pnctrl_info->sleep)
- return;
-
- ret = pinctrl_select_state(pnctrl_info->pinctrl,
- pnctrl_info->sleep);
- if (ret)
- pr_err("%s: sleep state select failed with %d\n",
- __func__, ret);
-
- clk2_config.enable = 0;
- ret = afe_set_lpass_clk_cfg(IDX_RSVD_3, &clk2_config);
- if (ret < 0)
- pr_err("%s: failed to reset clock, ret = %d\n", __func__, ret);
-}
-
-static const struct clk_ops audio_ext_ap_clk_ops = {
- .prepare = audio_ext_clk_prepare,
- .unprepare = audio_ext_clk_unprepare,
-};
-
-static const struct clk_ops audio_ext_ap_clk2_ops = {
- .prepare = audio_ext_clk2_prepare,
- .unprepare = audio_ext_clk2_unprepare,
-};
-
-static struct audio_ext_pmi_clk audio_pmi_clk = {
- .gpio = -EINVAL,
- .c = {
- .dbg_name = "audio_ext_pmi_clk",
- .ops = &clk_ops_dummy,
- CLK_INIT(audio_pmi_clk.c),
- },
-};
-
-static struct audio_ext_pmi_clk audio_pmi_lnbb_clk = {
- .gpio = -EINVAL,
- .c = {
- .dbg_name = "audio_ext_pmi_lnbb_clk",
- .ops = &clk_ops_dummy,
- CLK_INIT(audio_pmi_lnbb_clk.c),
- },
-};
-
-static struct audio_ext_ap_clk audio_ap_clk = {
- .gpio = -EINVAL,
- .c = {
- .dbg_name = "audio_ext_ap_clk",
- .ops = &audio_ext_ap_clk_ops,
- CLK_INIT(audio_ap_clk.c),
- },
-};
-
-static struct audio_ext_ap_clk2 audio_ap_clk2 = {
- .c = {
- .dbg_name = "audio_ext_ap_clk2",
- .ops = &audio_ext_ap_clk2_ops,
- CLK_INIT(audio_ap_clk2.c),
- },
-};
-
-static struct clk_lookup audio_ref_clock[] = {
- CLK_LIST(audio_ap_clk),
- CLK_LIST(audio_pmi_clk),
- CLK_LIST(audio_pmi_lnbb_clk),
- CLK_LIST(audio_ap_clk2),
-};
-
-static int audio_get_pinctrl(struct platform_device *pdev)
-{
- struct pinctrl_info *pnctrl_info;
- struct pinctrl *pinctrl;
- int ret;
-
- pnctrl_info = &audio_ap_clk2.pnctrl_info;
-
- if (pnctrl_info->pinctrl) {
- dev_dbg(&pdev->dev, "%s: already requested before\n",
- __func__);
- return -EINVAL;
- }
-
- pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR_OR_NULL(pinctrl)) {
- dev_dbg(&pdev->dev, "%s: Unable to get pinctrl handle\n",
- __func__);
- return -EINVAL;
- }
- pnctrl_info->pinctrl = pinctrl;
- /* get all state handles from Device Tree */
- pnctrl_info->sleep = pinctrl_lookup_state(pinctrl, "sleep");
- if (IS_ERR(pnctrl_info->sleep)) {
- dev_err(&pdev->dev, "%s: could not get sleep pinstate\n",
- __func__);
- goto err;
- }
- pnctrl_info->active = pinctrl_lookup_state(pinctrl, "active");
- if (IS_ERR(pnctrl_info->active)) {
- dev_err(&pdev->dev, "%s: could not get active pinstate\n",
- __func__);
- goto err;
- }
- /* Reset the TLMM pins to a default state */
- ret = pinctrl_select_state(pnctrl_info->pinctrl,
- pnctrl_info->sleep);
- if (ret) {
- dev_err(&pdev->dev, "%s: Disable TLMM pins failed with %d\n",
- __func__, ret);
- goto err;
- }
- return 0;
-
-err:
- devm_pinctrl_put(pnctrl_info->pinctrl);
- return -EINVAL;
-}
-
-static int audio_ref_clk_probe(struct platform_device *pdev)
-{
- int clk_gpio;
- int ret;
- struct clk *audio_clk;
-
- clk_gpio = of_get_named_gpio(pdev->dev.of_node,
- "qcom,audio-ref-clk-gpio", 0);
- if (clk_gpio > 0) {
- ret = gpio_request(clk_gpio, "EXT_CLK");
- if (ret) {
- dev_err(&pdev->dev,
- "Request ext clk gpio failed %d, err:%d\n",
- clk_gpio, ret);
- goto err;
- }
- if (of_property_read_bool(pdev->dev.of_node,
- "qcom,node_has_rpm_clock")) {
- audio_clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(audio_clk)) {
- dev_err(&pdev->dev, "Failed to get RPM div clk\n");
- ret = PTR_ERR(audio_clk);
- goto err_gpio;
- }
- audio_pmi_clk.c.parent = audio_clk;
- audio_pmi_clk.gpio = clk_gpio;
- } else
- audio_ap_clk.gpio = clk_gpio;
-
- } else {
- if (of_property_read_bool(pdev->dev.of_node,
- "qcom,node_has_rpm_clock")) {
- audio_clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(audio_clk)) {
- dev_err(&pdev->dev, "Failed to get lnbbclk2\n");
- ret = PTR_ERR(audio_clk);
- goto err;
- }
- audio_pmi_lnbb_clk.c.parent = audio_clk;
- audio_pmi_lnbb_clk.gpio = -EINVAL;
- }
- }
-
- ret = audio_get_pinctrl(pdev);
- if (ret)
- dev_dbg(&pdev->dev, "%s: Parsing pinctrl failed\n",
- __func__);
-
- ret = of_msm_clock_register(pdev->dev.of_node, audio_ref_clock,
- ARRAY_SIZE(audio_ref_clock));
- if (ret) {
- dev_err(&pdev->dev, "%s: audio ref clock register failed\n",
- __func__);
- goto err_gpio;
- }
-
- return 0;
-
-err_gpio:
- gpio_free(clk_gpio);
-
-err:
- return ret;
-}
-
-static int audio_ref_clk_remove(struct platform_device *pdev)
-{
- struct pinctrl_info *pnctrl_info = &audio_ap_clk2.pnctrl_info;
-
- if (audio_pmi_clk.gpio > 0)
- gpio_free(audio_pmi_clk.gpio);
- else if (audio_ap_clk.gpio > 0)
- gpio_free(audio_ap_clk.gpio);
-
- if (pnctrl_info->pinctrl) {
- devm_pinctrl_put(pnctrl_info->pinctrl);
- pnctrl_info->pinctrl = NULL;
- }
-
- return 0;
-}
-
-static const struct of_device_id audio_ref_clk_match[] = {
- {.compatible = "qcom,audio-ref-clk"},
- {}
-};
-MODULE_DEVICE_TABLE(of, audio_ref_clk_match);
-
-static struct platform_driver audio_ref_clk_driver = {
- .driver = {
- .name = "audio-ref-clk",
- .owner = THIS_MODULE,
- .of_match_table = audio_ref_clk_match,
- },
- .probe = audio_ref_clk_probe,
- .remove = audio_ref_clk_remove,
-};
-
-int audio_ref_clk_platform_init(void)
-{
- return platform_driver_register(&audio_ref_clk_driver);
-}
-
-void audio_ref_clk_platform_exit(void)
-{
- platform_driver_unregister(&audio_ref_clk_driver);
-}
-
-MODULE_DESCRIPTION("Audio Ref Clock module platform driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index cfe42e0..34227a0 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -937,9 +937,8 @@
static int msm_sdw_vi_feed_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct msm_sdw_priv *msm_sdw_p = snd_soc_codec_get_drvdata(codec);
@@ -951,9 +950,8 @@
static int msm_sdw_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct msm_sdw_priv *msm_sdw_p = snd_soc_codec_get_drvdata(codec);
struct soc_multi_mixer_control *mixer =
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 9c365a7..7899a2c 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1108,6 +1108,13 @@
DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
}
},
+ {
+ .ident = "Thinkpad Helix 2nd",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd")
+ }
+ },
{ }
};
diff --git a/sound/soc/codecs/sdm660_cdc/Kconfig b/sound/soc/codecs/sdm660_cdc/Kconfig
index 2f36c39..e618258 100644
--- a/sound/soc/codecs/sdm660_cdc/Kconfig
+++ b/sound/soc/codecs/sdm660_cdc/Kconfig
@@ -1,5 +1,4 @@
config SND_SOC_SDM660_CDC
tristate "MSM Internal PMIC based codec"
- select SND_SOC_WCD_MBHC
select SND_SOC_WCD_MBHC_LEGACY
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 7892f61..f126d35 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -2055,6 +2055,9 @@
"ZERO", "RX2", "RX1"
};
+static const struct snd_kcontrol_new adc1_switch =
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
static const struct soc_enum rdac2_mux_enum =
SOC_ENUM_SINGLE(MSM89XX_PMIC_DIGITAL_CDC_CONN_HPHR_DAC_CTL,
0, 3, rdac2_mux_text);
@@ -3105,7 +3108,8 @@
{"ADC2 MUX", "INP2", "ADC2_INP2"},
{"ADC2 MUX", "INP3", "ADC2_INP3"},
- {"ADC1", NULL, "AMIC1"},
+ {"ADC1", NULL, "ADC1_INP1"},
+ {"ADC1_INP1", "Switch", "AMIC1"},
{"ADC2_INP2", NULL, "AMIC2"},
{"ADC2_INP3", NULL, "AMIC3"},
@@ -3446,6 +3450,8 @@
SND_SOC_DAPM_SPK("Ext Spk", msm_anlg_cdc_codec_enable_spk_ext_pa),
+ SND_SOC_DAPM_SWITCH("ADC1_INP1", SND_SOC_NOPM, 0, 0,
+ &adc1_switch),
SND_SOC_DAPM_SUPPLY("RX1 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("RX2 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index 68a1d8d..5e0a104 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -122,9 +122,7 @@
static int msm_dig_cdc_put_dec_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *w = wlist->widgets[0];
+ struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int dec_mux, decimator;
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index 3b2426d..eb67de9 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -329,6 +329,7 @@
/* Disable micbias, pullup & enable cs */
wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
mutex_unlock(&mbhc->hphl_pa_lock);
+ clear_bit(WCD_MBHC_ANC0_OFF_ACK, &mbhc->hph_anc_state);
break;
case WCD_EVENT_PRE_HPHR_PA_OFF:
mutex_lock(&mbhc->hphr_pa_lock);
@@ -346,6 +347,7 @@
/* Disable micbias, pullup & enable cs */
wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
mutex_unlock(&mbhc->hphr_pa_lock);
+ clear_bit(WCD_MBHC_ANC1_OFF_ACK, &mbhc->hph_anc_state);
break;
case WCD_EVENT_PRE_HPHL_PA_ON:
set_bit(WCD_MBHC_EVENT_PA_HPHL, &mbhc->event_state);
@@ -439,6 +441,25 @@
__func__);
usleep_range(wg_time * 1000, wg_time * 1000 + 50);
}
+
+ if (test_and_clear_bit(WCD_MBHC_ANC0_OFF_ACK,
+ &mbhc->hph_anc_state)) {
+ usleep_range(20000, 20100);
+ pr_debug("%s: HPHL ANC clear flag and enable ANC_EN\n",
+ __func__);
+ if (mbhc->mbhc_cb->update_anc_state)
+ mbhc->mbhc_cb->update_anc_state(mbhc->codec, true, 0);
+ }
+
+ if (test_and_clear_bit(WCD_MBHC_ANC1_OFF_ACK,
+ &mbhc->hph_anc_state)) {
+ usleep_range(20000, 20100);
+ pr_debug("%s: HPHR ANC clear flag and enable ANC_EN\n",
+ __func__);
+ if (mbhc->mbhc_cb->update_anc_state)
+ mbhc->mbhc_cb->update_anc_state(mbhc->codec, true, 1);
+ }
+
}
static bool wcd_mbhc_is_hph_pa_on(struct wcd_mbhc *mbhc)
@@ -471,6 +492,20 @@
}
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPH_PA_EN, 0);
usleep_range(wg_time * 1000, wg_time * 1000 + 50);
+
+
+ if (mbhc->mbhc_cb->is_anc_on && mbhc->mbhc_cb->is_anc_on(mbhc)) {
+ usleep_range(20000, 20100);
+ pr_debug("%s ANC is on, setting ANC_OFF_ACK\n", __func__);
+ set_bit(WCD_MBHC_ANC0_OFF_ACK, &mbhc->hph_anc_state);
+ set_bit(WCD_MBHC_ANC1_OFF_ACK, &mbhc->hph_anc_state);
+ if (mbhc->mbhc_cb->update_anc_state) {
+ mbhc->mbhc_cb->update_anc_state(mbhc->codec, false, 0);
+ mbhc->mbhc_cb->update_anc_state(mbhc->codec, false, 1);
+ } else {
+ pr_debug("%s ANC is off\n", __func__);
+ }
+ }
}
int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
@@ -581,7 +616,8 @@
jack_type == SND_JACK_LINEOUT) &&
(mbhc->hph_status && mbhc->hph_status != jack_type)) {
- if (mbhc->micbias_enable) {
+ if (mbhc->micbias_enable &&
+ mbhc->hph_status == SND_JACK_HEADSET) {
if (mbhc->mbhc_cb->mbhc_micbias_control)
mbhc->mbhc_cb->mbhc_micbias_control(
codec, MIC_BIAS_2,
@@ -1108,7 +1144,7 @@
* For ADC MBHC, ADC_COMPLETE interrupt will be generated
* in this case. So skip the check here.
*/
- if (!WCD_MBHC_DETECTION &&
+ if (mbhc->mbhc_detection_logic == WCD_DETECTION_LEGACY &&
mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
wcd_mbhc_find_plug_and_report(mbhc, MBHC_PLUG_TYPE_HEADSET);
goto exit;
@@ -1922,7 +1958,7 @@
init_waitqueue_head(&mbhc->wait_btn_press);
mutex_init(&mbhc->codec_resource_lock);
- switch (WCD_MBHC_DETECTION) {
+ switch (mbhc->mbhc_detection_logic) {
case WCD_DETECTION_LEGACY:
wcd_mbhc_legacy_init(mbhc);
break;
@@ -1931,7 +1967,7 @@
break;
default:
pr_err("%s: Unknown detection logic type %d\n",
- __func__, WCD_MBHC_DETECTION);
+ __func__, mbhc->mbhc_detection_logic);
break;
}
diff --git a/sound/soc/codecs/wcd-mbhc-v2.h b/sound/soc/codecs/wcd-mbhc-v2.h
index 7ed06c3..c8714fc 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.h
+++ b/sound/soc/codecs/wcd-mbhc-v2.h
@@ -157,12 +157,6 @@
WCD_DETECTION_ADC,
};
-#ifdef CONFIG_SND_SOC_WCD_MBHC_ADC
-#define WCD_MBHC_DETECTION WCD_DETECTION_ADC
-#else
-#define WCD_MBHC_DETECTION WCD_DETECTION_LEGACY
-#endif
-
enum wcd_mbhc_cs_mb_en_flag {
WCD_MBHC_EN_CS = 0,
WCD_MBHC_EN_MB,
@@ -240,6 +234,11 @@
WCD_MBHC_HPHR_PA_OFF_ACK,
};
+enum anc_ack_flags {
+ WCD_MBHC_ANC0_OFF_ACK = 0,
+ WCD_MBHC_ANC1_OFF_ACK,
+};
+
enum wcd_mbhc_btn_det_mem {
WCD_MBHC_BTN_DET_V_BTN_LOW,
WCD_MBHC_BTN_DET_V_BTN_HIGH
@@ -488,6 +487,9 @@
void (*hph_pull_down_ctrl)(struct snd_soc_codec *, bool);
void (*mbhc_moisture_config)(struct wcd_mbhc *);
bool (*hph_register_recovery)(struct wcd_mbhc *);
+ void (*update_anc_state)(struct snd_soc_codec *codec,
+ bool enable, int anc_num);
+ bool (*is_anc_on)(struct wcd_mbhc *mbhc);
};
struct wcd_mbhc_fn {
@@ -538,6 +540,7 @@
/* track PA/DAC state to sync with userspace */
unsigned long hph_pa_dac_state;
+ unsigned long hph_anc_state;
unsigned long event_state;
unsigned long jiffies_atreport;
@@ -565,6 +568,9 @@
struct mutex hphl_pa_lock;
struct mutex hphr_pa_lock;
+ /* Holds mbhc detection method - ADC/Legacy */
+ unsigned int mbhc_detection_logic;
+
unsigned long intr_status;
bool is_hph_ocp_pending;
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index f8cd9e2..a6a5350 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -2014,6 +2014,32 @@
tasha_mbhc_hph_l_pull_up_control(codec, mbhc->moist_iref);
}
+static void tasha_update_anc_state(struct snd_soc_codec *codec, bool enable,
+ int anc_num)
+{
+ if (enable)
+ snd_soc_update_bits(codec, WCD9335_CDC_RX1_RX_PATH_CFG0 +
+ (20 * anc_num), 0x10, 0x10);
+ else
+ snd_soc_update_bits(codec, WCD9335_CDC_RX1_RX_PATH_CFG0 +
+ (20 * anc_num), 0x10, 0x00);
+}
+
+static bool tasha_is_anc_on(struct wcd_mbhc *mbhc)
+{
+ bool anc_on = false;
+ u16 ancl, ancr;
+
+ ancl =
+ (snd_soc_read(mbhc->codec, WCD9335_CDC_RX1_RX_PATH_CFG0)) & 0x10;
+ ancr =
+ (snd_soc_read(mbhc->codec, WCD9335_CDC_RX2_RX_PATH_CFG0)) & 0x10;
+
+ anc_on = !!(ancl | ancr);
+
+ return anc_on;
+}
+
static const struct wcd_mbhc_cb mbhc_cb = {
.request_irq = tasha_mbhc_request_irq,
.irq_control = tasha_mbhc_irq_control,
@@ -2036,6 +2062,8 @@
.mbhc_gnd_det_ctrl = tasha_mbhc_gnd_det_ctrl,
.hph_pull_down_ctrl = tasha_mbhc_hph_pull_down_ctrl,
.mbhc_moisture_config = tasha_mbhc_moisture_config,
+ .update_anc_state = tasha_update_anc_state,
+ .is_anc_on = tasha_is_anc_on,
};
static int tasha_get_anc_slot(struct snd_kcontrol *kcontrol,
@@ -2233,9 +2261,8 @@
static int tasha_vi_feed_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
@@ -2247,9 +2274,8 @@
static int tasha_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = tasha_p->wcd9xxx;
@@ -2300,9 +2326,8 @@
static int slim_tx_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
@@ -2313,9 +2338,8 @@
static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
@@ -2408,9 +2432,8 @@
static int slim_rx_mux_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
@@ -2426,9 +2449,8 @@
static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
@@ -8477,9 +8499,8 @@
static int tasha_put_dec_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val;
@@ -8537,9 +8558,8 @@
static int tasha_int_dem_inp_mux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val;
@@ -8871,9 +8891,8 @@
static int tasha_codec_aif4_mixer_switch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
@@ -8890,9 +8909,8 @@
static int tasha_codec_aif4_mixer_switch_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
index a1a5e2d..ea19caa 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
@@ -829,6 +829,32 @@
return wcd934x_mbhc->is_hph_recover;
}
+static void tavil_update_anc_state(struct snd_soc_codec *codec, bool enable,
+ int anc_num)
+{
+ if (enable)
+ snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CFG0 +
+ (20 * anc_num), 0x10, 0x10);
+ else
+ snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CFG0 +
+ (20 * anc_num), 0x10, 0x00);
+}
+
+static bool tavil_is_anc_on(struct wcd_mbhc *mbhc)
+{
+ bool anc_on = false;
+ u16 ancl, ancr;
+
+ ancl =
+ (snd_soc_read(mbhc->codec, WCD934X_CDC_RX1_RX_PATH_CFG0)) & 0x10;
+ ancr =
+ (snd_soc_read(mbhc->codec, WCD934X_CDC_RX2_RX_PATH_CFG0)) & 0x10;
+
+ anc_on = !!(ancl | ancr);
+
+ return anc_on;
+}
+
static const struct wcd_mbhc_cb mbhc_cb = {
.request_irq = tavil_mbhc_request_irq,
.irq_control = tavil_mbhc_irq_control,
@@ -852,6 +878,8 @@
.hph_pull_down_ctrl = tavil_mbhc_hph_pull_down_ctrl,
.mbhc_moisture_config = tavil_mbhc_moisture_config,
.hph_register_recovery = tavil_hph_register_recovery,
+ .update_anc_state = tavil_update_anc_state,
+ .is_anc_on = tavil_is_anc_on,
};
static struct regulator *tavil_codec_find_ondemand_regulator(
@@ -998,19 +1026,26 @@
struct snd_soc_codec *codec)
{
int ret;
+ struct wcd_mbhc *wcd_mbhc;
if (!mbhc || !codec)
return -EINVAL;
- wcd_mbhc_deinit(&mbhc->wcd_mbhc);
- ret = wcd_mbhc_init(&mbhc->wcd_mbhc, codec, &mbhc_cb, &intr_ids,
+ wcd_mbhc = &mbhc->wcd_mbhc;
+ if (wcd_mbhc == NULL) {
+ pr_err("%s: wcd_mbhc is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ wcd_mbhc_deinit(wcd_mbhc);
+ ret = wcd_mbhc_init(wcd_mbhc, codec, &mbhc_cb, &intr_ids,
wcd_mbhc_registers, TAVIL_ZDET_SUPPORTED);
if (ret) {
dev_err(codec->dev, "%s: mbhc initialization failed\n",
__func__);
goto done;
}
- if (!WCD_MBHC_DETECTION) {
+ if (wcd_mbhc->mbhc_detection_logic == WCD_DETECTION_LEGACY) {
snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
}
@@ -1033,6 +1068,7 @@
{
struct regulator *supply;
struct wcd934x_mbhc *wcd934x_mbhc;
+ struct wcd_mbhc *wcd_mbhc;
int ret;
wcd934x_mbhc = devm_kzalloc(codec->dev, sizeof(struct wcd934x_mbhc),
@@ -1043,8 +1079,18 @@
wcd934x_mbhc->wcd9xxx = dev_get_drvdata(codec->dev->parent);
wcd934x_mbhc->fw_data = fw_data;
BLOCKING_INIT_NOTIFIER_HEAD(&wcd934x_mbhc->notifier);
+ wcd_mbhc = &wcd934x_mbhc->wcd_mbhc;
+ if (wcd_mbhc == NULL) {
+ pr_err("%s: wcd_mbhc is NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
- ret = wcd_mbhc_init(&wcd934x_mbhc->wcd_mbhc, codec, &mbhc_cb,
+
+ /* Setting default mbhc detection logic to ADC for Tavil */
+ wcd_mbhc->mbhc_detection_logic = WCD_DETECTION_ADC;
+
+ ret = wcd_mbhc_init(wcd_mbhc, codec, &mbhc_cb,
&intr_ids, wcd_mbhc_registers,
TAVIL_ZDET_SUPPORTED);
if (ret) {
@@ -1070,7 +1116,7 @@
snd_soc_add_codec_controls(codec, hph_type_detect_controls,
ARRAY_SIZE(hph_type_detect_controls));
- if (!WCD_MBHC_DETECTION) {
+ if (wcd_mbhc->mbhc_detection_logic == WCD_DETECTION_LEGACY) {
snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
}
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 3079cca..fe1ce45 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -1110,9 +1110,8 @@
static int tavil_vi_feed_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
@@ -1124,9 +1123,8 @@
static int tavil_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
@@ -1176,9 +1174,8 @@
static int slim_tx_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
@@ -1189,9 +1186,8 @@
static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
@@ -1271,9 +1267,8 @@
static int slim_rx_mux_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
@@ -1285,9 +1280,8 @@
static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
@@ -5929,9 +5923,8 @@
static int tavil_dec_enum_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val;
@@ -5989,9 +5982,8 @@
static int tavil_int_dem_inp_mux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val;
diff --git a/sound/soc/codecs/wsa881x-analog.c b/sound/soc/codecs/wsa881x-analog.c
deleted file mode 100644
index 4de9624..0000000
--- a/sound/soc/codecs/wsa881x-analog.c
+++ /dev/null
@@ -1,1446 +0,0 @@
-/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/printk.h>
-#include <linux/bitops.h>
-#include <linux/regulator/consumer.h>
-#include <linux/pm_runtime.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/tlv.h>
-#include <sound/q6afe-v2.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/regmap.h>
-#include "wsa881x-analog.h"
-#include "wsa881x-temp-sensor.h"
-#include "../msm/msm-audio-pinctrl.h"
-
-#define SPK_GAIN_12DB 4
-#define WIDGET_NAME_MAX_SIZE 80
-
-/*
- * Private data Structure for wsa881x. All parameters related to
- * WSA881X codec needs to be defined here.
- */
-struct wsa881x_pdata {
- struct regmap *regmap[2];
- struct i2c_client *client[2];
- struct snd_soc_codec *codec;
-
- /* track wsa881x status during probe */
- int status;
- bool boost_enable;
- bool visense_enable;
- int spk_pa_gain;
- struct i2c_msg xfer_msg[2];
- struct mutex xfer_lock;
- bool regmap_flag;
- bool wsa_active;
- int index;
- int (*enable_mclk)(struct snd_soc_card *, bool);
- struct wsa881x_tz_priv tz_pdata;
- int bg_cnt;
- int clk_cnt;
- int enable_cnt;
- int version;
- struct mutex bg_lock;
- struct mutex res_lock;
- struct delayed_work ocp_ctl_work;
-};
-
-enum {
- WSA881X_STATUS_PROBING,
- WSA881X_STATUS_I2C,
-};
-
-#define WSA881X_OCP_CTL_TIMER_SEC 2
-#define WSA881X_OCP_CTL_TEMP_CELSIUS 25
-#define WSA881X_OCP_CTL_POLL_TIMER_SEC 60
-
-static int wsa881x_ocp_poll_timer_sec = WSA881X_OCP_CTL_POLL_TIMER_SEC;
-module_param(wsa881x_ocp_poll_timer_sec, int, 0664);
-MODULE_PARM_DESC(wsa881x_ocp_poll_timer_sec, "timer for ocp ctl polling");
-
-static int32_t wsa881x_resource_acquire(struct snd_soc_codec *codec,
- bool enable);
-
-const char *wsa_tz_names[] = {"wsa881x.0e", "wsa881x.0f"};
-
-struct wsa881x_pdata wsa_pdata[MAX_WSA881X_DEVICE];
-
-static bool pinctrl_init;
-
-static int wsa881x_populate_dt_pdata(struct device *dev);
-static int wsa881x_reset(struct wsa881x_pdata *pdata, bool enable);
-static int wsa881x_startup(struct wsa881x_pdata *pdata);
-static int wsa881x_shutdown(struct wsa881x_pdata *pdata);
-
-static int delay_array_msec[] = {10, 20, 30, 40, 50};
-
-static int wsa881x_i2c_addr = -1;
-static int wsa881x_probing_count;
-static int wsa881x_presence_count;
-
-static const char * const wsa881x_spk_pa_gain_text[] = {
-"POS_13P5_DB", "POS_12_DB", "POS_10P5_DB", "POS_9_DB", "POS_7P5_DB",
-"POS_6_DB", "POS_4P5_DB", "POS_3_DB", "POS_1P5_DB", "POS_0_DB"};
-
-static const struct soc_enum wsa881x_spk_pa_gain_enum[] = {
- SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(wsa881x_spk_pa_gain_text),
- wsa881x_spk_pa_gain_text),
-};
-
-static int wsa881x_spk_pa_gain_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] = wsa881x->spk_pa_gain;
-
- dev_dbg(codec->dev, "%s: spk_pa_gain = %ld\n", __func__,
- ucontrol->value.integer.value[0]);
-
- return 0;
-}
-
-static int wsa881x_spk_pa_gain_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- if (ucontrol->value.integer.value[0] < 0 ||
- ucontrol->value.integer.value[0] > 0xC) {
- dev_err(codec->dev, "%s: Unsupported gain val %ld\n",
- __func__, ucontrol->value.integer.value[0]);
- return -EINVAL;
- }
- wsa881x->spk_pa_gain = ucontrol->value.integer.value[0];
- dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
-
- return 0;
-}
-
-static int get_i2c_wsa881x_device_index(u16 reg)
-{
- u16 mask = 0x0f00;
- int value = 0;
-
- value = ((reg & mask) >> 8) & 0x000f;
-
- switch (value) {
- case 0:
- return 0;
- case 1:
- return 1;
- default:
- break;
- }
- return -EINVAL;
-}
-
-static int wsa881x_i2c_write_device(struct wsa881x_pdata *wsa881x,
- unsigned int reg, unsigned int val)
-{
- int i = 0, rc = 0;
- int wsa881x_index;
- struct i2c_msg *msg;
- int ret = 0;
- int bytes = 1;
- u8 reg_addr = 0;
- u8 data[bytes + 1];
-
- wsa881x_index = get_i2c_wsa881x_device_index(reg);
- if (wsa881x_index < 0) {
- pr_err("%s:invalid register to write\n", __func__);
- return -EINVAL;
- }
- if (wsa881x->regmap_flag) {
- rc = regmap_write(wsa881x->regmap[wsa881x_index], reg, val);
- for (i = 0; rc && i < ARRAY_SIZE(delay_array_msec); i++) {
- pr_err("Failed writing reg=%u - retry(%d)\n", reg, i);
- /* retry after delay of increasing order */
- msleep(delay_array_msec[i]);
- rc = regmap_write(wsa881x->regmap[wsa881x_index],
- reg, val);
- }
- if (rc)
- pr_err("Failed writing reg=%u rc=%d\n", reg, rc);
- else
- pr_err("write success register = %x val = %x\n",
- reg, val);
- } else {
- reg_addr = (u8)reg;
- msg = &wsa881x->xfer_msg[0];
- msg->addr = wsa881x->client[wsa881x_index]->addr;
- msg->len = bytes + 1;
- msg->flags = 0;
- data[0] = reg;
- data[1] = (u8)val;
- msg->buf = data;
- ret = i2c_transfer(wsa881x->client[wsa881x_index]->adapter,
- wsa881x->xfer_msg, 1);
- /* Try again if the write fails */
- if (ret != 1) {
- ret = i2c_transfer(
- wsa881x->client[wsa881x_index]->adapter,
- wsa881x->xfer_msg, 1);
- if (ret != 1) {
- pr_err("failed to write the device\n");
- return ret;
- }
- }
- pr_debug("write success reg = %x val = %x\n", reg, data[1]);
- }
- return rc;
-}
-
-static int wsa881x_i2c_read_device(struct wsa881x_pdata *wsa881x,
- unsigned int reg)
-{
- int wsa881x_index;
- int i = 0, rc = 0;
- unsigned int val;
- struct i2c_msg *msg;
- int ret = 0;
- u8 reg_addr = 0;
- u8 dest[5];
-
- wsa881x_index = get_i2c_wsa881x_device_index(reg);
- if (wsa881x_index < 0) {
- pr_err("%s:invalid register to read\n", __func__);
- return -EINVAL;
- }
- if (wsa881x->regmap_flag) {
- rc = regmap_read(wsa881x->regmap[wsa881x_index], reg, &val);
- for (i = 0; rc && i < ARRAY_SIZE(delay_array_msec); i++) {
- pr_err("Failed reading reg=%u - retry(%d)\n", reg, i);
- /* retry after delay of increasing order */
- msleep(delay_array_msec[i]);
- rc = regmap_read(wsa881x->regmap[wsa881x_index],
- reg, &val);
- }
- if (rc) {
- pr_err("Failed reading reg=%u rc=%d\n", reg, rc);
- return rc;
- }
- pr_debug("read success reg = %x val = %x\n",
- reg, val);
- } else {
- reg_addr = (u8)reg;
- msg = &wsa881x->xfer_msg[0];
- msg->addr = wsa881x->client[wsa881x_index]->addr;
- msg->len = 1;
- msg->flags = 0;
- msg->buf = ®_addr;
-
- msg = &wsa881x->xfer_msg[1];
- msg->addr = wsa881x->client[wsa881x_index]->addr;
- msg->len = 1;
- msg->flags = I2C_M_RD;
- msg->buf = dest;
- ret = i2c_transfer(wsa881x->client[wsa881x_index]->adapter,
- wsa881x->xfer_msg, 2);
-
- /* Try again if read fails first time */
- if (ret != 2) {
- ret = i2c_transfer(
- wsa881x->client[wsa881x_index]->adapter,
- wsa881x->xfer_msg, 2);
- if (ret != 2) {
- pr_err("failed to read wsa register:%d\n",
- reg);
- return ret;
- }
- }
- val = dest[0];
- }
- return val;
-}
-
-static unsigned int wsa881x_i2c_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- struct wsa881x_pdata *wsa881x;
- unsigned int val;
- int ret;
-
- if (codec == NULL) {
- pr_err("%s: invalid codec\n", __func__);
- return -EINVAL;
- }
- wsa881x = snd_soc_codec_get_drvdata(codec);
- if (!wsa881x->wsa_active) {
- ret = snd_soc_cache_read(codec, reg, &val);
- if (ret >= 0)
- return val;
- dev_err(codec->dev,
- "cache read failed for reg: 0x%x ret: %d\n",
- reg, ret);
- return ret;
- }
- return wsa881x_i2c_read_device(wsa881x, reg);
-}
-
-static int wsa881x_i2c_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int val)
-{
- struct wsa881x_pdata *wsa881x;
- int ret = 0;
-
- if (codec == NULL) {
- pr_err("%s: invalid codec\n", __func__);
- return -EINVAL;
- }
- wsa881x = snd_soc_codec_get_drvdata(codec);
- if (!wsa881x->wsa_active) {
- ret = snd_soc_cache_write(codec, reg, val);
- if (ret != 0)
- dev_err(codec->dev, "cache write to %x failed: %d\n",
- reg, ret);
- return ret;
- }
- return wsa881x_i2c_write_device(wsa881x, reg, val);
-}
-
-static int wsa881x_i2c_get_client_index(struct i2c_client *client,
- int *wsa881x_index)
-{
- int ret = 0;
-
- switch (client->addr) {
- case WSA881X_I2C_SPK0_SLAVE0_ADDR:
- case WSA881X_I2C_SPK0_SLAVE1_ADDR:
- *wsa881x_index = WSA881X_I2C_SPK0_SLAVE0;
- break;
- case WSA881X_I2C_SPK1_SLAVE0_ADDR:
- case WSA881X_I2C_SPK1_SLAVE1_ADDR:
- *wsa881x_index = WSA881X_I2C_SPK1_SLAVE0;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int wsa881x_boost_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enable:%d\n", __func__, enable);
- if (enable) {
- if (!WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_ANA_CTL,
- 0x01, 0x01);
- snd_soc_update_bits(codec, WSA881X_ANA_CTL,
- 0x04, 0x04);
- snd_soc_update_bits(codec, WSA881X_BOOST_PS_CTL,
- 0x40, 0x00);
- snd_soc_update_bits(codec, WSA881X_BOOST_PRESET_OUT1,
- 0xF0, 0xB0);
- snd_soc_update_bits(codec, WSA881X_BOOST_ZX_CTL,
- 0x20, 0x00);
- snd_soc_update_bits(codec, WSA881X_BOOST_EN_CTL,
- 0x80, 0x80);
- } else {
- snd_soc_update_bits(codec, WSA881X_BOOST_LOOP_STABILITY,
- 0x03, 0x03);
- snd_soc_update_bits(codec, WSA881X_BOOST_MISC2_CTL,
- 0xFF, 0x14);
- snd_soc_update_bits(codec, WSA881X_BOOST_START_CTL,
- 0x80, 0x80);
- snd_soc_update_bits(codec, WSA881X_BOOST_START_CTL,
- 0x03, 0x00);
- snd_soc_update_bits(codec,
- WSA881X_BOOST_SLOPE_COMP_ISENSE_FB,
- 0x0C, 0x04);
- snd_soc_update_bits(codec,
- WSA881X_BOOST_SLOPE_COMP_ISENSE_FB,
- 0x03, 0x00);
- snd_soc_update_bits(codec, WSA881X_BOOST_PRESET_OUT1,
- 0xF0, 0x70);
- snd_soc_update_bits(codec, WSA881X_ANA_CTL, 0x03, 0x01);
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN,
- 0x08, 0x08);
- snd_soc_update_bits(codec, WSA881X_ANA_CTL, 0x04, 0x04);
- snd_soc_update_bits(codec, WSA881X_BOOST_CURRENT_LIMIT,
- 0x0F, 0x08);
- snd_soc_update_bits(codec, WSA881X_BOOST_EN_CTL,
- 0x80, 0x80);
- }
- /* For WSA8810, start-up time is 1500us as per qcrg sequence */
- usleep_range(1500, 1510);
- } else {
- /* ENSURE: Class-D amp is shutdown. CLK is still on */
- snd_soc_update_bits(codec, WSA881X_BOOST_EN_CTL, 0x80, 0x00);
- /* boost settle time is 1500us as per qcrg sequence */
- usleep_range(1500, 1510);
- }
- return 0;
-}
-
-static int wsa881x_visense_txfe_ctrl(struct snd_soc_codec *codec, bool enable,
- u8 isense1_gain, u8 isense2_gain,
- u8 vsense_gain)
-{
- u8 value = 0;
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enable:%d\n", __func__, enable);
-
- if (enable) {
- if (WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_OTP_REG_28,
- 0x3F, 0x3A);
- snd_soc_update_bits(codec, WSA881X_BONGO_RESRV_REG1,
- 0xFF, 0xB2);
- snd_soc_update_bits(codec, WSA881X_BONGO_RESRV_REG2,
- 0xFF, 0x05);
- }
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_FE_VSENSE_VCM,
- 0x08, 0x00);
- if (WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_ATEST2,
- 0x1C, 0x04);
- } else {
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_ATEST2,
- 0x08, 0x08);
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_ATEST2,
- 0x02, 0x02);
- }
- value = ((isense2_gain << 6) | (isense1_gain << 4) |
- (vsense_gain << 3));
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_FE_GAIN,
- 0xF8, value);
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_FE_GAIN,
- 0x01, 0x01);
- } else {
- if (WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec,
- WSA881X_SPKR_PROT_FE_VSENSE_VCM, 0x10, 0x10);
- else
- snd_soc_update_bits(codec,
- WSA881X_SPKR_PROT_FE_VSENSE_VCM, 0x08, 0x08);
- /*
- * 200us sleep is needed after visense txfe disable as per
- * HW requirement.
- */
- usleep_range(200, 210);
-
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_FE_GAIN,
- 0x01, 0x00);
- }
- return 0;
-}
-
-static int wsa881x_visense_adc_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enable:%d\n", __func__, enable);
- if (enable) {
- if (!WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec, WSA881X_ADC_SEL_IBIAS,
- 0x70, 0x40);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_SEL_IBIAS,
- 0x07, 0x04);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_V, 0x80, 0x80);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_I, 0x80, 0x80);
- } else {
- /* Ensure: Speaker Protection has been stopped */
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_V, 0x80, 0x00);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_I, 0x80, 0x00);
- }
-
- return 0;
-}
-
-static void wsa881x_bandgap_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- dev_dbg(codec->dev, "%s: enable:%d, bg_count:%d\n", __func__,
- enable, wsa881x->bg_cnt);
- mutex_lock(&wsa881x->bg_lock);
- if (enable) {
- ++wsa881x->bg_cnt;
- if (wsa881x->bg_cnt == 1) {
- snd_soc_update_bits(codec, WSA881X_TEMP_OP,
- 0x08, 0x08);
- /* 400usec sleep is needed as per HW requirement */
- usleep_range(400, 410);
- snd_soc_update_bits(codec, WSA881X_TEMP_OP, 0x04, 0x04);
- }
- } else {
- --wsa881x->bg_cnt;
- if (wsa881x->bg_cnt <= 0) {
- WARN_ON(wsa881x->bg_cnt < 0);
- wsa881x->bg_cnt = 0;
- snd_soc_update_bits(codec, WSA881X_TEMP_OP, 0x04, 0x00);
- snd_soc_update_bits(codec, WSA881X_TEMP_OP, 0x08, 0x00);
- }
- }
- mutex_unlock(&wsa881x->bg_lock);
-}
-
-static void wsa881x_clk_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- dev_dbg(codec->dev, "%s:ss enable:%d, clk_count:%d\n", __func__,
- enable, wsa881x->clk_cnt);
- mutex_lock(&wsa881x->res_lock);
- if (enable) {
- ++wsa881x->clk_cnt;
- if (wsa881x->clk_cnt == 1) {
- snd_soc_write(codec, WSA881X_CDC_RST_CTL, 0x02);
- snd_soc_write(codec, WSA881X_CDC_RST_CTL, 0x03);
- snd_soc_write(codec, WSA881X_CLOCK_CONFIG, 0x01);
- snd_soc_write(codec, WSA881X_CDC_DIG_CLK_CTL, 0x01);
- snd_soc_write(codec, WSA881X_CDC_ANA_CLK_CTL, 0x01);
- }
- } else {
- --wsa881x->clk_cnt;
- if (wsa881x->clk_cnt <= 0) {
- WARN_ON(wsa881x->clk_cnt < 0);
- wsa881x->clk_cnt = 0;
- snd_soc_write(codec, WSA881X_CDC_ANA_CLK_CTL, 0x00);
- snd_soc_write(codec, WSA881X_CDC_DIG_CLK_CTL, 0x00);
- if (WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec,
- WSA881X_CDC_TOP_CLK_CTL, 0x01, 0x00);
- }
- }
- mutex_unlock(&wsa881x->res_lock);
-}
-
-static int wsa881x_rdac_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enable:%d\n", __func__, enable);
- if (enable) {
- snd_soc_update_bits(codec, WSA881X_ANA_CTL, 0x08, 0x00);
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_GAIN, 0x08, 0x08);
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x20, 0x20);
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x20, 0x00);
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x40, 0x40);
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x80, 0x80);
- if (WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_CAL,
- 0x01, 0x01);
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL,
- 0x30, 0x30);
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL,
- 0x0C, 0x00);
- }
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_GAIN, 0xF0, 0x40);
- snd_soc_update_bits(codec, WSA881X_SPKR_MISC_CTL1, 0x01, 0x01);
- } else {
- /* Ensure class-D amp is off */
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x80, 0x00);
- }
- return 0;
-}
-
-static int wsa881x_spkr_pa_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- int ret = 0;
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enable:%d\n", __func__, enable);
- if (enable) {
- /*
- * Ensure: Boost is enabled and stable, Analog input is up
- * and outputting silence
- */
- if (!WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_ADC_EN_DET_TEST_I,
- 0xFF, 0x01);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_V,
- 0x02, 0x02);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_DET_TEST_V,
- 0xFF, 0x10);
- snd_soc_update_bits(codec, WSA881X_SPKR_PWRSTG_DBG,
- 0xA0, 0xA0);
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN,
- 0x80, 0x80);
- usleep_range(700, 710);
- snd_soc_update_bits(codec, WSA881X_SPKR_PWRSTG_DBG,
- 0x00, 0x00);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_DET_TEST_V,
- 0xFF, 0x00);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_V,
- 0x02, 0x00);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_DET_TEST_I,
- 0xFF, 0x00);
- } else
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN,
- 0x80, 0x80);
- /* add 1000us delay as per qcrg */
- usleep_range(1000, 1010);
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN, 0x01, 0x01);
- if (WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_CAL,
- 0x01, 0x00);
- usleep_range(1000, 1010);
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_GAIN, 0xF0,
- (wsa881x->spk_pa_gain << 4));
- if (wsa881x->visense_enable) {
- ret = msm_gpioset_activate(CLIENT_WSA_BONGO_1,
- "wsa_vi");
- if (ret) {
- pr_err("%s: gpio set cannot be activated %s\n",
- __func__, "wsa_vi");
- return ret;
- }
- wsa881x_visense_txfe_ctrl(codec, true,
- 0x00, 0x01, 0x00);
- wsa881x_visense_adc_ctrl(codec, true);
- }
- } else {
- /*
- * Ensure: Boost is still on, Stream from Analog input and
- * Speaker Protection has been stopped and input is at 0V
- */
- if (WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_CAL,
- 0x01, 0x01);
- usleep_range(1000, 1010);
- snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_CAL,
- 0x01, 0x00);
- msleep(20);
- snd_soc_update_bits(codec, WSA881X_ANA_CTL,
- 0x03, 0x00);
- usleep_range(200, 210);
- }
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN, 0x80, 0x00);
- }
- return 0;
-}
-
-static int wsa881x_get_boost(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
-
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] = wsa881x->boost_enable;
- return 0;
-}
-
-static int wsa881x_set_boost(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
- int value = ucontrol->value.integer.value[0];
-
- dev_dbg(codec->dev, "%s: Boost enable current %d, new %d\n",
- __func__, wsa881x->boost_enable, value);
- wsa881x->boost_enable = value;
- return 0;
-}
-
-static int wsa881x_get_visense(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
-
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] = wsa881x->visense_enable;
- return 0;
-}
-
-static int wsa881x_set_visense(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
- int value = ucontrol->value.integer.value[0];
-
- dev_dbg(codec->dev, "%s: VIsense enable current %d, new %d\n",
- __func__, wsa881x->visense_enable, value);
- wsa881x->visense_enable = value;
- return 0;
-}
-
-static const struct snd_kcontrol_new wsa881x_snd_controls[] = {
- SOC_SINGLE_EXT("BOOST Switch", SND_SOC_NOPM, 0, 1, 0,
- wsa881x_get_boost, wsa881x_set_boost),
-
- SOC_SINGLE_EXT("VISENSE Switch", SND_SOC_NOPM, 0, 1, 0,
- wsa881x_get_visense, wsa881x_set_visense),
-
- SOC_ENUM_EXT("WSA_SPK PA Gain", wsa881x_spk_pa_gain_enum[0],
- wsa881x_spk_pa_gain_get, wsa881x_spk_pa_gain_put),
-};
-
-static const char * const rdac_text[] = {
- "ZERO", "Switch",
-};
-
-static const struct soc_enum rdac_enum =
- SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(rdac_text), rdac_text);
-
-static const struct snd_kcontrol_new rdac_mux[] = {
- SOC_DAPM_ENUM("RDAC", rdac_enum)
-};
-
-static int wsa881x_rdac_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = w->codec;
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
-
- dev_dbg(codec->dev, "%s: %s %d boost %d visense %d\n",
- __func__, w->name, event,
- wsa881x->boost_enable, wsa881x->visense_enable);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- ret = wsa881x_startup(wsa881x);
- if (ret) {
- pr_err("%s: wsa startup failed ret: %d", __func__, ret);
- return ret;
- }
- wsa881x_clk_ctrl(codec, true);
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x02, 0x02);
- if (!WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec, WSA881X_BIAS_REF_CTRL,
- 0x0F, 0x08);
- wsa881x_bandgap_ctrl(codec, true);
- if (!WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec, WSA881X_SPKR_BBM_CTL,
- 0x02, 0x02);
- snd_soc_update_bits(codec, WSA881X_SPKR_MISC_CTL1, 0xC0, 0x80);
- snd_soc_update_bits(codec, WSA881X_SPKR_MISC_CTL1, 0x06, 0x06);
- if (!WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_SPKR_MISC_CTL2,
- 0x04, 0x04);
- snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_INT,
- 0x09, 0x09);
- }
- snd_soc_update_bits(codec, WSA881X_SPKR_PA_INT, 0xF0, 0x20);
- if (WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec, WSA881X_SPKR_PA_INT,
- 0x0E, 0x0E);
- if (wsa881x->boost_enable)
- wsa881x_boost_ctrl(codec, true);
- break;
- case SND_SOC_DAPM_POST_PMU:
- wsa881x_rdac_ctrl(codec, true);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- wsa881x_rdac_ctrl(codec, false);
- if (wsa881x->visense_enable) {
- wsa881x_visense_adc_ctrl(codec, false);
- wsa881x_visense_txfe_ctrl(codec, false,
- 0x00, 0x01, 0x00);
- ret = msm_gpioset_suspend(CLIENT_WSA_BONGO_1,
- "wsa_vi");
- if (ret) {
- pr_err("%s: gpio set cannot be suspended %s\n",
- __func__, "wsa_vi");
- return ret;
- }
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (wsa881x->boost_enable)
- wsa881x_boost_ctrl(codec, false);
- wsa881x_clk_ctrl(codec, false);
- wsa881x_bandgap_ctrl(codec, false);
- ret = wsa881x_shutdown(wsa881x);
- if (ret < 0) {
- pr_err("%s: wsa shutdown failed ret: %d",
- __func__, ret);
- return ret;
- }
- break;
- default:
- pr_err("%s: invalid event:%d\n", __func__, event);
- return -EINVAL;
- }
- return 0;
-}
-
-static void wsa881x_ocp_ctl_work(struct work_struct *work)
-{
- struct wsa881x_pdata *wsa881x;
- struct delayed_work *dwork;
- struct snd_soc_codec *codec;
- unsigned long temp_val;
-
- dwork = to_delayed_work(work);
- wsa881x = container_of(dwork, struct wsa881x_pdata, ocp_ctl_work);
-
- if (!wsa881x)
- return;
-
- codec = wsa881x->codec;
- wsa881x_get_temp(wsa881x->tz_pdata.tz_dev, &temp_val);
- dev_dbg(codec->dev, " temp = %ld\n", temp_val);
-
- if (temp_val <= WSA881X_OCP_CTL_TEMP_CELSIUS)
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0x00);
- else
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0xC0);
-
- schedule_delayed_work(&wsa881x->ocp_ctl_work,
- msecs_to_jiffies(wsa881x_ocp_poll_timer_sec * 1000));
-}
-
-static int wsa881x_spkr_pa_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = w->codec;
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: %s %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0x80);
- break;
- case SND_SOC_DAPM_POST_PMU:
- wsa881x_spkr_pa_ctrl(codec, true);
- schedule_delayed_work(&wsa881x->ocp_ctl_work,
- msecs_to_jiffies(WSA881X_OCP_CTL_TIMER_SEC * 1000));
- break;
- case SND_SOC_DAPM_PRE_PMD:
- wsa881x_spkr_pa_ctrl(codec, false);
- break;
- case SND_SOC_DAPM_POST_PMD:
- cancel_delayed_work_sync(&wsa881x->ocp_ctl_work);
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0xC0);
- break;
- default:
- pr_err("%s: invalid event:%d\n", __func__, event);
- return -EINVAL;
- }
- return 0;
-}
-
-
-static const struct snd_soc_dapm_widget wsa881x_dapm_widgets[] = {
- SND_SOC_DAPM_INPUT("WSA_IN"),
-
- SND_SOC_DAPM_DAC_E("RDAC Analog", NULL, SND_SOC_NOPM, 0, 0,
- wsa881x_rdac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX("WSA_RDAC", SND_SOC_NOPM, 0, 0,
- rdac_mux),
-
- SND_SOC_DAPM_PGA_S("WSA_SPKR PGA", 1, SND_SOC_NOPM, 0, 0,
- wsa881x_spkr_pa_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_OUTPUT("WSA_SPKR"),
-};
-
-static const struct snd_soc_dapm_route wsa881x_audio_map[] = {
- {"WSA_RDAC", "Switch", "WSA_IN"},
- {"RDAC Analog", NULL, "WSA_RDAC"},
- {"WSA_SPKR PGA", NULL, "RDAC Analog"},
- {"WSA_SPKR", NULL, "WSA_SPKR PGA"},
-};
-
-
-static int wsa881x_startup(struct wsa881x_pdata *pdata)
-{
- int ret = 0;
- struct snd_soc_codec *codec = pdata->codec;
- struct snd_soc_card *card = codec->component.card;
-
- pr_debug("%s(): wsa startup, enable_cnt:%d\n", __func__,
- pdata->enable_cnt);
-
- if (pdata->enable_cnt++ > 0)
- return 0;
- ret = msm_gpioset_activate(CLIENT_WSA_BONGO_1, "wsa_clk");
- if (ret) {
- pr_err("%s: gpio set cannot be activated %s\n",
- __func__, "wsa_clk");
- return ret;
- }
- if (pdata->enable_mclk) {
- ret = pdata->enable_mclk(card, true);
- if (ret < 0) {
- dev_err_ratelimited(codec->dev,
- "%s: mclk enable failed %d\n",
- __func__, ret);
- return ret;
- }
- }
- ret = wsa881x_reset(pdata, true);
- return ret;
-}
-
-static int wsa881x_shutdown(struct wsa881x_pdata *pdata)
-{
- int ret = 0, reg;
- struct snd_soc_codec *codec = pdata->codec;
- struct snd_soc_card *card = codec->component.card;
-
- pr_debug("%s(): wsa shutdown, enable_cnt:%d\n", __func__,
- pdata->enable_cnt);
- if (--pdata->enable_cnt > 0)
- return 0;
- ret = wsa881x_reset(pdata, false);
- if (ret) {
- pr_err("%s: wsa reset failed suspend %d\n",
- __func__, ret);
- return ret;
- }
-
- if (pdata->enable_mclk) {
- ret = pdata->enable_mclk(card, false);
- if (ret < 0) {
- pr_err("%s: mclk disable failed %d\n",
- __func__, ret);
- return ret;
- }
- }
-
- ret = msm_gpioset_suspend(CLIENT_WSA_BONGO_1, "wsa_clk");
- if (ret) {
- pr_err("%s: gpio set cannot be suspended %s\n",
- __func__, "wsa_clk");
- return ret;
- }
- if (pdata->codec) {
- /* restore defaults to cache */
- for (reg = 0; reg < ARRAY_SIZE(wsa881x_ana_reg_defaults);
- reg++) {
- if (wsa881x_ana_reg_readable[reg])
- snd_soc_cache_write(pdata->codec,
- wsa881x_ana_reg_defaults[reg].reg,
- wsa881x_ana_reg_defaults[reg].def);
- }
- }
- return 0;
-}
-
-static int32_t wsa881x_resource_acquire(struct snd_soc_codec *codec,
- bool enable)
-{
- int ret = 0;
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- if (enable) {
- ret = wsa881x_startup(wsa881x);
- if (ret < 0) {
- dev_err_ratelimited(codec->dev,
- "%s: failed to startup\n", __func__);
- return ret;
- }
- }
- wsa881x_clk_ctrl(codec, enable);
- wsa881x_bandgap_ctrl(codec, enable);
- if (!enable) {
- ret = wsa881x_shutdown(wsa881x);
- if (ret < 0)
- dev_err_ratelimited(codec->dev,
- "%s: failed to shutdown\n", __func__);
- }
- return ret;
-}
-
-static int32_t wsa881x_temp_reg_read(struct snd_soc_codec *codec,
- struct wsa_temp_register *wsa_temp_reg)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
-
- if (!wsa881x) {
- dev_err(codec->dev, "%s: wsa881x is NULL\n", __func__);
- return -EINVAL;
- }
- ret = wsa881x_resource_acquire(codec, true);
- if (ret) {
- dev_err_ratelimited(codec->dev,
- "%s: resource acquire fail\n", __func__);
- return ret;
- }
-
- if (WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_TADC_VALUE_CTL, 0x01, 0x00);
- wsa_temp_reg->dmeas_msb = snd_soc_read(codec, WSA881X_TEMP_MSB);
- wsa_temp_reg->dmeas_lsb = snd_soc_read(codec, WSA881X_TEMP_LSB);
- snd_soc_update_bits(codec, WSA881X_TADC_VALUE_CTL, 0x01, 0x01);
- } else {
- wsa_temp_reg->dmeas_msb = snd_soc_read(codec,
- WSA881X_TEMP_DOUT_MSB);
- wsa_temp_reg->dmeas_lsb = snd_soc_read(codec,
- WSA881X_TEMP_DOUT_LSB);
- }
- wsa_temp_reg->d1_msb = snd_soc_read(codec, WSA881X_OTP_REG_1);
- wsa_temp_reg->d1_lsb = snd_soc_read(codec, WSA881X_OTP_REG_2);
- wsa_temp_reg->d2_msb = snd_soc_read(codec, WSA881X_OTP_REG_3);
- wsa_temp_reg->d2_lsb = snd_soc_read(codec, WSA881X_OTP_REG_4);
-
- ret = wsa881x_resource_acquire(codec, false);
- if (ret)
- dev_err_ratelimited(codec->dev,
- "%s: resource release fail\n", __func__);
-
- return ret;
-}
-
-static int wsa881x_probe(struct snd_soc_codec *codec)
-{
- struct i2c_client *client;
- int ret = 0;
- int wsa881x_index = 0;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
- char *widget_name = NULL;
- struct snd_soc_card *card = codec->component.card;
- struct snd_soc_codec_conf *codec_conf = card->codec_conf;
-
- client = dev_get_drvdata(codec->dev);
- ret = wsa881x_i2c_get_client_index(client, &wsa881x_index);
- if (ret != 0) {
- dev_err(&client->dev, "%s: I2C get codec I2C\n"
- "client failed\n", __func__);
- return ret;
- }
- mutex_init(&wsa_pdata[wsa881x_index].bg_lock);
- mutex_init(&wsa_pdata[wsa881x_index].res_lock);
- snprintf(wsa_pdata[wsa881x_index].tz_pdata.name, 100, "%s",
- wsa_tz_names[wsa881x_index]);
- wsa_pdata[wsa881x_index].codec = codec;
- wsa_pdata[wsa881x_index].spk_pa_gain = SPK_GAIN_12DB;
- wsa_pdata[wsa881x_index].codec = codec;
- wsa_pdata[wsa881x_index].tz_pdata.codec = codec;
- wsa_pdata[wsa881x_index].tz_pdata.wsa_temp_reg_read =
- wsa881x_temp_reg_read;
- snd_soc_codec_set_drvdata(codec, &wsa_pdata[wsa881x_index]);
- wsa881x_init_thermal(&wsa_pdata[wsa881x_index].tz_pdata);
- INIT_DELAYED_WORK(&wsa_pdata[wsa881x_index].ocp_ctl_work,
- wsa881x_ocp_ctl_work);
-
- if (codec_conf->name_prefix) {
- widget_name = kcalloc(WIDGET_NAME_MAX_SIZE, sizeof(char),
- GFP_KERNEL);
- if (!widget_name)
- return -ENOMEM;
-
- snprintf(widget_name, WIDGET_NAME_MAX_SIZE,
- "%s WSA_SPKR", codec_conf->name_prefix);
- snd_soc_dapm_ignore_suspend(dapm, widget_name);
- snprintf(widget_name, WIDGET_NAME_MAX_SIZE,
- "%s WSA_IN", codec_conf->name_prefix);
- snd_soc_dapm_ignore_suspend(dapm, widget_name);
- kfree(widget_name);
- } else {
- snd_soc_dapm_ignore_suspend(dapm, "WSA_SPKR");
- snd_soc_dapm_ignore_suspend(dapm, "WSA_IN");
- }
-
- snd_soc_dapm_sync(dapm);
- return 0;
-}
-
-static int wsa881x_remove(struct snd_soc_codec *codec)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- if (wsa881x->tz_pdata.tz_dev)
- wsa881x_deinit_thermal(wsa881x->tz_pdata.tz_dev);
-
- mutex_destroy(&wsa881x->bg_lock);
- mutex_destroy(&wsa881x->res_lock);
- return 0;
-}
-
-static struct snd_soc_codec_driver soc_codec_dev_wsa881x = {
- .probe = wsa881x_probe,
- .remove = wsa881x_remove,
-
- .read = wsa881x_i2c_read,
- .write = wsa881x_i2c_write,
-
- .reg_cache_size = WSA881X_CACHE_SIZE,
- .reg_cache_default = wsa881x_ana_reg_defaults,
- .reg_word_size = 1,
-
- .component_driver = {
- .controls = wsa881x_snd_controls,
- .num_controls = ARRAY_SIZE(wsa881x_snd_controls),
- .dapm_widgets = wsa881x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wsa881x_dapm_widgets),
- .dapm_routes = wsa881x_audio_map,
- .num_dapm_routes = ARRAY_SIZE(wsa881x_audio_map),
- },
-};
-
-static int wsa881x_reset(struct wsa881x_pdata *pdata, bool enable)
-{
- int ret = 0;
-
- /*
- * shutdown the GPIOs WSA_EN, WSA_MCLK, regulators
- * and restore defaults in soc cache when shutdown.
- * Enable regulators, GPIOs WSA_MCLK, WSA_EN when powerup.
- */
- if (enable) {
- if (pdata->wsa_active)
- return 0;
- ret = msm_gpioset_activate(CLIENT_WSA_BONGO_1, "wsa_reset");
- if (ret) {
- pr_err("%s: gpio set cannot be activated %s\n",
- __func__, "wsa_reset");
- return ret;
- }
- ret = msm_gpioset_suspend(CLIENT_WSA_BONGO_1, "wsa_reset");
- if (ret) {
- pr_err("%s: gpio set cannot be suspended(powerup) %s\n",
- __func__, "wsa_reset");
- return ret;
- }
- ret = msm_gpioset_activate(CLIENT_WSA_BONGO_1, "wsa_reset");
- if (ret) {
- pr_err("%s: gpio set cannot be activated %s\n",
- __func__, "wsa_reset");
- return ret;
- }
- pdata->wsa_active = true;
- } else {
- if (!pdata->wsa_active)
- return 0;
- ret = msm_gpioset_suspend(CLIENT_WSA_BONGO_1, "wsa_reset");
- if (ret) {
- pr_err("%s: gpio set cannot be suspended %s\n",
- __func__, "wsa_reset");
- return ret;
- }
- pdata->wsa_active = false;
- }
- return ret;
-}
-
-int wsa881x_get_client_index(void)
-{
- return wsa881x_i2c_addr;
-}
-EXPORT_SYMBOL(wsa881x_get_client_index);
-
-int wsa881x_get_probing_count(void)
-{
- return wsa881x_probing_count;
-}
-EXPORT_SYMBOL(wsa881x_get_probing_count);
-
-int wsa881x_get_presence_count(void)
-{
- return wsa881x_presence_count;
-}
-EXPORT_SYMBOL(wsa881x_get_presence_count);
-
-int wsa881x_set_mclk_callback(
- int (*enable_mclk_callback)(struct snd_soc_card *, bool))
-{
- int i;
-
- for (i = 0; i < MAX_WSA881X_DEVICE; i++) {
- if (wsa_pdata[i].status == WSA881X_STATUS_I2C)
- wsa_pdata[i].enable_mclk = enable_mclk_callback;
- }
- return 0;
-}
-EXPORT_SYMBOL(wsa881x_set_mclk_callback);
-
-static int check_wsa881x_presence(struct i2c_client *client)
-{
- int ret = 0;
- int wsa881x_index = 0;
-
- ret = wsa881x_i2c_get_client_index(client, &wsa881x_index);
- if (ret != 0) {
- dev_err(&client->dev, "%s: I2C get codec I2C\n"
- "client failed\n", __func__);
- return ret;
- }
- ret = wsa881x_i2c_read_device(&wsa_pdata[wsa881x_index],
- WSA881X_CDC_RST_CTL);
- if (ret < 0) {
- dev_err(&client->dev, "failed to read wsa881x with addr %x\n",
- client->addr);
- return ret;
- }
- ret = wsa881x_i2c_write_device(&wsa_pdata[wsa881x_index],
- WSA881X_CDC_RST_CTL, 0x01);
- if (ret < 0) {
- dev_err(&client->dev, "failed write addr %x reg:0x5 val:0x1\n",
- client->addr);
- return ret;
- }
- /* allow 20ms before trigger next write to verify WSA881x presence */
- msleep(20);
- ret = wsa881x_i2c_write_device(&wsa_pdata[wsa881x_index],
- WSA881X_CDC_RST_CTL, 0x00);
- if (ret < 0) {
- dev_err(&client->dev, "failed write addr %x reg:0x5 val:0x0\n",
- client->addr);
- return ret;
- }
- return ret;
-}
-
-static int wsa881x_populate_dt_pdata(struct device *dev)
-{
- int ret = 0;
-
- /* reading the gpio configurations from dtsi file */
- if (!pinctrl_init) {
- ret = msm_gpioset_initialize(CLIENT_WSA_BONGO_1, dev);
- if (ret < 0) {
- dev_err(dev,
- "%s: error reading dtsi files%d\n", __func__, ret);
- goto err;
- }
- pinctrl_init = true;
- }
-err:
- return ret;
-}
-
-static int wsa881x_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int ret = 0;
- int wsa881x_index = 0;
- struct wsa881x_pdata *pdata = NULL;
-
- ret = wsa881x_i2c_get_client_index(client, &wsa881x_index);
- if (ret != 0) {
- dev_err(&client->dev, "%s: I2C get codec I2C\n"
- "client failed\n", __func__);
- return ret;
- }
-
- pdata = &wsa_pdata[wsa881x_index];
-
- if ((client->addr == WSA881X_I2C_SPK0_SLAVE1_ADDR ||
- client->addr == WSA881X_I2C_SPK1_SLAVE1_ADDR) &&
- (pdata->status == WSA881X_STATUS_PROBING))
- return ret;
-
- if (pdata->status == WSA881X_STATUS_I2C) {
- dev_dbg(&client->dev, "%s:probe for other slaves\n"
- "devices of codec I2C slave Addr = %x\n",
- __func__, client->addr);
-
- dev_dbg(&client->dev, "%s:wsa_idx = %d SLAVE = %d\n",
- __func__, wsa881x_index, WSA881X_ANALOG_SLAVE);
- pdata->regmap[WSA881X_ANALOG_SLAVE] =
- devm_regmap_init_i2c(
- client,
- &wsa881x_ana_regmap_config[WSA881X_ANALOG_SLAVE]);
- regcache_cache_bypass(pdata->regmap[WSA881X_ANALOG_SLAVE],
- true);
- if (IS_ERR(pdata->regmap[WSA881X_ANALOG_SLAVE])) {
- ret = PTR_ERR(pdata->regmap[WSA881X_ANALOG_SLAVE]);
- dev_err(&client->dev,
- "%s: regmap_init failed %d\n",
- __func__, ret);
- }
- client->dev.platform_data = pdata;
- i2c_set_clientdata(client, pdata);
- pdata->client[WSA881X_ANALOG_SLAVE] = client;
- if (pdata->version == WSA881X_2_0)
- wsa881x_update_regmap_2_0(
- pdata->regmap[WSA881X_ANALOG_SLAVE],
- WSA881X_ANALOG_SLAVE);
-
- return ret;
- } else if (pdata->status == WSA881X_STATUS_PROBING) {
- pdata->index = wsa881x_index;
- if (client->dev.of_node) {
- dev_dbg(&client->dev, "%s:Platform data\n"
- "from device tree\n", __func__);
- ret = wsa881x_populate_dt_pdata(&client->dev);
- if (ret < 0) {
- dev_err(&client->dev,
- "%s: Fail to obtain pdata from device tree\n",
- __func__);
- ret = -EINVAL;
- goto err;
- }
- client->dev.platform_data = pdata;
- } else {
- dev_dbg(&client->dev, "%s:Platform data from\n"
- "board file\n", __func__);
- pdata = client->dev.platform_data;
- }
- if (!pdata) {
- dev_dbg(&client->dev, "no platform data?\n");
- ret = -EINVAL;
- goto err;
- }
- i2c_set_clientdata(client, pdata);
- dev_set_drvdata(&client->dev, client);
-
- pdata->regmap[WSA881X_DIGITAL_SLAVE] =
- devm_regmap_init_i2c(
- client,
- &wsa881x_ana_regmap_config[WSA881X_DIGITAL_SLAVE]);
- regcache_cache_bypass(pdata->regmap[WSA881X_DIGITAL_SLAVE],
- true);
- if (IS_ERR(pdata->regmap[WSA881X_DIGITAL_SLAVE])) {
- ret = PTR_ERR(pdata->regmap[WSA881X_DIGITAL_SLAVE]);
- dev_err(&client->dev, "%s: regmap_init failed %d\n",
- __func__, ret);
- goto err;
- }
- /* bus reset sequence */
- ret = wsa881x_reset(pdata, true);
- if (ret < 0) {
- dev_err(&client->dev, "%s: WSA enable Failed %d\n",
- __func__, ret);
- goto err;
- }
- pdata->client[WSA881X_DIGITAL_SLAVE] = client;
- pdata->regmap_flag = true;
- ret = check_wsa881x_presence(client);
- if (ret < 0) {
- dev_err(&client->dev,
- "failed to ping wsa with addr:%x, ret = %d\n",
- client->addr, ret);
- wsa881x_probing_count++;
- goto err1;
- }
- pdata->version = wsa881x_i2c_read_device(pdata,
- WSA881X_CHIP_ID1);
- pr_debug("%s: wsa881x version: %d\n", __func__, pdata->version);
- if (pdata->version == WSA881X_2_0) {
- wsa881x_update_reg_defaults_2_0();
- wsa881x_update_regmap_2_0(
- pdata->regmap[WSA881X_DIGITAL_SLAVE],
- WSA881X_DIGITAL_SLAVE);
- }
- wsa881x_presence_count++;
- wsa881x_probing_count++;
- ret = snd_soc_register_codec(&client->dev,
- &soc_codec_dev_wsa881x,
- NULL, 0);
- if (ret < 0)
- goto err1;
- pdata->status = WSA881X_STATUS_I2C;
- }
-err1:
- wsa881x_reset(pdata, false);
-err:
- return 0;
-}
-
-static int wsa881x_i2c_remove(struct i2c_client *client)
-{
- struct wsa881x_pdata *wsa881x = i2c_get_clientdata(client);
-
- snd_soc_unregister_codec(&client->dev);
- i2c_set_clientdata(client, NULL);
- kfree(wsa881x);
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int wsa881x_i2c_suspend(struct device *dev)
-{
- pr_debug("%s: system suspend\n", __func__);
- return 0;
-}
-
-static int wsa881x_i2c_resume(struct device *dev)
-{
- pr_debug("%s: system resume\n", __func__);
- return 0;
-}
-
-static const struct dev_pm_ops wsa881x_i2c_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(wsa881x_i2c_suspend, wsa881x_i2c_resume)
-};
-#endif /* CONFIG_PM_SLEEP */
-
-static const struct i2c_device_id wsa881x_i2c_id[] = {
- {"wsa881x-i2c-dev", WSA881X_I2C_SPK0_SLAVE0_ADDR},
- {"wsa881x-i2c-dev", WSA881X_I2C_SPK0_SLAVE1_ADDR},
- {"wsa881x-i2c-dev", WSA881X_I2C_SPK1_SLAVE0_ADDR},
- {"wsa881x-i2c-dev", WSA881X_I2C_SPK1_SLAVE1_ADDR},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, wsa881x_i2c_id);
-
-
-static const struct of_device_id msm_match_table[] = {
- {.compatible = "qcom,wsa881x-i2c-codec"},
- {}
-};
-MODULE_DEVICE_TABLE(of, msm_match_table);
-
-static struct i2c_driver wsa881x_codec_driver = {
- .driver = {
- .name = "wsa881x-i2c-codec",
- .owner = THIS_MODULE,
-#ifdef CONFIG_PM_SLEEP
- .pm = &wsa881x_i2c_pm_ops,
-#endif
- .of_match_table = msm_match_table,
- },
- .id_table = wsa881x_i2c_id,
- .probe = wsa881x_i2c_probe,
- .remove = wsa881x_i2c_remove,
-};
-
-static int __init wsa881x_codec_init(void)
-{
- int i = 0;
-
- for (i = 0; i < MAX_WSA881X_DEVICE; i++)
- wsa_pdata[i].status = WSA881X_STATUS_PROBING;
- return i2c_add_driver(&wsa881x_codec_driver);
-}
-module_init(wsa881x_codec_init);
-
-static void __exit wsa881x_codec_exit(void)
-{
- i2c_del_driver(&wsa881x_codec_driver);
-}
-
-module_exit(wsa881x_codec_exit);
-
-MODULE_DESCRIPTION("WSA881x Codec driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wsa881x-analog.h b/sound/soc/codecs/wsa881x-analog.h
deleted file mode 100644
index a2ef2a2..0000000
--- a/sound/soc/codecs/wsa881x-analog.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _WSA881X_H
-#define _WSA881X_H
-
-#include <linux/regmap.h>
-#include "wsa881x-registers-analog.h"
-#include <sound/soc.h>
-
-#define WSA881X_I2C_SPK0_SLAVE0_ADDR 0x0E
-#define WSA881X_I2C_SPK0_SLAVE1_ADDR 0x44
-#define WSA881X_I2C_SPK1_SLAVE0_ADDR 0x0F
-#define WSA881X_I2C_SPK1_SLAVE1_ADDR 0x45
-
-#define WSA881X_I2C_SPK0_SLAVE0 0
-#define WSA881X_I2C_SPK1_SLAVE0 1
-#define MAX_WSA881X_DEVICE 2
-#define WSA881X_DIGITAL_SLAVE 0
-#define WSA881X_ANALOG_SLAVE 1
-
-enum {
- WSA881X_1_X = 0,
- WSA881X_2_0,
-};
-
-#define WSA881X_IS_2_0(ver) \
- ((ver == WSA881X_2_0) ? 1 : 0)
-
-extern const u8 wsa881x_ana_reg_readable[WSA881X_CACHE_SIZE];
-extern struct reg_default wsa881x_ana_reg_defaults[WSA881X_CACHE_SIZE];
-extern struct regmap_config wsa881x_ana_regmap_config[2];
-int wsa881x_get_client_index(void);
-int wsa881x_get_probing_count(void);
-int wsa881x_get_presence_count(void);
-int wsa881x_set_mclk_callback(
- int (*enable_mclk_callback)(struct snd_soc_card *, bool));
-void wsa881x_update_reg_defaults_2_0(void);
-void wsa881x_update_regmap_2_0(struct regmap *regmap, int flag);
-
-#endif /* _WSA881X_H */
diff --git a/sound/soc/codecs/wsa881x-irq.c b/sound/soc/codecs/wsa881x-irq.c
deleted file mode 100644
index 9afbd92..0000000
--- a/sound/soc/codecs/wsa881x-irq.c
+++ /dev/null
@@ -1,610 +0,0 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/slab.h>
-#include <linux/ratelimit.h>
-#include <linux/pm_qos.h>
-#include <soc/qcom/pm.h>
-#include "wsa881x-irq.h"
-#include "wsa881x-registers-analog.h"
-
-#define BYTE_BIT_MASK(nr) (1UL << ((nr) % BITS_PER_BYTE))
-#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
-
-
-#define WSA_MAX_NUM_IRQS 8
-
-#ifndef NO_IRQ
-#define NO_IRQ (-1)
-#endif
-
-static int virq_to_phyirq(
- struct wsa_resource *wsa_res, int virq);
-static int phyirq_to_virq(
- struct wsa_resource *wsa_res, int irq);
-static unsigned int wsa_irq_get_upstream_irq(
- struct wsa_resource *wsa_res);
-static void wsa_irq_put_upstream_irq(
- struct wsa_resource *wsa_res);
-static int wsa_map_irq(
- struct wsa_resource *wsa_res, int irq);
-
-static struct snd_soc_codec *ptr_codec;
-
-/**
- * wsa_set_codec() - to update codec pointer
- * @codec: codec pointer.
- *
- * To update the codec pointer, which is used to read/write
- * wsa register.
- *
- * Return: void.
- */
-void wsa_set_codec(struct snd_soc_codec *codec)
-{
- if (codec == NULL) {
- pr_err("%s: codec pointer is NULL\n", __func__);
- ptr_codec = NULL;
- return;
- }
- ptr_codec = codec;
- /* Initialize interrupt mask and level registers */
- snd_soc_write(codec, WSA881X_INTR_LEVEL, 0x8F);
- snd_soc_write(codec, WSA881X_INTR_MASK, 0x8F);
-}
-
-static void wsa_irq_lock(struct irq_data *data)
-{
- struct wsa_resource *wsa_res =
- irq_data_get_irq_chip_data(data);
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res pointer is NULL\n", __func__);
- return;
- }
- mutex_lock(&wsa_res->irq_lock);
-}
-
-static void wsa_irq_sync_unlock(struct irq_data *data)
-{
- struct wsa_resource *wsa_res =
- irq_data_get_irq_chip_data(data);
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res pointer is NULL\n", __func__);
- return;
- }
- if (wsa_res->codec == NULL) {
- pr_err("%s: codec pointer not registered\n", __func__);
- if (ptr_codec == NULL) {
- pr_err("%s: did not receive valid codec pointer\n",
- __func__);
- goto unlock;
- } else {
- wsa_res->codec = ptr_codec;
- }
- }
-
- /*
- * If there's been a change in the mask write it back
- * to the hardware.
- */
- if (wsa_res->irq_masks_cur !=
- wsa_res->irq_masks_cache) {
-
- wsa_res->irq_masks_cache =
- wsa_res->irq_masks_cur;
- snd_soc_write(wsa_res->codec,
- WSA881X_INTR_MASK,
- wsa_res->irq_masks_cur);
- }
-unlock:
- mutex_unlock(&wsa_res->irq_lock);
-}
-
-static void wsa_irq_enable(struct irq_data *data)
-{
- struct wsa_resource *wsa_res =
- irq_data_get_irq_chip_data(data);
- int wsa_irq;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res pointer is NULL\n", __func__);
- return;
- }
- wsa_irq = virq_to_phyirq(wsa_res, data->irq);
- pr_debug("%s: wsa_irq = %d\n", __func__, wsa_irq);
- wsa_res->irq_masks_cur &=
- ~(BYTE_BIT_MASK(wsa_irq));
-}
-
-static void wsa_irq_disable(struct irq_data *data)
-{
- struct wsa_resource *wsa_res =
- irq_data_get_irq_chip_data(data);
- int wsa_irq;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res pointer is NULL\n", __func__);
- return;
- }
- wsa_irq = virq_to_phyirq(wsa_res, data->irq);
- pr_debug("%s: wsa_irq = %d\n", __func__, wsa_irq);
- wsa_res->irq_masks_cur
- |= BYTE_BIT_MASK(wsa_irq);
-}
-
-static void wsa_irq_ack(struct irq_data *data)
-{
- int wsa_irq = 0;
- struct wsa_resource *wsa_res =
- irq_data_get_irq_chip_data(data);
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- wsa_irq = virq_to_phyirq(wsa_res, data->irq);
- pr_debug("%s: IRQ_ACK called for WCD9XXX IRQ: %d\n",
- __func__, wsa_irq);
-}
-
-static void wsa_irq_mask(struct irq_data *d)
-{
- /* do nothing but required as linux calls irq_mask without NULL check */
-}
-
-static struct irq_chip wsa_irq_chip = {
- .name = "wsa",
- .irq_bus_lock = wsa_irq_lock,
- .irq_bus_sync_unlock = wsa_irq_sync_unlock,
- .irq_disable = wsa_irq_disable,
- .irq_enable = wsa_irq_enable,
- .irq_mask = wsa_irq_mask,
- .irq_ack = wsa_irq_ack,
-};
-
-static irqreturn_t wsa_irq_thread(int irq, void *data)
-{
- struct wsa_resource *wsa_res = data;
- int i;
- u8 status;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return IRQ_HANDLED;
- }
- if (wsa_res->codec == NULL) {
- pr_err("%s: codec pointer not registered\n", __func__);
- if (ptr_codec == NULL) {
- pr_err("%s: did not receive valid codec pointer\n",
- __func__);
- return IRQ_HANDLED;
- }
- wsa_res->codec = ptr_codec;
- }
- status = snd_soc_read(wsa_res->codec, WSA881X_INTR_STATUS);
- /* Apply masking */
- status &= ~wsa_res->irq_masks_cur;
-
- for (i = 0; i < wsa_res->num_irqs; i++) {
- if (status & BYTE_BIT_MASK(i)) {
- mutex_lock(&wsa_res->nested_irq_lock);
- handle_nested_irq(phyirq_to_virq(wsa_res, i));
- mutex_unlock(&wsa_res->nested_irq_lock);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-/**
- * wsa_free_irq() - to free an interrupt
- * @irq: interrupt number.
- * @data: pointer to wsa resource.
- *
- * To free already requested interrupt.
- *
- * Return: void.
- */
-void wsa_free_irq(int irq, void *data)
-{
- struct wsa_resource *wsa_res = data;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- free_irq(phyirq_to_virq(wsa_res, irq), data);
-}
-
-/**
- * wsa_enable_irq() - to enable an interrupt
- * @wsa_res: pointer to wsa resource.
- * @irq: interrupt number.
- *
- * This function is to enable an interrupt.
- *
- * Return: void.
- */
-void wsa_enable_irq(struct wsa_resource *wsa_res, int irq)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- enable_irq(phyirq_to_virq(wsa_res, irq));
-}
-
-/**
- * wsa_disable_irq() - to disable an interrupt
- * @wsa_res: pointer to wsa resource.
- * @irq: interrupt number.
- *
- * To disable an interrupt without waiting for executing
- * handler to complete.
- *
- * Return: void.
- */
-void wsa_disable_irq(struct wsa_resource *wsa_res, int irq)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- disable_irq_nosync(phyirq_to_virq(wsa_res, irq));
-}
-
-/**
- * wsa_disable_irq_sync() - to disable an interrupt
- * @wsa_res: pointer to wsa resource.
- * @irq: interrupt number.
- *
- * To disable an interrupt, wait for executing IRQ
- * handler to complete.
- *
- * Return: void.
- */
-void wsa_disable_irq_sync(
- struct wsa_resource *wsa_res, int irq)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- disable_irq(phyirq_to_virq(wsa_res, irq));
-}
-
-static int wsa_irq_setup_downstream_irq(struct wsa_resource *wsa_res)
-{
- int irq, virq, ret;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- pr_debug("%s: enter\n", __func__);
-
- for (irq = 0; irq < wsa_res->num_irqs; irq++) {
- /* Map OF irq */
- virq = wsa_map_irq(wsa_res, irq);
- pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
- if (virq == NO_IRQ) {
- pr_err("%s, No interrupt specifier for irq %d\n",
- __func__, irq);
- return NO_IRQ;
- }
-
- ret = irq_set_chip_data(virq, wsa_res);
- if (ret) {
- pr_err("%s: Failed to configure irq %d (%d)\n",
- __func__, irq, ret);
- return ret;
- }
-
- if (wsa_res->irq_level_high[irq])
- irq_set_chip_and_handler(virq, &wsa_irq_chip,
- handle_level_irq);
- else
- irq_set_chip_and_handler(virq, &wsa_irq_chip,
- handle_edge_irq);
-
- irq_set_nested_thread(virq, 1);
- }
-
- pr_debug("%s: leave\n", __func__);
-
- return 0;
-}
-
-static int wsa_irq_init(struct wsa_resource *wsa_res)
-{
- int i, ret;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- mutex_init(&wsa_res->irq_lock);
- mutex_init(&wsa_res->nested_irq_lock);
-
- wsa_res->irq = wsa_irq_get_upstream_irq(wsa_res);
- if (!wsa_res->irq) {
- pr_warn("%s: irq driver is not yet initialized\n", __func__);
- mutex_destroy(&wsa_res->irq_lock);
- mutex_destroy(&wsa_res->nested_irq_lock);
- return -EPROBE_DEFER;
- }
- pr_debug("%s: probed irq %d\n", __func__, wsa_res->irq);
-
- /* Setup downstream IRQs */
- ret = wsa_irq_setup_downstream_irq(wsa_res);
- if (ret) {
- pr_err("%s: Failed to setup downstream IRQ\n", __func__);
- goto fail_irq_init;
- }
-
- /* mask all the interrupts */
- for (i = 0; i < wsa_res->num_irqs; i++) {
- wsa_res->irq_masks_cur |= BYTE_BIT_MASK(i);
- wsa_res->irq_masks_cache |= BYTE_BIT_MASK(i);
- }
-
- ret = request_threaded_irq(wsa_res->irq, NULL, wsa_irq_thread,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "wsa", wsa_res);
- if (ret != 0) {
- dev_err(wsa_res->dev, "Failed to request IRQ %d: %d\n",
- wsa_res->irq, ret);
- } else {
- ret = enable_irq_wake(wsa_res->irq);
- if (ret) {
- dev_err(wsa_res->dev,
- "Failed to set wake interrupt on IRQ %d: %d\n",
- wsa_res->irq, ret);
- free_irq(wsa_res->irq, wsa_res);
- }
- }
-
- if (ret)
- goto fail_irq_init;
-
- return ret;
-
-fail_irq_init:
- dev_err(wsa_res->dev,
- "%s: Failed to init wsa irq\n", __func__);
- wsa_irq_put_upstream_irq(wsa_res);
- mutex_destroy(&wsa_res->irq_lock);
- mutex_destroy(&wsa_res->nested_irq_lock);
- return ret;
-}
-
-/**
- * wsa_request_irq() - to request/register an interrupt
- * @wsa_res: pointer to wsa_resource.
- * @irq: interrupt number.
- * @handler: interrupt handler function pointer.
- * @name: interrupt name.
- * @data: device info.
- *
- * Convert physical irq to virtual irq and then
- * reguest for threaded handler.
- *
- * Return: Retuns success/failure.
- */
-int wsa_request_irq(struct wsa_resource *wsa_res,
- int irq, irq_handler_t handler,
- const char *name, void *data)
-{
- int virq;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- virq = phyirq_to_virq(wsa_res, irq);
-
- /*
- * ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so.
- */
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- set_irq_flags(virq, IRQF_VALID);
-#else
- set_irq_noprobe(virq);
-#endif
-
- return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
- name, data);
-}
-
-/**
- * wsa_irq_exit() - to disable/clear interrupt/resources
- * @wsa_res: pointer to wsa_resource
- *
- * Disable and free the interrupts and then release resources.
- *
- * Return: void.
- */
-void wsa_irq_exit(struct wsa_resource *wsa_res)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- dev_dbg(wsa_res->dev, "%s: Cleaning up irq %d\n", __func__,
- wsa_res->irq);
-
- if (wsa_res->irq) {
- disable_irq_wake(wsa_res->irq);
- free_irq(wsa_res->irq, wsa_res);
- /* Release parent's of node */
- wsa_irq_put_upstream_irq(wsa_res);
- }
- mutex_destroy(&wsa_res->irq_lock);
- mutex_destroy(&wsa_res->nested_irq_lock);
-}
-
-static int phyirq_to_virq(struct wsa_resource *wsa_res, int offset)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- return irq_linear_revmap(wsa_res->domain, offset);
-}
-
-static int virq_to_phyirq(struct wsa_resource *wsa_res, int virq)
-{
- struct irq_data *irq_data = irq_get_irq_data(virq);
-
- if (unlikely(!irq_data)) {
- pr_err("%s: irq_data is NULL\n", __func__);
- return -EINVAL;
- }
- return irq_data->hwirq;
-}
-
-static unsigned int wsa_irq_get_upstream_irq(struct wsa_resource *wsa_res)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- return wsa_res->irq;
-}
-
-static void wsa_irq_put_upstream_irq(struct wsa_resource *wsa_res)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- /* Hold parent's of node */
- of_node_put(wsa_res->dev->of_node);
-}
-
-static int wsa_map_irq(struct wsa_resource *wsa_res, int irq)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- return of_irq_to_resource(wsa_res->dev->of_node, irq, NULL);
-}
-
-static int wsa_irq_probe(struct platform_device *pdev)
-{
- int irq;
- struct wsa_resource *wsa_res = NULL;
- int ret = -EINVAL;
-
- irq = platform_get_irq_byname(pdev, "wsa-int");
- if (irq < 0) {
- dev_err(&pdev->dev, "%s: Couldn't find wsa-int node(%d)\n",
- __func__, irq);
- return -EINVAL;
- }
- pr_debug("%s: node %s\n", __func__, pdev->name);
- wsa_res = kzalloc(sizeof(*wsa_res), GFP_KERNEL);
- if (!wsa_res) {
- pr_err("%s: could not allocate memory\n", __func__);
- return -ENOMEM;
- }
- /*
- * wsa interrupt controller supports N to N irq mapping with
- * single cell binding with irq numbers(offsets) only.
- * Use irq_domain_simple_ops that has irq_domain_simple_map and
- * irq_domain_xlate_onetwocell.
- */
- wsa_res->dev = &pdev->dev;
- wsa_res->domain = irq_domain_add_linear(wsa_res->dev->of_node,
- WSA_MAX_NUM_IRQS, &irq_domain_simple_ops,
- wsa_res);
- if (!wsa_res->domain) {
- dev_err(&pdev->dev, "%s: domain is NULL\n", __func__);
- ret = -ENOMEM;
- goto err;
- }
- wsa_res->dev = &pdev->dev;
-
- dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);
- wsa_res->irq = irq;
- wsa_res->num_irq_regs = 1;
- wsa_res->num_irqs = WSA_NUM_IRQS;
- ret = wsa_irq_init(wsa_res);
- if (ret < 0) {
- dev_err(&pdev->dev, "%s: failed to do irq init %d\n",
- __func__, ret);
- goto err;
- }
-
- return ret;
-err:
- kfree(wsa_res);
- return ret;
-}
-
-static int wsa_irq_remove(struct platform_device *pdev)
-{
- struct irq_domain *domain;
- struct wsa_resource *data;
-
- domain = irq_find_host(pdev->dev.of_node);
- if (unlikely(!domain)) {
- pr_err("%s: domain is NULL\n", __func__);
- return -EINVAL;
- }
- data = (struct wsa_resource *)domain->host_data;
- data->irq = 0;
-
- return 0;
-}
-
-static const struct of_device_id of_match[] = {
- { .compatible = "qcom,wsa-irq" },
- { }
-};
-
-static struct platform_driver wsa_irq_driver = {
- .probe = wsa_irq_probe,
- .remove = wsa_irq_remove,
- .driver = {
- .name = "wsa_intc",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(of_match),
- },
-};
-
-static int wsa_irq_drv_init(void)
-{
- return platform_driver_register(&wsa_irq_driver);
-}
-subsys_initcall(wsa_irq_drv_init);
-
-static void wsa_irq_drv_exit(void)
-{
- platform_driver_unregister(&wsa_irq_driver);
-}
-module_exit(wsa_irq_drv_exit);
-
-MODULE_DESCRIPTION("WSA881x IRQ driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wsa881x-irq.h b/sound/soc/codecs/wsa881x-irq.h
deleted file mode 100644
index 270eb91..0000000
--- a/sound/soc/codecs/wsa881x-irq.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __WSA881X_IRQ_H__
-#define __WSA881X_IRQ_H__
-
-#include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <sound/soc.h>
-
-/**
- * enum wsa_interrupts - wsa interrupt number
- * @WSA_INT_SAF2WAR: Temp irq interrupt, from safe state to warning state.
- * @WSA_INT_WAR2SAF: Temp irq interrupt, from warning state to safe state.
- * @WSA_INT_DISABLE: Disable Temp sensor interrupts.
- * @WSA_INT_OCP: OCP interrupt.
- * @WSA_INT_CLIP: CLIP detect interrupt.
- * @WSA_NUM_IRQS: MAX Interrupt number.
- *
- * WSA IRQ Interrupt numbers.
- */
-enum wsa_interrupts {
- WSA_INT_SAF2WAR = 0,
- WSA_INT_WAR2SAF,
- WSA_INT_DISABLE,
- WSA_INT_OCP,
- WSA_INT_CLIP,
- WSA_NUM_IRQS,
-};
-
-/**
- * struct wsa_resource - the basic wsa_resource structure
- * @irq_lock: lock used by irq_chip functions.
- * @nested_irq_lock: lock used while handling nested interrupts.
- * @irq: interrupt number.
- * @irq_masks_cur: current mask value to be written to mask registers.
- * @irq_masks_cache: cached mask value.
- * @num_irqs: number of supported interrupts.
- * @num_irq_regs: number of irq registers.
- * @parent: parent pointer.
- * @dev: device pointer.
- * @domain: irq domain pointer.
- * codec: codec pointer.
- *
- * Contains required members used in wsa irq driver.
- */
-
-struct wsa_resource {
- struct mutex irq_lock;
- struct mutex nested_irq_lock;
- unsigned int irq;
- u8 irq_masks_cur;
- u8 irq_masks_cache;
- bool irq_level_high[8];
- int num_irqs;
- int num_irq_regs;
- void *parent;
- struct device *dev;
- struct irq_domain *domain;
- struct snd_soc_codec *codec;
-};
-
-void wsa_set_codec(struct snd_soc_codec *codec);
-void wsa_free_irq(int irq, void *data);
-void wsa_enable_irq(struct wsa_resource *wsa_res, int irq);
-void wsa_disable_irq(struct wsa_resource *wsa_res, int irq);
-void wsa_disable_irq_sync(struct wsa_resource *wsa_res, int irq);
-int wsa_request_irq(struct wsa_resource *wsa_res,
- int irq, irq_handler_t handler,
- const char *name, void *data);
-
-void wsa_irq_exit(struct wsa_resource *wsa_res);
-
-#endif /* __WSA881X_IRQ_H__ */
diff --git a/sound/soc/codecs/wsa881x-registers-analog.h b/sound/soc/codecs/wsa881x-registers-analog.h
deleted file mode 100644
index a5ebf8e1..0000000
--- a/sound/soc/codecs/wsa881x-registers-analog.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef WSA881X_REGISTERS_H
-#define WSA881X_REGISTERS_H
-
-#define WSA881X_DIGITAL_BASE 0x0000
-#define WSA881X_ANALOG_BASE 0x0100
-
-#define WSA881X_CHIP_ID0 (WSA881X_DIGITAL_BASE+0x0000)
-#define WSA881X_CHIP_ID1 (WSA881X_DIGITAL_BASE+0x0001)
-#define WSA881X_CHIP_ID2 (WSA881X_DIGITAL_BASE+0x0002)
-#define WSA881X_CHIP_ID3 (WSA881X_DIGITAL_BASE+0x0003)
-#define WSA881X_BUS_ID (WSA881X_DIGITAL_BASE+0x0004)
-#define WSA881X_CDC_RST_CTL (WSA881X_DIGITAL_BASE+0x0005)
-#define WSA881X_CDC_TOP_CLK_CTL (WSA881X_DIGITAL_BASE+0x0006)
-#define WSA881X_CDC_ANA_CLK_CTL (WSA881X_DIGITAL_BASE+0x0007)
-#define WSA881X_CDC_DIG_CLK_CTL (WSA881X_DIGITAL_BASE+0x0008)
-#define WSA881X_CLOCK_CONFIG (WSA881X_DIGITAL_BASE+0x0009)
-#define WSA881X_ANA_CTL (WSA881X_DIGITAL_BASE+0x000A)
-#define WSA881X_SWR_RESET_EN (WSA881X_DIGITAL_BASE+0x000B)
-#define WSA881X_RESET_CTL (WSA881X_DIGITAL_BASE+0x000C)
-#define WSA881X_TADC_VALUE_CTL (WSA881X_DIGITAL_BASE+0x000F)
-#define WSA881X_TEMP_DETECT_CTL (WSA881X_DIGITAL_BASE+0x0010)
-#define WSA881X_TEMP_MSB (WSA881X_DIGITAL_BASE+0x0011)
-#define WSA881X_TEMP_LSB (WSA881X_DIGITAL_BASE+0x0012)
-#define WSA881X_TEMP_CONFIG0 (WSA881X_DIGITAL_BASE+0x0013)
-#define WSA881X_TEMP_CONFIG1 (WSA881X_DIGITAL_BASE+0x0014)
-#define WSA881X_CDC_CLIP_CTL (WSA881X_DIGITAL_BASE+0x0015)
-#define WSA881X_SDM_PDM9_LSB (WSA881X_DIGITAL_BASE+0x0016)
-#define WSA881X_SDM_PDM9_MSB (WSA881X_DIGITAL_BASE+0x0017)
-#define WSA881X_CDC_RX_CTL (WSA881X_DIGITAL_BASE+0x0018)
-#define WSA881X_DEM_BYPASS_DATA0 (WSA881X_DIGITAL_BASE+0x0019)
-#define WSA881X_DEM_BYPASS_DATA1 (WSA881X_DIGITAL_BASE+0x001A)
-#define WSA881X_DEM_BYPASS_DATA2 (WSA881X_DIGITAL_BASE+0x001B)
-#define WSA881X_DEM_BYPASS_DATA3 (WSA881X_DIGITAL_BASE+0x001C)
-#define WSA881X_OTP_CTRL0 (WSA881X_DIGITAL_BASE+0x001D)
-#define WSA881X_OTP_CTRL1 (WSA881X_DIGITAL_BASE+0x001E)
-#define WSA881X_HDRIVE_CTL_GROUP1 (WSA881X_DIGITAL_BASE+0x001F)
-#define WSA881X_INTR_MODE (WSA881X_DIGITAL_BASE+0x0020)
-#define WSA881X_INTR_MASK (WSA881X_DIGITAL_BASE+0x0021)
-#define WSA881X_INTR_STATUS (WSA881X_DIGITAL_BASE+0x0022)
-#define WSA881X_INTR_CLEAR (WSA881X_DIGITAL_BASE+0x0023)
-#define WSA881X_INTR_LEVEL (WSA881X_DIGITAL_BASE+0x0024)
-#define WSA881X_INTR_SET (WSA881X_DIGITAL_BASE+0x0025)
-#define WSA881X_INTR_TEST (WSA881X_DIGITAL_BASE+0x0026)
-#define WSA881X_PDM_TEST_MODE (WSA881X_DIGITAL_BASE+0x0030)
-#define WSA881X_ATE_TEST_MODE (WSA881X_DIGITAL_BASE+0x0031)
-#define WSA881X_PIN_CTL_MODE (WSA881X_DIGITAL_BASE+0x0032)
-#define WSA881X_PIN_CTL_OE (WSA881X_DIGITAL_BASE+0x0033)
-#define WSA881X_PIN_WDATA_IOPAD (WSA881X_DIGITAL_BASE+0x0034)
-#define WSA881X_PIN_STATUS (WSA881X_DIGITAL_BASE+0x0035)
-#define WSA881X_DIG_DEBUG_MODE (WSA881X_DIGITAL_BASE+0x0037)
-#define WSA881X_DIG_DEBUG_SEL (WSA881X_DIGITAL_BASE+0x0038)
-#define WSA881X_DIG_DEBUG_EN (WSA881X_DIGITAL_BASE+0x0039)
-#define WSA881X_SWR_HM_TEST1 (WSA881X_DIGITAL_BASE+0x003B)
-#define WSA881X_SWR_HM_TEST2 (WSA881X_DIGITAL_BASE+0x003C)
-#define WSA881X_TEMP_DETECT_DBG_CTL (WSA881X_DIGITAL_BASE+0x003D)
-#define WSA881X_TEMP_DEBUG_MSB (WSA881X_DIGITAL_BASE+0x003E)
-#define WSA881X_TEMP_DEBUG_LSB (WSA881X_DIGITAL_BASE+0x003F)
-#define WSA881X_SAMPLE_EDGE_SEL (WSA881X_DIGITAL_BASE+0x0044)
-#define WSA881X_IOPAD_CTL (WSA881X_DIGITAL_BASE+0x0045)
-#define WSA881X_SPARE_0 (WSA881X_DIGITAL_BASE+0x0050)
-#define WSA881X_SPARE_1 (WSA881X_DIGITAL_BASE+0x0051)
-#define WSA881X_SPARE_2 (WSA881X_DIGITAL_BASE+0x0052)
-#define WSA881X_OTP_REG_0 (WSA881X_DIGITAL_BASE+0x0080)
-#define WSA881X_OTP_REG_1 (WSA881X_DIGITAL_BASE+0x0081)
-#define WSA881X_OTP_REG_2 (WSA881X_DIGITAL_BASE+0x0082)
-#define WSA881X_OTP_REG_3 (WSA881X_DIGITAL_BASE+0x0083)
-#define WSA881X_OTP_REG_4 (WSA881X_DIGITAL_BASE+0x0084)
-#define WSA881X_OTP_REG_5 (WSA881X_DIGITAL_BASE+0x0085)
-#define WSA881X_OTP_REG_6 (WSA881X_DIGITAL_BASE+0x0086)
-#define WSA881X_OTP_REG_7 (WSA881X_DIGITAL_BASE+0x0087)
-#define WSA881X_OTP_REG_8 (WSA881X_DIGITAL_BASE+0x0088)
-#define WSA881X_OTP_REG_9 (WSA881X_DIGITAL_BASE+0x0089)
-#define WSA881X_OTP_REG_10 (WSA881X_DIGITAL_BASE+0x008A)
-#define WSA881X_OTP_REG_11 (WSA881X_DIGITAL_BASE+0x008B)
-#define WSA881X_OTP_REG_12 (WSA881X_DIGITAL_BASE+0x008C)
-#define WSA881X_OTP_REG_13 (WSA881X_DIGITAL_BASE+0x008D)
-#define WSA881X_OTP_REG_14 (WSA881X_DIGITAL_BASE+0x008E)
-#define WSA881X_OTP_REG_15 (WSA881X_DIGITAL_BASE+0x008F)
-#define WSA881X_OTP_REG_16 (WSA881X_DIGITAL_BASE+0x0090)
-#define WSA881X_OTP_REG_17 (WSA881X_DIGITAL_BASE+0x0091)
-#define WSA881X_OTP_REG_18 (WSA881X_DIGITAL_BASE+0x0092)
-#define WSA881X_OTP_REG_19 (WSA881X_DIGITAL_BASE+0x0093)
-#define WSA881X_OTP_REG_20 (WSA881X_DIGITAL_BASE+0x0094)
-#define WSA881X_OTP_REG_21 (WSA881X_DIGITAL_BASE+0x0095)
-#define WSA881X_OTP_REG_22 (WSA881X_DIGITAL_BASE+0x0096)
-#define WSA881X_OTP_REG_23 (WSA881X_DIGITAL_BASE+0x0097)
-#define WSA881X_OTP_REG_24 (WSA881X_DIGITAL_BASE+0x0098)
-#define WSA881X_OTP_REG_25 (WSA881X_DIGITAL_BASE+0x0099)
-#define WSA881X_OTP_REG_26 (WSA881X_DIGITAL_BASE+0x009A)
-#define WSA881X_OTP_REG_27 (WSA881X_DIGITAL_BASE+0x009B)
-#define WSA881X_OTP_REG_28 (WSA881X_DIGITAL_BASE+0x009C)
-#define WSA881X_OTP_REG_29 (WSA881X_DIGITAL_BASE+0x009D)
-#define WSA881X_OTP_REG_30 (WSA881X_DIGITAL_BASE+0x009E)
-#define WSA881X_OTP_REG_31 (WSA881X_DIGITAL_BASE+0x009F)
-#define WSA881X_OTP_REG_32 (WSA881X_DIGITAL_BASE+0x00A0)
-#define WSA881X_OTP_REG_33 (WSA881X_DIGITAL_BASE+0x00A1)
-#define WSA881X_OTP_REG_34 (WSA881X_DIGITAL_BASE+0x00A2)
-#define WSA881X_OTP_REG_35 (WSA881X_DIGITAL_BASE+0x00A3)
-#define WSA881X_OTP_REG_36 (WSA881X_DIGITAL_BASE+0x00A4)
-#define WSA881X_OTP_REG_37 (WSA881X_DIGITAL_BASE+0x00A5)
-#define WSA881X_OTP_REG_38 (WSA881X_DIGITAL_BASE+0x00A6)
-#define WSA881X_OTP_REG_39 (WSA881X_DIGITAL_BASE+0x00A7)
-#define WSA881X_OTP_REG_40 (WSA881X_DIGITAL_BASE+0x00A8)
-#define WSA881X_OTP_REG_41 (WSA881X_DIGITAL_BASE+0x00A9)
-#define WSA881X_OTP_REG_42 (WSA881X_DIGITAL_BASE+0x00AA)
-#define WSA881X_OTP_REG_43 (WSA881X_DIGITAL_BASE+0x00AB)
-#define WSA881X_OTP_REG_44 (WSA881X_DIGITAL_BASE+0x00AC)
-#define WSA881X_OTP_REG_45 (WSA881X_DIGITAL_BASE+0x00AD)
-#define WSA881X_OTP_REG_46 (WSA881X_DIGITAL_BASE+0x00AE)
-#define WSA881X_OTP_REG_47 (WSA881X_DIGITAL_BASE+0x00AF)
-#define WSA881X_OTP_REG_48 (WSA881X_DIGITAL_BASE+0x00B0)
-#define WSA881X_OTP_REG_49 (WSA881X_DIGITAL_BASE+0x00B1)
-#define WSA881X_OTP_REG_50 (WSA881X_DIGITAL_BASE+0x00B2)
-#define WSA881X_OTP_REG_51 (WSA881X_DIGITAL_BASE+0x00B3)
-#define WSA881X_OTP_REG_52 (WSA881X_DIGITAL_BASE+0x00B4)
-#define WSA881X_OTP_REG_53 (WSA881X_DIGITAL_BASE+0x00B5)
-#define WSA881X_OTP_REG_54 (WSA881X_DIGITAL_BASE+0x00B6)
-#define WSA881X_OTP_REG_55 (WSA881X_DIGITAL_BASE+0x00B7)
-#define WSA881X_OTP_REG_56 (WSA881X_DIGITAL_BASE+0x00B8)
-#define WSA881X_OTP_REG_57 (WSA881X_DIGITAL_BASE+0x00B9)
-#define WSA881X_OTP_REG_58 (WSA881X_DIGITAL_BASE+0x00BA)
-#define WSA881X_OTP_REG_59 (WSA881X_DIGITAL_BASE+0x00BB)
-#define WSA881X_OTP_REG_60 (WSA881X_DIGITAL_BASE+0x00BC)
-#define WSA881X_OTP_REG_61 (WSA881X_DIGITAL_BASE+0x00BD)
-#define WSA881X_OTP_REG_62 (WSA881X_DIGITAL_BASE+0x00BE)
-#define WSA881X_OTP_REG_63 (WSA881X_DIGITAL_BASE+0x00BF)
-/* Analog Register address space */
-#define WSA881X_BIAS_REF_CTRL (WSA881X_ANALOG_BASE+0x0000)
-#define WSA881X_BIAS_TEST (WSA881X_ANALOG_BASE+0x0001)
-#define WSA881X_BIAS_BIAS (WSA881X_ANALOG_BASE+0x0002)
-#define WSA881X_TEMP_OP (WSA881X_ANALOG_BASE+0x0003)
-#define WSA881X_TEMP_IREF_CTRL (WSA881X_ANALOG_BASE+0x0004)
-#define WSA881X_TEMP_ISENS_CTRL (WSA881X_ANALOG_BASE+0x0005)
-#define WSA881X_TEMP_CLK_CTRL (WSA881X_ANALOG_BASE+0x0006)
-#define WSA881X_TEMP_TEST (WSA881X_ANALOG_BASE+0x0007)
-#define WSA881X_TEMP_BIAS (WSA881X_ANALOG_BASE+0x0008)
-#define WSA881X_TEMP_ADC_CTRL (WSA881X_ANALOG_BASE+0x0009)
-#define WSA881X_TEMP_DOUT_MSB (WSA881X_ANALOG_BASE+0x000A)
-#define WSA881X_TEMP_DOUT_LSB (WSA881X_ANALOG_BASE+0x000B)
-#define WSA881X_ADC_EN_MODU_V (WSA881X_ANALOG_BASE+0x0010)
-#define WSA881X_ADC_EN_MODU_I (WSA881X_ANALOG_BASE+0x0011)
-#define WSA881X_ADC_EN_DET_TEST_V (WSA881X_ANALOG_BASE+0x0012)
-#define WSA881X_ADC_EN_DET_TEST_I (WSA881X_ANALOG_BASE+0x0013)
-#define WSA881X_ADC_SEL_IBIAS (WSA881X_ANALOG_BASE+0x0014)
-#define WSA881X_ADC_EN_SEL_IBIAS (WSA881X_ANALOG_BASE+0x0015)
-#define WSA881X_SPKR_DRV_EN (WSA881X_ANALOG_BASE+0x001A)
-#define WSA881X_SPKR_DRV_GAIN (WSA881X_ANALOG_BASE+0x001B)
-#define WSA881X_SPKR_DAC_CTL (WSA881X_ANALOG_BASE+0x001C)
-#define WSA881X_SPKR_DRV_DBG (WSA881X_ANALOG_BASE+0x001D)
-#define WSA881X_SPKR_PWRSTG_DBG (WSA881X_ANALOG_BASE+0x001E)
-#define WSA881X_SPKR_OCP_CTL (WSA881X_ANALOG_BASE+0x001F)
-#define WSA881X_SPKR_CLIP_CTL (WSA881X_ANALOG_BASE+0x0020)
-#define WSA881X_SPKR_BBM_CTL (WSA881X_ANALOG_BASE+0x0021)
-#define WSA881X_SPKR_MISC_CTL1 (WSA881X_ANALOG_BASE+0x0022)
-#define WSA881X_SPKR_MISC_CTL2 (WSA881X_ANALOG_BASE+0x0023)
-#define WSA881X_SPKR_BIAS_INT (WSA881X_ANALOG_BASE+0x0024)
-#define WSA881X_SPKR_PA_INT (WSA881X_ANALOG_BASE+0x0025)
-#define WSA881X_SPKR_BIAS_CAL (WSA881X_ANALOG_BASE+0x0026)
-#define WSA881X_SPKR_BIAS_PSRR (WSA881X_ANALOG_BASE+0x0027)
-#define WSA881X_SPKR_STATUS1 (WSA881X_ANALOG_BASE+0x0028)
-#define WSA881X_SPKR_STATUS2 (WSA881X_ANALOG_BASE+0x0029)
-#define WSA881X_BOOST_EN_CTL (WSA881X_ANALOG_BASE+0x002A)
-#define WSA881X_BOOST_CURRENT_LIMIT (WSA881X_ANALOG_BASE+0x002B)
-#define WSA881X_BOOST_PS_CTL (WSA881X_ANALOG_BASE+0x002C)
-#define WSA881X_BOOST_PRESET_OUT1 (WSA881X_ANALOG_BASE+0x002D)
-#define WSA881X_BOOST_PRESET_OUT2 (WSA881X_ANALOG_BASE+0x002E)
-#define WSA881X_BOOST_FORCE_OUT (WSA881X_ANALOG_BASE+0x002F)
-#define WSA881X_BOOST_LDO_PROG (WSA881X_ANALOG_BASE+0x0030)
-#define WSA881X_BOOST_SLOPE_COMP_ISENSE_FB (WSA881X_ANALOG_BASE+0x0031)
-#define WSA881X_BOOST_RON_CTL (WSA881X_ANALOG_BASE+0x0032)
-#define WSA881X_BOOST_LOOP_STABILITY (WSA881X_ANALOG_BASE+0x0033)
-#define WSA881X_BOOST_ZX_CTL (WSA881X_ANALOG_BASE+0x0034)
-#define WSA881X_BOOST_START_CTL (WSA881X_ANALOG_BASE+0x0035)
-#define WSA881X_BOOST_MISC1_CTL (WSA881X_ANALOG_BASE+0x0036)
-#define WSA881X_BOOST_MISC2_CTL (WSA881X_ANALOG_BASE+0x0037)
-#define WSA881X_BOOST_MISC3_CTL (WSA881X_ANALOG_BASE+0x0038)
-#define WSA881X_BOOST_ATEST_CTL (WSA881X_ANALOG_BASE+0x0039)
-#define WSA881X_SPKR_PROT_FE_GAIN (WSA881X_ANALOG_BASE+0x003A)
-#define WSA881X_SPKR_PROT_FE_CM_LDO_SET (WSA881X_ANALOG_BASE+0x003B)
-#define WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1 (WSA881X_ANALOG_BASE+0x003C)
-#define WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2 (WSA881X_ANALOG_BASE+0x003D)
-#define WSA881X_SPKR_PROT_ATEST1 (WSA881X_ANALOG_BASE+0x003E)
-#define WSA881X_SPKR_PROT_ATEST2 (WSA881X_ANALOG_BASE+0x003F)
-#define WSA881X_SPKR_PROT_FE_VSENSE_VCM (WSA881X_ANALOG_BASE+0x0040)
-#define WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1 (WSA881X_ANALOG_BASE+0x0041)
-#define WSA881X_BONGO_RESRV_REG1 (WSA881X_ANALOG_BASE+0x0042)
-#define WSA881X_BONGO_RESRV_REG2 (WSA881X_ANALOG_BASE+0x0043)
-#define WSA881X_SPKR_PROT_SAR (WSA881X_ANALOG_BASE+0x0044)
-#define WSA881X_SPKR_STATUS3 (WSA881X_ANALOG_BASE+0x0045)
-
-#define WSA881X_NUM_REGISTERS (WSA881X_SPKR_STATUS3+1)
-#define WSA881X_MAX_REGISTER (WSA881X_NUM_REGISTERS-1)
-#define WSA881X_CACHE_SIZE WSA881X_NUM_REGISTERS
-#endif /* WSA881X_REGISTERS_H */
diff --git a/sound/soc/codecs/wsa881x-regmap-analog.c b/sound/soc/codecs/wsa881x-regmap-analog.c
deleted file mode 100644
index 2bc3c9e..0000000
--- a/sound/soc/codecs/wsa881x-regmap-analog.c
+++ /dev/null
@@ -1,499 +0,0 @@
-/*
- * Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/regmap.h>
-#include <linux/device.h>
-#include "wsa881x-registers-analog.h"
-#include "wsa881x-analog.h"
-
-struct reg_default wsa881x_ana_reg_defaults[] = {
- {WSA881X_CHIP_ID0, 0x00},
- {WSA881X_CHIP_ID1, 0x00},
- {WSA881X_CHIP_ID2, 0x00},
- {WSA881X_CHIP_ID3, 0x02},
- {WSA881X_BUS_ID, 0x00},
- {WSA881X_CDC_RST_CTL, 0x00},
- {WSA881X_CDC_TOP_CLK_CTL, 0x03},
- {WSA881X_CDC_ANA_CLK_CTL, 0x00},
- {WSA881X_CDC_DIG_CLK_CTL, 0x00},
- {WSA881X_CLOCK_CONFIG, 0x00},
- {WSA881X_ANA_CTL, 0x08},
- {WSA881X_SWR_RESET_EN, 0x00},
- {WSA881X_TEMP_DETECT_CTL, 0x01},
- {WSA881X_TEMP_MSB, 0x00},
- {WSA881X_TEMP_LSB, 0x00},
- {WSA881X_TEMP_CONFIG0, 0x00},
- {WSA881X_TEMP_CONFIG1, 0x00},
- {WSA881X_CDC_CLIP_CTL, 0x03},
- {WSA881X_SDM_PDM9_LSB, 0x00},
- {WSA881X_SDM_PDM9_MSB, 0x00},
- {WSA881X_CDC_RX_CTL, 0x7E},
- {WSA881X_DEM_BYPASS_DATA0, 0x00},
- {WSA881X_DEM_BYPASS_DATA1, 0x00},
- {WSA881X_DEM_BYPASS_DATA2, 0x00},
- {WSA881X_DEM_BYPASS_DATA3, 0x00},
- {WSA881X_OTP_CTRL0, 0x00},
- {WSA881X_OTP_CTRL1, 0x00},
- {WSA881X_HDRIVE_CTL_GROUP1, 0x00},
- {WSA881X_INTR_MODE, 0x00},
- {WSA881X_INTR_MASK, 0x1F},
- {WSA881X_INTR_STATUS, 0x00},
- {WSA881X_INTR_CLEAR, 0x00},
- {WSA881X_INTR_LEVEL, 0x00},
- {WSA881X_INTR_SET, 0x00},
- {WSA881X_INTR_TEST, 0x00},
- {WSA881X_PDM_TEST_MODE, 0x00},
- {WSA881X_ATE_TEST_MODE, 0x00},
- {WSA881X_PIN_CTL_MODE, 0x00},
- {WSA881X_PIN_CTL_OE, 0x00},
- {WSA881X_PIN_WDATA_IOPAD, 0x00},
- {WSA881X_PIN_STATUS, 0x00},
- {WSA881X_DIG_DEBUG_MODE, 0x00},
- {WSA881X_DIG_DEBUG_SEL, 0x00},
- {WSA881X_DIG_DEBUG_EN, 0x00},
- {WSA881X_SWR_HM_TEST1, 0x08},
- {WSA881X_SWR_HM_TEST2, 0x00},
- {WSA881X_TEMP_DETECT_DBG_CTL, 0x00},
- {WSA881X_TEMP_DEBUG_MSB, 0x00},
- {WSA881X_TEMP_DEBUG_LSB, 0x00},
- {WSA881X_SAMPLE_EDGE_SEL, 0x0C},
- {WSA881X_SPARE_0, 0x00},
- {WSA881X_SPARE_1, 0x00},
- {WSA881X_SPARE_2, 0x00},
- {WSA881X_OTP_REG_0, 0x01},
- {WSA881X_OTP_REG_1, 0xFF},
- {WSA881X_OTP_REG_2, 0xC0},
- {WSA881X_OTP_REG_3, 0xFF},
- {WSA881X_OTP_REG_4, 0xC0},
- {WSA881X_OTP_REG_5, 0xFF},
- {WSA881X_OTP_REG_6, 0xFF},
- {WSA881X_OTP_REG_7, 0xFF},
- {WSA881X_OTP_REG_8, 0xFF},
- {WSA881X_OTP_REG_9, 0xFF},
- {WSA881X_OTP_REG_10, 0xFF},
- {WSA881X_OTP_REG_11, 0xFF},
- {WSA881X_OTP_REG_12, 0xFF},
- {WSA881X_OTP_REG_13, 0xFF},
- {WSA881X_OTP_REG_14, 0xFF},
- {WSA881X_OTP_REG_15, 0xFF},
- {WSA881X_OTP_REG_16, 0xFF},
- {WSA881X_OTP_REG_17, 0xFF},
- {WSA881X_OTP_REG_18, 0xFF},
- {WSA881X_OTP_REG_19, 0xFF},
- {WSA881X_OTP_REG_20, 0xFF},
- {WSA881X_OTP_REG_21, 0xFF},
- {WSA881X_OTP_REG_22, 0xFF},
- {WSA881X_OTP_REG_23, 0xFF},
- {WSA881X_OTP_REG_24, 0x03},
- {WSA881X_OTP_REG_25, 0x01},
- {WSA881X_OTP_REG_26, 0x03},
- {WSA881X_OTP_REG_27, 0x11},
- {WSA881X_OTP_REG_28, 0xFF},
- {WSA881X_OTP_REG_29, 0xFF},
- {WSA881X_OTP_REG_30, 0xFF},
- {WSA881X_OTP_REG_31, 0xFF},
- {WSA881X_OTP_REG_63, 0x40},
- /* WSA881x Analog registers */
- {WSA881X_BIAS_REF_CTRL, 0x6C},
- {WSA881X_BIAS_TEST, 0x16},
- {WSA881X_BIAS_BIAS, 0xF0},
- {WSA881X_TEMP_OP, 0x00},
- {WSA881X_TEMP_IREF_CTRL, 0x56},
- {WSA881X_TEMP_ISENS_CTRL, 0x47},
- {WSA881X_TEMP_CLK_CTRL, 0x87},
- {WSA881X_TEMP_TEST, 0x00},
- {WSA881X_TEMP_BIAS, 0x51},
- {WSA881X_TEMP_ADC_CTRL, 0x00},
- {WSA881X_TEMP_DOUT_MSB, 0x00},
- {WSA881X_TEMP_DOUT_LSB, 0x00},
- {WSA881X_ADC_EN_MODU_V, 0x00},
- {WSA881X_ADC_EN_MODU_I, 0x00},
- {WSA881X_ADC_EN_DET_TEST_V, 0x00},
- {WSA881X_ADC_EN_DET_TEST_I, 0x00},
- {WSA881X_ADC_SEL_IBIAS, 0x25},
- {WSA881X_ADC_EN_SEL_IBIAS, 0x10},
- {WSA881X_SPKR_DRV_EN, 0x74},
- {WSA881X_SPKR_DRV_GAIN, 0x01},
- {WSA881X_SPKR_DAC_CTL, 0x40},
- {WSA881X_SPKR_DRV_DBG, 0x15},
- {WSA881X_SPKR_PWRSTG_DBG, 0x00},
- {WSA881X_SPKR_OCP_CTL, 0xD4},
- {WSA881X_SPKR_CLIP_CTL, 0x90},
- {WSA881X_SPKR_BBM_CTL, 0x00},
- {WSA881X_SPKR_MISC_CTL1, 0x80},
- {WSA881X_SPKR_MISC_CTL2, 0x00},
- {WSA881X_SPKR_BIAS_INT, 0x56},
- {WSA881X_SPKR_PA_INT, 0x54},
- {WSA881X_SPKR_BIAS_CAL, 0xAC},
- {WSA881X_SPKR_BIAS_PSRR, 0x54},
- {WSA881X_SPKR_STATUS1, 0x00},
- {WSA881X_SPKR_STATUS2, 0x00},
- {WSA881X_BOOST_EN_CTL, 0x18},
- {WSA881X_BOOST_CURRENT_LIMIT, 0x7A},
- {WSA881X_BOOST_PS_CTL, 0xC0},
- {WSA881X_BOOST_PRESET_OUT1, 0x77},
- {WSA881X_BOOST_PRESET_OUT2, 0x70},
- {WSA881X_BOOST_FORCE_OUT, 0x0E},
- {WSA881X_BOOST_LDO_PROG, 0x16},
- {WSA881X_BOOST_SLOPE_COMP_ISENSE_FB, 0x71},
- {WSA881X_BOOST_RON_CTL, 0x0F},
- {WSA881X_BOOST_LOOP_STABILITY, 0xAD},
- {WSA881X_BOOST_ZX_CTL, 0x34},
- {WSA881X_BOOST_START_CTL, 0x23},
- {WSA881X_BOOST_MISC1_CTL, 0x80},
- {WSA881X_BOOST_MISC2_CTL, 0x00},
- {WSA881X_BOOST_MISC3_CTL, 0x00},
- {WSA881X_BOOST_ATEST_CTL, 0x00},
- {WSA881X_SPKR_PROT_FE_GAIN, 0x46},
- {WSA881X_SPKR_PROT_FE_CM_LDO_SET, 0x3B},
- {WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1, 0x8D},
- {WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2, 0x8D},
- {WSA881X_SPKR_PROT_ATEST1, 0x01},
- {WSA881X_SPKR_PROT_ATEST2, 0x00},
- {WSA881X_SPKR_PROT_FE_VSENSE_VCM, 0x8D},
- {WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1, 0x4D},
- {WSA881X_BONGO_RESRV_REG1, 0x00},
- {WSA881X_BONGO_RESRV_REG2, 0x00},
- {WSA881X_SPKR_PROT_SAR, 0x00},
- {WSA881X_SPKR_STATUS3, 0x00},
-};
-
-struct reg_default wsa881x_ana_reg_defaults_0[] = {
- {WSA881X_CHIP_ID0, 0x00},
- {WSA881X_CHIP_ID1, 0x00},
- {WSA881X_CHIP_ID2, 0x00},
- {WSA881X_CHIP_ID3, 0x02},
- {WSA881X_BUS_ID, 0x00},
- {WSA881X_CDC_RST_CTL, 0x00},
- {WSA881X_CDC_TOP_CLK_CTL, 0x03},
- {WSA881X_CDC_ANA_CLK_CTL, 0x00},
- {WSA881X_CDC_DIG_CLK_CTL, 0x00},
- {WSA881X_CLOCK_CONFIG, 0x00},
- {WSA881X_ANA_CTL, 0x08},
- {WSA881X_SWR_RESET_EN, 0x00},
- {WSA881X_TEMP_DETECT_CTL, 0x01},
- {WSA881X_TEMP_MSB, 0x00},
- {WSA881X_TEMP_LSB, 0x00},
- {WSA881X_TEMP_CONFIG0, 0x00},
- {WSA881X_TEMP_CONFIG1, 0x00},
- {WSA881X_CDC_CLIP_CTL, 0x03},
- {WSA881X_SDM_PDM9_LSB, 0x00},
- {WSA881X_SDM_PDM9_MSB, 0x00},
- {WSA881X_CDC_RX_CTL, 0x7E},
- {WSA881X_DEM_BYPASS_DATA0, 0x00},
- {WSA881X_DEM_BYPASS_DATA1, 0x00},
- {WSA881X_DEM_BYPASS_DATA2, 0x00},
- {WSA881X_DEM_BYPASS_DATA3, 0x00},
- {WSA881X_OTP_CTRL0, 0x00},
- {WSA881X_OTP_CTRL1, 0x00},
- {WSA881X_HDRIVE_CTL_GROUP1, 0x00},
- {WSA881X_INTR_MODE, 0x00},
- {WSA881X_INTR_MASK, 0x1F},
- {WSA881X_INTR_STATUS, 0x00},
- {WSA881X_INTR_CLEAR, 0x00},
- {WSA881X_INTR_LEVEL, 0x00},
- {WSA881X_INTR_SET, 0x00},
- {WSA881X_INTR_TEST, 0x00},
- {WSA881X_PDM_TEST_MODE, 0x00},
- {WSA881X_ATE_TEST_MODE, 0x00},
- {WSA881X_PIN_CTL_MODE, 0x00},
- {WSA881X_PIN_CTL_OE, 0x00},
- {WSA881X_PIN_WDATA_IOPAD, 0x00},
- {WSA881X_PIN_STATUS, 0x00},
- {WSA881X_DIG_DEBUG_MODE, 0x00},
- {WSA881X_DIG_DEBUG_SEL, 0x00},
- {WSA881X_DIG_DEBUG_EN, 0x00},
- {WSA881X_SWR_HM_TEST1, 0x08},
- {WSA881X_SWR_HM_TEST2, 0x00},
- {WSA881X_TEMP_DETECT_DBG_CTL, 0x00},
- {WSA881X_TEMP_DEBUG_MSB, 0x00},
- {WSA881X_TEMP_DEBUG_LSB, 0x00},
- {WSA881X_SAMPLE_EDGE_SEL, 0x0C},
- {WSA881X_SPARE_0, 0x00},
- {WSA881X_SPARE_1, 0x00},
- {WSA881X_SPARE_2, 0x00},
- {WSA881X_OTP_REG_0, 0x01},
- {WSA881X_OTP_REG_1, 0xFF},
- {WSA881X_OTP_REG_2, 0xC0},
- {WSA881X_OTP_REG_3, 0xFF},
- {WSA881X_OTP_REG_4, 0xC0},
- {WSA881X_OTP_REG_5, 0xFF},
- {WSA881X_OTP_REG_6, 0xFF},
- {WSA881X_OTP_REG_7, 0xFF},
- {WSA881X_OTP_REG_8, 0xFF},
- {WSA881X_OTP_REG_9, 0xFF},
- {WSA881X_OTP_REG_10, 0xFF},
- {WSA881X_OTP_REG_11, 0xFF},
- {WSA881X_OTP_REG_12, 0xFF},
- {WSA881X_OTP_REG_13, 0xFF},
- {WSA881X_OTP_REG_14, 0xFF},
- {WSA881X_OTP_REG_15, 0xFF},
- {WSA881X_OTP_REG_16, 0xFF},
- {WSA881X_OTP_REG_17, 0xFF},
- {WSA881X_OTP_REG_18, 0xFF},
- {WSA881X_OTP_REG_19, 0xFF},
- {WSA881X_OTP_REG_20, 0xFF},
- {WSA881X_OTP_REG_21, 0xFF},
- {WSA881X_OTP_REG_22, 0xFF},
- {WSA881X_OTP_REG_23, 0xFF},
- {WSA881X_OTP_REG_24, 0x03},
- {WSA881X_OTP_REG_25, 0x01},
- {WSA881X_OTP_REG_26, 0x03},
- {WSA881X_OTP_REG_27, 0x11},
- {WSA881X_OTP_REG_28, 0xFF},
- {WSA881X_OTP_REG_29, 0xFF},
- {WSA881X_OTP_REG_30, 0xFF},
- {WSA881X_OTP_REG_31, 0xFF},
- {WSA881X_OTP_REG_63, 0x40},
-};
-
-struct reg_default wsa881x_ana_reg_defaults_1[] = {
- {WSA881X_BIAS_REF_CTRL - WSA881X_ANALOG_BASE, 0x6C},
- {WSA881X_BIAS_TEST - WSA881X_ANALOG_BASE, 0x16},
- {WSA881X_BIAS_BIAS - WSA881X_ANALOG_BASE, 0xF0},
- {WSA881X_TEMP_OP - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_TEMP_IREF_CTRL - WSA881X_ANALOG_BASE, 0x56},
- {WSA881X_TEMP_ISENS_CTRL - WSA881X_ANALOG_BASE, 0x47},
- {WSA881X_TEMP_CLK_CTRL - WSA881X_ANALOG_BASE, 0x87},
- {WSA881X_TEMP_TEST - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_TEMP_BIAS - WSA881X_ANALOG_BASE, 0x51},
- {WSA881X_TEMP_ADC_CTRL - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_TEMP_DOUT_MSB - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_TEMP_DOUT_LSB - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_ADC_EN_MODU_V - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_ADC_EN_MODU_I - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_ADC_EN_DET_TEST_V - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_ADC_EN_DET_TEST_I - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_ADC_SEL_IBIAS - WSA881X_ANALOG_BASE, 0x25},
- {WSA881X_ADC_EN_SEL_IBIAS - WSA881X_ANALOG_BASE, 0x10},
- {WSA881X_SPKR_DRV_EN - WSA881X_ANALOG_BASE, 0x74},
- {WSA881X_SPKR_DRV_GAIN - WSA881X_ANALOG_BASE, 0x01},
- {WSA881X_SPKR_DAC_CTL - WSA881X_ANALOG_BASE, 0x40},
- {WSA881X_SPKR_DRV_DBG - WSA881X_ANALOG_BASE, 0x15},
- {WSA881X_SPKR_PWRSTG_DBG - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_OCP_CTL - WSA881X_ANALOG_BASE, 0xD4},
- {WSA881X_SPKR_CLIP_CTL - WSA881X_ANALOG_BASE, 0x90},
- {WSA881X_SPKR_BBM_CTL - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_MISC_CTL1 - WSA881X_ANALOG_BASE, 0x80},
- {WSA881X_SPKR_MISC_CTL2 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_BIAS_INT - WSA881X_ANALOG_BASE, 0x56},
- {WSA881X_SPKR_PA_INT - WSA881X_ANALOG_BASE, 0x54},
- {WSA881X_SPKR_BIAS_CAL - WSA881X_ANALOG_BASE, 0xAC},
- {WSA881X_SPKR_BIAS_PSRR - WSA881X_ANALOG_BASE, 0x54},
- {WSA881X_SPKR_STATUS1 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_STATUS2 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_BOOST_EN_CTL - WSA881X_ANALOG_BASE, 0x18},
- {WSA881X_BOOST_CURRENT_LIMIT - WSA881X_ANALOG_BASE, 0x7A},
- {WSA881X_BOOST_PS_CTL - WSA881X_ANALOG_BASE, 0xC0},
- {WSA881X_BOOST_PRESET_OUT1 - WSA881X_ANALOG_BASE, 0x77},
- {WSA881X_BOOST_PRESET_OUT2 - WSA881X_ANALOG_BASE, 0x70},
- {WSA881X_BOOST_FORCE_OUT - WSA881X_ANALOG_BASE, 0x0E},
- {WSA881X_BOOST_LDO_PROG - WSA881X_ANALOG_BASE, 0x16},
- {WSA881X_BOOST_SLOPE_COMP_ISENSE_FB - WSA881X_ANALOG_BASE, 0x71},
- {WSA881X_BOOST_RON_CTL - WSA881X_ANALOG_BASE, 0x0F},
- {WSA881X_BOOST_LOOP_STABILITY - WSA881X_ANALOG_BASE, 0xAD},
- {WSA881X_BOOST_ZX_CTL - WSA881X_ANALOG_BASE, 0x34},
- {WSA881X_BOOST_START_CTL - WSA881X_ANALOG_BASE, 0x23},
- {WSA881X_BOOST_MISC1_CTL - WSA881X_ANALOG_BASE, 0x80},
- {WSA881X_BOOST_MISC2_CTL - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_BOOST_MISC3_CTL - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_BOOST_ATEST_CTL - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_PROT_FE_GAIN - WSA881X_ANALOG_BASE, 0x46},
- {WSA881X_SPKR_PROT_FE_CM_LDO_SET - WSA881X_ANALOG_BASE, 0x3B},
- {WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1 - WSA881X_ANALOG_BASE, 0x8D},
- {WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2 - WSA881X_ANALOG_BASE, 0x8D},
- {WSA881X_SPKR_PROT_ATEST1 - WSA881X_ANALOG_BASE, 0x01},
- {WSA881X_SPKR_PROT_ATEST2 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_PROT_FE_VSENSE_VCM - WSA881X_ANALOG_BASE, 0x8D},
- {WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1 - WSA881X_ANALOG_BASE, 0x4D},
- {WSA881X_BONGO_RESRV_REG1 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_BONGO_RESRV_REG2 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_PROT_SAR - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_STATUS3 - WSA881X_ANALOG_BASE, 0x00},
-};
-
-struct reg_default wsa881x_rev_2_0_dig[] = {
- {WSA881X_RESET_CTL, 0x00},
- {WSA881X_TADC_VALUE_CTL, 0x01},
- {WSA881X_INTR_MASK, 0x1B},
- {WSA881X_IOPAD_CTL, 0x00},
- {WSA881X_OTP_REG_28, 0x3F},
- {WSA881X_OTP_REG_29, 0x3F},
- {WSA881X_OTP_REG_30, 0x01},
- {WSA881X_OTP_REG_31, 0x01},
-};
-
-struct reg_default wsa881x_rev_2_0_ana[] = {
- {WSA881X_TEMP_ADC_CTRL, 0x03},
- {WSA881X_ADC_SEL_IBIAS, 0x45},
- {WSA881X_SPKR_DRV_GAIN, 0xC1},
- {WSA881X_SPKR_DAC_CTL, 0x42},
- {WSA881X_SPKR_BBM_CTL, 0x02},
- {WSA881X_SPKR_MISC_CTL1, 0x40},
- {WSA881X_SPKR_MISC_CTL2, 0x07},
- {WSA881X_SPKR_BIAS_INT, 0x5F},
- {WSA881X_SPKR_BIAS_PSRR, 0x44},
- {WSA881X_BOOST_PS_CTL, 0xA0},
- {WSA881X_BOOST_PRESET_OUT1, 0xB7},
- {WSA881X_BOOST_LOOP_STABILITY, 0x8D},
- {WSA881X_SPKR_PROT_ATEST2, 0x02},
- {WSA881X_BONGO_RESRV_REG1, 0x5E},
- {WSA881X_BONGO_RESRV_REG2, 0x07},
-};
-
-struct reg_default wsa881x_rev_2_0_regmap_ana[] = {
- {WSA881X_TEMP_ADC_CTRL - WSA881X_ANALOG_BASE, 0x03},
- {WSA881X_ADC_SEL_IBIAS - WSA881X_ANALOG_BASE, 0x45},
- {WSA881X_SPKR_DRV_GAIN - WSA881X_ANALOG_BASE, 0xC1},
- {WSA881X_SPKR_DAC_CTL - WSA881X_ANALOG_BASE, 0x42},
- {WSA881X_SPKR_BBM_CTL - WSA881X_ANALOG_BASE, 0x02},
- {WSA881X_SPKR_MISC_CTL1 - WSA881X_ANALOG_BASE, 0x40},
- {WSA881X_SPKR_MISC_CTL2 - WSA881X_ANALOG_BASE, 0x07},
- {WSA881X_SPKR_BIAS_INT - WSA881X_ANALOG_BASE, 0x5F},
- {WSA881X_SPKR_BIAS_PSRR - WSA881X_ANALOG_BASE, 0x44},
- {WSA881X_BOOST_PS_CTL - WSA881X_ANALOG_BASE, 0xA0},
- {WSA881X_BOOST_PRESET_OUT1 - WSA881X_ANALOG_BASE, 0xB7},
- {WSA881X_BOOST_LOOP_STABILITY - WSA881X_ANALOG_BASE, 0x8D},
- {WSA881X_SPKR_PROT_ATEST2 - WSA881X_ANALOG_BASE, 0x02},
- {WSA881X_BONGO_RESRV_REG1 - WSA881X_ANALOG_BASE, 0x5E},
- {WSA881X_BONGO_RESRV_REG2 - WSA881X_ANALOG_BASE, 0x07},
-};
-
-/**
- * wsa881x_update_reg_defaults_2_0 - update default values of regs for v2.0
- *
- * WSA881x v2.0 has different default values for certain analog and digital
- * registers compared to v1.x. Therefore, update the values of these registers
- * with the values from tables defined above for v2.0.
- */
-void wsa881x_update_reg_defaults_2_0(void)
-{
- int i, j;
-
- for (i = 0; i < ARRAY_SIZE(wsa881x_rev_2_0_dig); i++) {
- for (j = 0; j < ARRAY_SIZE(wsa881x_ana_reg_defaults); j++)
- if (wsa881x_ana_reg_defaults[j].reg ==
- wsa881x_rev_2_0_dig[i].reg)
- wsa881x_ana_reg_defaults[j].def =
- wsa881x_rev_2_0_dig[i].def;
- }
- for (i = 0; i < ARRAY_SIZE(wsa881x_rev_2_0_ana); i++) {
- for (j = 0; j < ARRAY_SIZE(wsa881x_ana_reg_defaults); j++)
- if (wsa881x_ana_reg_defaults[j].reg ==
- wsa881x_rev_2_0_ana[i].reg)
- wsa881x_ana_reg_defaults[j].def =
- wsa881x_rev_2_0_ana[i].def;
- }
-}
-EXPORT_SYMBOL(wsa881x_update_reg_defaults_2_0);
-
-/**
- * wsa881x_update_regmap_2_0 - update regmap framework with new tables
- * @regmap: pointer to WSA881x regmap structure
- * @flag: indicates digital or analog WSA881x slave
- *
- * WSA881x v2.0 has some new registers for both analog and digital slaves.
- * Update the regmap framework with all the new registers.
- */
-void wsa881x_update_regmap_2_0(struct regmap *regmap, int flag)
-{
- u16 ret = 0;
-
- switch (flag) {
- case WSA881X_DIGITAL_SLAVE:
- ret = regmap_register_patch(regmap, wsa881x_rev_2_0_dig,
- ARRAY_SIZE(wsa881x_rev_2_0_dig));
- break;
- case WSA881X_ANALOG_SLAVE:
- ret = regmap_register_patch(regmap, wsa881x_rev_2_0_ana,
- ARRAY_SIZE(wsa881x_rev_2_0_ana));
- break;
- default:
- pr_debug("%s: unknown version", __func__);
- ret = -EINVAL;
- break;
- }
- if (ret)
- pr_err("%s: Failed to update regmap defaults ret= %d\n",
- __func__, ret);
-}
-EXPORT_SYMBOL(wsa881x_update_regmap_2_0);
-
-static bool wsa881x_readable_register(struct device *dev, unsigned int reg)
-{
- return wsa881x_ana_reg_readable[reg];
-}
-
-static bool wsa881x_volatile_register(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case WSA881X_CHIP_ID0:
- case WSA881X_CHIP_ID1:
- case WSA881X_CHIP_ID2:
- case WSA881X_CHIP_ID3:
- case WSA881X_BUS_ID:
- case WSA881X_TEMP_MSB:
- case WSA881X_TEMP_LSB:
- case WSA881X_SDM_PDM9_LSB:
- case WSA881X_SDM_PDM9_MSB:
- case WSA881X_OTP_REG_0:
- case WSA881X_OTP_REG_1:
- case WSA881X_OTP_REG_2:
- case WSA881X_OTP_REG_3:
- case WSA881X_OTP_REG_4:
- case WSA881X_OTP_REG_5:
- case WSA881X_OTP_REG_31:
- case WSA881X_TEMP_DOUT_MSB:
- case WSA881X_TEMP_DOUT_LSB:
- case WSA881X_TEMP_OP:
- case WSA881X_OTP_CTRL1:
- case WSA881X_INTR_STATUS:
- case WSA881X_ATE_TEST_MODE:
- case WSA881X_PIN_STATUS:
- case WSA881X_SWR_HM_TEST2:
- case WSA881X_SPKR_STATUS1:
- case WSA881X_SPKR_STATUS2:
- case WSA881X_SPKR_STATUS3:
- case WSA881X_SPKR_PROT_SAR:
- return true;
- default:
- return false;
- }
-}
-
-struct regmap_config wsa881x_ana_regmap_config[] = {
-{
- .reg_bits = 8,
- .val_bits = 8,
- .cache_type = REGCACHE_NONE,
- .reg_defaults = wsa881x_ana_reg_defaults_0,
- .num_reg_defaults = ARRAY_SIZE(wsa881x_ana_reg_defaults_0),
- .max_register = WSA881X_MAX_REGISTER,
- .volatile_reg = wsa881x_volatile_register,
- .readable_reg = wsa881x_readable_register,
- .reg_format_endian = REGMAP_ENDIAN_NATIVE,
- .val_format_endian = REGMAP_ENDIAN_NATIVE,
-},
-{
- .reg_bits = 8,
- .val_bits = 8,
- .cache_type = REGCACHE_NONE,
- .reg_defaults = wsa881x_ana_reg_defaults_1,
- .num_reg_defaults = ARRAY_SIZE(wsa881x_ana_reg_defaults_1),
- .max_register = WSA881X_MAX_REGISTER,
- .volatile_reg = wsa881x_volatile_register,
- .readable_reg = wsa881x_readable_register,
- .reg_format_endian = REGMAP_ENDIAN_NATIVE,
- .val_format_endian = REGMAP_ENDIAN_NATIVE,
-}
-};
diff --git a/sound/soc/codecs/wsa881x-tables-analog.c b/sound/soc/codecs/wsa881x-tables-analog.c
deleted file mode 100644
index 061ed6f..0000000
--- a/sound/soc/codecs/wsa881x-tables-analog.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/regmap.h>
-#include <linux/device.h>
-#include "wsa881x-registers-analog.h"
-
-const u8 wsa881x_ana_reg_readable[WSA881X_CACHE_SIZE] = {
- [WSA881X_CHIP_ID0] = 1,
- [WSA881X_CHIP_ID1] = 1,
- [WSA881X_CHIP_ID2] = 1,
- [WSA881X_CHIP_ID3] = 1,
- [WSA881X_BUS_ID] = 1,
- [WSA881X_CDC_RST_CTL] = 1,
- [WSA881X_CDC_TOP_CLK_CTL] = 1,
- [WSA881X_CDC_ANA_CLK_CTL] = 1,
- [WSA881X_CDC_DIG_CLK_CTL] = 1,
- [WSA881X_CLOCK_CONFIG] = 1,
- [WSA881X_ANA_CTL] = 1,
- [WSA881X_SWR_RESET_EN] = 1,
- [WSA881X_RESET_CTL] = 1,
- [WSA881X_TADC_VALUE_CTL] = 1,
- [WSA881X_TEMP_DETECT_CTL] = 1,
- [WSA881X_TEMP_MSB] = 1,
- [WSA881X_TEMP_LSB] = 1,
- [WSA881X_TEMP_CONFIG0] = 1,
- [WSA881X_TEMP_CONFIG1] = 1,
- [WSA881X_CDC_CLIP_CTL] = 1,
- [WSA881X_SDM_PDM9_LSB] = 1,
- [WSA881X_SDM_PDM9_MSB] = 1,
- [WSA881X_CDC_RX_CTL] = 1,
- [WSA881X_DEM_BYPASS_DATA0] = 1,
- [WSA881X_DEM_BYPASS_DATA1] = 1,
- [WSA881X_DEM_BYPASS_DATA2] = 1,
- [WSA881X_DEM_BYPASS_DATA3] = 1,
- [WSA881X_OTP_CTRL0] = 1,
- [WSA881X_OTP_CTRL1] = 1,
- [WSA881X_HDRIVE_CTL_GROUP1] = 1,
- [WSA881X_INTR_MODE] = 1,
- [WSA881X_INTR_MASK] = 1,
- [WSA881X_INTR_STATUS] = 1,
- [WSA881X_INTR_CLEAR] = 1,
- [WSA881X_INTR_LEVEL] = 1,
- [WSA881X_INTR_SET] = 1,
- [WSA881X_INTR_TEST] = 1,
- [WSA881X_PDM_TEST_MODE] = 1,
- [WSA881X_ATE_TEST_MODE] = 1,
- [WSA881X_PIN_CTL_MODE] = 1,
- [WSA881X_PIN_CTL_OE] = 1,
- [WSA881X_PIN_WDATA_IOPAD] = 1,
- [WSA881X_PIN_STATUS] = 1,
- [WSA881X_DIG_DEBUG_MODE] = 1,
- [WSA881X_DIG_DEBUG_SEL] = 1,
- [WSA881X_DIG_DEBUG_EN] = 1,
- [WSA881X_SWR_HM_TEST1] = 1,
- [WSA881X_SWR_HM_TEST2] = 1,
- [WSA881X_TEMP_DETECT_DBG_CTL] = 1,
- [WSA881X_TEMP_DEBUG_MSB] = 1,
- [WSA881X_TEMP_DEBUG_LSB] = 1,
- [WSA881X_SAMPLE_EDGE_SEL] = 1,
- [WSA881X_IOPAD_CTL] = 1,
- [WSA881X_SPARE_0] = 1,
- [WSA881X_SPARE_1] = 1,
- [WSA881X_SPARE_2] = 1,
- [WSA881X_OTP_REG_0] = 1,
- [WSA881X_OTP_REG_1] = 1,
- [WSA881X_OTP_REG_2] = 1,
- [WSA881X_OTP_REG_3] = 1,
- [WSA881X_OTP_REG_4] = 1,
- [WSA881X_OTP_REG_5] = 1,
- [WSA881X_OTP_REG_6] = 1,
- [WSA881X_OTP_REG_7] = 1,
- [WSA881X_OTP_REG_8] = 1,
- [WSA881X_OTP_REG_9] = 1,
- [WSA881X_OTP_REG_10] = 1,
- [WSA881X_OTP_REG_11] = 1,
- [WSA881X_OTP_REG_12] = 1,
- [WSA881X_OTP_REG_13] = 1,
- [WSA881X_OTP_REG_14] = 1,
- [WSA881X_OTP_REG_15] = 1,
- [WSA881X_OTP_REG_16] = 1,
- [WSA881X_OTP_REG_17] = 1,
- [WSA881X_OTP_REG_18] = 1,
- [WSA881X_OTP_REG_19] = 1,
- [WSA881X_OTP_REG_20] = 1,
- [WSA881X_OTP_REG_21] = 1,
- [WSA881X_OTP_REG_22] = 1,
- [WSA881X_OTP_REG_23] = 1,
- [WSA881X_OTP_REG_24] = 1,
- [WSA881X_OTP_REG_25] = 1,
- [WSA881X_OTP_REG_26] = 1,
- [WSA881X_OTP_REG_27] = 1,
- [WSA881X_OTP_REG_28] = 1,
- [WSA881X_OTP_REG_29] = 1,
- [WSA881X_OTP_REG_30] = 1,
- [WSA881X_OTP_REG_31] = 1,
- [WSA881X_OTP_REG_63] = 1,
- /* Analog Registers */
- [WSA881X_BIAS_REF_CTRL] = 1,
- [WSA881X_BIAS_TEST] = 1,
- [WSA881X_BIAS_BIAS] = 1,
- [WSA881X_TEMP_OP] = 1,
- [WSA881X_TEMP_IREF_CTRL] = 1,
- [WSA881X_TEMP_ISENS_CTRL] = 1,
- [WSA881X_TEMP_CLK_CTRL] = 1,
- [WSA881X_TEMP_TEST] = 1,
- [WSA881X_TEMP_BIAS] = 1,
- [WSA881X_TEMP_ADC_CTRL] = 1,
- [WSA881X_TEMP_DOUT_MSB] = 1,
- [WSA881X_TEMP_DOUT_LSB] = 1,
- [WSA881X_ADC_EN_MODU_V] = 1,
- [WSA881X_ADC_EN_MODU_I] = 1,
- [WSA881X_ADC_EN_DET_TEST_V] = 1,
- [WSA881X_ADC_EN_DET_TEST_I] = 1,
- [WSA881X_ADC_SEL_IBIAS] = 1,
- [WSA881X_ADC_EN_SEL_IBIAS] = 1,
- [WSA881X_SPKR_DRV_EN] = 1,
- [WSA881X_SPKR_DRV_GAIN] = 1,
- [WSA881X_SPKR_DAC_CTL] = 1,
- [WSA881X_SPKR_DRV_DBG] = 1,
- [WSA881X_SPKR_PWRSTG_DBG] = 1,
- [WSA881X_SPKR_OCP_CTL] = 1,
- [WSA881X_SPKR_CLIP_CTL] = 1,
- [WSA881X_SPKR_BBM_CTL] = 1,
- [WSA881X_SPKR_MISC_CTL1] = 1,
- [WSA881X_SPKR_MISC_CTL2] = 1,
- [WSA881X_SPKR_BIAS_INT] = 1,
- [WSA881X_SPKR_PA_INT] = 1,
- [WSA881X_SPKR_BIAS_CAL] = 1,
- [WSA881X_SPKR_BIAS_PSRR] = 1,
- [WSA881X_SPKR_STATUS1] = 1,
- [WSA881X_SPKR_STATUS2] = 1,
- [WSA881X_BOOST_EN_CTL] = 1,
- [WSA881X_BOOST_CURRENT_LIMIT] = 1,
- [WSA881X_BOOST_PS_CTL] = 1,
- [WSA881X_BOOST_PRESET_OUT1] = 1,
- [WSA881X_BOOST_PRESET_OUT2] = 1,
- [WSA881X_BOOST_FORCE_OUT] = 1,
- [WSA881X_BOOST_LDO_PROG] = 1,
- [WSA881X_BOOST_SLOPE_COMP_ISENSE_FB] = 1,
- [WSA881X_BOOST_RON_CTL] = 1,
- [WSA881X_BOOST_LOOP_STABILITY] = 1,
- [WSA881X_BOOST_ZX_CTL] = 1,
- [WSA881X_BOOST_START_CTL] = 1,
- [WSA881X_BOOST_MISC1_CTL] = 1,
- [WSA881X_BOOST_MISC2_CTL] = 1,
- [WSA881X_BOOST_MISC3_CTL] = 1,
- [WSA881X_BOOST_ATEST_CTL] = 1,
- [WSA881X_SPKR_PROT_FE_GAIN] = 1,
- [WSA881X_SPKR_PROT_FE_CM_LDO_SET] = 1,
- [WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1] = 1,
- [WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2] = 1,
- [WSA881X_SPKR_PROT_ATEST1] = 1,
- [WSA881X_SPKR_PROT_ATEST2] = 1,
- [WSA881X_SPKR_PROT_FE_VSENSE_VCM] = 1,
- [WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1] = 1,
- [WSA881X_BONGO_RESRV_REG1] = 1,
- [WSA881X_BONGO_RESRV_REG2] = 1,
- [WSA881X_SPKR_PROT_SAR] = 1,
- [WSA881X_SPKR_STATUS3] = 1,
-};
diff --git a/sound/soc/msm/msm-audio-pinctrl.c b/sound/soc/msm/msm-audio-pinctrl.c
deleted file mode 100644
index f0fba84..0000000
--- a/sound/soc/msm/msm-audio-pinctrl.c
+++ /dev/null
@@ -1,316 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
-#include "msm-audio-pinctrl.h"
-
-/*
- * pinctrl -- handle to query pinctrl apis
- * cdc lines -- stores pinctrl handles for pinctrl states
- * active_set -- maintain the overall pinctrl state
- */
-struct cdc_pinctrl_info {
- struct pinctrl *pinctrl;
- struct pinctrl_state **cdc_lines;
- int active_set;
-};
-
-/*
- * gpiosets -- stores all gpiosets mentioned in dtsi file
- * gpiosets_comb_names -- stores all possible gpioset combinations
- * gpioset_state -- maintains counter for each gpioset
- * gpiosets_max -- maintain the total supported gpiosets
- * gpiosets_comb_max -- maintain the total gpiosets combinations
- */
-struct cdc_gpioset_info {
- char **gpiosets;
- char **gpiosets_comb_names;
- uint8_t *gpioset_state;
- int gpiosets_max;
- int gpiosets_comb_max;
-};
-
-static struct cdc_pinctrl_info pinctrl_info[MAX_PINCTRL_CLIENT];
-static struct cdc_gpioset_info gpioset_info[MAX_PINCTRL_CLIENT];
-
-/* Finds the index for the gpio set in the dtsi file */
-int msm_get_gpioset_index(enum pinctrl_client client, char *keyword)
-{
- int i;
-
- for (i = 0; i < gpioset_info[client].gpiosets_max; i++) {
- if (!(strcmp(gpioset_info[client].gpiosets[i], keyword)))
- break;
- }
- /* Checking if the keyword is present in dtsi or not */
- if (i != gpioset_info[client].gpiosets_max)
- return i;
- else
- return -EINVAL;
-}
-
-/*
- * This function reads the following from dtsi file
- * 1. All gpio sets
- * 2. All combinations of gpio sets
- * 3. Pinctrl handles to gpio sets
- *
- * Returns error if there is
- * 1. Problem reading from dtsi file
- * 2. Memory allocation failure
- */
-int msm_gpioset_initialize(enum pinctrl_client client,
- struct device *dev)
-{
- struct pinctrl *pinctrl;
- const char *gpioset_names = "qcom,msm-gpios";
- const char *gpioset_combinations = "qcom,pinctrl-names";
- const char *gpioset_names_str = NULL;
- const char *gpioset_comb_str = NULL;
- int num_strings = 0;
- int ret = 0;
- int i = 0;
-
- pr_debug("%s\n", __func__);
- pinctrl = devm_pinctrl_get(dev);
- if (IS_ERR(pinctrl)) {
- pr_err("%s: Unable to get pinctrl handle\n",
- __func__);
- return -EINVAL;
- }
- pinctrl_info[client].pinctrl = pinctrl;
-
- /* Reading of gpio sets */
- num_strings = of_property_count_strings(dev->of_node,
- gpioset_names);
- if (num_strings < 0) {
- dev_err(dev,
- "%s: missing %s in dt node or length is incorrect\n",
- __func__, gpioset_names);
- goto err;
- }
- gpioset_info[client].gpiosets_max = num_strings;
- gpioset_info[client].gpiosets = devm_kzalloc(dev,
- gpioset_info[client].gpiosets_max *
- sizeof(char *), GFP_KERNEL);
- if (!gpioset_info[client].gpiosets) {
- dev_err(dev, "Can't allocate memory for gpio set names\n");
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < num_strings; i++) {
- ret = of_property_read_string_index(dev->of_node,
- gpioset_names, i, &gpioset_names_str);
-
- gpioset_info[client].gpiosets[i] = devm_kzalloc(dev,
- (strlen(gpioset_names_str) + 1), GFP_KERNEL);
-
- if (!gpioset_info[client].gpiosets[i]) {
- dev_err(dev, "%s: Can't allocate gpiosets[%d] data\n",
- __func__, i);
- ret = -ENOMEM;
- goto err;
- }
- strlcpy(gpioset_info[client].gpiosets[i],
- gpioset_names_str, strlen(gpioset_names_str)+1);
- gpioset_names_str = NULL;
- }
- num_strings = 0;
-
- /* Allocating memory for gpio set counter */
- gpioset_info[client].gpioset_state = devm_kzalloc(dev,
- gpioset_info[client].gpiosets_max *
- sizeof(uint8_t), GFP_KERNEL);
- if (!gpioset_info[client].gpioset_state) {
- dev_err(dev, "Can't allocate memory for gpio set counter\n");
- ret = -ENOMEM;
- goto err;
- }
-
- /* Reading of all combinations of gpio sets */
- num_strings = of_property_count_strings(dev->of_node,
- gpioset_combinations);
- if (num_strings < 0) {
- dev_err(dev,
- "%s: missing %s in dt node or length is incorrect\n",
- __func__, gpioset_combinations);
- goto err;
- }
- gpioset_info[client].gpiosets_comb_max = num_strings;
- gpioset_info[client].gpiosets_comb_names = devm_kzalloc(dev,
- num_strings * sizeof(char *), GFP_KERNEL);
- if (!gpioset_info[client].gpiosets_comb_names) {
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < gpioset_info[client].gpiosets_comb_max; i++) {
- ret = of_property_read_string_index(dev->of_node,
- gpioset_combinations, i, &gpioset_comb_str);
-
- gpioset_info[client].gpiosets_comb_names[i] = devm_kzalloc(dev,
- (strlen(gpioset_comb_str) + 1), GFP_KERNEL);
- if (!gpioset_info[client].gpiosets_comb_names[i]) {
- ret = -ENOMEM;
- goto err;
- }
-
- strlcpy(gpioset_info[client].gpiosets_comb_names[i],
- gpioset_comb_str,
- strlen(gpioset_comb_str)+1);
- pr_debug("%s: GPIO configuration %s\n",
- __func__,
- gpioset_info[client].gpiosets_comb_names[i]);
- gpioset_comb_str = NULL;
- }
-
- /* Allocating memory for handles to pinctrl states */
- pinctrl_info[client].cdc_lines = devm_kzalloc(dev,
- num_strings * sizeof(char *), GFP_KERNEL);
- if (!pinctrl_info[client].cdc_lines) {
- ret = -ENOMEM;
- goto err;
- }
-
- /* Get pinctrl handles for gpio sets in dtsi file */
- for (i = 0; i < num_strings; i++) {
- pinctrl_info[client].cdc_lines[i] = pinctrl_lookup_state(
- pinctrl,
- (const char *)gpioset_info[client].
- gpiosets_comb_names[i]);
- if (IS_ERR(pinctrl_info[client].cdc_lines[i]))
- pr_err("%s: Unable to get pinctrl handle for %s\n",
- __func__, gpioset_info[client].
- gpiosets_comb_names[i]);
- }
- goto success;
-
-err:
- /* Free up memory allocated for gpio set combinations */
- for (i = 0; i < gpioset_info[client].gpiosets_max; i++) {
- if (gpioset_info[client].gpiosets[i] != NULL) {
- devm_kfree(dev, gpioset_info[client].gpiosets[i]);
- gpioset_info[client].gpiosets[i] = NULL;
- }
- }
- if (gpioset_info[client].gpiosets != NULL) {
- devm_kfree(dev, gpioset_info[client].gpiosets);
- gpioset_info[client].gpiosets = NULL;
- }
-
- /* Free up memory allocated for gpio set combinations */
- for (i = 0; i < gpioset_info[client].gpiosets_comb_max; i++) {
- if (gpioset_info[client].gpiosets_comb_names[i] != NULL) {
- devm_kfree(dev,
- gpioset_info[client].gpiosets_comb_names[i]);
- gpioset_info[client].gpiosets_comb_names[i] = NULL;
- }
- }
- if (gpioset_info[client].gpiosets_comb_names != NULL) {
- devm_kfree(dev, gpioset_info[client].gpiosets_comb_names);
- gpioset_info[client].gpiosets_comb_names = NULL;
- }
-
- /* Free up memory allocated for handles to pinctrl states */
- if (pinctrl_info[client].cdc_lines != NULL) {
- devm_kfree(dev, pinctrl_info[client].cdc_lines);
- pinctrl_info[client].cdc_lines = NULL;
- }
-
- /* Free up memory allocated for counter of gpio sets */
- if (gpioset_info[client].gpioset_state != NULL) {
- devm_kfree(dev, gpioset_info[client].gpioset_state);
- gpioset_info[client].gpioset_state = NULL;
- }
-
-success:
- return ret;
-}
-
-int msm_gpioset_activate(enum pinctrl_client client, char *keyword)
-{
- int ret = 0;
- int gp_set = 0;
- int active_set = 0;
-
- gp_set = msm_get_gpioset_index(client, keyword);
- if (gp_set < 0) {
- pr_err("%s: gpio set name does not exist\n",
- __func__);
- return gp_set;
- }
-
- if (!gpioset_info[client].gpioset_state[gp_set]) {
- /*
- * If pinctrl pointer is not valid,
- * no need to proceed further
- */
- active_set = pinctrl_info[client].active_set;
- if (IS_ERR(pinctrl_info[client].cdc_lines[active_set]))
- return 0;
-
- pinctrl_info[client].active_set |= (1 << gp_set);
- active_set = pinctrl_info[client].active_set;
- pr_debug("%s: pinctrl.active_set: %d\n", __func__, active_set);
-
- /* Select the appropriate pinctrl state */
- ret = pinctrl_select_state(pinctrl_info[client].pinctrl,
- pinctrl_info[client].cdc_lines[active_set]);
- }
- gpioset_info[client].gpioset_state[gp_set]++;
-
- return ret;
-}
-
-int msm_gpioset_suspend(enum pinctrl_client client, char *keyword)
-{
- int ret = 0;
- int gp_set = 0;
- int active_set = 0;
-
- gp_set = msm_get_gpioset_index(client, keyword);
- if (gp_set < 0) {
- pr_err("%s: gpio set name does not exist\n",
- __func__);
- return gp_set;
- }
-
- if (gpioset_info[client].gpioset_state[gp_set] == 1) {
- pinctrl_info[client].active_set &= ~(1 << gp_set);
- /*
- * If pinctrl pointer is not valid,
- * no need to proceed further
- */
- active_set = pinctrl_info[client].active_set;
- if (IS_ERR(pinctrl_info[client].cdc_lines[active_set]))
- return -EINVAL;
-
- pr_debug("%s: pinctrl.active_set: %d\n", __func__,
- pinctrl_info[client].active_set);
- /* Select the appropriate pinctrl state */
- ret = pinctrl_select_state(pinctrl_info[client].pinctrl,
- pinctrl_info[client].cdc_lines[pinctrl_info[client].
- active_set]);
- }
- if (!(gpioset_info[client].gpioset_state[gp_set])) {
- pr_err("%s: Invalid call to de activate gpios: %d\n", __func__,
- gpioset_info[client].gpioset_state[gp_set]);
- return -EINVAL;
- }
-
- gpioset_info[client].gpioset_state[gp_set]--;
-
- return ret;
-}
diff --git a/sound/soc/msm/msm-audio-pinctrl.h b/sound/soc/msm/msm-audio-pinctrl.h
deleted file mode 100644
index ec7c6aa..0000000
--- a/sound/soc/msm/msm-audio-pinctrl.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MSM_AUDIO_PINCTRL_H
-#define __MSM_AUDIO_PINCTRL_H
-
-enum pinctrl_client {
- CLIENT_WCD,
- CLIENT_WSA_BONGO_1,
- CLIENT_WSA_BONGO_2,
- MAX_PINCTRL_CLIENT,
-};
-
-
-/* finds the index for the gpio set in the dtsi file */
-int msm_get_gpioset_index(enum pinctrl_client client, char *keyword);
-
-/*
- * this function reads the following from dtsi file
- * 1. all gpio sets
- * 2. all combinations of gpio sets
- * 3. pinctrl handles to gpio sets
- *
- * returns error if there is
- * 1. problem reading from dtsi file
- * 2. memory allocation failure
- */
-int msm_gpioset_initialize(enum pinctrl_client client, struct device *dev);
-
-int msm_gpioset_activate(enum pinctrl_client client, char *keyword);
-
-int msm_gpioset_suspend(enum pinctrl_client client, char *keyword);
-
-#endif /* __MSM_AUDIO_PINCTRL_H */
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index c319ccf..30a4d59 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -2496,8 +2496,21 @@
.rate_min = 8000,
.rate_max = 384000,
},
+ .capture = {
+ .stream_name = "MultiMedia16 Capture",
+ .aif_name = "MM_UL16",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
.ops = &msm_fe_Multimedia_dai_ops,
- .compress_new = snd_soc_new_compress,
.name = "MultiMedia16",
.probe = fe_dai_probe,
},
diff --git a/sound/soc/msm/msm8996.c b/sound/soc/msm/msm8996.c
index 45c5479..0890037 100644
--- a/sound/soc/msm/msm8996.c
+++ b/sound/soc/msm/msm8996.c
@@ -2915,12 +2915,13 @@
.id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{
- .name = "MSM8996 Compress9",
- .stream_name = "Compress9",
+ .name = "MSM8996 ULL NOIRQ_2",
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index 222c65a..174db28 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -5312,12 +5312,13 @@
.id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
index 325d642..75a2bff 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
@@ -30,9 +30,12 @@
#include <sound/control.h>
#include <sound/q6audio-v2.h>
#include <sound/timer.h>
+#include <sound/hwdep.h>
+
#include <asm/dma.h>
#include <sound/tlv.h>
#include <sound/pcm_params.h>
+#include <sound/devdep_params.h>
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
@@ -421,6 +424,42 @@
return ret;
}
+
+static int msm_pcm_mmap_fd(struct snd_pcm_substream *substream,
+ struct snd_pcm_mmap_fd *mmap_fd)
+{
+ struct msm_audio *prtd;
+ struct audio_port_data *apd;
+ struct audio_buffer *ab;
+ int dir = -1;
+
+ if (!substream->runtime) {
+ pr_err("%s substream runtime not found\n", __func__);
+ return -EFAULT;
+ }
+
+ prtd = substream->runtime->private_data;
+ if (!prtd || !prtd->audio_client || !prtd->mmap_flag) {
+ pr_err("%s no audio client or not an mmap session\n", __func__);
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dir = IN;
+ else
+ dir = OUT;
+
+ apd = prtd->audio_client->port;
+ ab = &(apd[dir].buf[0]);
+ mmap_fd->fd = ion_share_dma_buf_fd(ab->client, ab->handle);
+ if (mmap_fd->fd >= 0) {
+ mmap_fd->dir = dir;
+ mmap_fd->actual_size = ab->actual_size;
+ mmap_fd->size = ab->size;
+ }
+ return mmap_fd->fd < 0 ? -EFAULT : 0;
+}
+
static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
@@ -445,6 +484,15 @@
return snd_pcm_lib_ioctl(substream, cmd, arg);
}
+#ifdef CONFIG_COMPAT
+static int msm_pcm_compat_ioctl(struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg)
+{
+ /* we only handle RESET which is common for both modes */
+ return msm_pcm_ioctl(substream, cmd, arg);
+}
+#endif
+
static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -994,6 +1042,101 @@
return 0;
}
+static int msm_pcm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ struct snd_pcm *pcm = hw->private_data;
+ struct snd_pcm_mmap_fd __user *_mmap_fd = NULL;
+ struct snd_pcm_mmap_fd mmap_fd;
+ struct snd_pcm_substream *substream = NULL;
+ int32_t dir = -1;
+
+ switch (cmd) {
+ case SNDRV_PCM_IOCTL_MMAP_DATA_FD:
+ _mmap_fd = (struct snd_pcm_mmap_fd __user *)arg;
+ if (get_user(dir, (int32_t __user *)&(_mmap_fd->dir))) {
+ pr_err("%s: error copying mmap_fd from user\n",
+ __func__);
+ ret = -EFAULT;
+ break;
+ }
+ if (dir != OUT && dir != IN) {
+ pr_err("%s invalid stream dir\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+ substream = pcm->streams[dir].substream;
+ if (!substream) {
+ pr_err("%s substream not found\n", __func__);
+ ret = -ENODEV;
+ break;
+ }
+ pr_debug("%s : %s MMAP Data fd\n", __func__,
+ dir == 0 ? "P" : "C");
+ if (msm_pcm_mmap_fd(substream, &mmap_fd) < 0) {
+ pr_err("%s: error getting fd\n",
+ __func__);
+ ret = -EFAULT;
+ break;
+ }
+ if (put_user(mmap_fd.fd, &_mmap_fd->fd) ||
+ put_user(mmap_fd.size, &_mmap_fd->size) ||
+ put_user(mmap_fd.actual_size, &_mmap_fd->actual_size)) {
+ pr_err("%s: error copying fd\n", __func__);
+ return -EFAULT;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int msm_pcm_hwdep_compat_ioctl(struct snd_hwdep *hw,
+ struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ /* we only support mmap fd. Handling is common in both modes */
+ return msm_pcm_hwdep_ioctl(hw, file, cmd, arg);
+}
+#else
+static int msm_pcm_hwdep_compat_ioctl(struct snd_hwdep *hw,
+ struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return -EINVAL;
+}
+#endif
+
+static int msm_pcm_add_hwdep_dev(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_hwdep *hwdep;
+ int rc;
+ char id[] = "NOIRQ_NN";
+
+ snprintf(id, sizeof(id), "NOIRQ_%d", runtime->pcm->device);
+ pr_debug("%s: pcm dev %d\n", __func__, runtime->pcm->device);
+ rc = snd_hwdep_new(runtime->card->snd_card,
+ &id[0],
+ HWDEP_FE_BASE + runtime->pcm->device,
+ &hwdep);
+ if (!hwdep || rc < 0) {
+ pr_err("%s: hwdep intf failed to create %s - hwdep\n", __func__,
+ id);
+ return rc;
+ }
+
+ hwdep->iface = SNDRV_HWDEP_IFACE_AUDIO_BE; /* for lack of a FE iface */
+ hwdep->private_data = runtime->pcm; /* of type struct snd_pcm */
+ hwdep->ops.ioctl = msm_pcm_hwdep_ioctl;
+ hwdep->ops.ioctl_compat = msm_pcm_hwdep_compat_ioctl;
+ return 0;
+}
static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
@@ -1027,7 +1170,9 @@
pr_err("%s: Could not add app type controls failed %d\n",
__func__, ret);
}
-
+ ret = msm_pcm_add_hwdep_dev(rtd);
+ if (ret)
+ pr_err("%s: Could not add hw dep node\n", __func__);
pcm->nonatomic = true;
exit:
return ret;
@@ -1040,6 +1185,9 @@
.copy = msm_pcm_copy,
.hw_params = msm_pcm_hw_params,
.ioctl = msm_pcm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = msm_pcm_compat_ioctl,
+#endif
.trigger = msm_pcm_trigger,
.pointer = msm_pcm_pointer,
.mmap = msm_pcm_mmap,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index d67296f..ef50d92 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -1768,9 +1768,8 @@
static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
@@ -1810,9 +1809,8 @@
static int msm_routing_put_listen_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
@@ -1928,9 +1926,8 @@
static int msm_routing_put_voice_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
@@ -1972,9 +1969,8 @@
static int msm_routing_put_voice_stub_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
@@ -2075,9 +2071,8 @@
static int msm_routing_put_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: FM Switch enable %ld\n", __func__,
@@ -2104,9 +2099,8 @@
static int msm_routing_put_hfp_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: HFP Switch enable %ld\n", __func__,
@@ -2133,9 +2127,8 @@
static int msm_routing_put_int0_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: INT0 MI2S Switch enable %ld\n", __func__,
@@ -2162,9 +2155,8 @@
static int msm_routing_put_int4_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: INT4 MI2S Switch enable %ld\n", __func__,
@@ -2191,9 +2183,8 @@
static int msm_routing_put_usb_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: USB Switch enable %ld\n", __func__,
@@ -2220,9 +2211,8 @@
static int msm_routing_put_pri_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: PRI MI2S Switch enable %ld\n", __func__,
@@ -2249,9 +2239,8 @@
static int msm_routing_put_sec_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: SEC MI2S Switch enable %ld\n", __func__,
@@ -2280,9 +2269,8 @@
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: TERT MI2S Switch enable %ld\n", __func__,
@@ -2311,9 +2299,8 @@
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: QUAT MI2S Switch enable %ld\n", __func__,
@@ -2340,9 +2327,8 @@
static int msm_routing_put_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: FM Switch enable %ld\n", __func__,
@@ -3490,9 +3476,8 @@
struct snd_ctl_elem_value *ucontrol)
{
int ec_ref_port_id;
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
@@ -3655,6 +3640,11 @@
msm_route_ec_ref_rx_enum[0],
msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul16 =
+ SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL16 MUX Mux",
+ msm_route_ec_ref_rx_enum[0],
+ msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
static const struct snd_kcontrol_new ext_ec_ref_mux_ul17 =
SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL17 MUX Mux",
msm_route_ec_ref_rx_enum[0],
@@ -3684,9 +3674,8 @@
static int msm_routing_ext_ec_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
int mux = ucontrol->value.enumerated.item[0];
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
int ret = 1;
@@ -7103,6 +7092,114 @@
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new mmul16_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AUX_PCM_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_AUX_PCM_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new mmul9_mixer_controls[] = {
SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
@@ -11392,6 +11489,7 @@
SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL9", "MultiMedia9 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL16", "MultiMedia16 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL17", "MultiMedia17 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL18", "MultiMedia18 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL19", "MultiMedia19 Capture", 0, 0, 0, 0),
@@ -12127,6 +12225,8 @@
mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia9 Mixer", SND_SOC_NOPM, 0, 0,
mmul9_mixer_controls, ARRAY_SIZE(mmul9_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia16 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul16_mixer_controls, ARRAY_SIZE(mmul16_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia17 Mixer", SND_SOC_NOPM, 0, 0,
mmul17_mixer_controls, ARRAY_SIZE(mmul17_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia18 Mixer", SND_SOC_NOPM, 0, 0,
@@ -12457,6 +12557,8 @@
&ext_ec_ref_mux_ul8),
SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL9 MUX", SND_SOC_NOPM, 0, 0,
&ext_ec_ref_mux_ul9),
+ SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL16 MUX", SND_SOC_NOPM, 0, 0,
+ &ext_ec_ref_mux_ul16),
SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL17 MUX", SND_SOC_NOPM, 0, 0,
&ext_ec_ref_mux_ul17),
SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL18 MUX", SND_SOC_NOPM, 0, 0,
@@ -12708,6 +12810,7 @@
{"MultiMedia8 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia3 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia16 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia5 Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
{"MultiMedia5 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13276,6 +13379,7 @@
{"MultiMedia2 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia3 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia5 Mixer", "MI2S_TX", "MI2S_TX"},
+ {"MultiMedia16 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia6 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
@@ -13294,12 +13398,15 @@
{"MultiMedia1 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
{"MultiMedia3 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
{"MultiMedia5 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
+ {"MultiMedia16 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"},
+ {"MultiMedia16 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
{"MultiMedia1 Mixer", "TERT_AUXPCM_UL_TX", "TERT_AUX_PCM_TX"},
{"MultiMedia3 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"},
{"MultiMedia5 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"},
{"MultiMedia1 Mixer", "QUAT_AUXPCM_UL_TX", "QUAT_AUX_PCM_TX"},
{"MultiMedia3 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
{"MultiMedia5 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
+ {"MultiMedia16 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
{"MultiMedia2 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia2 Mixer", "SLIM_6_TX", "SLIMBUS_6_TX"},
{"MultiMedia2 Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
@@ -13314,9 +13421,11 @@
{"MultiMedia6 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
{"MultiMedia3 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
{"MultiMedia5 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
+ {"MultiMedia16 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
{"MultiMedia6 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia3 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia5 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+ {"MultiMedia16 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia6 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia6 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"MultiMedia6 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
@@ -13478,6 +13587,24 @@
{"MultiMedia6 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
{"MultiMedia8 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+
{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -13560,8 +13687,10 @@
{"MultiMedia19 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia5 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia8 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"MultiMedia16 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia1 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia4 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+ {"MultiMedia16 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia17 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia18 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia19 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
@@ -13577,6 +13706,7 @@
{"MultiMedia19 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia5 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia8 Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"MultiMedia16 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MM_UL1", NULL, "MultiMedia1 Mixer"},
{"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MM_UL2", NULL, "MultiMedia2 Mixer"},
@@ -13586,6 +13716,7 @@
{"MM_UL6", NULL, "MultiMedia6 Mixer"},
{"MM_UL8", NULL, "MultiMedia8 Mixer"},
{"MM_UL9", NULL, "MultiMedia9 Mixer"},
+ {"MM_UL16", NULL, "MultiMedia16 Mixer"},
{"MM_UL17", NULL, "MultiMedia17 Mixer"},
{"MM_UL18", NULL, "MultiMedia18 Mixer"},
{"MM_UL19", NULL, "MultiMedia19 Mixer"},
@@ -13996,6 +14127,7 @@
{"MM_UL6", NULL, "AUDIO_REF_EC_UL6 MUX"},
{"MM_UL8", NULL, "AUDIO_REF_EC_UL8 MUX"},
{"MM_UL9", NULL, "AUDIO_REF_EC_UL9 MUX"},
+ {"MM_UL16", NULL, "AUDIO_REF_EC_UL16 MUX"},
{"MM_UL17", NULL, "AUDIO_REF_EC_UL17 MUX"},
{"MM_UL18", NULL, "AUDIO_REF_EC_UL18 MUX"},
{"MM_UL19", NULL, "AUDIO_REF_EC_UL19 MUX"},
diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
index 77d3875..6ff29c9 100644
--- a/sound/soc/msm/sdm660-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -1270,10 +1270,10 @@
.id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{/* hw:x,33 */
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_capture = 1,
.dpcm_playback = 1,
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index 84d1c2e..426c150 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -20,7 +20,6 @@
#include <sound/q6core.h>
#include <linux/qdsp6v2/audio_notifier.h>
#include "qdsp6v2/msm-pcm-routing-v2.h"
-#include "msm-audio-pinctrl.h"
#include "sdm660-common.h"
#include "sdm660-external.h"
#include "../codecs/wcd9335.h"
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index a57d6f6..4b9334b 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -2194,10 +2194,10 @@
.id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{/* hw:x,33 */
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_capture = 1,
.dpcm_playback = 1,
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 9248766..838771c 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -134,6 +134,13 @@
u32 msm_is_mi2s_master;
};
+static u32 mi2s_ebit_clk[MI2S_MAX] = {
+ Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT,
+ Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT,
+ Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT,
+ Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT
+};
+
struct auxpcm_conf {
struct mutex lock;
u32 ref_cnt;
@@ -434,6 +441,7 @@
"Five", "Six", "Seven",
"Eight"};
static const char *const hifi_text[] = {"Off", "On"};
+static const char *const qos_text[] = {"Disable", "Enable"};
static SOC_ENUM_SINGLE_EXT_DECL(slim_0_rx_chs, slim_rx_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_2_rx_chs, slim_rx_ch_text);
@@ -495,10 +503,14 @@
static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(mi2s_rx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(mi2s_tx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(aux_pcm_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(aux_pcm_tx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(hifi_function, hifi_text);
+static SOC_ENUM_SINGLE_EXT_DECL(qos_vote, qos_text);
static struct platform_device *spdev;
static int msm_hifi_control;
+static int qos_vote_status;
static bool is_initial_boot;
static bool codec_reg_done;
@@ -2252,7 +2264,7 @@
return sample_rate;
}
-static int mi2s_get_format(int value)
+static int mi2s_auxpcm_get_format(int value)
{
int format;
@@ -2276,7 +2288,7 @@
return format;
}
-static int mi2s_get_format_value(int format)
+static int mi2s_auxpcm_get_format_value(int format)
{
int value;
@@ -2441,7 +2453,7 @@
return idx;
ucontrol->value.enumerated.item[0] =
- mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+ mi2s_auxpcm_get_format_value(mi2s_rx_cfg[idx].bit_format);
pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
idx, mi2s_rx_cfg[idx].bit_format,
@@ -2459,7 +2471,7 @@
return idx;
mi2s_rx_cfg[idx].bit_format =
- mi2s_get_format(ucontrol->value.enumerated.item[0]);
+ mi2s_auxpcm_get_format(ucontrol->value.enumerated.item[0]);
pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
idx, mi2s_rx_cfg[idx].bit_format,
@@ -2477,7 +2489,7 @@
return idx;
ucontrol->value.enumerated.item[0] =
- mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+ mi2s_auxpcm_get_format_value(mi2s_tx_cfg[idx].bit_format);
pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
idx, mi2s_tx_cfg[idx].bit_format,
@@ -2495,7 +2507,7 @@
return idx;
mi2s_tx_cfg[idx].bit_format =
- mi2s_get_format(ucontrol->value.enumerated.item[0]);
+ mi2s_auxpcm_get_format(ucontrol->value.enumerated.item[0]);
pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
idx, mi2s_tx_cfg[idx].bit_format,
@@ -2504,6 +2516,78 @@
return 0;
}
+static int msm_aux_pcm_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = aux_pcm_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_auxpcm_get_format_value(aux_pcm_rx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, aux_pcm_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_aux_pcm_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = aux_pcm_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ aux_pcm_rx_cfg[idx].bit_format =
+ mi2s_auxpcm_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, aux_pcm_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_aux_pcm_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = aux_pcm_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_auxpcm_get_format_value(aux_pcm_tx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, aux_pcm_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_aux_pcm_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = aux_pcm_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ aux_pcm_tx_cfg[idx].bit_format =
+ mi2s_auxpcm_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, aux_pcm_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
static int msm_hifi_ctrl(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
@@ -2554,6 +2638,72 @@
return 0;
}
+static s32 msm_qos_value(struct snd_pcm_runtime *runtime)
+{
+ s32 usecs;
+
+ if (!runtime->rate)
+ return -EINVAL;
+
+ /* take 75% of period time as the deadline */
+ usecs = (750000 / runtime->rate) * runtime->period_size;
+ usecs += ((750000 % runtime->rate) * runtime->period_size) /
+ runtime->rate;
+
+ return usecs;
+}
+
+static int msm_qos_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.enumerated.item[0] = qos_vote_status;
+
+ return 0;
+}
+
+static int msm_qos_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct snd_soc_card *card = codec->component.card;
+ const char *be_name = MSM_DAILINK_NAME(LowLatency);
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_pcm_substream *substream;
+ s32 usecs;
+
+ rtd = snd_soc_get_pcm_runtime(card, be_name);
+ if (!rtd) {
+ pr_err("%s: fail to get pcm runtime for %s\n",
+ __func__, be_name);
+ return -EINVAL;
+ }
+
+ substream = rtd->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (!substream) {
+ pr_err("%s: substream is null\n", __func__);
+ return -EINVAL;
+ }
+
+ qos_vote_status = ucontrol->value.enumerated.item[0];
+ if (qos_vote_status) {
+ if (pm_qos_request_active(&substream->latency_pm_qos_req))
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
+ if (!substream->runtime) {
+ pr_err("%s: runtime is null\n", __func__);
+ return -EINVAL;
+ }
+ usecs = msm_qos_value(substream->runtime);
+ if (usecs >= 0)
+ pm_qos_add_request(&substream->latency_pm_qos_req,
+ PM_QOS_CPU_DMA_LATENCY, usecs);
+ } else {
+ if (pm_qos_request_active(&substream->latency_pm_qos_req))
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
+ }
+
+ return 0;
+}
+
static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("SLIM_0_RX Channels", slim_0_rx_chs,
msm_slim_rx_ch_get, msm_slim_rx_ch_put),
@@ -2765,8 +2915,26 @@
msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
SOC_ENUM_EXT("QUAT_MI2S_TX Format", mi2s_tx_format,
msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("PRIM_AUX_PCM_RX Format", aux_pcm_rx_format,
+ msm_aux_pcm_rx_format_get, msm_aux_pcm_rx_format_put),
+ SOC_ENUM_EXT("PRIM_AUX_PCM_TX Format", aux_pcm_tx_format,
+ msm_aux_pcm_tx_format_get, msm_aux_pcm_tx_format_put),
+ SOC_ENUM_EXT("SEC_AUX_PCM_RX Format", aux_pcm_rx_format,
+ msm_aux_pcm_rx_format_get, msm_aux_pcm_rx_format_put),
+ SOC_ENUM_EXT("SEC_AUX_PCM_TX Format", aux_pcm_tx_format,
+ msm_aux_pcm_tx_format_get, msm_aux_pcm_tx_format_put),
+ SOC_ENUM_EXT("TERT_AUX_PCM_RX Format", aux_pcm_rx_format,
+ msm_aux_pcm_rx_format_get, msm_aux_pcm_rx_format_put),
+ SOC_ENUM_EXT("TERT_AUX_PCM_TX Format", aux_pcm_tx_format,
+ msm_aux_pcm_tx_format_get, msm_aux_pcm_tx_format_put),
+ SOC_ENUM_EXT("QUAT_AUX_PCM_RX Format", aux_pcm_rx_format,
+ msm_aux_pcm_rx_format_get, msm_aux_pcm_rx_format_put),
+ SOC_ENUM_EXT("QUAT_AUX_PCM_TX Format", aux_pcm_tx_format,
+ msm_aux_pcm_tx_format_get, msm_aux_pcm_tx_format_put),
SOC_ENUM_EXT("HiFi Function", hifi_function, msm_hifi_get,
msm_hifi_put),
+ SOC_ENUM_EXT("MultiMedia5_RX QOS Vote", qos_vote, msm_qos_ctl_get,
+ msm_qos_ctl_put),
};
static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec,
@@ -3166,6 +3334,8 @@
break;
case MSM_BACKEND_DAI_AUXPCM_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_rx_cfg[PRIM_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_rx_cfg[PRIM_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3173,6 +3343,8 @@
break;
case MSM_BACKEND_DAI_AUXPCM_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_tx_cfg[PRIM_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_tx_cfg[PRIM_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3180,6 +3352,8 @@
break;
case MSM_BACKEND_DAI_SEC_AUXPCM_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_rx_cfg[SEC_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_rx_cfg[SEC_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3187,6 +3361,8 @@
break;
case MSM_BACKEND_DAI_SEC_AUXPCM_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_tx_cfg[SEC_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_tx_cfg[SEC_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3194,6 +3370,8 @@
break;
case MSM_BACKEND_DAI_TERT_AUXPCM_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_rx_cfg[TERT_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_rx_cfg[TERT_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3201,6 +3379,8 @@
break;
case MSM_BACKEND_DAI_TERT_AUXPCM_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_tx_cfg[TERT_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_tx_cfg[TERT_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3208,6 +3388,8 @@
break;
case MSM_BACKEND_DAI_QUAT_AUXPCM_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_rx_cfg[QUAT_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_rx_cfg[QUAT_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3215,6 +3397,8 @@
break;
case MSM_BACKEND_DAI_QUAT_AUXPCM_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_tx_cfg[QUAT_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_tx_cfg[QUAT_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -4070,9 +4254,6 @@
mi2s_clk[dai_id].clk_freq_in_hz =
mi2s_tx_cfg[dai_id].sample_rate * 2 * bit_per_sample;
}
-
- if (!mi2s_intf_conf[dai_id].msm_is_mi2s_master)
- mi2s_clk[dai_id].clk_freq_in_hz = 0;
}
static int msm_mi2s_set_sclk(struct snd_pcm_substream *substream, bool enable)
@@ -4423,6 +4604,11 @@
*/
mutex_lock(&mi2s_intf_conf[index].lock);
if (++mi2s_intf_conf[index].ref_cnt == 1) {
+ /* Check if msm needs to provide the clock to the interface */
+ if (!mi2s_intf_conf[index].msm_is_mi2s_master) {
+ mi2s_clk[index].clk_id = mi2s_ebit_clk[index];
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ }
ret = msm_mi2s_set_sclk(substream, true);
if (ret < 0) {
dev_err(rtd->card->dev,
@@ -4442,9 +4628,6 @@
ret = -EINVAL;
goto clk_off;
}
- /* Check if msm needs to provide the clock to the interface */
- if (!mi2s_intf_conf[index].msm_is_mi2s_master)
- fmt = SND_SOC_DAIFMT_CBM_CFM;
ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
if (ret < 0) {
pr_err("%s: set fmt cpu dai failed for MI2S (%d), err:%d\n",
@@ -5219,12 +5402,13 @@
.id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 94ea909..d40bfef 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -423,14 +423,13 @@
kfree(data);
}
-struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
+static struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
const struct snd_kcontrol *kcontrol)
{
struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
return data->wlist;
}
-EXPORT_SYMBOL(dapm_kcontrol_get_wlist);
static int dapm_kcontrol_add_widget(struct snd_kcontrol *kcontrol,
struct snd_soc_dapm_widget *widget)
diff --git a/techpack/.gitignore b/techpack/.gitignore
new file mode 100644
index 0000000..58da0b8
--- /dev/null
+++ b/techpack/.gitignore
@@ -0,0 +1,2 @@
+# ignore all subdirs except stub
+!/stub/
diff --git a/techpack/Kbuild b/techpack/Kbuild
new file mode 100644
index 0000000..3c7c8e6
--- /dev/null
+++ b/techpack/Kbuild
@@ -0,0 +1,5 @@
+techpack-dirs := $(shell find $(srctree)/$(src) -maxdepth 1 -mindepth 1 -type d -not -name ".*")
+obj-y += stub/ $(addsuffix /,$(subst $(srctree)/$(src)/,,$(techpack-dirs)))
+
+techpack-header-dirs := $(shell find $(srctree)/techpack -maxdepth 1 -mindepth 1 -type d -not -name ".*")
+header-y += $(addsuffix /include/uapi/,$(subst $(srctree)/techpack/,,$(techpack-header-dirs)))
diff --git a/techpack/stub/Makefile b/techpack/stub/Makefile
new file mode 100644
index 0000000..184b5c7
--- /dev/null
+++ b/techpack/stub/Makefile
@@ -0,0 +1,2 @@
+ccflags-y := -Wno-unused-function
+obj-y := stub.o
diff --git a/techpack/stub/include/uapi/Kbuild b/techpack/stub/include/uapi/Kbuild
new file mode 100644
index 0000000..87bfa65
--- /dev/null
+++ b/techpack/stub/include/uapi/Kbuild
@@ -0,0 +1 @@
+#Stub place holder
diff --git a/techpack/stub/stub.c b/techpack/stub/stub.c
new file mode 100644
index 0000000..6024341
--- /dev/null
+++ b/techpack/stub/stub.c
@@ -0,0 +1,3 @@
+static void _techpack_stub(void)
+{
+}
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index e33fc1d..d94179f 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -126,4 +126,13 @@
#define WRITE_ONCE(x, val) \
({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+#ifndef __fallthrough
+# if defined(__GNUC__) && __GNUC__ >= 7
+# define __fallthrough __attribute__ ((fallthrough))
+# else
+# define __fallthrough
+# endif
+#endif
+
#endif /* _TOOLS_LINUX_COMPILER_H */
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 8efe904..9e5a02d 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -1573,13 +1573,13 @@
"GB/sec,", "total-speed", "GB/sec total speed");
if (g->p.show_details >= 2) {
- char tname[32];
+ char tname[14 + 2 * 10 + 1];
struct thread_data *td;
for (p = 0; p < g->p.nr_proc; p++) {
for (t = 0; t < g->p.nr_threads; t++) {
- memset(tname, 0, 32);
+ memset(tname, 0, sizeof(tname));
td = g->threads + p*g->p.nr_threads + t;
- snprintf(tname, 32, "process%d:thread%d", p, t);
+ snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
print_res(tname, td->speed_gbs,
"GB/sec", "thread-speed", "GB/sec/thread speed");
print_res(tname, td->system_time_ns / NSEC_PER_SEC,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index fe3af95..0b613e7 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -643,7 +643,7 @@
case -1:
if (errno == EINTR)
continue;
- /* Fall trhu */
+ __fallthrough;
default:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 20c2e64..aa9276b 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1779,15 +1779,14 @@
}
while (!ret && (ent = readdir(dir))) {
-#define MAX_NAME 100
struct evlist_test e;
- char name[MAX_NAME];
+ char name[2 * NAME_MAX + 1 + 12 + 3];
/* Names containing . are special and cannot be used directly */
if (strchr(ent->d_name, '.'))
continue;
- snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name);
+ snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events;
@@ -1795,11 +1794,10 @@
ret = test_event(&e);
if (ret)
break;
- snprintf(name, MAX_NAME, "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
+ snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events_mix;
ret = test_event(&e);
-#undef MAX_NAME
}
closedir(dir);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 2f3eded..5337f49 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3184,6 +3184,7 @@
case PERF_EVENT_UPDATE__SCALE:
ev_scale = (struct event_update_event_scale *) ev->data;
evsel->scale = ev_scale->scale;
+ break;
case PERF_EVENT_UPDATE__CPUS:
ev_cpus = (struct event_update_event_cpus *) ev->data;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 16c06d3..04387ab 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
+#include <linux/compiler.h>
#include "../cache.h"
#include "../util.h"
@@ -1744,6 +1745,7 @@
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
+ __fallthrough;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
intel_pt_log("ERROR: Unexpected packet\n");
@@ -1797,6 +1799,8 @@
decoder->pge = false;
decoder->continuous_period = false;
intel_pt_clear_tx_flags(decoder);
+ __fallthrough;
+
case INTEL_PT_TNT:
decoder->have_tma = false;
intel_pt_log("ERROR: Unexpected packet\n");
@@ -1837,6 +1841,7 @@
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
+ __fallthrough;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index 4f7b320..7528ae4 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -17,6 +17,7 @@
#include <string.h>
#include <endian.h>
#include <byteswap.h>
+#include <linux/compiler.h>
#include "intel-pt-pkt-decoder.h"
@@ -498,6 +499,7 @@
case INTEL_PT_FUP:
if (!(packet->count))
return snprintf(buf, buf_len, "%s no ip", name);
+ __fallthrough;
case INTEL_PT_CYC:
case INTEL_PT_VMCS:
case INTEL_PT_MTC:
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 1d9c02b..7ea13f4 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -618,6 +618,67 @@
return ret ? : -ENOENT;
}
+/* Adjust symbol name and address */
+static int post_process_probe_trace_point(struct probe_trace_point *tp,
+ struct map *map, unsigned long offs)
+{
+ struct symbol *sym;
+ u64 addr = tp->address + tp->offset - offs;
+
+ sym = map__find_symbol(map, addr);
+ if (!sym)
+ return -ENOENT;
+
+ if (strcmp(sym->name, tp->symbol)) {
+ /* If we have no realname, use symbol for it */
+ if (!tp->realname)
+ tp->realname = tp->symbol;
+ else
+ free(tp->symbol);
+ tp->symbol = strdup(sym->name);
+ if (!tp->symbol)
+ return -ENOMEM;
+ }
+ tp->offset = addr - sym->start;
+ tp->address -= offs;
+
+ return 0;
+}
+
+/*
+ * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
+ * and generate new symbols with suffixes such as .constprop.N or .isra.N
+ * etc. Since those symbols are not recorded in DWARF, we have to find
+ * correct generated symbols from offline ELF binary.
+ * For online kernel or uprobes we don't need this because those are
+ * rebased on _text, or already a section relative address.
+ */
+static int
+post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *pathname)
+{
+ struct map *map;
+ unsigned long stext = 0;
+ int i, ret = 0;
+
+ /* Prepare a map for offline binary */
+ map = dso__new_map(pathname);
+ if (!map || get_text_start_address(pathname, &stext) < 0) {
+ pr_warning("Failed to get ELF symbols for %s\n", pathname);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ntevs; i++) {
+ ret = post_process_probe_trace_point(&tevs[i].point,
+ map, stext);
+ if (ret < 0)
+ break;
+ }
+ map__put(map);
+
+ return ret;
+}
+
static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *exec)
{
@@ -694,7 +755,8 @@
/* Skip post process if the target is an offline kernel */
if (symbol_conf.ignore_vmlinux_buildid)
- return 0;
+ return post_process_offline_probe_trace_events(tevs, ntevs,
+ symbol_conf.vmlinux_name);
reloc_sym = kernel_get_ref_reloc_sym();
if (!reloc_sym) {
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 6516e22..82d28c6 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -1,6 +1,6 @@
libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default
+CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c
index bcae659..efb5377 100644
--- a/tools/perf/util/strfilter.c
+++ b/tools/perf/util/strfilter.c
@@ -269,6 +269,7 @@
len = strfilter_node__sprint_pt(node->l, buf);
if (len < 0)
return len;
+ __fallthrough;
case '!':
if (buf) {
*(buf + len++) = *node->p;
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 7f7e072..f4e3444 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -21,6 +21,8 @@
case 'b': case 'B':
if (*p)
goto out_err;
+
+ __fallthrough;
case '\0':
return length;
default:
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 40585f5..ddec5c5 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -93,7 +93,7 @@
{
DIR *proc;
int max_threads = 32, items, i;
- char path[256];
+ char path[NAME_MAX + 1 + 6];
struct dirent *dirent, **namelist = NULL;
struct thread_map *threads = thread_map__alloc(max_threads);